p                 107 arch/alpha/include/asm/floppy.h 	unsigned long p = (unsigned long)a;
p                 108 arch/alpha/include/asm/floppy.h 	return ((p + s - 1) ^ p) & ~0xffffUL;
p                 491 arch/alpha/include/asm/io.h #define ioread16be(p) be16_to_cpu(ioread16(p))
p                 492 arch/alpha/include/asm/io.h #define ioread32be(p) be32_to_cpu(ioread32(p))
p                 493 arch/alpha/include/asm/io.h #define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
p                 494 arch/alpha/include/asm/io.h #define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
p                 572 arch/alpha/include/asm/io.h #define xlate_dev_mem_ptr(p)	__va(p)
p                 577 arch/alpha/include/asm/io.h #define xlate_dev_kmem_ptr(p)	p
p                  38 arch/alpha/include/asm/mmzone.h #define PLAT_NODE_DATA_LOCALNR(p, n)	\
p                  39 arch/alpha/include/asm/mmzone.h 	(((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn)
p                  42 arch/alpha/include/asm/mmzone.h PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
p                  45 arch/alpha/include/asm/mmzone.h 	temp = p >> PAGE_SHIFT;
p                  45 arch/alpha/include/asm/processor.h unsigned long get_wchan(struct task_struct *p);
p                  71 arch/alpha/include/asm/string.h static inline void *memset16(uint16_t *p, uint16_t v, size_t n)
p                  74 arch/alpha/include/asm/string.h 		return __constant_c_memset(p, 0x0001000100010001UL * v, n * 2);
p                  75 arch/alpha/include/asm/string.h 	return __memset16(p, v, n * 2);
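(Editor's aside: the alpha memset16() lines above rely on a word-replication trick — multiplying a 16-bit value by 0x0001000100010001UL copies it into every 16-bit lane of a 64-bit word, which is the form __constant_c_memset() expects. A small stand-alone illustration, plain user-space C rather than kernel code:)

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t v = 0xbeef;
		uint64_t word = 0x0001000100010001UL * v;

		/* Prints beefbeefbeefbeef: v replicated into all four 16-bit lanes. */
		printf("%016llx\n", (unsigned long long)word);
		return 0;
	}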
p                 628 arch/alpha/kernel/core_cia.c #define cia_save_srm_settings(p)	do {} while (0)
p                  69 arch/alpha/kernel/irq.c int arch_show_interrupts(struct seq_file *p, int prec)
p                  74 arch/alpha/kernel/irq.c 	seq_puts(p, "IPI: ");
p                  76 arch/alpha/kernel/irq.c 		seq_printf(p, "%10lu ", cpu_data[j].ipi_count);
p                  77 arch/alpha/kernel/irq.c 	seq_putc(p, '\n');
p                  79 arch/alpha/kernel/irq.c 	seq_puts(p, "PMI: ");
p                  81 arch/alpha/kernel/irq.c 		seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j));
p                  82 arch/alpha/kernel/irq.c 	seq_puts(p, "          Performance Monitoring\n");
p                  83 arch/alpha/kernel/irq.c 	seq_printf(p, "ERR: %10lu\n", irq_err_count);
p                 896 arch/alpha/kernel/osf_sys.c 		unsigned __user *p = buffer;
p                 899 arch/alpha/kernel/osf_sys.c 		for (i = 0, p = buffer; i < nbytes; ++i, p += 2) {
p                 902 arch/alpha/kernel/osf_sys.c 			if (get_user(v, p) || get_user(w, p + 1))
p                 138 arch/alpha/kernel/pci_iommu.c 	long i, p, nent;
p                 154 arch/alpha/kernel/pci_iommu.c 	p = ALIGN(arena->next_entry, mask + 1);
p                 158 arch/alpha/kernel/pci_iommu.c 	while (i < n && p+i < nent) {
p                 159 arch/alpha/kernel/pci_iommu.c 		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
p                 160 arch/alpha/kernel/pci_iommu.c 			p = ALIGN(p + 1, mask + 1);
p                 164 arch/alpha/kernel/pci_iommu.c 		if (ptes[p+i])
p                 165 arch/alpha/kernel/pci_iommu.c 			p = ALIGN(p + i + 1, mask + 1), i = 0;
p                 179 arch/alpha/kernel/pci_iommu.c 			p = 0;
p                 188 arch/alpha/kernel/pci_iommu.c 	return p;
p                 197 arch/alpha/kernel/pci_iommu.c 	long i, p, mask;
p                 204 arch/alpha/kernel/pci_iommu.c 	p = iommu_arena_find_pages(dev, arena, n, mask);
p                 205 arch/alpha/kernel/pci_iommu.c 	if (p < 0) {
p                 215 arch/alpha/kernel/pci_iommu.c 		ptes[p+i] = IOMMU_INVALID_PTE;
p                 217 arch/alpha/kernel/pci_iommu.c 	arena->next_entry = p + n;
p                 220 arch/alpha/kernel/pci_iommu.c 	return p;
p                 226 arch/alpha/kernel/pci_iommu.c 	unsigned long *p;
p                 229 arch/alpha/kernel/pci_iommu.c 	p = arena->ptes + ofs;
p                 231 arch/alpha/kernel/pci_iommu.c 		p[i] = 0;
p                 861 arch/alpha/kernel/pci_iommu.c 	long i, p;
p                 869 arch/alpha/kernel/pci_iommu.c 	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
p                 870 arch/alpha/kernel/pci_iommu.c 	if (p < 0) {
p                 879 arch/alpha/kernel/pci_iommu.c 		ptes[p+i] = IOMMU_RESERVED_PTE;
p                 881 arch/alpha/kernel/pci_iommu.c 	arena->next_entry = p + pg_count;
p                 884 arch/alpha/kernel/pci_iommu.c 	return p;
p                 938 arch/alpha/kernel/pci_iommu.c 	unsigned long *p;
p                 943 arch/alpha/kernel/pci_iommu.c 	p = arena->ptes + pg_start;
p                 945 arch/alpha/kernel/pci_iommu.c 		p[i] = IOMMU_RESERVED_PTE;
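(Editor's aside: the pci_iommu.c hits above come from the arena allocator — iommu_arena_find_pages() scans the PTE array for n consecutive free entries starting at a (mask + 1)-aligned offset and restarts past any occupied slot. A minimal sketch of that scan pattern, with a hypothetical helper name, the boundary-crossing check omitted, and mask + 1 assumed to be a power of two:)

	/* Return the first (mask + 1)-aligned index of n consecutive free
	 * (zero) entries in ptes[0..nent), or -1 if no such run exists. */
	static long find_free_run(const unsigned long *ptes, long nent,
				  long n, unsigned long mask)
	{
		long p = 0, i = 0;	/* p: candidate start, i: run length so far */

		while (i < n && p + i < nent) {
			if (ptes[p + i]) {
				/* Collision: restart just past it, re-aligned. */
				p = (p + i + 1 + mask) & ~mask;	/* ALIGN(p + i + 1, mask + 1) */
				i = 0;
			} else {
				i++;
			}
		}
		return (i == n) ? p : -1;
	}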
p                 240 arch/alpha/kernel/process.c 	    struct task_struct *p)
p                 245 arch/alpha/kernel/process.c 	struct thread_info *childti = task_thread_info(p);
p                 246 arch/alpha/kernel/process.c 	struct pt_regs *childregs = task_pt_regs(p);
p                 254 arch/alpha/kernel/process.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 381 arch/alpha/kernel/process.c get_wchan(struct task_struct *p)
p                 385 arch/alpha/kernel/process.c 	if (!p || p == current || p->state == TASK_RUNNING)
p                 397 arch/alpha/kernel/process.c 	pc = thread_saved_pc(p);
p                 399 arch/alpha/kernel/process.c 		schedule_frame = ((unsigned long *)task_thread_info(p)->pcb.ksp)[6];
p                 423 arch/alpha/kernel/setup.c 		struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 424 arch/alpha/kernel/setup.c 		if (!p)
p                 426 arch/alpha/kernel/setup.c 		register_cpu(p, i);
p                 440 arch/alpha/kernel/setup.c 	char *type_name, *var_name, *p;
p                 499 arch/alpha/kernel/setup.c 	while ((p = strsep(&args, " \t")) != NULL) {
p                 500 arch/alpha/kernel/setup.c 		if (!*p) continue;
p                 501 arch/alpha/kernel/setup.c 		if (strncmp(p, "alpha_mv=", 9) == 0) {
p                 502 arch/alpha/kernel/setup.c 			vec = get_sysvec_byname(p+9);
p                 505 arch/alpha/kernel/setup.c 		if (strncmp(p, "cycle=", 6) == 0) {
p                 506 arch/alpha/kernel/setup.c 			est_cycle_freq = simple_strtol(p+6, NULL, 0);
p                 509 arch/alpha/kernel/setup.c 		if (strncmp(p, "mem=", 4) == 0) {
p                 510 arch/alpha/kernel/setup.c 			mem_size_limit = get_mem_size_limit(p+4);
p                 513 arch/alpha/kernel/setup.c 		if (strncmp(p, "srmcons", 7) == 0) {
p                 517 arch/alpha/kernel/setup.c 		if (strncmp(p, "console=srm", 11) == 0) {
p                 521 arch/alpha/kernel/setup.c 		if (strncmp(p, "gartsize=", 9) == 0) {
p                 523 arch/alpha/kernel/setup.c 				get_mem_size_limit(p+9) << PAGE_SHIFT;
p                 527 arch/alpha/kernel/setup.c 		if (strncmp(p, "verbose_mcheck=", 15) == 0) {
p                 528 arch/alpha/kernel/setup.c 			alpha_verbose_mcheck = simple_strtol(p+15, NULL, 0);
p                  51 arch/arc/include/asm/cmpxchg.h 	volatile unsigned long *p = ptr;
p                  57 arch/arc/include/asm/cmpxchg.h 	prev = *p;
p                  59 arch/arc/include/asm/cmpxchg.h 		*p = new;
p                  10 arch/arc/include/asm/exec.h #define arch_align_stack(p) ((unsigned long)(p) & ~0xf)
p                  44 arch/arc/include/asm/io.h #define ioread16be(p)		({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
p                  45 arch/arc/include/asm/io.h #define ioread32be(p)		({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
p                  47 arch/arc/include/asm/io.h #define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
p                  48 arch/arc/include/asm/io.h #define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
p                 207 arch/arc/include/asm/io.h #define readsb(p,d,l)		({ __raw_readsb(p,d,l); __iormb(); })
p                 208 arch/arc/include/asm/io.h #define readsw(p,d,l)		({ __raw_readsw(p,d,l); __iormb(); })
p                 209 arch/arc/include/asm/io.h #define readsl(p,d,l)		({ __raw_readsl(p,d,l); __iormb(); })
p                 214 arch/arc/include/asm/io.h #define writesb(p,d,l)		({ __iowmb(); __raw_writesb(p,d,l); })
p                 215 arch/arc/include/asm/io.h #define writesw(p,d,l)		({ __iowmb(); __raw_writesw(p,d,l); })
p                 216 arch/arc/include/asm/io.h #define writesl(p,d,l)		({ __iowmb(); __raw_writesl(p,d,l); })
p                  27 arch/arc/include/asm/kprobes.h #define flush_insn_slot(p)  do {  } while (0)
p                  33 arch/arc/include/asm/kprobes.h void arch_remove_kprobe(struct kprobe *p);
p                  57 arch/arc/include/asm/processor.h #define task_pt_regs(p) \
p                  58 arch/arc/include/asm/processor.h 	((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1)
p                  96 arch/arc/include/asm/processor.h extern unsigned int get_wchan(struct task_struct *p);
p                  15 arch/arc/include/asm/switch_to.h extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
p                  16 arch/arc/include/asm/switch_to.h #define ARC_FPU_PREV(p, n)	fpu_save_restore(p, n)
p                  21 arch/arc/include/asm/switch_to.h #define ARC_FPU_PREV(p, n)
p                  27 arch/arc/include/asm/switch_to.h extern void dp_save_restore(struct task_struct *p, struct task_struct *n);
p                  28 arch/arc/include/asm/switch_to.h #define ARC_EZNPS_DP_PREV(p, n)      dp_save_restore(p, n)
p                  30 arch/arc/include/asm/switch_to.h #define ARC_EZNPS_DP_PREV(p, n)
p                  34 arch/arc/include/asm/switch_to.h struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n);
p                 435 arch/arc/kernel/disasm.c 	long *p;
p                 438 arch/arc/kernel/disasm.c 		p = &regs->r0;
p                 439 arch/arc/kernel/disasm.c 		return p[-reg];
p                 443 arch/arc/kernel/disasm.c 		p = &cregs->r13;
p                 444 arch/arc/kernel/disasm.c 		return p[13-reg];
p                 462 arch/arc/kernel/disasm.c 	long *p;
p                 466 arch/arc/kernel/disasm.c 		p = &regs->r0;
p                 467 arch/arc/kernel/disasm.c 		p[-reg] = val;
p                 471 arch/arc/kernel/disasm.c 			p = &cregs->r13;
p                 472 arch/arc/kernel/disasm.c 			p[13-reg] = val;
p                  23 arch/arc/kernel/kprobes.c int __kprobes arch_prepare_kprobe(struct kprobe *p)
p                  26 arch/arc/kernel/kprobes.c 	if ((unsigned long)p->addr & 0x01)
p                  31 arch/arc/kernel/kprobes.c 	p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
p                  32 arch/arc/kernel/kprobes.c 	p->opcode = *p->addr;
p                  37 arch/arc/kernel/kprobes.c void __kprobes arch_arm_kprobe(struct kprobe *p)
p                  39 arch/arc/kernel/kprobes.c 	*p->addr = UNIMP_S_INSTRUCTION;
p                  41 arch/arc/kernel/kprobes.c 	flush_icache_range((unsigned long)p->addr,
p                  42 arch/arc/kernel/kprobes.c 			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
p                  45 arch/arc/kernel/kprobes.c void __kprobes arch_disarm_kprobe(struct kprobe *p)
p                  47 arch/arc/kernel/kprobes.c 	*p->addr = p->opcode;
p                  49 arch/arc/kernel/kprobes.c 	flush_icache_range((unsigned long)p->addr,
p                  50 arch/arc/kernel/kprobes.c 			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
p                  53 arch/arc/kernel/kprobes.c void __kprobes arch_remove_kprobe(struct kprobe *p)
p                  55 arch/arc/kernel/kprobes.c 	arch_disarm_kprobe(p);
p                  58 arch/arc/kernel/kprobes.c 	if (p->ainsn.t1_addr) {
p                  59 arch/arc/kernel/kprobes.c 		*(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
p                  61 arch/arc/kernel/kprobes.c 		flush_icache_range((unsigned long)p->ainsn.t1_addr,
p                  62 arch/arc/kernel/kprobes.c 				   (unsigned long)p->ainsn.t1_addr +
p                  65 arch/arc/kernel/kprobes.c 		p->ainsn.t1_addr = NULL;
p                  68 arch/arc/kernel/kprobes.c 	if (p->ainsn.t2_addr) {
p                  69 arch/arc/kernel/kprobes.c 		*(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
p                  71 arch/arc/kernel/kprobes.c 		flush_icache_range((unsigned long)p->ainsn.t2_addr,
p                  72 arch/arc/kernel/kprobes.c 				   (unsigned long)p->ainsn.t2_addr +
p                  75 arch/arc/kernel/kprobes.c 		p->ainsn.t2_addr = NULL;
p                  91 arch/arc/kernel/kprobes.c static inline void __kprobes set_current_kprobe(struct kprobe *p)
p                  93 arch/arc/kernel/kprobes.c 	__this_cpu_write(current_kprobe, p);
p                  96 arch/arc/kernel/kprobes.c static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
p                 102 arch/arc/kernel/kprobes.c 	if (p->ainsn.t1_addr) {
p                 103 arch/arc/kernel/kprobes.c 		*(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
p                 105 arch/arc/kernel/kprobes.c 		flush_icache_range((unsigned long)p->ainsn.t1_addr,
p                 106 arch/arc/kernel/kprobes.c 				   (unsigned long)p->ainsn.t1_addr +
p                 109 arch/arc/kernel/kprobes.c 		p->ainsn.t1_addr = NULL;
p                 112 arch/arc/kernel/kprobes.c 	if (p->ainsn.t2_addr) {
p                 113 arch/arc/kernel/kprobes.c 		*(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
p                 115 arch/arc/kernel/kprobes.c 		flush_icache_range((unsigned long)p->ainsn.t2_addr,
p                 116 arch/arc/kernel/kprobes.c 				   (unsigned long)p->ainsn.t2_addr +
p                 119 arch/arc/kernel/kprobes.c 		p->ainsn.t2_addr = NULL;
p                 125 arch/arc/kernel/kprobes.c static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
p                 136 arch/arc/kernel/kprobes.c 	*(p->addr) = p->opcode;
p                 138 arch/arc/kernel/kprobes.c 	flush_icache_range((unsigned long)p->addr,
p                 139 arch/arc/kernel/kprobes.c 			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
p                 153 arch/arc/kernel/kprobes.c 		if (!p->ainsn.is_short) {
p                 170 arch/arc/kernel/kprobes.c 		    disasm_next_pc((unsigned long)p->addr, regs,
p                 174 arch/arc/kernel/kprobes.c 	p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
p                 175 arch/arc/kernel/kprobes.c 	p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
p                 176 arch/arc/kernel/kprobes.c 	*(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;
p                 178 arch/arc/kernel/kprobes.c 	flush_icache_range((unsigned long)p->ainsn.t1_addr,
p                 179 arch/arc/kernel/kprobes.c 			   (unsigned long)p->ainsn.t1_addr +
p                 183 arch/arc/kernel/kprobes.c 		p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
p                 184 arch/arc/kernel/kprobes.c 		p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
p                 185 arch/arc/kernel/kprobes.c 		*(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;
p                 187 arch/arc/kernel/kprobes.c 		flush_icache_range((unsigned long)p->ainsn.t2_addr,
p                 188 arch/arc/kernel/kprobes.c 				   (unsigned long)p->ainsn.t2_addr +
p                 195 arch/arc/kernel/kprobes.c 	struct kprobe *p;
p                 201 arch/arc/kernel/kprobes.c 	p = get_kprobe((unsigned long *)addr);
p                 203 arch/arc/kernel/kprobes.c 	if (p) {
p                 213 arch/arc/kernel/kprobes.c 			set_current_kprobe(p);
p                 214 arch/arc/kernel/kprobes.c 			kprobes_inc_nmissed_count(p);
p                 215 arch/arc/kernel/kprobes.c 			setup_singlestep(p, regs);
p                 220 arch/arc/kernel/kprobes.c 		set_current_kprobe(p);
p                 228 arch/arc/kernel/kprobes.c 		if (!p->pre_handler || !p->pre_handler(p, regs)) {
p                 229 arch/arc/kernel/kprobes.c 			setup_singlestep(p, regs);
p                 396 arch/arc/kernel/kprobes.c static int __kprobes trampoline_probe_handler(struct kprobe *p,
p                 469 arch/arc/kernel/kprobes.c int __kprobes arch_trampoline_kprobe(struct kprobe *p)
p                 471 arch/arc/kernel/kprobes.c 	if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
p                 176 arch/arc/kernel/process.c 		struct task_struct *p)
p                 185 arch/arc/kernel/process.c 	c_regs = task_pt_regs(p);
p                 197 arch/arc/kernel/process.c 	p->thread.ksp = (unsigned long)c_callee;	/* THREAD_KSP */
p                 203 arch/arc/kernel/process.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 234 arch/arc/kernel/process.c 		task_thread_info(p)->thr_ptr = regs->r3;
p                 237 arch/arc/kernel/process.c 		task_thread_info(p)->thr_ptr =
p                 249 arch/arc/kernel/process.c 	c_callee->r25 = task_thread_info(p)->thr_ptr;
p                 614 arch/arc/kernel/setup.c #define ptr_to_cpu(p)	(~0xFFFF0000UL & (unsigned int)(p))
p                  41 arch/arc/mm/cache.c 	struct cpuinfo_arc_cache *p;
p                  43 arch/arc/mm/cache.c #define PR_CACHE(p, cfg, str)						\
p                  44 arch/arc/mm/cache.c 	if (!(p)->line_len)						\
p                  49 arch/arc/mm/cache.c 			(p)->sz_k, (p)->assoc, (p)->line_len,		\
p                  50 arch/arc/mm/cache.c 			(p)->vipt ? "VIPT" : "PIPT",			\
p                  51 arch/arc/mm/cache.c 			(p)->alias ? " aliasing" : "",			\
p                  57 arch/arc/mm/cache.c 	p = &cpuinfo_arc700[c].slc;
p                  58 arch/arc/mm/cache.c 	if (p->line_len)
p                  61 arch/arc/mm/cache.c 			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
p                 575 arch/arc/mm/cache.c #define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)
p                 629 arch/arc/mm/cache.c #define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)
p                 108 arch/arm/boot/compressed/string.c 	const unsigned char *p = s;
p                 111 arch/arm/boot/compressed/string.c 		if ((unsigned char)c == *p++)
p                 112 arch/arm/boot/compressed/string.c 			return (void *)(p - 1);
p                 111 arch/arm/crypto/ghash-ce-glue.c 			int p = GHASH_BLOCK_SIZE - partial;
p                 113 arch/arm/crypto/ghash-ce-glue.c 			memcpy(ctx->buf + partial, src, p);
p                 114 arch/arm/crypto/ghash-ce-glue.c 			src += p;
p                 115 arch/arm/crypto/ghash-ce-glue.c 			len -= p;
p                  36 arch/arm/include/asm/bitops.h static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
p                  41 arch/arm/include/asm/bitops.h 	p += BIT_WORD(bit);
p                  44 arch/arm/include/asm/bitops.h 	*p |= mask;
p                  48 arch/arm/include/asm/bitops.h static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
p                  53 arch/arm/include/asm/bitops.h 	p += BIT_WORD(bit);
p                  56 arch/arm/include/asm/bitops.h 	*p &= ~mask;
p                  60 arch/arm/include/asm/bitops.h static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
p                  65 arch/arm/include/asm/bitops.h 	p += BIT_WORD(bit);
p                  68 arch/arm/include/asm/bitops.h 	*p ^= mask;
p                  73 arch/arm/include/asm/bitops.h ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
p                  79 arch/arm/include/asm/bitops.h 	p += BIT_WORD(bit);
p                  82 arch/arm/include/asm/bitops.h 	res = *p;
p                  83 arch/arm/include/asm/bitops.h 	*p = res | mask;
p                  90 arch/arm/include/asm/bitops.h ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
p                  96 arch/arm/include/asm/bitops.h 	p += BIT_WORD(bit);
p                  99 arch/arm/include/asm/bitops.h 	res = *p;
p                 100 arch/arm/include/asm/bitops.h 	*p = res & ~mask;
p                 107 arch/arm/include/asm/bitops.h ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
p                 113 arch/arm/include/asm/bitops.h 	p += BIT_WORD(bit);
p                 116 arch/arm/include/asm/bitops.h 	res = *p;
p                 117 arch/arm/include/asm/bitops.h 	*p = res ^ mask;
p                 153 arch/arm/include/asm/bitops.h extern void _set_bit(int nr, volatile unsigned long * p);
p                 154 arch/arm/include/asm/bitops.h extern void _clear_bit(int nr, volatile unsigned long * p);
p                 155 arch/arm/include/asm/bitops.h extern void _change_bit(int nr, volatile unsigned long * p);
p                 156 arch/arm/include/asm/bitops.h extern int _test_and_set_bit(int nr, volatile unsigned long * p);
p                 157 arch/arm/include/asm/bitops.h extern int _test_and_clear_bit(int nr, volatile unsigned long * p);
p                 158 arch/arm/include/asm/bitops.h extern int _test_and_change_bit(int nr, volatile unsigned long * p);
p                 163 arch/arm/include/asm/bitops.h extern int _find_first_zero_bit_le(const unsigned long *p, unsigned size);
p                 164 arch/arm/include/asm/bitops.h extern int _find_next_zero_bit_le(const unsigned long *p, int size, int offset);
p                 165 arch/arm/include/asm/bitops.h extern int _find_first_bit_le(const unsigned long *p, unsigned size);
p                 166 arch/arm/include/asm/bitops.h extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
p                 171 arch/arm/include/asm/bitops.h extern int _find_first_zero_bit_be(const unsigned long *p, unsigned size);
p                 172 arch/arm/include/asm/bitops.h extern int _find_next_zero_bit_be(const unsigned long *p, int size, int offset);
p                 173 arch/arm/include/asm/bitops.h extern int _find_first_bit_be(const unsigned long *p, unsigned size);
p                 174 arch/arm/include/asm/bitops.h extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
p                 180 arch/arm/include/asm/bitops.h #define ATOMIC_BITOP(name,nr,p)			\
p                 181 arch/arm/include/asm/bitops.h 	(__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p))
p                 183 arch/arm/include/asm/bitops.h #define ATOMIC_BITOP(name,nr,p)		_##name(nr,p)
p                 189 arch/arm/include/asm/bitops.h #define set_bit(nr,p)			ATOMIC_BITOP(set_bit,nr,p)
p                 190 arch/arm/include/asm/bitops.h #define clear_bit(nr,p)			ATOMIC_BITOP(clear_bit,nr,p)
p                 191 arch/arm/include/asm/bitops.h #define change_bit(nr,p)		ATOMIC_BITOP(change_bit,nr,p)
p                 192 arch/arm/include/asm/bitops.h #define test_and_set_bit(nr,p)		ATOMIC_BITOP(test_and_set_bit,nr,p)
p                 193 arch/arm/include/asm/bitops.h #define test_and_clear_bit(nr,p)	ATOMIC_BITOP(test_and_clear_bit,nr,p)
p                 194 arch/arm/include/asm/bitops.h #define test_and_change_bit(nr,p)	ATOMIC_BITOP(test_and_change_bit,nr,p)
p                 200 arch/arm/include/asm/bitops.h #define find_first_zero_bit(p,sz)	_find_first_zero_bit_le(p,sz)
p                 201 arch/arm/include/asm/bitops.h #define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_le(p,sz,off)
p                 202 arch/arm/include/asm/bitops.h #define find_first_bit(p,sz)		_find_first_bit_le(p,sz)
p                 203 arch/arm/include/asm/bitops.h #define find_next_bit(p,sz,off)		_find_next_bit_le(p,sz,off)
p                 209 arch/arm/include/asm/bitops.h #define find_first_zero_bit(p,sz)	_find_first_zero_bit_be(p,sz)
p                 210 arch/arm/include/asm/bitops.h #define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_be(p,sz,off)
p                 211 arch/arm/include/asm/bitops.h #define find_first_bit(p,sz)		_find_first_bit_be(p,sz)
p                 212 arch/arm/include/asm/bitops.h #define find_next_bit(p,sz,off)		_find_next_bit_be(p,sz,off)
p                 247 arch/arm/include/asm/bitops.h static inline int find_first_zero_bit_le(const void *p, unsigned size)
p                 249 arch/arm/include/asm/bitops.h 	return _find_first_zero_bit_le(p, size);
p                 253 arch/arm/include/asm/bitops.h static inline int find_next_zero_bit_le(const void *p, int size, int offset)
p                 255 arch/arm/include/asm/bitops.h 	return _find_next_zero_bit_le(p, size, offset);
p                 259 arch/arm/include/asm/bitops.h static inline int find_next_bit_le(const void *p, int size, int offset)
p                 261 arch/arm/include/asm/bitops.h 	return _find_next_bit_le(p, size, offset);
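(Editor's aside: the arm bitops.h hits above show the pattern behind the non-SMP ____atomic_* helpers — BIT_WORD() selects the unsigned long that holds the bit and BIT_MASK() selects the bit within that word; the interrupt masking the real helpers wrap around the read-modify-write is not visible in these hits and is omitted here. A sketch of just the word/mask arithmetic, using the standard kernel definitions of the two macros:)

	#include <limits.h>

	#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
	#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
	#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))

	static void set_bit_sketch(unsigned int bit, unsigned long *p)
	{
		p += BIT_WORD(bit);	/* word that holds the bit */
		*p |= BIT_MASK(bit);	/* set it (non-atomically in this sketch) */
	}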
p                 392 arch/arm/include/asm/cacheflush.h static inline void __sync_cache_range_w(volatile void *p, size_t size)
p                 394 arch/arm/include/asm/cacheflush.h 	char *_p = (char *)p;
p                 406 arch/arm/include/asm/cacheflush.h static inline void __sync_cache_range_r(volatile void *p, size_t size)
p                 408 arch/arm/include/asm/cacheflush.h 	char *_p = (char *)p;
p                  16 arch/arm/include/asm/cpuidle.h #define ARM_CPUIDLE_WFI_STATE_PWR(p) {\
p                  20 arch/arm/include/asm/cpuidle.h 	.power_usage		= p,\
p                  28 arch/arm/include/asm/efi.h #define arch_efi_call_virt(p, f, args...)				\
p                  31 arch/arm/include/asm/efi.h 	__f = p->f;							\
p                 185 arch/arm/include/asm/hardware/locomo.h #define locomo_set_drvdata(d,p)	dev_set_drvdata(&(d)->dev, p)
p                 400 arch/arm/include/asm/hardware/sa1111.h #define sa1111_set_drvdata(d,p)	dev_set_drvdata(&(d)->dev, p)
p                 252 arch/arm/include/asm/io.h #define outb(v,p)	({ __iowmb(); __raw_writeb(v,__io(p)); })
p                 253 arch/arm/include/asm/io.h #define outw(v,p)	({ __iowmb(); __raw_writew((__force __u16) \
p                 254 arch/arm/include/asm/io.h 					cpu_to_le16(v),__io(p)); })
p                 255 arch/arm/include/asm/io.h #define outl(v,p)	({ __iowmb(); __raw_writel((__force __u32) \
p                 256 arch/arm/include/asm/io.h 					cpu_to_le32(v),__io(p)); })
p                 258 arch/arm/include/asm/io.h #define inb(p)	({ __u8 __v = __raw_readb(__io(p)); __iormb(); __v; })
p                 259 arch/arm/include/asm/io.h #define inw(p)	({ __u16 __v = le16_to_cpu((__force __le16) \
p                 260 arch/arm/include/asm/io.h 			__raw_readw(__io(p))); __iormb(); __v; })
p                 261 arch/arm/include/asm/io.h #define inl(p)	({ __u32 __v = le32_to_cpu((__force __le32) \
p                 262 arch/arm/include/asm/io.h 			__raw_readl(__io(p))); __iormb(); __v; })
p                 264 arch/arm/include/asm/io.h #define outsb(p,d,l)		__raw_writesb(__io(p),d,l)
p                 265 arch/arm/include/asm/io.h #define outsw(p,d,l)		__raw_writesw(__io(p),d,l)
p                 266 arch/arm/include/asm/io.h #define outsl(p,d,l)		__raw_writesl(__io(p),d,l)
p                 268 arch/arm/include/asm/io.h #define insb(p,d,l)		__raw_readsb(__io(p),d,l)
p                 269 arch/arm/include/asm/io.h #define insw(p,d,l)		__raw_readsw(__io(p),d,l)
p                 270 arch/arm/include/asm/io.h #define insl(p,d,l)		__raw_readsl(__io(p),d,l)
p                 309 arch/arm/include/asm/io.h #define readsb(p,d,l)		__raw_readsb(p,d,l)
p                 310 arch/arm/include/asm/io.h #define readsw(p,d,l)		__raw_readsw(p,d,l)
p                 311 arch/arm/include/asm/io.h #define readsl(p,d,l)		__raw_readsl(p,d,l)
p                 313 arch/arm/include/asm/io.h #define writesb(p,d,l)		__raw_writesb(p,d,l)
p                 314 arch/arm/include/asm/io.h #define writesw(p,d,l)		__raw_writesw(p,d,l)
p                 315 arch/arm/include/asm/io.h #define writesl(p,d,l)		__raw_writesl(p,d,l)
p                 422 arch/arm/include/asm/io.h #define ioread16be(p)		({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
p                 423 arch/arm/include/asm/io.h #define ioread32be(p)		({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
p                 425 arch/arm/include/asm/io.h #define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
p                 426 arch/arm/include/asm/io.h #define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
p                 446 arch/arm/include/asm/io.h #define xlate_dev_mem_ptr(p)	__va(p)
p                 451 arch/arm/include/asm/io.h #define xlate_dev_kmem_ptr(p)	p
p                  21 arch/arm/include/asm/kprobes.h #define flush_insn_slot(p)		do { } while (0)
p                  74 arch/arm/include/asm/mach/arch.h #define for_each_machine_desc(p)			\
p                  75 arch/arm/include/asm/mach/arch.h 	for (p = __arch_info_begin; p < __arch_info_end; p++)
p                  87 arch/arm/include/asm/pgtable.h #define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))
p                  82 arch/arm/include/asm/proc-fns.h static inline void init_proc_vtable(const struct processor *p)
p                 116 arch/arm/include/asm/proc-fns.h static inline void init_proc_vtable(const struct processor *p)
p                 119 arch/arm/include/asm/proc-fns.h 	*cpu_vtable[cpu] = *p;
p                 128 arch/arm/include/asm/proc-fns.h static inline void init_proc_vtable(const struct processor *p)
p                 130 arch/arm/include/asm/proc-fns.h 	processor = *p;
p                  86 arch/arm/include/asm/processor.h unsigned long get_wchan(struct task_struct *p);
p                  98 arch/arm/include/asm/processor.h #define task_pt_regs(p) \
p                  99 arch/arm/include/asm/processor.h 	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
p                  30 arch/arm/include/asm/string.h static inline void *memset32(uint32_t *p, uint32_t v, __kernel_size_t n)
p                  32 arch/arm/include/asm/string.h 	return __memset32(p, v, n * 4);
p                  37 arch/arm/include/asm/string.h static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
p                  39 arch/arm/include/asm/string.h 	return __memset64(p, v, n * 8, v >> 32);
p                  17 arch/arm/include/asm/sync_bitops.h #define sync_set_bit(nr, p)		_set_bit(nr, p)
p                  18 arch/arm/include/asm/sync_bitops.h #define sync_clear_bit(nr, p)		_clear_bit(nr, p)
p                  19 arch/arm/include/asm/sync_bitops.h #define sync_change_bit(nr, p)		_change_bit(nr, p)
p                  20 arch/arm/include/asm/sync_bitops.h #define sync_test_and_set_bit(nr, p)	_test_and_set_bit(nr, p)
p                  21 arch/arm/include/asm/sync_bitops.h #define sync_test_and_clear_bit(nr, p)	_test_and_clear_bit(nr, p)
p                  22 arch/arm/include/asm/sync_bitops.h #define sync_test_and_change_bit(nr, p)	_test_and_change_bit(nr, p)
p                  15 arch/arm/include/asm/tls.h 	mcr	p15, 0, \tp, c13, c0, 3		@ set TLS register
p                  25 arch/arm/include/asm/tls.h 	streq	\tp, [\tmp2, #-15]		@ set TLS value at 0xffff0ff0
p                  27 arch/arm/include/asm/tls.h 	mcrne	p15, 0, \tp, c13, c0, 3		@ yes, set TLS register
p                  34 arch/arm/include/asm/tls.h 	str	\tp, [\tmp1, #-15]		@ set TLS value at 0xffff0ff0
p                 191 arch/arm/include/asm/uaccess.h #define __get_user_check(x, p)						\
p                 194 arch/arm/include/asm/uaccess.h 		register typeof(*(p)) __user *__p asm("r0") = (p);	\
p                 227 arch/arm/include/asm/uaccess.h 		x = (typeof(*(p))) __r2;				\
p                 231 arch/arm/include/asm/uaccess.h #define get_user(x, p)							\
p                 234 arch/arm/include/asm/uaccess.h 		__get_user_check(x, p);					\
p                 275 arch/arm/include/asm/uaccess.h #define get_user(x, p)	__get_user(x, p)
p                 182 arch/arm/kernel/atags_parse.c 	const struct machine_desc *mdesc = NULL, *p;
p                 190 arch/arm/kernel/atags_parse.c 	for_each_machine_desc(p)
p                 191 arch/arm/kernel/atags_parse.c 		if (machine_nr == p->nr) {
p                 192 arch/arm/kernel/atags_parse.c 			pr_info("Machine: %s\n", p->name);
p                 193 arch/arm/kernel/atags_parse.c 			mdesc = p;
p                  85 arch/arm/kernel/fiq.c int show_fiq_list(struct seq_file *p, int prec)
p                  88 arch/arm/kernel/fiq.c 		seq_printf(p, "%*s:              %s\n", prec, "FIQ",
p                  47 arch/arm/kernel/irq.c int arch_show_interrupts(struct seq_file *p, int prec)
p                  50 arch/arm/kernel/irq.c 	show_fiq_list(p, prec);
p                  53 arch/arm/kernel/irq.c 	show_ipi_list(p, prec);
p                  55 arch/arm/kernel/irq.c 	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
p                  41 arch/arm/kernel/module.c 	void *p;
p                  47 arch/arm/kernel/module.c 	p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
p                  50 arch/arm/kernel/module.c 	if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
p                  51 arch/arm/kernel/module.c 		return p;
p                 228 arch/arm/kernel/process.c 	    unsigned long stk_sz, struct task_struct *p, unsigned long tls)
p                 230 arch/arm/kernel/process.c 	struct thread_info *thread = task_thread_info(p);
p                 231 arch/arm/kernel/process.c 	struct pt_regs *childregs = task_pt_regs(p);
p                 245 arch/arm/kernel/process.c 	if (likely(!(p->flags & PF_KTHREAD))) {
p                 259 arch/arm/kernel/process.c 	clear_ptrace_hw_breakpoint(p);
p                 268 arch/arm/kernel/process.c 	thread->stack_canary = p->stack_canary;
p                 298 arch/arm/kernel/process.c unsigned long get_wchan(struct task_struct *p)
p                 303 arch/arm/kernel/process.c 	if (!p || p == current || p->state == TASK_RUNNING)
p                 306 arch/arm/kernel/process.c 	frame.fp = thread_saved_fp(p);
p                 307 arch/arm/kernel/process.c 	frame.sp = thread_saved_sp(p);
p                 309 arch/arm/kernel/process.c 	frame.pc = thread_saved_pc(p);
p                 310 arch/arm/kernel/process.c 	stack_page = (unsigned long)task_stack_page(p);
p                 737 arch/arm/kernel/setup.c 	const struct machine_desc *p;
p                 740 arch/arm/kernel/setup.c 	for_each_machine_desc(p)
p                 741 arch/arm/kernel/setup.c 		early_print("%08x\t%s\n", p->nr, p->name);
p                 815 arch/arm/kernel/setup.c static int __init early_mem(char *p)
p                 834 arch/arm/kernel/setup.c 	size  = memparse(p, &endp);
p                 530 arch/arm/kernel/smp.c void show_ipi_list(struct seq_file *p, int prec)
p                 535 arch/arm/kernel/smp.c 		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
p                 538 arch/arm/kernel/smp.c 			seq_printf(p, "%10u ",
p                 541 arch/arm/kernel/smp.c 		seq_printf(p, " %s\n", ipi_types[i]);
p                  79 arch/arm/kernel/traps.c 	char str[80], *p;
p                  83 arch/arm/kernel/traps.c 	for (reg = 10, x = 0, p = str; reg >= 0; reg--) {
p                  85 arch/arm/kernel/traps.c 			p += sprintf(p, " r%d:%08x", reg, *stack--);
p                  88 arch/arm/kernel/traps.c 				p = str;
p                  93 arch/arm/kernel/traps.c 	if (p != str)
p                 134 arch/arm/kernel/traps.c 		unsigned long p;
p                 140 arch/arm/kernel/traps.c 		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
p                 141 arch/arm/kernel/traps.c 			if (p >= bottom && p < top) {
p                 143 arch/arm/kernel/traps.c 				if (__get_user(val, (unsigned long *)p) == 0)
p                 160 arch/arm/kernel/traps.c 	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
p                 177 arch/arm/kernel/traps.c 			p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
p                 180 arch/arm/kernel/traps.c 			p += sprintf(p, "bad PC value");
p                 117 arch/arm/kvm/coproc.c 			 const struct coproc_params *p,
p                 120 arch/arm/kvm/coproc.c 	if (p->is_write)
p                 121 arch/arm/kvm/coproc.c 		return ignore_write(vcpu, p);
p                 123 arch/arm/kvm/coproc.c 	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR);
p                 129 arch/arm/kvm/coproc.c 			const struct coproc_params *p,
p                 132 arch/arm/kvm/coproc.c 	if (p->is_write)
p                 133 arch/arm/kvm/coproc.c 		return write_to_read_only(vcpu, p);
p                 134 arch/arm/kvm/coproc.c 	return read_zero(vcpu, p);
p                 139 arch/arm/kvm/coproc.c 			  const struct coproc_params *p,
p                 142 arch/arm/kvm/coproc.c 	if (p->is_write)
p                 143 arch/arm/kvm/coproc.c 		return ignore_write(vcpu, p);
p                 145 arch/arm/kvm/coproc.c 	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR);
p                 185 arch/arm/kvm/coproc.c 			   const struct coproc_params *p,
p                 188 arch/arm/kvm/coproc.c 	if (p->is_write)
p                 189 arch/arm/kvm/coproc.c 		return ignore_write(vcpu, p);
p                 191 arch/arm/kvm/coproc.c 	*vcpu_reg(vcpu, p->Rt1) = 0;
p                 199 arch/arm/kvm/coproc.c 			const struct coproc_params *p,
p                 202 arch/arm/kvm/coproc.c 	if (!p->is_write)
p                 203 arch/arm/kvm/coproc.c 		return read_from_write_only(vcpu, p);
p                 217 arch/arm/kvm/coproc.c 		   const struct coproc_params *p,
p                 222 arch/arm/kvm/coproc.c 	BUG_ON(!p->is_write);
p                 224 arch/arm/kvm/coproc.c 	vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1);
p                 225 arch/arm/kvm/coproc.c 	if (p->is_64bit)
p                 226 arch/arm/kvm/coproc.c 		vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2);
p                 233 arch/arm/kvm/coproc.c 			   const struct coproc_params *p,
p                 239 arch/arm/kvm/coproc.c 	if (!p->is_write)
p                 240 arch/arm/kvm/coproc.c 		return read_from_write_only(vcpu, p);
p                 242 arch/arm/kvm/coproc.c 	reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
p                 243 arch/arm/kvm/coproc.c 	reg |= *vcpu_reg(vcpu, p->Rt1) ;
p                 252 arch/arm/kvm/coproc.c 	switch (p->Op1) {
p                 269 arch/arm/kvm/coproc.c 			   const struct coproc_params *p,
p                 272 arch/arm/kvm/coproc.c 	if (p->is_write)
p                 273 arch/arm/kvm/coproc.c 		return ignore_write(vcpu, p);
p                 275 arch/arm/kvm/coproc.c 	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
p                 281 arch/arm/kvm/coproc.c 			     const struct coproc_params *p,
p                 286 arch/arm/kvm/coproc.c 	if (p->is_write) {
p                 287 arch/arm/kvm/coproc.c 		val = *vcpu_reg(vcpu, p->Rt1);
p                 293 arch/arm/kvm/coproc.c 		*vcpu_reg(vcpu, p->Rt1) = val;
p                 300 arch/arm/kvm/coproc.c 			    const struct coproc_params *p,
p                 305 arch/arm/kvm/coproc.c 	if (p->is_write) {
p                 306 arch/arm/kvm/coproc.c 		val = *vcpu_reg(vcpu, p->Rt1);
p                 312 arch/arm/kvm/coproc.c 		*vcpu_reg(vcpu, p->Rt1) = val;
p                 319 arch/arm/kvm/coproc.c 			     const struct coproc_params *p,
p                 324 arch/arm/kvm/coproc.c 	if (p->is_write) {
p                 325 arch/arm/kvm/coproc.c 		val = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
p                 326 arch/arm/kvm/coproc.c 		val |= *vcpu_reg(vcpu, p->Rt1);
p                 332 arch/arm/kvm/coproc.c 		*vcpu_reg(vcpu, p->Rt1) = val;
p                 333 arch/arm/kvm/coproc.c 		*vcpu_reg(vcpu, p->Rt2) = val >> 32;
p                 349 arch/arm/kvm/coproc.c 		    const struct coproc_params *p,
p                 352 arch/arm/kvm/coproc.c 	if (p->is_write)
p                 353 arch/arm/kvm/coproc.c 		return ignore_write(vcpu, p);
p                 355 arch/arm/kvm/coproc.c 		return read_zero(vcpu, p);
p                  45 arch/arm/kvm/coproc.h static inline void print_cp_instr(const struct coproc_params *p)
p                  48 arch/arm/kvm/coproc.h 	if (p->is_64bit) {
p                  50 arch/arm/kvm/coproc.h 			      p->CRn, p->Op1, p->is_write ? "write" : "read");
p                  54 arch/arm/kvm/coproc.h 			      p->CRn, p->CRm, p->Op1, p->Op2,
p                  55 arch/arm/kvm/coproc.h 			      p->is_write ? "write" : "read");
p                  60 arch/arm/kvm/coproc.h 				const struct coproc_params *p)
p                  66 arch/arm/kvm/coproc.h 			     const struct coproc_params *p)
p                  68 arch/arm/kvm/coproc.h 	*vcpu_reg(vcpu, p->Rt1) = 0;
p                 127 arch/arm/kvm/coproc.h 		   const struct coproc_params *p,
p                 871 arch/arm/mach-davinci/devices-da8xx.c static int __init early_rproc_mem(char *p)
p                 875 arch/arm/mach-davinci/devices-da8xx.c 	if (p == NULL)
p                 878 arch/arm/mach-davinci/devices-da8xx.c 	rproc_size = memparse(p, &endp);
p                  20 arch/arm/mach-davinci/serial.c static inline void serial_write_reg(struct plat_serial8250_port *p, int offset,
p                  23 arch/arm/mach-davinci/serial.c 	offset <<= p->regshift;
p                  25 arch/arm/mach-davinci/serial.c 	WARN_ONCE(!p->membase, "unmapped write: uart[%d]\n", offset);
p                  27 arch/arm/mach-davinci/serial.c 	__raw_writel(value, p->membase + offset);
p                  30 arch/arm/mach-davinci/serial.c static void __init davinci_serial_reset(struct plat_serial8250_port *p)
p                  34 arch/arm/mach-davinci/serial.c 	serial_write_reg(p, UART_IER, 0);  /* disable all interrupts */
p                  37 arch/arm/mach-davinci/serial.c 	serial_write_reg(p, UART_DAVINCI_PWREMU, pwremu);
p                  42 arch/arm/mach-davinci/serial.c 	serial_write_reg(p, UART_DAVINCI_PWREMU, pwremu);
p                  45 arch/arm/mach-davinci/serial.c 		serial_write_reg(p, UART_DM646X_SCR,
p                  53 arch/arm/mach-davinci/serial.c 	struct plat_serial8250_port *p;
p                  62 arch/arm/mach-davinci/serial.c 		p = dev->platform_data;
p                  77 arch/arm/mach-davinci/serial.c 		p->uartclk = clk_get_rate(clk);
p                  79 arch/arm/mach-davinci/serial.c 		if (!p->membase && p->mapbase) {
p                  80 arch/arm/mach-davinci/serial.c 			p->membase = ioremap(p->mapbase, SZ_4K);
p                  82 arch/arm/mach-davinci/serial.c 			if (p->membase)
p                  83 arch/arm/mach-davinci/serial.c 				p->flags &= ~UPF_IOREMAP;
p                  88 arch/arm/mach-davinci/serial.c 		if (p->membase && p->type != PORT_AR7)
p                  89 arch/arm/mach-davinci/serial.c 			davinci_serial_reset(p);
p                  38 arch/arm/mach-ebsa110/include/mach/io.h #define inb(p) 			__inb16(p)
p                  39 arch/arm/mach-ebsa110/include/mach/io.h #define outb(v,p)		__outb16(v,p)
p                  41 arch/arm/mach-ebsa110/include/mach/io.h #define inb(p)			__inb8(p)
p                  42 arch/arm/mach-ebsa110/include/mach/io.h #define outb(v,p)		__outb8(v,p)
p                  45 arch/arm/mach-ebsa110/include/mach/io.h #define inw(p)			__inw(p)
p                  46 arch/arm/mach-ebsa110/include/mach/io.h #define outw(v,p)		__outw(v,p)
p                  48 arch/arm/mach-ebsa110/include/mach/io.h #define inl(p)			__inl(p)
p                  49 arch/arm/mach-ebsa110/include/mach/io.h #define outl(v,p)		__outl(v,p)
p                 201 arch/arm/mach-ebsa110/io.c #define SUPERIO_PORT(p) \
p                 202 arch/arm/mach-ebsa110/io.c 	(((p) >> 3) == (0x3f8 >> 3) || \
p                 203 arch/arm/mach-ebsa110/io.c 	 ((p) >> 3) == (0x2f8 >> 3) || \
p                 204 arch/arm/mach-ebsa110/io.c 	 ((p) >> 3) == (0x378 >> 3))
p                 108 arch/arm/mach-imx/iomux-imx31.c 	const unsigned int *p = pin_list;
p                 113 arch/arm/mach-imx/iomux-imx31.c 		ret = mxc_iomux_alloc_pin(*p, label);
p                 116 arch/arm/mach-imx/iomux-imx31.c 		p++;
p                 135 arch/arm/mach-imx/iomux-imx31.c 	const unsigned int *p = pin_list;
p                 139 arch/arm/mach-imx/iomux-imx31.c 		mxc_iomux_release_pin(*p);
p                 140 arch/arm/mach-imx/iomux-imx31.c 		p++;
p                  49 arch/arm/mach-imx/iomux-v3.c 	const iomux_v3_cfg_t *p = pad_list;
p                  54 arch/arm/mach-imx/iomux-v3.c 		ret = mxc_iomux_v3_setup_pad(*p);
p                  57 arch/arm/mach-imx/iomux-v3.c 		p++;
p                  59 arch/arm/mach-imx/mmdc.c #define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)
p                 149 arch/arm/mach-imx/tzic.c static int __init tzic_init_dt(struct device_node *np, struct device_node *p)
p                  54 arch/arm/mach-ixp4xx/include/mach/io.h #define writeb(v, p)			__indirect_writeb(v, p)
p                  55 arch/arm/mach-ixp4xx/include/mach/io.h #define writew(v, p)			__indirect_writew(v, p)
p                  56 arch/arm/mach-ixp4xx/include/mach/io.h #define writel(v, p)			__indirect_writel(v, p)
p                  58 arch/arm/mach-ixp4xx/include/mach/io.h #define writeb_relaxed(v, p)		__indirect_writeb(v, p)
p                  59 arch/arm/mach-ixp4xx/include/mach/io.h #define writew_relaxed(v, p)		__indirect_writew(v, p)
p                  60 arch/arm/mach-ixp4xx/include/mach/io.h #define writel_relaxed(v, p)		__indirect_writel(v, p)
p                  62 arch/arm/mach-ixp4xx/include/mach/io.h #define writesb(p, v, l)		__indirect_writesb(p, v, l)
p                  63 arch/arm/mach-ixp4xx/include/mach/io.h #define writesw(p, v, l)		__indirect_writesw(p, v, l)
p                  64 arch/arm/mach-ixp4xx/include/mach/io.h #define writesl(p, v, l)		__indirect_writesl(p, v, l)
p                  66 arch/arm/mach-ixp4xx/include/mach/io.h #define readb(p)			__indirect_readb(p)
p                  67 arch/arm/mach-ixp4xx/include/mach/io.h #define readw(p)			__indirect_readw(p)
p                  68 arch/arm/mach-ixp4xx/include/mach/io.h #define readl(p)			__indirect_readl(p)
p                  70 arch/arm/mach-ixp4xx/include/mach/io.h #define readb_relaxed(p)		__indirect_readb(p)
p                  71 arch/arm/mach-ixp4xx/include/mach/io.h #define readw_relaxed(p)		__indirect_readw(p)
p                  72 arch/arm/mach-ixp4xx/include/mach/io.h #define readl_relaxed(p)		__indirect_readl(p)
p                  74 arch/arm/mach-ixp4xx/include/mach/io.h #define readsb(p, v, l)			__indirect_readsb(p, v, l)
p                  75 arch/arm/mach-ixp4xx/include/mach/io.h #define readsw(p, v, l)			__indirect_readsw(p, v, l)
p                  76 arch/arm/mach-ixp4xx/include/mach/io.h #define readsl(p, v, l)			__indirect_readsl(p, v, l)
p                  78 arch/arm/mach-ixp4xx/include/mach/io.h static inline void __indirect_writeb(u8 value, volatile void __iomem *p)
p                  80 arch/arm/mach-ixp4xx/include/mach/io.h 	u32 addr = (u32)p;
p                  84 arch/arm/mach-ixp4xx/include/mach/io.h 		__raw_writeb(value, p);
p                  95 arch/arm/mach-ixp4xx/include/mach/io.h 				      const void *p, int count)
p                  97 arch/arm/mach-ixp4xx/include/mach/io.h 	const u8 *vaddr = p;
p                 103 arch/arm/mach-ixp4xx/include/mach/io.h static inline void __indirect_writew(u16 value, volatile void __iomem *p)
p                 105 arch/arm/mach-ixp4xx/include/mach/io.h 	u32 addr = (u32)p;
p                 109 arch/arm/mach-ixp4xx/include/mach/io.h 		__raw_writew(value, p);
p                 120 arch/arm/mach-ixp4xx/include/mach/io.h 				      const void *p, int count)
p                 122 arch/arm/mach-ixp4xx/include/mach/io.h 	const u16 *vaddr = p;
p                 128 arch/arm/mach-ixp4xx/include/mach/io.h static inline void __indirect_writel(u32 value, volatile void __iomem *p)
p                 130 arch/arm/mach-ixp4xx/include/mach/io.h 	u32 addr = (__force u32)p;
p                 133 arch/arm/mach-ixp4xx/include/mach/io.h 		__raw_writel(value, p);
p                 141 arch/arm/mach-ixp4xx/include/mach/io.h 				      const void *p, int count)
p                 143 arch/arm/mach-ixp4xx/include/mach/io.h 	const u32 *vaddr = p;
p                 148 arch/arm/mach-ixp4xx/include/mach/io.h static inline u8 __indirect_readb(const volatile void __iomem *p)
p                 150 arch/arm/mach-ixp4xx/include/mach/io.h 	u32 addr = (u32)p;
p                 154 arch/arm/mach-ixp4xx/include/mach/io.h 		return __raw_readb(p);
p                 165 arch/arm/mach-ixp4xx/include/mach/io.h 				     void *p, u32 count)
p                 167 arch/arm/mach-ixp4xx/include/mach/io.h 	u8 *vaddr = p;
p                 173 arch/arm/mach-ixp4xx/include/mach/io.h static inline u16 __indirect_readw(const volatile void __iomem *p)
p                 175 arch/arm/mach-ixp4xx/include/mach/io.h 	u32 addr = (u32)p;
p                 179 arch/arm/mach-ixp4xx/include/mach/io.h 		return __raw_readw(p);
p                 190 arch/arm/mach-ixp4xx/include/mach/io.h 				     void *p, u32 count)
p                 192 arch/arm/mach-ixp4xx/include/mach/io.h 	u16 *vaddr = p;
p                 198 arch/arm/mach-ixp4xx/include/mach/io.h static inline u32 __indirect_readl(const volatile void __iomem *p)
p                 200 arch/arm/mach-ixp4xx/include/mach/io.h 	u32 addr = (__force u32)p;
p                 204 arch/arm/mach-ixp4xx/include/mach/io.h 		return __raw_readl(p);
p                 213 arch/arm/mach-ixp4xx/include/mach/io.h 				     void *p, u32 count)
p                 215 arch/arm/mach-ixp4xx/include/mach/io.h 	u32 *vaddr = p;
p                 256 arch/arm/mach-ixp4xx/include/mach/io.h static inline void outsb(u32 io_addr, const void *p, u32 count)
p                 258 arch/arm/mach-ixp4xx/include/mach/io.h 	const u8 *vaddr = p;
p                 275 arch/arm/mach-ixp4xx/include/mach/io.h static inline void outsw(u32 io_addr, const void *p, u32 count)
p                 277 arch/arm/mach-ixp4xx/include/mach/io.h 	const u16 *vaddr = p;
p                 289 arch/arm/mach-ixp4xx/include/mach/io.h static inline void outsl(u32 io_addr, const void *p, u32 count)
p                 291 arch/arm/mach-ixp4xx/include/mach/io.h 	const u32 *vaddr = p;
p                 309 arch/arm/mach-ixp4xx/include/mach/io.h static inline void insb(u32 io_addr, void *p, u32 count)
p                 311 arch/arm/mach-ixp4xx/include/mach/io.h 	u8 *vaddr = p;
p                 329 arch/arm/mach-ixp4xx/include/mach/io.h static inline void insw(u32 io_addr, void *p, u32 count)
p                 331 arch/arm/mach-ixp4xx/include/mach/io.h 	u16 *vaddr = p;
p                 347 arch/arm/mach-ixp4xx/include/mach/io.h static inline void insl(u32 io_addr, void *p, u32 count)
p                 349 arch/arm/mach-ixp4xx/include/mach/io.h 	u32 *vaddr = p;
p                 357 arch/arm/mach-ixp4xx/include/mach/io.h #define	__is_io_address(p)	(((unsigned long)p >= PIO_OFFSET) && \
p                 358 arch/arm/mach-ixp4xx/include/mach/io.h 					((unsigned long)p <= (PIO_MASK + PIO_OFFSET)))
p                 360 arch/arm/mach-ixp4xx/include/mach/io.h #define	ioread8(p)			ioread8(p)
p                 374 arch/arm/mach-ixp4xx/include/mach/io.h #define	ioread8_rep(p, v, c)		ioread8_rep(p, v, c)
p                 388 arch/arm/mach-ixp4xx/include/mach/io.h #define	ioread16(p)			ioread16(p)
p                 402 arch/arm/mach-ixp4xx/include/mach/io.h #define	ioread16_rep(p, v, c)		ioread16_rep(p, v, c)
p                 417 arch/arm/mach-ixp4xx/include/mach/io.h #define	ioread32(p)			ioread32(p)
p                 432 arch/arm/mach-ixp4xx/include/mach/io.h #define	ioread32_rep(p, v, c)		ioread32_rep(p, v, c)
p                 447 arch/arm/mach-ixp4xx/include/mach/io.h #define	iowrite8(v, p)			iowrite8(v, p)
p                 461 arch/arm/mach-ixp4xx/include/mach/io.h #define	iowrite8_rep(p, v, c)		iowrite8_rep(p, v, c)
p                 476 arch/arm/mach-ixp4xx/include/mach/io.h #define	iowrite16(v, p)			iowrite16(v, p)
p                 490 arch/arm/mach-ixp4xx/include/mach/io.h #define	iowrite16_rep(p, v, c)		iowrite16_rep(p, v, c)
p                 505 arch/arm/mach-ixp4xx/include/mach/io.h #define	iowrite32(v, p)			iowrite32(v, p)
p                 519 arch/arm/mach-ixp4xx/include/mach/io.h #define	iowrite32_rep(p, v, c)		iowrite32_rep(p, v, c)
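(Editor's aside: the mach-ixp4xx io.h hits above implement indirect I/O — the accessors inspect the __iomem cookie and either fall through to the __raw_* MMIO helpers or go through the platform's indirect PCI access path, and __is_io_address() classifies a cookie by checking whether it lies in the reserved PIO window. A minimal sketch of that range check; the PIO_OFFSET/PIO_MASK values below are placeholders, not taken from this listing:)

	#define PIO_OFFSET	0x10000UL
	#define PIO_MASK	0x0ffffUL

	/* True if p is an encoded I/O-port cookie rather than a real MMIO pointer. */
	static int is_pio_cookie(const volatile void *p)
	{
		unsigned long addr = (unsigned long)p;

		return addr >= PIO_OFFSET && addr <= (PIO_OFFSET + PIO_MASK);
	}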
p                 194 arch/arm/mach-mvebu/coherency.c 		struct property *p;
p                 196 arch/arm/mach-mvebu/coherency.c 		p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 197 arch/arm/mach-mvebu/coherency.c 		p->name = kstrdup("arm,io-coherent", GFP_KERNEL);
p                 198 arch/arm/mach-mvebu/coherency.c 		of_add_property(cache_dn, p);
p                 312 arch/arm/mach-omap1/dma.c 	struct omap_system_dma_plat_info	p;
p                 370 arch/arm/mach-omap1/dma.c 	p = dma_plat_info;
p                 371 arch/arm/mach-omap1/dma.c 	p.dma_attr = d;
p                 372 arch/arm/mach-omap1/dma.c 	p.errata = configure_dma_errata();
p                 375 arch/arm/mach-omap1/dma.c 		p.slave_map = omap7xx_sdma_map;
p                 376 arch/arm/mach-omap1/dma.c 		p.slavecnt = ARRAY_SIZE(omap7xx_sdma_map);
p                 378 arch/arm/mach-omap1/dma.c 		p.slave_map = omap1xxx_sdma_map;
p                 379 arch/arm/mach-omap1/dma.c 		p.slavecnt = ARRAY_SIZE(omap1xxx_sdma_map);
p                 382 arch/arm/mach-omap1/dma.c 	ret = platform_device_add_data(pdev, &p, sizeof(p));
p                  78 arch/arm/mach-omap1/include/mach/uncompress.h #define DEBUG_LL_OMAP7XX(p, mach)					\
p                  79 arch/arm/mach-omap1/include/mach/uncompress.h 	_DEBUG_LL_ENTRY(mach, OMAP1_UART##p##_BASE, OMAP7XX_PORT_SHIFT,	\
p                  80 arch/arm/mach-omap1/include/mach/uncompress.h 		OMAP1UART##p)
p                  82 arch/arm/mach-omap1/include/mach/uncompress.h #define DEBUG_LL_OMAP1(p, mach)						\
p                  83 arch/arm/mach-omap1/include/mach/uncompress.h 	_DEBUG_LL_ENTRY(mach, OMAP1_UART##p##_BASE, OMAP_PORT_SHIFT,	\
p                  84 arch/arm/mach-omap1/include/mach/uncompress.h 		OMAP1UART##p)
p                  38 arch/arm/mach-omap1/serial.c static inline void omap_serial_outp(struct plat_serial8250_port *p, int offset,
p                  41 arch/arm/mach-omap1/serial.c 	offset <<= p->regshift;
p                  42 arch/arm/mach-omap1/serial.c 	__raw_writeb(value, p->membase + offset);
p                  50 arch/arm/mach-omap1/serial.c static void __init omap_serial_reset(struct plat_serial8250_port *p)
p                  52 arch/arm/mach-omap1/serial.c 	omap_serial_outp(p, UART_OMAP_MDR1,
p                  54 arch/arm/mach-omap1/serial.c 	omap_serial_outp(p, UART_OMAP_SCR, 0x08);	/* TX watermark */
p                  55 arch/arm/mach-omap1/serial.c 	omap_serial_outp(p, UART_OMAP_MDR1,
p                  59 arch/arm/mach-omap1/serial.c 		omap_serial_outp(p, UART_OMAP_SYSC, 0x01);
p                  60 arch/arm/mach-omap1/serial.c 		while (!(omap_serial_in(p, UART_OMAP_SYSC) & 0x01));
p                 233 arch/arm/mach-omap2/dma.c 	struct omap_system_dma_plat_info	p;
p                 238 arch/arm/mach-omap2/dma.c 	p = dma_plat_info;
p                 239 arch/arm/mach-omap2/dma.c 	p.dma_attr = (struct omap_dma_dev_attr *)oh->dev_attr;
p                 240 arch/arm/mach-omap2/dma.c 	p.errata = configure_dma_errata();
p                 244 arch/arm/mach-omap2/dma.c 		p.slave_map = omap24xx_sdma_dt_map;
p                 245 arch/arm/mach-omap2/dma.c 		p.slavecnt = ARRAY_SIZE(omap24xx_sdma_dt_map);
p                 248 arch/arm/mach-omap2/dma.c 	pdev = omap_device_build(name, 0, oh, &p, sizeof(p));
p                2133 arch/arm/mach-omap2/omap_hwmod.c 	const char *p;
p                2141 arch/arm/mach-omap2/omap_hwmod.c 						    i, &p);
p                2144 arch/arm/mach-omap2/omap_hwmod.c 		if (!strcmp(p, oh->name)) {
p                 323 arch/arm/mach-omap2/powerdomain.c 	struct powerdomain **p = NULL;
p                 331 arch/arm/mach-omap2/powerdomain.c 	for (p = ps; *p; p++)
p                 332 arch/arm/mach-omap2/powerdomain.c 		_pwrdm_register(*p);
p                  56 arch/arm/mach-omap2/prm2xxx.c 	struct prm_reset_src_map *p;
p                  62 arch/arm/mach-omap2/prm2xxx.c 	p = omap2xxx_prm_reset_src_map;
p                  63 arch/arm/mach-omap2/prm2xxx.c 	while (p->reg_shift >= 0 && p->std_shift >= 0) {
p                  64 arch/arm/mach-omap2/prm2xxx.c 		if (v & (1 << p->reg_shift))
p                  65 arch/arm/mach-omap2/prm2xxx.c 			r |= 1 << p->std_shift;
p                  66 arch/arm/mach-omap2/prm2xxx.c 		p++;
p                 448 arch/arm/mach-omap2/prm3xxx.c 	struct prm_reset_src_map *p;
p                 454 arch/arm/mach-omap2/prm3xxx.c 	p = omap3xxx_prm_reset_src_map;
p                 455 arch/arm/mach-omap2/prm3xxx.c 	while (p->reg_shift >= 0 && p->std_shift >= 0) {
p                 456 arch/arm/mach-omap2/prm3xxx.c 		if (v & (1 << p->reg_shift))
p                 457 arch/arm/mach-omap2/prm3xxx.c 			r |= 1 << p->std_shift;
p                 458 arch/arm/mach-omap2/prm3xxx.c 		p++;
p                 372 arch/arm/mach-omap2/prm44xx.c 	struct prm_reset_src_map *p;
p                 384 arch/arm/mach-omap2/prm44xx.c 	p = omap44xx_prm_reset_src_map;
p                 385 arch/arm/mach-omap2/prm44xx.c 	while (p->reg_shift >= 0 && p->std_shift >= 0) {
p                 386 arch/arm/mach-omap2/prm44xx.c 		if (v & (1 << p->reg_shift))
p                 387 arch/arm/mach-omap2/prm44xx.c 			r |= 1 << p->std_shift;
p                 388 arch/arm/mach-omap2/prm44xx.c 		p++;
p                 298 arch/arm/mach-pxa/mioa701.c irqreturn_t gsm_on_irq(int irq, void *p)
p                 101 arch/arm/mach-pxa/pxa3xx.c 	volatile unsigned long *p = (volatile void *)0xc0000000;
p                 102 arch/arm/mach-pxa/pxa3xx.c 	unsigned long saved_data = *p;
p                 126 arch/arm/mach-pxa/pxa3xx.c 	*p = __pa_symbol(cpu_resume);
p                 130 arch/arm/mach-pxa/pxa3xx.c 	*p = saved_data;
p                  48 arch/arm/mach-rpc/include/mach/acornfb.h 		u_int rr, v, p;
p                  58 arch/arm/mach-rpc/include/mach/acornfb.h 		p = (rr + v / 2) / v;
p                  60 arch/arm/mach-rpc/include/mach/acornfb.h 		d = pixclk - p;
p                  55 arch/arm/mach-rpc/include/mach/uncompress.h #define palette_setpixel(p)	*(unsigned long *)(IO_START+0x00400000) = 0x10000000|((p) & 255)
p                 196 arch/arm/mach-s3c24xx/include/mach/io.h #define inb(p)		(__builtin_constant_p((p)) ? __inbc(p)	   : __inb(p))
p                 197 arch/arm/mach-s3c24xx/include/mach/io.h #define inw(p)		(__builtin_constant_p((p)) ? __inwc(p)	   : __inw(p))
p                 198 arch/arm/mach-s3c24xx/include/mach/io.h #define inl(p)		(__builtin_constant_p((p)) ? __inlc(p)	   : __inl(p))
p                 199 arch/arm/mach-s3c24xx/include/mach/io.h #define outb(v,p)	(__builtin_constant_p((p)) ? __outbc(v,p) : __outb(v,p))
p                 200 arch/arm/mach-s3c24xx/include/mach/io.h #define outw(v,p)	(__builtin_constant_p((p)) ? __outwc(v,p) : __outw(v,p))
p                 201 arch/arm/mach-s3c24xx/include/mach/io.h #define outl(v,p)	(__builtin_constant_p((p)) ? __outlc(v,p) : __outl(v,p))
p                 202 arch/arm/mach-s3c24xx/include/mach/io.h #define __ioaddr(p)	(__builtin_constant_p((p)) ? __ioaddr(p)  : __ioaddrc(p))
p                 204 arch/arm/mach-s3c24xx/include/mach/io.h #define insb(p,d,l)	__raw_readsb(__ioaddr(p),d,l)
p                 205 arch/arm/mach-s3c24xx/include/mach/io.h #define insw(p,d,l)	__raw_readsw(__ioaddr(p),d,l)
p                 206 arch/arm/mach-s3c24xx/include/mach/io.h #define insl(p,d,l)	__raw_readsl(__ioaddr(p),d,l)
p                 208 arch/arm/mach-s3c24xx/include/mach/io.h #define outsb(p,d,l)	__raw_writesb(__ioaddr(p),d,l)
p                 209 arch/arm/mach-s3c24xx/include/mach/io.h #define outsw(p,d,l)	__raw_writesw(__ioaddr(p),d,l)
p                 210 arch/arm/mach-s3c24xx/include/mach/io.h #define outsl(p,d,l)	__raw_writesl(__ioaddr(p),d,l)
p                 600 arch/arm/mach-s3c24xx/mach-h1940.c #define DECLARE_BUTTON(p, k, n, w)	\
p                 602 arch/arm/mach-s3c24xx/mach-h1940.c 		.gpio		= p,	\
p                  46 arch/arm/mach-shmobile/platsmp-apmu.c static int __maybe_unused apmu_power_on(void __iomem *p, int bit)
p                  49 arch/arm/mach-shmobile/platsmp-apmu.c 	writel_relaxed(BIT(bit), p + WUPCR_OFFS);
p                  52 arch/arm/mach-shmobile/platsmp-apmu.c 	while (readl_relaxed(p + WUPCR_OFFS) != 0)
p                  58 arch/arm/mach-shmobile/platsmp-apmu.c static int __maybe_unused apmu_power_off(void __iomem *p, int bit)
p                  61 arch/arm/mach-shmobile/platsmp-apmu.c 	writel_relaxed(3, p + CPUNCR_OFFS(bit));
p                  65 arch/arm/mach-shmobile/platsmp-apmu.c static int __maybe_unused apmu_power_off_poll(void __iomem *p, int bit)
p                  70 arch/arm/mach-shmobile/platsmp-apmu.c 		if (CPUNST(readl_relaxed(p + PSTR_OFFS), bit) == CPUST_STANDBY)
p                  79 arch/arm/mach-shmobile/platsmp-apmu.c static int __maybe_unused apmu_wrap(int cpu, int (*fn)(void __iomem *p, int cpu))
p                  81 arch/arm/mach-shmobile/platsmp-apmu.c 	void __iomem *p = apmu_cpus[cpu].iomem;
p                  83 arch/arm/mach-shmobile/platsmp-apmu.c 	return p ? fn(p, apmu_cpus[cpu].bit) : -EINVAL;
p                  47 arch/arm/mach-shmobile/pm-rcar-gen2.c 	void __iomem *p;
p                  89 arch/arm/mach-shmobile/pm-rcar-gen2.c 	p = ioremap(res.start, resource_size(&res));
p                  90 arch/arm/mach-shmobile/pm-rcar-gen2.c 	if (!p)
p                  98 arch/arm/mach-shmobile/pm-rcar-gen2.c 		memcpy_toio(p, shmobile_boot_vector_gen2,
p                 101 arch/arm/mach-shmobile/pm-rcar-gen2.c 		memcpy_toio(p, shmobile_boot_vector, shmobile_boot_size);
p                 103 arch/arm/mach-shmobile/pm-rcar-gen2.c 	iounmap(p);
p                 106 arch/arm/mach-shmobile/pm-rcar-gen2.c 	p = ioremap_nocache(RST, 0x63);
p                 109 arch/arm/mach-shmobile/pm-rcar-gen2.c 		writel_relaxed(bar, p + CA15BAR);
p                 110 arch/arm/mach-shmobile/pm-rcar-gen2.c 		writel_relaxed(bar | SBAR_BAREN, p + CA15BAR);
p                 113 arch/arm/mach-shmobile/pm-rcar-gen2.c 		writel_relaxed((readl_relaxed(p + CA15RESCNT) &
p                 115 arch/arm/mach-shmobile/pm-rcar-gen2.c 			       p + CA15RESCNT);
p                 118 arch/arm/mach-shmobile/pm-rcar-gen2.c 		writel_relaxed(bar, p + CA7BAR);
p                 119 arch/arm/mach-shmobile/pm-rcar-gen2.c 		writel_relaxed(bar | SBAR_BAREN, p + CA7BAR);
p                 122 arch/arm/mach-shmobile/pm-rcar-gen2.c 		writel_relaxed((readl_relaxed(p + CA7RESCNT) &
p                 124 arch/arm/mach-shmobile/pm-rcar-gen2.c 			       p + CA7RESCNT);
p                 126 arch/arm/mach-shmobile/pm-rcar-gen2.c 	iounmap(p);
p                 113 arch/arm/mach-tegra/iomap.h #define IO_TO_VIRT_BETWEEN(p, st, sz)	((p) >= (st) && (p) < ((st) + (sz)))
p                 114 arch/arm/mach-tegra/iomap.h #define IO_TO_VIRT_XLATE(p, pst, vst)	(((p) - (pst) + (vst)))
p                 303 arch/arm/mm/dma-mapping.c 	struct page *page, *p, *e;
p                 313 arch/arm/mm/dma-mapping.c 	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
p                 314 arch/arm/mm/dma-mapping.c 		__free_page(p);
p                 348 arch/arm/mm/dma-mapping.c static int __init early_coherent_pool(char *p)
p                 350 arch/arm/mm/dma-mapping.c 	atomic_pool_size = memparse(p, &p);
p                 321 arch/arm/mm/init.c 	u32 *p = (u32 *)s;
p                 323 arch/arm/mm/init.c 		*p++ = 0xe7fddef0;
p                 156 arch/arm/mm/mmu.c static int __init early_cachepolicy(char *p)
p                 163 arch/arm/mm/mmu.c 		if (memcmp(p, cache_policies[i].policy, len) == 0) {
p                 197 arch/arm/mm/mmu.c 	char *p = "buffered";
p                 198 arch/arm/mm/mmu.c 	pr_warn("nocache is deprecated; use cachepolicy=%s\n", p);
p                 199 arch/arm/mm/mmu.c 	early_cachepolicy(p);
p                 206 arch/arm/mm/mmu.c 	char *p = "uncached";
p                 207 arch/arm/mm/mmu.c 	pr_warn("nowb is deprecated; use cachepolicy=%s\n", p);
p                 208 arch/arm/mm/mmu.c 	early_cachepolicy(p);
p                 214 arch/arm/mm/mmu.c static int __init early_ecc(char *p)
p                 216 arch/arm/mm/mmu.c 	if (memcmp(p, "on", 2) == 0)
p                 218 arch/arm/mm/mmu.c 	else if (memcmp(p, "off", 3) == 0)
p                 227 arch/arm/mm/mmu.c static int __init early_cachepolicy(char *p)
p                 775 arch/arm/mm/mmu.c 	pmd_t *p = pmd;
p                 795 arch/arm/mm/mmu.c 	flush_pmd_entry(p);
p                  29 arch/arm/nwfpe/fpa11_cpdt.c 	unsigned int *p;
p                  30 arch/arm/nwfpe/fpa11_cpdt.c 	p = (unsigned int *) &fpa11->fpreg[Fn].fDouble;
p                  33 arch/arm/nwfpe/fpa11_cpdt.c 	get_user(p[0], &pMem[0]);	/* sign & exponent */
p                  34 arch/arm/nwfpe/fpa11_cpdt.c 	get_user(p[1], &pMem[1]);
p                  36 arch/arm/nwfpe/fpa11_cpdt.c 	get_user(p[0], &pMem[1]);
p                  37 arch/arm/nwfpe/fpa11_cpdt.c 	get_user(p[1], &pMem[0]);	/* sign & exponent */
p                  45 arch/arm/nwfpe/fpa11_cpdt.c 	unsigned int *p;
p                  46 arch/arm/nwfpe/fpa11_cpdt.c 	p = (unsigned int *) &fpa11->fpreg[Fn].fExtended;
p                  48 arch/arm/nwfpe/fpa11_cpdt.c 	get_user(p[0], &pMem[0]);	/* sign & exponent */
p                  50 arch/arm/nwfpe/fpa11_cpdt.c 	get_user(p[1], &pMem[1]);	/* ms bits */
p                  51 arch/arm/nwfpe/fpa11_cpdt.c 	get_user(p[2], &pMem[2]);	/* ls bits */
p                  53 arch/arm/nwfpe/fpa11_cpdt.c 	get_user(p[1], &pMem[2]);	/* ls bits */
p                  54 arch/arm/nwfpe/fpa11_cpdt.c 	get_user(p[2], &pMem[1]);	/* ms bits */
p                  62 arch/arm/nwfpe/fpa11_cpdt.c 	register unsigned int *p;
p                  65 arch/arm/nwfpe/fpa11_cpdt.c 	p = (unsigned int *) &(fpa11->fpreg[Fn]);
p                  73 arch/arm/nwfpe/fpa11_cpdt.c 			get_user(p[0], &pMem[2]);	/* Single */
p                  74 arch/arm/nwfpe/fpa11_cpdt.c 			get_user(p[1], &pMem[1]);	/* double msw */
p                  75 arch/arm/nwfpe/fpa11_cpdt.c 			p[2] = 0;			/* empty */
p                  82 arch/arm/nwfpe/fpa11_cpdt.c 			get_user(p[1], &pMem[2]);
p                  83 arch/arm/nwfpe/fpa11_cpdt.c 			get_user(p[2], &pMem[1]);	/* msw */
p                  84 arch/arm/nwfpe/fpa11_cpdt.c 			p[0] = (x & 0x80003fff);
p                 185 arch/arm/nwfpe/fpa11_cpdt.c 	register unsigned int nType, *p;
p                 187 arch/arm/nwfpe/fpa11_cpdt.c 	p = (unsigned int *) &(fpa11->fpreg[Fn]);
p                 194 arch/arm/nwfpe/fpa11_cpdt.c 			put_user(p[0], &pMem[2]);	/* single */
p                 195 arch/arm/nwfpe/fpa11_cpdt.c 			put_user(p[1], &pMem[1]);	/* double msw */
p                 203 arch/arm/nwfpe/fpa11_cpdt.c 			put_user(p[2], &pMem[1]);	/* msw */
p                 204 arch/arm/nwfpe/fpa11_cpdt.c 			put_user(p[1], &pMem[2]);
p                 205 arch/arm/nwfpe/fpa11_cpdt.c 			put_user((p[0] & 0x80003fff) | (nType << 14), &pMem[0]);
p                  40 arch/arm/nwfpe/fpmodule.c void fp_send_sig(unsigned long sig, struct task_struct *p, int priv);
p                  65 arch/arm/plat-omap/dma.c static struct omap_system_dma_plat_info *p;
p                 208 arch/arm/plat-omap/dma.c 	ccr = p->dma_read(CCR, lch);
p                 213 arch/arm/plat-omap/dma.c 	p->dma_write(ccr, CCR, lch);
p                 224 arch/arm/plat-omap/dma.c 	l = p->dma_read(CSDP, lch);
p                 227 arch/arm/plat-omap/dma.c 	p->dma_write(l, CSDP, lch);
p                 232 arch/arm/plat-omap/dma.c 		ccr = p->dma_read(CCR, lch);
p                 236 arch/arm/plat-omap/dma.c 		p->dma_write(ccr, CCR, lch);
p                 238 arch/arm/plat-omap/dma.c 		ccr = p->dma_read(CCR2, lch);
p                 242 arch/arm/plat-omap/dma.c 		p->dma_write(ccr, CCR2, lch);
p                 248 arch/arm/plat-omap/dma.c 		val = p->dma_read(CCR, lch);
p                 273 arch/arm/plat-omap/dma.c 		p->dma_write(val, CCR, lch);
p                 276 arch/arm/plat-omap/dma.c 	p->dma_write(elem_count, CEN, lch);
p                 277 arch/arm/plat-omap/dma.c 	p->dma_write(frame_count, CFN, lch);
p                 286 arch/arm/plat-omap/dma.c 		csdp = p->dma_read(CSDP, lch);
p                 289 arch/arm/plat-omap/dma.c 		p->dma_write(csdp, CSDP, lch);
p                 299 arch/arm/plat-omap/dma.c 		l = p->dma_read(LCH_CTRL, lch);
p                 302 arch/arm/plat-omap/dma.c 		p->dma_write(l, LCH_CTRL, lch);
p                 317 arch/arm/plat-omap/dma.c 		w = p->dma_read(CSDP, lch);
p                 320 arch/arm/plat-omap/dma.c 		p->dma_write(w, CSDP, lch);
p                 323 arch/arm/plat-omap/dma.c 	l = p->dma_read(CCR, lch);
p                 326 arch/arm/plat-omap/dma.c 	p->dma_write(l, CCR, lch);
p                 328 arch/arm/plat-omap/dma.c 	p->dma_write(src_start, CSSA, lch);
p                 330 arch/arm/plat-omap/dma.c 	p->dma_write(src_ei, CSEI, lch);
p                 331 arch/arm/plat-omap/dma.c 	p->dma_write(src_fi, CSFI, lch);
p                 358 arch/arm/plat-omap/dma.c 	l = p->dma_read(CSDP, lch);
p                 362 arch/arm/plat-omap/dma.c 	p->dma_write(l, CSDP, lch);
p                 371 arch/arm/plat-omap/dma.c 	l = p->dma_read(CSDP, lch);
p                 405 arch/arm/plat-omap/dma.c 	p->dma_write(l, CSDP, lch);
p                 417 arch/arm/plat-omap/dma.c 		l = p->dma_read(CSDP, lch);
p                 420 arch/arm/plat-omap/dma.c 		p->dma_write(l, CSDP, lch);
p                 423 arch/arm/plat-omap/dma.c 	l = p->dma_read(CCR, lch);
p                 426 arch/arm/plat-omap/dma.c 	p->dma_write(l, CCR, lch);
p                 428 arch/arm/plat-omap/dma.c 	p->dma_write(dest_start, CDSA, lch);
p                 430 arch/arm/plat-omap/dma.c 	p->dma_write(dst_ei, CDEI, lch);
p                 431 arch/arm/plat-omap/dma.c 	p->dma_write(dst_fi, CDFI, lch);
p                 439 arch/arm/plat-omap/dma.c 	l = p->dma_read(CSDP, lch);
p                 443 arch/arm/plat-omap/dma.c 	p->dma_write(l, CSDP, lch);
p                 452 arch/arm/plat-omap/dma.c 	l = p->dma_read(CSDP, lch);
p                 483 arch/arm/plat-omap/dma.c 	p->dma_write(l, CSDP, lch);
p                 491 arch/arm/plat-omap/dma.c 		p->dma_read(CSR, lch);
p                 493 arch/arm/plat-omap/dma.c 		p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
p                 496 arch/arm/plat-omap/dma.c 	p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
p                 502 arch/arm/plat-omap/dma.c 	p->dma_write(0, CICR, lch);
p                 505 arch/arm/plat-omap/dma.c 		p->dma_read(CSR, lch);
p                 507 arch/arm/plat-omap/dma.c 		p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
p                 526 arch/arm/plat-omap/dma.c 	l = p->dma_read(CLNK_CTRL, lch);
p                 541 arch/arm/plat-omap/dma.c 	p->dma_write(l, CLNK_CTRL, lch);
p                 548 arch/arm/plat-omap/dma.c 	l = p->dma_read(CLNK_CTRL, lch);
p                 563 arch/arm/plat-omap/dma.c 	p->dma_write(l, CLNK_CTRL, lch);
p                 577 arch/arm/plat-omap/dma.c 	p->dma_write(1 << lch, IRQSTATUS_L0, lch);
p                 579 arch/arm/plat-omap/dma.c 	val = p->dma_read(IRQENABLE_L0, lch);
p                 581 arch/arm/plat-omap/dma.c 	p->dma_write(val, IRQENABLE_L0, lch);
p                 595 arch/arm/plat-omap/dma.c 	val = p->dma_read(IRQENABLE_L0, lch);
p                 597 arch/arm/plat-omap/dma.c 	p->dma_write(val, IRQENABLE_L0, lch);
p                 599 arch/arm/plat-omap/dma.c 	p->dma_write(1 << lch, IRQSTATUS_L0, lch);
p                 628 arch/arm/plat-omap/dma.c 	if (p->clear_lch_regs)
p                 629 arch/arm/plat-omap/dma.c 		p->clear_lch_regs(free_ch);
p                 666 arch/arm/plat-omap/dma.c 		p->dma_write(dev_id | (1 << 10), CCR, free_ch);
p                 668 arch/arm/plat-omap/dma.c 		p->dma_write(dev_id, CCR, free_ch);
p                 700 arch/arm/plat-omap/dma.c 	p->dma_write(0, CCR, lch);
p                 743 arch/arm/plat-omap/dma.c 	p->dma_write(reg, GCR, 0);
p                 766 arch/arm/plat-omap/dma.c 	l = p->dma_read(CCR, lch);
p                 773 arch/arm/plat-omap/dma.c 	p->dma_write(l, CCR, lch);
p                 788 arch/arm/plat-omap/dma.c 	p->clear_dma(lch);
p                 801 arch/arm/plat-omap/dma.c 		p->dma_write(0, CPC, lch);
p                 803 arch/arm/plat-omap/dma.c 		p->dma_write(0, CDAC, lch);
p                 831 arch/arm/plat-omap/dma.c 		p->dma_write(lch, CLNK_CTRL, lch);
p                 835 arch/arm/plat-omap/dma.c 	l = p->dma_read(CCR, lch);
p                 848 arch/arm/plat-omap/dma.c 	p->dma_write(l, CCR, lch);
p                 861 arch/arm/plat-omap/dma.c 	l = p->dma_read(CCR, lch);
p                 868 arch/arm/plat-omap/dma.c 		l = p->dma_read(OCP_SYSCONFIG, lch);
p                 872 arch/arm/plat-omap/dma.c 		p->dma_write(l , OCP_SYSCONFIG, 0);
p                 874 arch/arm/plat-omap/dma.c 		l = p->dma_read(CCR, lch);
p                 876 arch/arm/plat-omap/dma.c 		p->dma_write(l, CCR, lch);
p                 879 arch/arm/plat-omap/dma.c 		l = p->dma_read(CCR, lch);
p                 884 arch/arm/plat-omap/dma.c 			l = p->dma_read(CCR, lch);
p                 889 arch/arm/plat-omap/dma.c 		p->dma_write(sys_cf, OCP_SYSCONFIG, lch);
p                 892 arch/arm/plat-omap/dma.c 		p->dma_write(l, CCR, lch);
p                 965 arch/arm/plat-omap/dma.c 		offset = p->dma_read(CPC, lch);
p                 967 arch/arm/plat-omap/dma.c 		offset = p->dma_read(CSAC, lch);
p                 970 arch/arm/plat-omap/dma.c 		offset = p->dma_read(CSAC, lch);
p                 978 arch/arm/plat-omap/dma.c 		if (likely(p->dma_read(CDAC, lch)))
p                 979 arch/arm/plat-omap/dma.c 			offset = p->dma_read(CSAC, lch);
p                 981 arch/arm/plat-omap/dma.c 			offset = p->dma_read(CSSA, lch);
p                 985 arch/arm/plat-omap/dma.c 		offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);
p                1004 arch/arm/plat-omap/dma.c 		offset = p->dma_read(CPC, lch);
p                1006 arch/arm/plat-omap/dma.c 		offset = p->dma_read(CDAC, lch);
p                1013 arch/arm/plat-omap/dma.c 		offset = p->dma_read(CDAC, lch);
p                1020 arch/arm/plat-omap/dma.c 			offset = p->dma_read(CDSA, lch);
p                1024 arch/arm/plat-omap/dma.c 		offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);
p                1032 arch/arm/plat-omap/dma.c 	return (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
p                1045 arch/arm/plat-omap/dma.c 		if (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
p                1060 arch/arm/plat-omap/dma.c 			p->dma_write(p->dma_read(CCR, lch_head) | (3 << 8),
p                1091 arch/arm/plat-omap/dma.c 		csr = p->dma_read(CSR, ch);
p                1143 arch/arm/plat-omap/dma.c 	u32 status = p->dma_read(CSR, ch);
p                1148 arch/arm/plat-omap/dma.c 		p->dma_write(1 << ch, IRQSTATUS_L0, ch);
p                1166 arch/arm/plat-omap/dma.c 			ccr = p->dma_read(CCR, ch);
p                1168 arch/arm/plat-omap/dma.c 			p->dma_write(ccr, CCR, ch);
p                1179 arch/arm/plat-omap/dma.c 	p->dma_write(status, CSR, ch);
p                1180 arch/arm/plat-omap/dma.c 	p->dma_write(1 << ch, IRQSTATUS_L0, ch);
p                1182 arch/arm/plat-omap/dma.c 	p->dma_read(IRQSTATUS_L0, ch);
p                1188 arch/arm/plat-omap/dma.c 		if (p->dma_read(CLNK_CTRL, ch) & (1 << 15))
p                1198 arch/arm/plat-omap/dma.c 		status = p->dma_read(CSR, ch);
p                1199 arch/arm/plat-omap/dma.c 		p->dma_write(status, CSR, ch);
p                1214 arch/arm/plat-omap/dma.c 	val = p->dma_read(IRQSTATUS_L0, 0);
p                1220 arch/arm/plat-omap/dma.c 	enable_reg = p->dma_read(IRQENABLE_L0, 0);
p                1250 arch/arm/plat-omap/dma.c 		p->dma_read(IRQENABLE_L0, 0);
p                1252 arch/arm/plat-omap/dma.c 		p->dma_read(IRQENABLE_L1, 0);
p                1254 arch/arm/plat-omap/dma.c 		p->dma_read(OCP_SYSCONFIG, 0);
p                1255 arch/arm/plat-omap/dma.c 	omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
p                1262 arch/arm/plat-omap/dma.c 	p->dma_write(omap_dma_global_context.dma_gcr, GCR, 0);
p                1263 arch/arm/plat-omap/dma.c 	p->dma_write(omap_dma_global_context.dma_ocp_sysconfig,
p                1265 arch/arm/plat-omap/dma.c 	p->dma_write(omap_dma_global_context.dma_irqenable_l0,
p                1267 arch/arm/plat-omap/dma.c 	p->dma_write(omap_dma_global_context.dma_irqenable_l1,
p                1271 arch/arm/plat-omap/dma.c 		p->dma_write(0x3 , IRQSTATUS_L0, 0);
p                1280 arch/arm/plat-omap/dma.c 	return p;
p                1291 arch/arm/plat-omap/dma.c 	p = pdev->dev.platform_data;
p                1292 arch/arm/plat-omap/dma.c 	if (!p) {
p                1299 arch/arm/plat-omap/dma.c 	d			= p->dma_attr;
p                1300 arch/arm/plat-omap/dma.c 	errata			= p->errata;
p                1388 arch/arm/plat-omap/dma.c 	p->show_dma_caps();
p                 135 arch/arm/plat-pxa/mfp.c #define mfp_configured(p)	((p)->config != -1)
p                 144 arch/arm/plat-pxa/mfp.c static inline void __mfp_config_run(struct mfp_pin *p)
p                 146 arch/arm/plat-pxa/mfp.c 	if (mfp_configured(p))
p                 147 arch/arm/plat-pxa/mfp.c 		mfpr_writel(p->mfpr_off, p->mfpr_run);
p                 150 arch/arm/plat-pxa/mfp.c static inline void __mfp_config_lpm(struct mfp_pin *p)
p                 152 arch/arm/plat-pxa/mfp.c 	if (mfp_configured(p)) {
p                 153 arch/arm/plat-pxa/mfp.c 		unsigned long mfpr_clr = (p->mfpr_run & ~MFPR_EDGE_BOTH) | MFPR_EDGE_CLEAR;
p                 154 arch/arm/plat-pxa/mfp.c 		if (mfpr_clr != p->mfpr_run)
p                 155 arch/arm/plat-pxa/mfp.c 			mfpr_writel(p->mfpr_off, mfpr_clr);
p                 156 arch/arm/plat-pxa/mfp.c 		if (p->mfpr_lpm != mfpr_clr)
p                 157 arch/arm/plat-pxa/mfp.c 			mfpr_writel(p->mfpr_off, p->mfpr_lpm);
p                 170 arch/arm/plat-pxa/mfp.c 		struct mfp_pin *p;
p                 175 arch/arm/plat-pxa/mfp.c 		p = &mfp_table[pin];
p                 190 arch/arm/plat-pxa/mfp.c 			p->mfpr_run = tmp | mfpr_lpm[lpm] | mfpr_edge[edge];
p                 191 arch/arm/plat-pxa/mfp.c 			p->mfpr_lpm = p->mfpr_run;
p                 193 arch/arm/plat-pxa/mfp.c 			p->mfpr_lpm = tmp | mfpr_lpm[lpm] | mfpr_edge[edge];
p                 194 arch/arm/plat-pxa/mfp.c 			p->mfpr_run = tmp | mfpr_pull[pull];
p                 197 arch/arm/plat-pxa/mfp.c 		p->config = c; __mfp_config_run(p);
p                 242 arch/arm/plat-pxa/mfp.c 	struct mfp_addr_map *p;
p                 251 arch/arm/plat-pxa/mfp.c 	for (p = map; p->start != MFP_PIN_INVALID; p++) {
p                 252 arch/arm/plat-pxa/mfp.c 		offset = p->offset;
p                 253 arch/arm/plat-pxa/mfp.c 		i = p->start;
p                 260 arch/arm/plat-pxa/mfp.c 		} while ((i <= p->end) && (p->end != -1));
p                 268 arch/arm/plat-pxa/mfp.c 	struct mfp_pin *p = &mfp_table[0];
p                 271 arch/arm/plat-pxa/mfp.c 	for (pin = 0; pin < ARRAY_SIZE(mfp_table); pin++, p++)
p                 272 arch/arm/plat-pxa/mfp.c 		__mfp_config_lpm(p);
p                 277 arch/arm/plat-pxa/mfp.c 	struct mfp_pin *p = &mfp_table[0];
p                 280 arch/arm/plat-pxa/mfp.c 	for (pin = 0; pin < ARRAY_SIZE(mfp_table); pin++, p++)
p                 281 arch/arm/plat-pxa/mfp.c 		__mfp_config_run(p);
p                 262 arch/arm/plat-samsung/adc.c 		struct list_head *p, *n;
p                 265 arch/arm/plat-samsung/adc.c 		list_for_each_safe(p, n, &adc_pending) {
p                 266 arch/arm/plat-samsung/adc.c 			tmp = list_entry(p, struct s3c_adc_client, pend);
p                 347 arch/arm/probes/decode.c 	const struct decode_checker **p;
p                 352 arch/arm/probes/decode.c 	p = checkers;
p                 353 arch/arm/probes/decode.c 	while (*p != NULL) {
p                 355 arch/arm/probes/decode.c 		probes_check_t *checker_func = (*p)[action].checker;
p                 362 arch/arm/probes/decode.c 		p++;
p                  46 arch/arm/probes/kprobes/core.c int __kprobes arch_prepare_kprobe(struct kprobe *p)
p                  50 arch/arm/probes/kprobes/core.c 	unsigned long addr = (unsigned long)p->addr;
p                  76 arch/arm/probes/kprobes/core.c 	insn = __mem_to_opcode_arm(*p->addr);
p                  82 arch/arm/probes/kprobes/core.c 	p->opcode = insn;
p                  83 arch/arm/probes/kprobes/core.c 	p->ainsn.insn = tmp_insn;
p                  85 arch/arm/probes/kprobes/core.c 	switch ((*decode_insn)(insn, &p->ainsn, true, actions, checkers)) {
p                  90 arch/arm/probes/kprobes/core.c 		p->ainsn.insn = get_insn_slot();
p                  91 arch/arm/probes/kprobes/core.c 		if (!p->ainsn.insn)
p                  94 arch/arm/probes/kprobes/core.c 			p->ainsn.insn[is] = tmp_insn[is];
p                  95 arch/arm/probes/kprobes/core.c 		flush_insns(p->ainsn.insn,
p                  96 arch/arm/probes/kprobes/core.c 				sizeof(p->ainsn.insn[0]) * MAX_INSN_SIZE);
p                  97 arch/arm/probes/kprobes/core.c 		p->ainsn.insn_fn = (probes_insn_fn_t *)
p                  98 arch/arm/probes/kprobes/core.c 					((uintptr_t)p->ainsn.insn | thumb);
p                 102 arch/arm/probes/kprobes/core.c 		p->ainsn.insn = NULL;
p                 111 arch/arm/probes/kprobes/core.c 	if ((p->ainsn.stack_space < 0) ||
p                 112 arch/arm/probes/kprobes/core.c 			(p->ainsn.stack_space > MAX_STACK_SIZE))
p                 118 arch/arm/probes/kprobes/core.c void __kprobes arch_arm_kprobe(struct kprobe *p)
p                 125 arch/arm/probes/kprobes/core.c 		addr = (void *)((uintptr_t)p->addr & ~1);
p                 127 arch/arm/probes/kprobes/core.c 		if (is_wide_instruction(p->opcode))
p                 132 arch/arm/probes/kprobes/core.c 		kprobe_opcode_t insn = p->opcode;
p                 134 arch/arm/probes/kprobes/core.c 		addr = p->addr;
p                 161 arch/arm/probes/kprobes/core.c 	struct patch *p = data;
p                 162 arch/arm/probes/kprobes/core.c 	__patch_text(p->addr, p->insn);
p                 168 arch/arm/probes/kprobes/core.c 	struct patch p = {
p                 172 arch/arm/probes/kprobes/core.c 	stop_machine_cpuslocked(__kprobes_remove_breakpoint, &p,
p                 176 arch/arm/probes/kprobes/core.c void __kprobes arch_disarm_kprobe(struct kprobe *p)
p                 178 arch/arm/probes/kprobes/core.c 	kprobes_remove_breakpoint((void *)((uintptr_t)p->addr & ~1),
p                 179 arch/arm/probes/kprobes/core.c 			p->opcode);
p                 182 arch/arm/probes/kprobes/core.c void __kprobes arch_remove_kprobe(struct kprobe *p)
p                 184 arch/arm/probes/kprobes/core.c 	if (p->ainsn.insn) {
p                 185 arch/arm/probes/kprobes/core.c 		free_insn_slot(p->ainsn.insn, 0);
p                 186 arch/arm/probes/kprobes/core.c 		p->ainsn.insn = NULL;
p                 202 arch/arm/probes/kprobes/core.c static void __kprobes set_current_kprobe(struct kprobe *p)
p                 204 arch/arm/probes/kprobes/core.c 	__this_cpu_write(current_kprobe, p);
p                 208 arch/arm/probes/kprobes/core.c singlestep_skip(struct kprobe *p, struct pt_regs *regs)
p                 212 arch/arm/probes/kprobes/core.c 	if (is_wide_instruction(p->opcode))
p                 222 arch/arm/probes/kprobes/core.c singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
p                 224 arch/arm/probes/kprobes/core.c 	p->ainsn.insn_singlestep(p->opcode, &p->ainsn, regs);
p                 236 arch/arm/probes/kprobes/core.c 	struct kprobe *p, *cur;
p                 248 arch/arm/probes/kprobes/core.c 	p = get_kprobe((kprobe_opcode_t *)(regs->ARM_pc | 1));
p                 249 arch/arm/probes/kprobes/core.c 	if (!p)
p                 250 arch/arm/probes/kprobes/core.c 		p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
p                 253 arch/arm/probes/kprobes/core.c 	p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
p                 256 arch/arm/probes/kprobes/core.c 	if (p) {
p                 257 arch/arm/probes/kprobes/core.c 		if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
p                 264 arch/arm/probes/kprobes/core.c 			singlestep_skip(p, regs);
p                 272 arch/arm/probes/kprobes/core.c 				kprobes_inc_nmissed_count(p);
p                 274 arch/arm/probes/kprobes/core.c 				set_current_kprobe(p);
p                 276 arch/arm/probes/kprobes/core.c 				singlestep(p, regs, kcb);
p                 282 arch/arm/probes/kprobes/core.c 				dump_kprobe(p);
p                 290 arch/arm/probes/kprobes/core.c 			set_current_kprobe(p);
p                 300 arch/arm/probes/kprobes/core.c 			if (!p->pre_handler || !p->pre_handler(p, regs)) {
p                 302 arch/arm/probes/kprobes/core.c 				singlestep(p, regs, kcb);
p                 303 arch/arm/probes/kprobes/core.c 				if (p->post_handler) {
p                 305 arch/arm/probes/kprobes/core.c 					p->post_handler(p, regs, 0);
p                 502 arch/arm/probes/kprobes/core.c int __kprobes arch_trampoline_kprobe(struct kprobe *p)
p                 154 arch/arm/probes/kprobes/opt-arm.c 	struct kprobe *p = &op->kp;
p                 177 arch/arm/probes/kprobes/opt-arm.c 	if (!p->ainsn.kprobe_direct_exec)
p                 178 arch/arm/probes/kprobes/opt-arm.c 		op->kp.ainsn.insn_singlestep(p->opcode, &p->ainsn, regs);
p                 310 arch/arm/probes/kprobes/test-core.c static int __kprobes pre_handler(struct kprobe *p, struct pt_regs *regs)
p                 318 arch/arm/probes/kprobes/test-core.c static void __kprobes post_handler(struct kprobe *p, struct pt_regs *regs,
p                 498 arch/arm/probes/kprobes/test-core.c benchmark_pre_handler(struct kprobe *p, struct pt_regs *regs)
p                1191 arch/arm/probes/kprobes/test-core.c test_before_pre_handler(struct kprobe *p, struct pt_regs *regs)
p                1193 arch/arm/probes/kprobes/test-core.c 	container_of(p, struct test_probe, kprobe)->hit = test_instance;
p                1198 arch/arm/probes/kprobes/test-core.c test_before_post_handler(struct kprobe *p, struct pt_regs *regs,
p                1207 arch/arm/probes/kprobes/test-core.c test_case_pre_handler(struct kprobe *p, struct pt_regs *regs)
p                1209 arch/arm/probes/kprobes/test-core.c 	container_of(p, struct test_probe, kprobe)->hit = test_instance;
p                1214 arch/arm/probes/kprobes/test-core.c test_after_pre_handler(struct kprobe *p, struct pt_regs *regs)
p                1218 arch/arm/probes/kprobes/test-core.c 	if (container_of(p, struct test_probe, kprobe)->hit == test_instance)
p                1237 arch/arm/probes/kprobes/test-core.c 	container_of(p, struct test_probe, kprobe)->hit = test_instance;
p                1352 arch/arm/probes/kprobes/test-core.c 		u16 *p = (u16 *)(test_code & ~1);
p                1353 arch/arm/probes/kprobes/test-core.c 		current_instruction = __mem_to_opcode_thumb16(p[0]);
p                1355 arch/arm/probes/kprobes/test-core.c 			u16 instr2 = __mem_to_opcode_thumb16(p[1]);
p                 164 arch/arm64/crypto/aes-ce-ccm-glue.c 		u8 *p;
p                 170 arch/arm64/crypto/aes-ce-ccm-glue.c 		p = scatterwalk_map(&walk);
p                 171 arch/arm64/crypto/aes-ce-ccm-glue.c 		ccm_update_mac(ctx, mac, p, n, &macp);
p                 174 arch/arm64/crypto/aes-ce-ccm-glue.c 		scatterwalk_unmap(p);
p                 896 arch/arm64/crypto/aes-glue.c static int mac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
p                 911 arch/arm64/crypto/aes-glue.c 			mac_do_update(&tctx->key, p, blocks, ctx->dg,
p                 914 arch/arm64/crypto/aes-glue.c 			p += blocks * AES_BLOCK_SIZE;
p                 926 arch/arm64/crypto/aes-glue.c 			crypto_xor(ctx->dg + ctx->len, p, l);
p                 929 arch/arm64/crypto/aes-glue.c 			p += l;
p                 135 arch/arm64/crypto/ghash-ce-glue.c 			int p = GHASH_BLOCK_SIZE - partial;
p                 137 arch/arm64/crypto/ghash-ce-glue.c 			memcpy(ctx->buf + partial, src, p);
p                 138 arch/arm64/crypto/ghash-ce-glue.c 			src += p;
p                 139 arch/arm64/crypto/ghash-ce-glue.c 			len -= p;
p                 378 arch/arm64/crypto/ghash-ce-glue.c 		u8 *p;
p                 384 arch/arm64/crypto/ghash-ce-glue.c 		p = scatterwalk_map(&walk);
p                 386 arch/arm64/crypto/ghash-ce-glue.c 		gcm_update_mac(dg, p, n, buf, &buf_count, ctx);
p                 389 arch/arm64/crypto/ghash-ce-glue.c 		scatterwalk_unmap(p);
p                  43 arch/arm64/crypto/sha3-ce-glue.c 			int p = sctx->rsiz - sctx->partial;
p                  45 arch/arm64/crypto/sha3-ce-glue.c 			memcpy(sctx->buf + sctx->partial, data, p);
p                  50 arch/arm64/crypto/sha3-ce-glue.c 			data += p;
p                  51 arch/arm64/crypto/sha3-ce-glue.c 			len -= p;
p                 378 arch/arm64/include/asm/assembler.h 	dc	\op, \kaddr
p                 390 arch/arm64/include/asm/assembler.h 	.ifc	\op, cvau
p                 391 arch/arm64/include/asm/assembler.h 	__dcache_op_workaround_clean_cache \op, \kaddr
p                 393 arch/arm64/include/asm/assembler.h 	.ifc	\op, cvac
p                 394 arch/arm64/include/asm/assembler.h 	__dcache_op_workaround_clean_cache \op, \kaddr
p                 396 arch/arm64/include/asm/assembler.h 	.ifc	\op, cvap
p                 399 arch/arm64/include/asm/assembler.h 	.ifc	\op, cvadp
p                 402 arch/arm64/include/asm/assembler.h 	dc	\op, \kaddr
p                 648 arch/arm64/include/asm/assembler.h 	\op\()r		\reg1, [sp, #(\num + 1) * 8]
p                 650 arch/arm64/include/asm/assembler.h 	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
p                 655 arch/arm64/include/asm/assembler.h 	.ifc		\op, st
p                 674 arch/arm64/include/asm/assembler.h 	__frame_regs	x19, x20, \op, 1
p                 675 arch/arm64/include/asm/assembler.h 	__frame_regs	x21, x22, \op, 3
p                 676 arch/arm64/include/asm/assembler.h 	__frame_regs	x23, x24, \op, 5
p                 677 arch/arm64/include/asm/assembler.h 	__frame_regs	x25, x26, \op, 7
p                 678 arch/arm64/include/asm/assembler.h 	__frame_regs	x27, x28, \op, 9
p                 680 arch/arm64/include/asm/assembler.h 	.ifc		\op, ld
p                  64 arch/arm64/include/asm/barrier.h #define __smp_store_release(p, v)					\
p                  66 arch/arm64/include/asm/barrier.h 	typeof(p) __p = (p);						\
p                  67 arch/arm64/include/asm/barrier.h 	union { typeof(*p) __val; char __c[1]; } __u =			\
p                  68 arch/arm64/include/asm/barrier.h 		{ .__val = (__force typeof(*p)) (v) };			\
p                  69 arch/arm64/include/asm/barrier.h 	compiletime_assert_atomic_type(*p);				\
p                  70 arch/arm64/include/asm/barrier.h 	kasan_check_write(__p, sizeof(*p));				\
p                  71 arch/arm64/include/asm/barrier.h 	switch (sizeof(*p)) {						\
p                  99 arch/arm64/include/asm/barrier.h #define __smp_load_acquire(p)						\
p                 101 arch/arm64/include/asm/barrier.h 	union { typeof(*p) __val; char __c[1]; } __u;			\
p                 102 arch/arm64/include/asm/barrier.h 	typeof(p) __p = (p);						\
p                 103 arch/arm64/include/asm/barrier.h 	compiletime_assert_atomic_type(*p);				\
p                 104 arch/arm64/include/asm/barrier.h 	kasan_check_read(__p, sizeof(*p));				\
p                 105 arch/arm64/include/asm/barrier.h 	switch (sizeof(*p)) {						\
p                  30 arch/arm64/include/asm/efi.h #define arch_efi_call_virt(p, f, args...)				\
p                  33 arch/arm64/include/asm/efi.h 	__f = p->f;							\
p                 122 arch/arm64/include/asm/fpsimdmacros.h 	_sve_check_preg \np
p                 126 arch/arm64/include/asm/fpsimdmacros.h 		| (\np)				\
p                 134 arch/arm64/include/asm/fpsimdmacros.h 	_sve_check_preg \np
p                 138 arch/arm64/include/asm/fpsimdmacros.h 		| (\np)				\
p                 155 arch/arm64/include/asm/fpsimdmacros.h 	_sve_check_preg \np
p                 157 arch/arm64/include/asm/fpsimdmacros.h 		| (\np)
p                 162 arch/arm64/include/asm/fpsimdmacros.h 	_sve_check_preg \np
p                 164 arch/arm64/include/asm/fpsimdmacros.h 		| ((\np) << 5)
p                 187 arch/arm64/include/asm/io.h #define ioread16be(p)		({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(__v); __v; })
p                 188 arch/arm64/include/asm/io.h #define ioread32be(p)		({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(__v); __v; })
p                 189 arch/arm64/include/asm/io.h #define ioread64be(p)		({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(__v); __v; })
p                 191 arch/arm64/include/asm/io.h #define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
p                 192 arch/arm64/include/asm/io.h #define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
p                 193 arch/arm64/include/asm/io.h #define iowrite64be(v,p)	({ __iowmb(); __raw_writeq((__force __u64)cpu_to_be64(v), p); })
p                  21 arch/arm64/include/asm/kprobes.h #define flush_insn_slot(p)		do { } while (0)
p                  18 arch/arm64/include/asm/page.h extern void __cpu_clear_user_page(void *p, unsigned long user);
p                  21 arch/arm64/include/asm/preempt.h #define init_task_preempt_count(p) do { \
p                  22 arch/arm64/include/asm/preempt.h 	task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
p                  25 arch/arm64/include/asm/preempt.h #define init_idle_preempt_count(p, cpu) do { \
p                  26 arch/arm64/include/asm/preempt.h 	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
p                 243 arch/arm64/include/asm/processor.h unsigned long get_wchan(struct task_struct *p);
p                 254 arch/arm64/include/asm/processor.h #define task_pt_regs(p) \
p                 255 arch/arm64/include/asm/processor.h 	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
p                  56 arch/arm64/include/asm/smp.h extern void show_ipi_list(struct seq_file *p, int prec);
p                  18 arch/arm64/include/asm/sync_bitops.h #define sync_set_bit(nr, p)			set_bit(nr, p)
p                  19 arch/arm64/include/asm/sync_bitops.h #define sync_clear_bit(nr, p)			clear_bit(nr, p)
p                  20 arch/arm64/include/asm/sync_bitops.h #define sync_change_bit(nr, p)			change_bit(nr, p)
p                  21 arch/arm64/include/asm/sync_bitops.h #define sync_test_and_set_bit(nr, p)		test_and_set_bit(nr, p)
p                  22 arch/arm64/include/asm/sync_bitops.h #define sync_test_and_clear_bit(nr, p)		test_and_clear_bit(nr, p)
p                  23 arch/arm64/include/asm/sync_bitops.h #define sync_test_and_change_bit(nr, p)		test_and_change_bit(nr, p)
p                  30 arch/arm64/kernel/acpi_parking_protocol.c 				   struct acpi_madt_generic_interrupt *p)
p                  34 arch/arm64/kernel/acpi_parking_protocol.c 	cpu_entry->mailbox_addr = p->parked_address;
p                  35 arch/arm64/kernel/acpi_parking_protocol.c 	cpu_entry->version = p->parking_version;
p                  36 arch/arm64/kernel/acpi_parking_protocol.c 	cpu_entry->gic_cpu_id = p->cpu_interface_number;
p                  63 arch/arm64/kernel/cpufeature.c static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
p                1254 arch/arm64/kernel/cpufeature.c static int __init early_enable_pseudo_nmi(char *p)
p                1256 arch/arm64/kernel/cpufeature.c 	return strtobool(p, &enable_pseudo_nmi);
p                 415 arch/arm64/kernel/fpsimd.c 	__uint128_t *p;
p                 418 arch/arm64/kernel/fpsimd.c 		p = (__uint128_t *)ZREG(sst, vq, i);
p                 419 arch/arm64/kernel/fpsimd.c 		*p = arm64_cpu_to_le128(fst->vregs[i]);
p                 465 arch/arm64/kernel/fpsimd.c 	__uint128_t const *p;
p                 472 arch/arm64/kernel/fpsimd.c 		p = (__uint128_t const *)ZREG(sst, vq, i);
p                 473 arch/arm64/kernel/fpsimd.c 		fst->vregs[i] = arm64_le128_to_cpu(*p);
p                 808 arch/arm64/kernel/fpsimd.c void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
p                  32 arch/arm64/kernel/irq.c int arch_show_interrupts(struct seq_file *p, int prec)
p                  34 arch/arm64/kernel/irq.c 	show_ipi_list(p, prec);
p                  35 arch/arm64/kernel/irq.c 	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
p                  43 arch/arm64/kernel/irq.c 	unsigned long *p;
p                  46 arch/arm64/kernel/irq.c 		p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu));
p                  47 arch/arm64/kernel/irq.c 		per_cpu(irq_stack_ptr, cpu) = p;
p                  41 arch/arm64/kernel/module-plts.c 	u64 p, q;
p                  52 arch/arm64/kernel/module-plts.c 	p = ALIGN_DOWN((u64)a, SZ_4K);
p                  59 arch/arm64/kernel/module-plts.c 	if (a->adrp == b->adrp && p == q)
p                  62 arch/arm64/kernel/module-plts.c 	return (p + aarch64_insn_adrp_get_offset(le32_to_cpu(a->adrp))) ==
p                  26 arch/arm64/kernel/module.c 	void *p;
p                  36 arch/arm64/kernel/module.c 	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
p                  40 arch/arm64/kernel/module.c 	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
p                  51 arch/arm64/kernel/module.c 		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
p                  56 arch/arm64/kernel/module.c 	if (p && (kasan_module_alloc(p, size) < 0)) {
p                  57 arch/arm64/kernel/module.c 		vfree(p);
p                  61 arch/arm64/kernel/module.c 	return p;
p                  50 arch/arm64/kernel/probes/kprobes.c static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
p                  53 arch/arm64/kernel/probes/kprobes.c 	patch_text(p->ainsn.api.insn, p->opcode);
p                  55 arch/arm64/kernel/probes/kprobes.c 	flush_icache_range((uintptr_t) (p->ainsn.api.insn),
p                  56 arch/arm64/kernel/probes/kprobes.c 			   (uintptr_t) (p->ainsn.api.insn) +
p                  62 arch/arm64/kernel/probes/kprobes.c 	p->ainsn.api.restore = (unsigned long) p->addr +
p                  66 arch/arm64/kernel/probes/kprobes.c static void __kprobes arch_prepare_simulate(struct kprobe *p)
p                  69 arch/arm64/kernel/probes/kprobes.c 	p->ainsn.api.restore = 0;
p                  72 arch/arm64/kernel/probes/kprobes.c static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
p                  76 arch/arm64/kernel/probes/kprobes.c 	if (p->ainsn.api.handler)
p                  77 arch/arm64/kernel/probes/kprobes.c 		p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);
p                  83 arch/arm64/kernel/probes/kprobes.c int __kprobes arch_prepare_kprobe(struct kprobe *p)
p                  85 arch/arm64/kernel/probes/kprobes.c 	unsigned long probe_addr = (unsigned long)p->addr;
p                  91 arch/arm64/kernel/probes/kprobes.c 	p->opcode = le32_to_cpu(*p->addr);
p                  97 arch/arm64/kernel/probes/kprobes.c 	switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
p                 102 arch/arm64/kernel/probes/kprobes.c 		p->ainsn.api.insn = NULL;
p                 106 arch/arm64/kernel/probes/kprobes.c 		p->ainsn.api.insn = get_insn_slot();
p                 107 arch/arm64/kernel/probes/kprobes.c 		if (!p->ainsn.api.insn)
p                 113 arch/arm64/kernel/probes/kprobes.c 	if (p->ainsn.api.insn)
p                 114 arch/arm64/kernel/probes/kprobes.c 		arch_prepare_ss_slot(p);
p                 116 arch/arm64/kernel/probes/kprobes.c 		arch_prepare_simulate(p);
p                 135 arch/arm64/kernel/probes/kprobes.c void __kprobes arch_arm_kprobe(struct kprobe *p)
p                 137 arch/arm64/kernel/probes/kprobes.c 	patch_text(p->addr, BRK64_OPCODE_KPROBES);
p                 141 arch/arm64/kernel/probes/kprobes.c void __kprobes arch_disarm_kprobe(struct kprobe *p)
p                 143 arch/arm64/kernel/probes/kprobes.c 	patch_text(p->addr, p->opcode);
p                 146 arch/arm64/kernel/probes/kprobes.c void __kprobes arch_remove_kprobe(struct kprobe *p)
p                 148 arch/arm64/kernel/probes/kprobes.c 	if (p->ainsn.api.insn) {
p                 149 arch/arm64/kernel/probes/kprobes.c 		free_insn_slot(p->ainsn.api.insn, 0);
p                 150 arch/arm64/kernel/probes/kprobes.c 		p->ainsn.api.insn = NULL;
p                 166 arch/arm64/kernel/probes/kprobes.c static void __kprobes set_current_kprobe(struct kprobe *p)
p                 168 arch/arm64/kernel/probes/kprobes.c 	__this_cpu_write(current_kprobe, p);
p                 208 arch/arm64/kernel/probes/kprobes.c static void __kprobes setup_singlestep(struct kprobe *p,
p                 216 arch/arm64/kernel/probes/kprobes.c 		set_current_kprobe(p);
p                 223 arch/arm64/kernel/probes/kprobes.c 	if (p->ainsn.api.insn) {
p                 225 arch/arm64/kernel/probes/kprobes.c 		slot = (unsigned long)p->ainsn.api.insn;
p                 235 arch/arm64/kernel/probes/kprobes.c 		arch_simulate_insn(p, regs);
p                 239 arch/arm64/kernel/probes/kprobes.c static int __kprobes reenter_kprobe(struct kprobe *p,
p                 246 arch/arm64/kernel/probes/kprobes.c 		kprobes_inc_nmissed_count(p);
p                 247 arch/arm64/kernel/probes/kprobes.c 		setup_singlestep(p, regs, kcb, 1);
p                 252 arch/arm64/kernel/probes/kprobes.c 		dump_kprobe(p);
p                 350 arch/arm64/kernel/probes/kprobes.c 	struct kprobe *p, *cur_kprobe;
p                 357 arch/arm64/kernel/probes/kprobes.c 	p = get_kprobe((kprobe_opcode_t *) addr);
p                 359 arch/arm64/kernel/probes/kprobes.c 	if (p) {
p                 361 arch/arm64/kernel/probes/kprobes.c 			if (reenter_kprobe(p, regs, kcb))
p                 365 arch/arm64/kernel/probes/kprobes.c 			set_current_kprobe(p);
p                 379 arch/arm64/kernel/probes/kprobes.c 			if (!p->pre_handler || !p->pre_handler(p, regs)) {
p                 380 arch/arm64/kernel/probes/kprobes.c 				setup_singlestep(p, regs, kcb, 0);
p                 563 arch/arm64/kernel/probes/kprobes.c int __kprobes arch_trampoline_kprobe(struct kprobe *p)
p                 364 arch/arm64/kernel/process.c 		unsigned long stk_sz, struct task_struct *p, unsigned long tls)
p                 366 arch/arm64/kernel/process.c 	struct pt_regs *childregs = task_pt_regs(p);
p                 368 arch/arm64/kernel/process.c 	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
p                 377 arch/arm64/kernel/process.c 	fpsimd_flush_task_state(p);
p                 379 arch/arm64/kernel/process.c 	if (likely(!(p->flags & PF_KTHREAD))) {
p                 387 arch/arm64/kernel/process.c 		*task_user_tls(p) = read_sysreg(tpidr_el0);
p                 390 arch/arm64/kernel/process.c 			if (is_compat_thread(task_thread_info(p)))
p                 401 arch/arm64/kernel/process.c 			p->thread.uw.tp_value = tls;
p                 415 arch/arm64/kernel/process.c 		p->thread.cpu_context.x19 = stack_start;
p                 416 arch/arm64/kernel/process.c 		p->thread.cpu_context.x20 = stk_sz;
p                 418 arch/arm64/kernel/process.c 	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
p                 419 arch/arm64/kernel/process.c 	p->thread.cpu_context.sp = (unsigned long)childregs;
p                 421 arch/arm64/kernel/process.c 	ptrace_hw_copy_thread(p);
p                 532 arch/arm64/kernel/process.c unsigned long get_wchan(struct task_struct *p)
p                 537 arch/arm64/kernel/process.c 	if (!p || p == current || p->state == TASK_RUNNING)
p                 540 arch/arm64/kernel/process.c 	stack_page = (unsigned long)try_get_task_stack(p);
p                 544 arch/arm64/kernel/process.c 	start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));
p                 547 arch/arm64/kernel/process.c 		if (unwind_frame(p, &frame))
p                 556 arch/arm64/kernel/process.c 	put_task_stack(p);
p                  41 arch/arm64/kernel/sdei.c 	unsigned long *p;
p                  43 arch/arm64/kernel/sdei.c 	p = per_cpu(*ptr, cpu);
p                  44 arch/arm64/kernel/sdei.c 	if (p) {
p                  46 arch/arm64/kernel/sdei.c 		vfree(p);
p                  62 arch/arm64/kernel/sdei.c 	unsigned long *p;
p                  64 arch/arm64/kernel/sdei.c 	p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
p                  65 arch/arm64/kernel/sdei.c 	if (!p)
p                  67 arch/arm64/kernel/sdei.c 	per_cpu(*ptr, cpu) = p;
p                 397 arch/arm64/kernel/setup.c 			      void *p)
p                 780 arch/arm64/kernel/smp.c void show_ipi_list(struct seq_file *p, int prec)
p                 785 arch/arm64/kernel/smp.c 		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
p                 788 arch/arm64/kernel/smp.c 			seq_printf(p, "%10u ",
p                 790 arch/arm64/kernel/smp.c 		seq_printf(p, "      %s\n", ipi_types[i]);
p                  63 arch/arm64/kernel/traps.c 	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
p                  75 arch/arm64/kernel/traps.c 			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
p                  77 arch/arm64/kernel/traps.c 			p += sprintf(p, "bad PC value");
p                 180 arch/arm64/kvm/sys_regs.c 			struct sys_reg_params *p,
p                 183 arch/arm64/kvm/sys_regs.c 	if (!p->is_write)
p                 184 arch/arm64/kvm/sys_regs.c 		return read_from_write_only(vcpu, p, r);
p                 205 arch/arm64/kvm/sys_regs.c 			  struct sys_reg_params *p,
p                 212 arch/arm64/kvm/sys_regs.c 	BUG_ON(!p->is_write);
p                 215 arch/arm64/kvm/sys_regs.c 	if (p->is_aarch32)
p                 218 arch/arm64/kvm/sys_regs.c 	if (!p->is_aarch32 || !p->is_32bit) {
p                 219 arch/arm64/kvm/sys_regs.c 		val = p->regval;
p                 223 arch/arm64/kvm/sys_regs.c 			val = (p->regval << 32) | (u64)lower_32_bits(val);
p                 226 arch/arm64/kvm/sys_regs.c 				lower_32_bits(p->regval);
p                 241 arch/arm64/kvm/sys_regs.c 			   struct sys_reg_params *p,
p                 246 arch/arm64/kvm/sys_regs.c 	if (!p->is_write)
p                 247 arch/arm64/kvm/sys_regs.c 		return read_from_write_only(vcpu, p, r);
p                 256 arch/arm64/kvm/sys_regs.c 	if (p->is_aarch32) {
p                 257 arch/arm64/kvm/sys_regs.c 		switch (p->Op1) {
p                 268 arch/arm64/kvm/sys_regs.c 		switch (p->Op2) {
p                 280 arch/arm64/kvm/sys_regs.c 	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);
p                 286 arch/arm64/kvm/sys_regs.c 			   struct sys_reg_params *p,
p                 289 arch/arm64/kvm/sys_regs.c 	if (p->is_write)
p                 290 arch/arm64/kvm/sys_regs.c 		return ignore_write(vcpu, p);
p                 292 arch/arm64/kvm/sys_regs.c 	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
p                 297 arch/arm64/kvm/sys_regs.c 			struct sys_reg_params *p,
p                 300 arch/arm64/kvm/sys_regs.c 	if (p->is_write)
p                 301 arch/arm64/kvm/sys_regs.c 		return ignore_write(vcpu, p);
p                 303 arch/arm64/kvm/sys_regs.c 		return read_zero(vcpu, p);
p                 313 arch/arm64/kvm/sys_regs.c 			  struct sys_reg_params *p,
p                 325 arch/arm64/kvm/sys_regs.c 	if (p->is_write && sr == SYS_LORID_EL1)
p                 326 arch/arm64/kvm/sys_regs.c 		return write_to_read_only(vcpu, p, r);
p                 328 arch/arm64/kvm/sys_regs.c 	return trap_raz_wi(vcpu, p, r);
p                 332 arch/arm64/kvm/sys_regs.c 			   struct sys_reg_params *p,
p                 335 arch/arm64/kvm/sys_regs.c 	if (p->is_write) {
p                 336 arch/arm64/kvm/sys_regs.c 		return ignore_write(vcpu, p);
p                 338 arch/arm64/kvm/sys_regs.c 		p->regval = (1 << 3);
p                 344 arch/arm64/kvm/sys_regs.c 				   struct sys_reg_params *p,
p                 347 arch/arm64/kvm/sys_regs.c 	if (p->is_write) {
p                 348 arch/arm64/kvm/sys_regs.c 		return ignore_write(vcpu, p);
p                 350 arch/arm64/kvm/sys_regs.c 		p->regval = read_sysreg(dbgauthstatus_el1);
p                 383 arch/arm64/kvm/sys_regs.c 			    struct sys_reg_params *p,
p                 386 arch/arm64/kvm/sys_regs.c 	if (p->is_write) {
p                 387 arch/arm64/kvm/sys_regs.c 		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
p                 390 arch/arm64/kvm/sys_regs.c 		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
p                 393 arch/arm64/kvm/sys_regs.c 	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
p                 408 arch/arm64/kvm/sys_regs.c 		       struct sys_reg_params *p,
p                 411 arch/arm64/kvm/sys_regs.c 	u64 val = p->regval;
p                 413 arch/arm64/kvm/sys_regs.c 	if (p->is_32bit) {
p                 423 arch/arm64/kvm/sys_regs.c 		       struct sys_reg_params *p,
p                 426 arch/arm64/kvm/sys_regs.c 	p->regval = *dbg_reg;
p                 427 arch/arm64/kvm/sys_regs.c 	if (p->is_32bit)
p                 428 arch/arm64/kvm/sys_regs.c 		p->regval &= 0xffffffffUL;
p                 432 arch/arm64/kvm/sys_regs.c 		     struct sys_reg_params *p,
p                 437 arch/arm64/kvm/sys_regs.c 	if (p->is_write)
p                 438 arch/arm64/kvm/sys_regs.c 		reg_to_dbg(vcpu, p, dbg_reg);
p                 440 arch/arm64/kvm/sys_regs.c 		dbg_to_reg(vcpu, p, dbg_reg);
p                 442 arch/arm64/kvm/sys_regs.c 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
p                 474 arch/arm64/kvm/sys_regs.c 		     struct sys_reg_params *p,
p                 479 arch/arm64/kvm/sys_regs.c 	if (p->is_write)
p                 480 arch/arm64/kvm/sys_regs.c 		reg_to_dbg(vcpu, p, dbg_reg);
p                 482 arch/arm64/kvm/sys_regs.c 		dbg_to_reg(vcpu, p, dbg_reg);
p                 484 arch/arm64/kvm/sys_regs.c 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
p                 517 arch/arm64/kvm/sys_regs.c 		     struct sys_reg_params *p,
p                 522 arch/arm64/kvm/sys_regs.c 	if (p->is_write)
p                 523 arch/arm64/kvm/sys_regs.c 		reg_to_dbg(vcpu, p, dbg_reg);
p                 525 arch/arm64/kvm/sys_regs.c 		dbg_to_reg(vcpu, p, dbg_reg);
p                 527 arch/arm64/kvm/sys_regs.c 	trace_trap_reg(__func__, rd->reg, p->is_write,
p                 560 arch/arm64/kvm/sys_regs.c 		     struct sys_reg_params *p,
p                 565 arch/arm64/kvm/sys_regs.c 	if (p->is_write)
p                 566 arch/arm64/kvm/sys_regs.c 		reg_to_dbg(vcpu, p, dbg_reg);
p                 568 arch/arm64/kvm/sys_regs.c 		dbg_to_reg(vcpu, p, dbg_reg);
p                 570 arch/arm64/kvm/sys_regs.c 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
p                 671 arch/arm64/kvm/sys_regs.c static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                 677 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
p                 682 arch/arm64/kvm/sys_regs.c 	if (p->is_write) {
p                 686 arch/arm64/kvm/sys_regs.c 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
p                 696 arch/arm64/kvm/sys_regs.c 		p->regval = val;
p                 702 arch/arm64/kvm/sys_regs.c static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                 706 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
p                 711 arch/arm64/kvm/sys_regs.c 	if (p->is_write)
p                 712 arch/arm64/kvm/sys_regs.c 		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
p                 715 arch/arm64/kvm/sys_regs.c 		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
p                 721 arch/arm64/kvm/sys_regs.c static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                 727 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
p                 729 arch/arm64/kvm/sys_regs.c 	BUG_ON(p->is_write);
p                 734 arch/arm64/kvm/sys_regs.c 	if (!(p->Op2 & 1))
p                 739 arch/arm64/kvm/sys_regs.c 	p->regval = pmceid;
p                 759 arch/arm64/kvm/sys_regs.c 			      struct sys_reg_params *p,
p                 765 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
p                 803 arch/arm64/kvm/sys_regs.c 	if (p->is_write) {
p                 807 arch/arm64/kvm/sys_regs.c 		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
p                 809 arch/arm64/kvm/sys_regs.c 		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
p                 815 arch/arm64/kvm/sys_regs.c static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                 821 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
p                 844 arch/arm64/kvm/sys_regs.c 	if (p->is_write) {
p                 845 arch/arm64/kvm/sys_regs.c 		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
p                 846 arch/arm64/kvm/sys_regs.c 		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
p                 849 arch/arm64/kvm/sys_regs.c 		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
p                 855 arch/arm64/kvm/sys_regs.c static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                 861 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
p                 867 arch/arm64/kvm/sys_regs.c 	if (p->is_write) {
p                 868 arch/arm64/kvm/sys_regs.c 		val = p->regval & mask;
p                 880 arch/arm64/kvm/sys_regs.c 		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
p                 886 arch/arm64/kvm/sys_regs.c static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                 892 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
p                 899 arch/arm64/kvm/sys_regs.c 	if (p->is_write) {
p                 900 arch/arm64/kvm/sys_regs.c 		u64 val = p->regval & mask;
p                 909 arch/arm64/kvm/sys_regs.c 		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
p                 915 arch/arm64/kvm/sys_regs.c static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                 921 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
p                 926 arch/arm64/kvm/sys_regs.c 	if (p->is_write) {
p                 929 arch/arm64/kvm/sys_regs.c 			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
p                 932 arch/arm64/kvm/sys_regs.c 			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
p                 934 arch/arm64/kvm/sys_regs.c 		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
p                 940 arch/arm64/kvm/sys_regs.c static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                 946 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
p                 948 arch/arm64/kvm/sys_regs.c 	if (!p->is_write)
p                 949 arch/arm64/kvm/sys_regs.c 		return read_from_write_only(vcpu, p, r);
p                 955 arch/arm64/kvm/sys_regs.c 	kvm_pmu_software_increment(vcpu, p->regval & mask);
p                 959 arch/arm64/kvm/sys_regs.c static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                 963 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
p                 965 arch/arm64/kvm/sys_regs.c 	if (p->is_write) {
p                 972 arch/arm64/kvm/sys_regs.c 			       p->regval & ARMV8_PMU_USERENR_MASK;
p                 974 arch/arm64/kvm/sys_regs.c 		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
p                1007 arch/arm64/kvm/sys_regs.c 			 struct sys_reg_params *p,
p                1038 arch/arm64/kvm/sys_regs.c 			      struct sys_reg_params *p,
p                1065 arch/arm64/kvm/sys_regs.c 	if (p->is_write)
p                1066 arch/arm64/kvm/sys_regs.c 		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
p                1068 arch/arm64/kvm/sys_regs.c 		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
p                1096 arch/arm64/kvm/sys_regs.c 			    struct sys_reg_params *p,
p                1100 arch/arm64/kvm/sys_regs.c 	if (p->is_write)
p                1101 arch/arm64/kvm/sys_regs.c 		return write_to_read_only(vcpu, p, r);
p                1103 arch/arm64/kvm/sys_regs.c 	p->regval = read_id_reg(vcpu, r, raz);
p                1108 arch/arm64/kvm/sys_regs.c 			  struct sys_reg_params *p,
p                1111 arch/arm64/kvm/sys_regs.c 	return __access_id_reg(vcpu, p, r, false);
p                1115 arch/arm64/kvm/sys_regs.c 			      struct sys_reg_params *p,
p                1118 arch/arm64/kvm/sys_regs.c 	return __access_id_reg(vcpu, p, r, true);
p                1155 arch/arm64/kvm/sys_regs.c 				   struct sys_reg_params *p,
p                1158 arch/arm64/kvm/sys_regs.c 	if (p->is_write)
p                1159 arch/arm64/kvm/sys_regs.c 		return write_to_read_only(vcpu, p, rd);
p                1161 arch/arm64/kvm/sys_regs.c 	p->regval = guest_id_aa64zfr0_el1(vcpu);
p                1260 arch/arm64/kvm/sys_regs.c static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                1263 arch/arm64/kvm/sys_regs.c 	if (p->is_write)
p                1264 arch/arm64/kvm/sys_regs.c 		return write_to_read_only(vcpu, p, r);
p                1266 arch/arm64/kvm/sys_regs.c 	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
p                1270 arch/arm64/kvm/sys_regs.c static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                1273 arch/arm64/kvm/sys_regs.c 	if (p->is_write)
p                1274 arch/arm64/kvm/sys_regs.c 		return write_to_read_only(vcpu, p, r);
p                1276 arch/arm64/kvm/sys_regs.c 	p->regval = read_sysreg(clidr_el1);
p                1280 arch/arm64/kvm/sys_regs.c static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                1286 arch/arm64/kvm/sys_regs.c 	if (p->is_aarch32)
p                1289 arch/arm64/kvm/sys_regs.c 	if (p->is_write)
p                1290 arch/arm64/kvm/sys_regs.c 		vcpu_write_sys_reg(vcpu, p->regval, reg);
p                1292 arch/arm64/kvm/sys_regs.c 		p->regval = vcpu_read_sys_reg(vcpu, reg);
p                1296 arch/arm64/kvm/sys_regs.c static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                1301 arch/arm64/kvm/sys_regs.c 	if (p->is_write)
p                1302 arch/arm64/kvm/sys_regs.c 		return write_to_read_only(vcpu, p, r);
p                1305 arch/arm64/kvm/sys_regs.c 	p->regval = get_ccsidr(csselr);
p                1320 arch/arm64/kvm/sys_regs.c 		p->regval &= ~GENMASK(27, 3);
p                1654 arch/arm64/kvm/sys_regs.c 			struct sys_reg_params *p,
p                1657 arch/arm64/kvm/sys_regs.c 	if (p->is_write) {
p                1658 arch/arm64/kvm/sys_regs.c 		return ignore_write(vcpu, p);
p                1664 arch/arm64/kvm/sys_regs.c 		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
p                1673 arch/arm64/kvm/sys_regs.c 			 struct sys_reg_params *p,
p                1676 arch/arm64/kvm/sys_regs.c 	if (p->is_write) {
p                1677 arch/arm64/kvm/sys_regs.c 		vcpu_cp14(vcpu, r->reg) = p->regval;
p                1680 arch/arm64/kvm/sys_regs.c 		p->regval = vcpu_cp14(vcpu, r->reg);
p                1698 arch/arm64/kvm/sys_regs.c 		     struct sys_reg_params *p,
p                1703 arch/arm64/kvm/sys_regs.c 	if (p->is_write) {
p                1707 arch/arm64/kvm/sys_regs.c 		val |= p->regval << 32;
p                1712 arch/arm64/kvm/sys_regs.c 		p->regval = *dbg_reg >> 32;
p                1715 arch/arm64/kvm/sys_regs.c 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
p                  65 arch/arm64/kvm/sys_regs.h static inline void print_sys_reg_instr(const struct sys_reg_params *p)
p                  69 arch/arm64/kvm/sys_regs.h 		      p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? "write" : "read");
p                  73 arch/arm64/kvm/sys_regs.h 				const struct sys_reg_params *p)
p                  79 arch/arm64/kvm/sys_regs.h 			     struct sys_reg_params *p)
p                  81 arch/arm64/kvm/sys_regs.h 	p->regval = 0;
p                  24 arch/arm64/kvm/sys_regs_generic_v8.c 			 struct sys_reg_params *p,
p                  27 arch/arm64/kvm/sys_regs_generic_v8.c 	if (p->is_write)
p                  28 arch/arm64/kvm/sys_regs_generic_v8.c 		return ignore_write(vcpu, p);
p                  30 arch/arm64/kvm/sys_regs_generic_v8.c 	p->regval = vcpu_read_sys_reg(vcpu, ACTLR_EL1);
p                  13 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                  22 arch/arm64/kvm/vgic-sys-reg-v3.c 	if (p->is_write) {
p                  23 arch/arm64/kvm/vgic-sys-reg-v3.c 		val = p->regval;
p                  81 arch/arm64/kvm/vgic-sys-reg-v3.c 		p->regval = val;
p                  87 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_pmr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                  93 arch/arm64/kvm/vgic-sys-reg-v3.c 	if (p->is_write) {
p                  94 arch/arm64/kvm/vgic-sys-reg-v3.c 		vmcr.pmr = (p->regval & ICC_PMR_EL1_MASK) >> ICC_PMR_EL1_SHIFT;
p                  97 arch/arm64/kvm/vgic-sys-reg-v3.c 		p->regval = (vmcr.pmr << ICC_PMR_EL1_SHIFT) & ICC_PMR_EL1_MASK;
p                 103 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_bpr0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                 109 arch/arm64/kvm/vgic-sys-reg-v3.c 	if (p->is_write) {
p                 110 arch/arm64/kvm/vgic-sys-reg-v3.c 		vmcr.bpr = (p->regval & ICC_BPR0_EL1_MASK) >>
p                 114 arch/arm64/kvm/vgic-sys-reg-v3.c 		p->regval = (vmcr.bpr << ICC_BPR0_EL1_SHIFT) &
p                 121 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                 126 arch/arm64/kvm/vgic-sys-reg-v3.c 	if (!p->is_write)
p                 127 arch/arm64/kvm/vgic-sys-reg-v3.c 		p->regval = 0;
p                 131 arch/arm64/kvm/vgic-sys-reg-v3.c 		if (p->is_write) {
p                 132 arch/arm64/kvm/vgic-sys-reg-v3.c 			vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
p                 136 arch/arm64/kvm/vgic-sys-reg-v3.c 			p->regval = (vmcr.abpr << ICC_BPR1_EL1_SHIFT) &
p                 140 arch/arm64/kvm/vgic-sys-reg-v3.c 		if (!p->is_write)
p                 141 arch/arm64/kvm/vgic-sys-reg-v3.c 			p->regval = min((vmcr.bpr + 1), 7U);
p                 147 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_grpen0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                 153 arch/arm64/kvm/vgic-sys-reg-v3.c 	if (p->is_write) {
p                 154 arch/arm64/kvm/vgic-sys-reg-v3.c 		vmcr.grpen0 = (p->regval & ICC_IGRPEN0_EL1_MASK) >>
p                 158 arch/arm64/kvm/vgic-sys-reg-v3.c 		p->regval = (vmcr.grpen0 << ICC_IGRPEN0_EL1_SHIFT) &
p                 165 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_grpen1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                 171 arch/arm64/kvm/vgic-sys-reg-v3.c 	if (p->is_write) {
p                 172 arch/arm64/kvm/vgic-sys-reg-v3.c 		vmcr.grpen1 = (p->regval & ICC_IGRPEN1_EL1_MASK) >>
p                 176 arch/arm64/kvm/vgic-sys-reg-v3.c 		p->regval = (vmcr.grpen1 << ICC_IGRPEN1_EL1_SHIFT) &
p                 184 arch/arm64/kvm/vgic-sys-reg-v3.c 				   struct sys_reg_params *p, u8 apr, u8 idx)
p                 194 arch/arm64/kvm/vgic-sys-reg-v3.c 	if (p->is_write)
p                 195 arch/arm64/kvm/vgic-sys-reg-v3.c 		*ap_reg = p->regval;
p                 197 arch/arm64/kvm/vgic-sys-reg-v3.c 		p->regval = *ap_reg;
p                 200 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_aprn(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                 208 arch/arm64/kvm/vgic-sys-reg-v3.c 	vgic_v3_access_apr_reg(vcpu, p, apr, idx);
p                 211 arch/arm64/kvm/vgic-sys-reg-v3.c 	if (!p->is_write)
p                 212 arch/arm64/kvm/vgic-sys-reg-v3.c 		p->regval = 0;
p                 217 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_ap0r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                 221 arch/arm64/kvm/vgic-sys-reg-v3.c 	return access_gic_aprn(vcpu, p, r, 0);
p                 224 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_ap1r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                 227 arch/arm64/kvm/vgic-sys-reg-v3.c 	return access_gic_aprn(vcpu, p, r, 1);
p                 230 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_sre(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p                 236 arch/arm64/kvm/vgic-sys-reg-v3.c 	if (p->is_write) {
p                 237 arch/arm64/kvm/vgic-sys-reg-v3.c 		if (!(p->regval & ICC_SRE_EL1_SRE))
p                 240 arch/arm64/kvm/vgic-sys-reg-v3.c 		p->regval = vgicv3->vgic_sre;
p                 264 arch/arm64/mm/init.c static int __init early_mem(char *p)
p                 266 arch/arm64/mm/init.c 	if (!p)
p                 269 arch/arm64/mm/init.c 	memory_limit = memparse(p, &p) & PAGE_MASK;
p                 598 arch/arm64/mm/init.c static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
p                  36 arch/arm64/mm/kasan_init.c 	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
p                  39 arch/arm64/mm/kasan_init.c 	if (!p)
p                  44 arch/arm64/mm/kasan_init.c 	return __pa(p);
p                  49 arch/arm64/mm/kasan_init.c 	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
p                  52 arch/arm64/mm/kasan_init.c 	if (!p)
p                  57 arch/arm64/mm/kasan_init.c 	return __pa(p);
p                 758 arch/arm64/mm/mmu.c 			void *p = NULL;
p                 760 arch/arm64/mm/mmu.c 			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
p                 761 arch/arm64/mm/mmu.c 			if (!p)
p                 764 arch/arm64/mm/mmu.c 			pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
p                  96 arch/c6x/include/asm/processor.h extern unsigned long get_wchan(struct task_struct *p);
p                  19 arch/c6x/include/asm/unaligned.h static inline u16 get_unaligned_le16(const void *p)
p                  21 arch/c6x/include/asm/unaligned.h 	const u8 *_p = p;
p                  25 arch/c6x/include/asm/unaligned.h static inline u16 get_unaligned_be16(const void *p)
p                  27 arch/c6x/include/asm/unaligned.h 	const u8 *_p = p;
p                  31 arch/c6x/include/asm/unaligned.h static inline void put_unaligned_le16(u16 val, void *p)
p                  33 arch/c6x/include/asm/unaligned.h 	u8 *_p = p;
p                  38 arch/c6x/include/asm/unaligned.h static inline void put_unaligned_be16(u16 val, void *p)
p                  40 arch/c6x/include/asm/unaligned.h 	u8 *_p = p;
p                  45 arch/c6x/include/asm/unaligned.h static inline u32 get_unaligned32(const void *p)
p                  47 arch/c6x/include/asm/unaligned.h 	u32 val = (u32) p;
p                  54 arch/c6x/include/asm/unaligned.h static inline void put_unaligned32(u32 val, void *p)
p                  57 arch/c6x/include/asm/unaligned.h 		      : : "a"(val), "b"(p) : "memory");
p                  60 arch/c6x/include/asm/unaligned.h static inline u64 get_unaligned64(const void *p)
p                  65 arch/c6x/include/asm/unaligned.h 		      : "=a"(val) : "a"(p));
p                  69 arch/c6x/include/asm/unaligned.h static inline void put_unaligned64(u64 val, const void *p)
p                  72 arch/c6x/include/asm/unaligned.h 		      : : "a"(val), "b"(p) : "memory");
p                  77 arch/c6x/include/asm/unaligned.h #define get_unaligned_le32(p)	 __swab32(get_unaligned32(p))
p                  78 arch/c6x/include/asm/unaligned.h #define get_unaligned_le64(p)	 __swab64(get_unaligned64(p))
p                  79 arch/c6x/include/asm/unaligned.h #define get_unaligned_be32(p)	 get_unaligned32(p)
p                  80 arch/c6x/include/asm/unaligned.h #define get_unaligned_be64(p)	 get_unaligned64(p)
p                  81 arch/c6x/include/asm/unaligned.h #define put_unaligned_le32(v, p) put_unaligned32(__swab32(v), (p))
p                  82 arch/c6x/include/asm/unaligned.h #define put_unaligned_le64(v, p) put_unaligned64(__swab64(v), (p))
p                  83 arch/c6x/include/asm/unaligned.h #define put_unaligned_be32(v, p) put_unaligned32((v), (p))
p                  84 arch/c6x/include/asm/unaligned.h #define put_unaligned_be64(v, p) put_unaligned64((v), (p))
p                  90 arch/c6x/include/asm/unaligned.h #define get_unaligned_le32(p)	 get_unaligned32(p)
p                  91 arch/c6x/include/asm/unaligned.h #define get_unaligned_le64(p)	 get_unaligned64(p)
p                  92 arch/c6x/include/asm/unaligned.h #define get_unaligned_be32(p)	 __swab32(get_unaligned32(p))
p                  93 arch/c6x/include/asm/unaligned.h #define get_unaligned_be64(p)	 __swab64(get_unaligned64(p))
p                  94 arch/c6x/include/asm/unaligned.h #define put_unaligned_le32(v, p) put_unaligned32((v), (p))
p                  95 arch/c6x/include/asm/unaligned.h #define put_unaligned_le64(v, p) put_unaligned64((v), (p))
p                  96 arch/c6x/include/asm/unaligned.h #define put_unaligned_be32(v, p) put_unaligned32(__swab32(v), (p))
p                  97 arch/c6x/include/asm/unaligned.h #define put_unaligned_be64(v, p) put_unaligned64(__swab64(v), (p))
p                 123 arch/c6x/kernel/irq.c int arch_show_interrupts(struct seq_file *p, int prec)
p                 125 arch/c6x/kernel/irq.c 	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
p                 109 arch/c6x/kernel/process.c 		struct task_struct *p)
p                 113 arch/c6x/kernel/process.c 	childregs = task_pt_regs(p);
p                 115 arch/c6x/kernel/process.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 119 arch/c6x/kernel/process.c 		p->thread.pc = (unsigned long) ret_from_kernel_thread;
p                 127 arch/c6x/kernel/process.c 		p->thread.pc = (unsigned long) ret_from_fork;
p                 131 arch/c6x/kernel/process.c 	p->thread.usp = childregs->sp;
p                 132 arch/c6x/kernel/process.c 	thread_saved_ksp(p) = (unsigned long)childregs - 8;
p                 133 arch/c6x/kernel/process.c 	p->thread.wchan	= p->thread.pc;
p                 140 arch/c6x/kernel/process.c 		thread_saved_dp(p) = dp;
p                 148 arch/c6x/kernel/process.c unsigned long get_wchan(struct task_struct *p)
p                 150 arch/c6x/kernel/process.c 	return p->thread.wchan;
p                  94 arch/c6x/kernel/setup.c 	struct cpuinfo_c6x *p;
p                  97 arch/c6x/kernel/setup.c 	p = &per_cpu(cpu_data, smp_processor_id());
p                 117 arch/c6x/kernel/setup.c 	p->mmu = "none";
p                 118 arch/c6x/kernel/setup.c 	p->fpu = "none";
p                 119 arch/c6x/kernel/setup.c 	p->cpu_voltage = "unknown";
p                 123 arch/c6x/kernel/setup.c 		p->cpu_name = "C67x";
p                 124 arch/c6x/kernel/setup.c 		p->fpu = "yes";
p                 127 arch/c6x/kernel/setup.c 		p->cpu_name = "C62x";
p                 130 arch/c6x/kernel/setup.c 		p->cpu_name = "C64x";
p                 133 arch/c6x/kernel/setup.c 		p->cpu_name = "C64x";
p                 136 arch/c6x/kernel/setup.c 		p->cpu_name = "C64x+";
p                 137 arch/c6x/kernel/setup.c 		p->cpu_voltage = "1.2";
p                 140 arch/c6x/kernel/setup.c 		p->cpu_name = "C66X";
p                 141 arch/c6x/kernel/setup.c 		p->cpu_voltage = "1.2";
p                 144 arch/c6x/kernel/setup.c 		p->cpu_name = "unknown";
p                 152 arch/c6x/kernel/setup.c 				p->cpu_rev = "DM640/DM641/DM642/DM643";
p                 153 arch/c6x/kernel/setup.c 				p->cpu_voltage = "1.2 - 1.4";
p                 155 arch/c6x/kernel/setup.c 				p->cpu_rev = "C6201";
p                 156 arch/c6x/kernel/setup.c 				p->cpu_voltage = "2.5";
p                 160 arch/c6x/kernel/setup.c 			p->cpu_rev = "C6201B/C6202/C6211";
p                 161 arch/c6x/kernel/setup.c 			p->cpu_voltage = "1.8";
p                 164 arch/c6x/kernel/setup.c 			p->cpu_rev = "C6202B/C6203/C6204/C6205";
p                 165 arch/c6x/kernel/setup.c 			p->cpu_voltage = "1.5";
p                 168 arch/c6x/kernel/setup.c 			p->cpu_rev = "C6701 revision 0 (early CPU)";
p                 169 arch/c6x/kernel/setup.c 			p->cpu_voltage = "1.8";
p                 172 arch/c6x/kernel/setup.c 			p->cpu_rev = "C6701/C6711/C6712";
p                 173 arch/c6x/kernel/setup.c 			p->cpu_voltage = "1.8";
p                 176 arch/c6x/kernel/setup.c 			p->cpu_rev = "C64x";
p                 177 arch/c6x/kernel/setup.c 			p->cpu_voltage = "1.5";
p                 180 arch/c6x/kernel/setup.c 			p->cpu_rev = "unknown";
p                 183 arch/c6x/kernel/setup.c 		p->cpu_rev = p->__cpu_rev;
p                 184 arch/c6x/kernel/setup.c 		snprintf(p->__cpu_rev, sizeof(p->__cpu_rev), "0x%x", cpu_id);
p                 187 arch/c6x/kernel/setup.c 	p->core_id = get_coreid();
p                 201 arch/c6x/kernel/setup.c 	       p->core_id, p->cpu_name, p->cpu_rev,
p                 202 arch/c6x/kernel/setup.c 	       p->cpu_voltage, c6x_core_freq / 1000000);
p                 211 arch/c6x/kernel/setup.c static int __init early_mem(char *p)
p                 213 arch/c6x/kernel/setup.c 	if (!p)
p                 216 arch/c6x/kernel/setup.c 	mem_size = memparse(p, &p);
p                 226 arch/c6x/kernel/setup.c static int __init early_memdma(char *p)
p                 228 arch/c6x/kernel/setup.c 	if (!p)
p                 231 arch/c6x/kernel/setup.c 	dma_size = memparse(p, &p);
p                 232 arch/c6x/kernel/setup.c 	if (*p == '@')
p                 233 arch/c6x/kernel/setup.c 		dma_start = memparse(p, &p);
p                 407 arch/c6x/kernel/setup.c #define ptr_to_cpu(p) ((long)(p) - 1)
p                 412 arch/c6x/kernel/setup.c 	struct cpuinfo_c6x *p = &per_cpu(cpu_data, n);
p                 434 arch/c6x/kernel/setup.c 		   p->cpu_name, p->cpu_rev, p->cpu_voltage,
p                 435 arch/c6x/kernel/setup.c 		   p->core_id, p->mmu, p->fpu,
p                 378 arch/c6x/kernel/traps.c 	unsigned long *p, *endstack;
p                 393 arch/c6x/kernel/traps.c 	for (i = 0, p = stack; i < kstack_depth_to_print; i++) {
p                 394 arch/c6x/kernel/traps.c 		if (p + 1 > endstack)
p                 398 arch/c6x/kernel/traps.c 		pr_cont(" %08lx", *p++);
p                 346 arch/c6x/platforms/dscr.c 	const __be32 *p;
p                 350 arch/c6x/platforms/dscr.c 	p = of_get_property(node, "ti,dscr-rmii-resets", &size);
p                 351 arch/c6x/platforms/dscr.c 	if (p) {
p                 353 arch/c6x/platforms/dscr.c 		size /= (sizeof(*p) * 2);
p                 358 arch/c6x/platforms/dscr.c 			dscr.rmii_resets[i].reg = be32_to_cpup(p++);
p                 359 arch/c6x/platforms/dscr.c 			dscr.rmii_resets[i].mask = be32_to_cpup(p++);
p                 397 arch/c6x/platforms/dscr.c 	const __be32 *p;
p                 400 arch/c6x/platforms/dscr.c 	p = of_get_property(node, "ti,dscr-locked-regs", &size);
p                 401 arch/c6x/platforms/dscr.c 	if (p) {
p                 403 arch/c6x/platforms/dscr.c 		size /= (sizeof(*p) * 3);
p                 410 arch/c6x/platforms/dscr.c 			r->reg = be32_to_cpup(p++);
p                 411 arch/c6x/platforms/dscr.c 			r->lockreg = be32_to_cpup(p++);
p                 412 arch/c6x/platforms/dscr.c 			r->key = be32_to_cpup(p++);
p                 470 arch/c6x/platforms/dscr.c 	const __be32 *p;
p                 473 arch/c6x/platforms/dscr.c 	p = of_get_property(node, "ti,dscr-devstate-ctl-regs", &size);
p                 474 arch/c6x/platforms/dscr.c 	if (p) {
p                 476 arch/c6x/platforms/dscr.c 		size /= (sizeof(*p) * 7);
p                 483 arch/c6x/platforms/dscr.c 			r->start_id = be32_to_cpup(p++);
p                 484 arch/c6x/platforms/dscr.c 			r->num_ids = be32_to_cpup(p++);
p                 485 arch/c6x/platforms/dscr.c 			r->reg = be32_to_cpup(p++);
p                 486 arch/c6x/platforms/dscr.c 			r->enable = be32_to_cpup(p++);
p                 487 arch/c6x/platforms/dscr.c 			r->disable = be32_to_cpup(p++);
p                 490 arch/c6x/platforms/dscr.c 			r->shift = be32_to_cpup(p++);
p                 491 arch/c6x/platforms/dscr.c 			r->nbits = be32_to_cpup(p++);
p                 526 arch/c6x/platforms/dscr.c 	const __be32 *p;
p                 529 arch/c6x/platforms/dscr.c 	p = of_get_property(node, "ti,dscr-devstate-stat-regs", &size);
p                 530 arch/c6x/platforms/dscr.c 	if (p) {
p                 532 arch/c6x/platforms/dscr.c 		size /= (sizeof(*p) * 7);
p                 539 arch/c6x/platforms/dscr.c 			r->start_id = be32_to_cpup(p++);
p                 540 arch/c6x/platforms/dscr.c 			r->num_ids = be32_to_cpup(p++);
p                 541 arch/c6x/platforms/dscr.c 			r->reg = be32_to_cpup(p++);
p                 542 arch/c6x/platforms/dscr.c 			r->enable = be32_to_cpup(p++);
p                 543 arch/c6x/platforms/dscr.c 			r->disable = be32_to_cpup(p++);
p                 544 arch/c6x/platforms/dscr.c 			r->shift = be32_to_cpup(p++);
p                 545 arch/c6x/platforms/dscr.c 			r->nbits = be32_to_cpup(p++);
p                  45 arch/c6x/platforms/emif.c 	const __be32 *p;
p                  63 arch/c6x/platforms/emif.c 	p = of_get_property(node, "ti,emifa-ce-config", &len);
p                  64 arch/c6x/platforms/emif.c 	if (p) {
p                  69 arch/c6x/platforms/emif.c 			soc_writel(be32_to_cpup(&p[i]), &regs->cecfg[i]);
p                  28 arch/csky/include/asm/pgalloc.h extern void pgd_init(unsigned long *p);
p                 116 arch/csky/include/asm/pgtable.h static inline void set_pte(pte_t *p, pte_t pte)
p                 118 arch/csky/include/asm/pgtable.h 	*p = pte;
p                 120 arch/csky/include/asm/pgtable.h 	dcache_wb_line((u32)p);
p                 138 arch/csky/include/asm/pgtable.h static inline void set_pmd(pmd_t *p, pmd_t pmd)
p                 140 arch/csky/include/asm/pgtable.h 	*p = pmd;
p                 142 arch/csky/include/asm/pgtable.h 	dcache_wb_line((u32)p);
p                 161 arch/csky/include/asm/pgtable.h static inline void pmd_clear(pmd_t *p)
p                 163 arch/csky/include/asm/pgtable.h 	pmd_val(*p) = (__pa(invalid_pte_table));
p                 165 arch/csky/include/asm/pgtable.h 	dcache_wb_line((u32)p);
p                  93 arch/csky/include/asm/processor.h unsigned long get_wchan(struct task_struct *p);
p                  98 arch/csky/include/asm/processor.h #define task_pt_regs(p) \
p                  99 arch/csky/include/asm/processor.h 	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
p                  18 arch/csky/include/asm/spinlock.h 	u32 *p = &lock->lock;
p                  28 arch/csky/include/asm/spinlock.h 		: "r"(p), "r"(ticket_next)
p                  41 arch/csky/include/asm/spinlock.h 	u32 *p = &lock->lock;
p                  55 arch/csky/include/asm/spinlock.h 		: "r"(p), "r"(ticket_next)
p                 101 arch/csky/include/asm/spinlock.h 	u32 *p = &lock->lock;
p                 111 arch/csky/include/asm/spinlock.h 		: "r"(p)
p                 124 arch/csky/include/asm/spinlock.h 	u32 *p = &lock->lock;
p                 136 arch/csky/include/asm/spinlock.h 		: "r"(p)
p                 152 arch/csky/include/asm/spinlock.h 	u32 *p = &lock->lock;
p                 162 arch/csky/include/asm/spinlock.h 		: "r"(p)
p                 169 arch/csky/include/asm/spinlock.h 	u32 *p = &lock->lock;
p                 179 arch/csky/include/asm/spinlock.h 		: "r"(p)
p                 185 arch/csky/include/asm/spinlock.h 	u32 *p = &lock->lock;
p                 197 arch/csky/include/asm/spinlock.h 		: "r"(p)
p                 211 arch/csky/include/asm/spinlock.h 	u32 *p = &lock->lock;
p                 221 arch/csky/include/asm/spinlock.h 		: "r"(p)
p                 234 arch/csky/include/asm/spinlock.h 	u32 *p = &lock->lock;
p                 246 arch/csky/include/asm/spinlock.h 		: "r"(p)
p                  49 arch/csky/kernel/perf_event.c #define to_csky_pmu(p)  (container_of(p, struct csky_pmu, pmu))
p                  40 arch/csky/kernel/process.c 		struct task_struct *p,
p                  44 arch/csky/kernel/process.c 	struct pt_regs *childregs = task_pt_regs(p);
p                  47 arch/csky/kernel/process.c 	save_to_user_fp(&p->thread.user_fp);
p                  54 arch/csky/kernel/process.c 	p->thread.ksp = (unsigned long)childstack;
p                  56 arch/csky/kernel/process.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                  67 arch/csky/kernel/process.c 			task_thread_info(p)->tp_value = childregs->tls
p                  95 arch/csky/kernel/process.c unsigned long get_wchan(struct task_struct *p)
p                 101 arch/csky/kernel/process.c 	if (!p || p == current || p->state == TASK_RUNNING)
p                 104 arch/csky/kernel/process.c 	stack_start = (unsigned long *)end_of_stack(p);
p                 105 arch/csky/kernel/process.c 	stack_end = (unsigned long *)(task_stack_page(p) + THREAD_SIZE);
p                 107 arch/csky/kernel/process.c 	fp = (unsigned long *) thread_saved_fp(p);
p                  84 arch/csky/mm/init.c void pgd_init(unsigned long *p)
p                  89 arch/csky/mm/init.c 		p[i] = __pa(invalid_pte_table);
p                  31 arch/h8300/include/asm/flat.h 	u32 *p = (__force u32 *)rp;
p                  32 arch/h8300/include/asm/flat.h 	put_unaligned((addr & 0x00ffffff) | (*(char *)p << 24), p);
p                 108 arch/h8300/include/asm/processor.h unsigned long get_wchan(struct task_struct *p);
p                  89 arch/h8300/kernel/kgdb.c void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
p                  92 arch/h8300/kernel/kgdb.c 	gdb_regs[GDB_SP] = p->thread.ksp;
p                  93 arch/h8300/kernel/kgdb.c 	gdb_regs[GDB_PC] = KSTK_EIP(p);
p                 111 arch/h8300/kernel/process.c 		struct task_struct *p)
p                 115 arch/h8300/kernel/process.c 	childregs = (struct pt_regs *) (THREAD_SIZE + task_stack_page(p)) - 1;
p                 117 arch/h8300/kernel/process.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 126 arch/h8300/kernel/process.c 		p->thread.usp = usp ?: rdusp();
p                 128 arch/h8300/kernel/process.c 	p->thread.ksp = (unsigned long)childregs;
p                 133 arch/h8300/kernel/process.c unsigned long get_wchan(struct task_struct *p)
p                 139 arch/h8300/kernel/process.c 	if (!p || p == current || p->state == TASK_RUNNING)
p                 142 arch/h8300/kernel/process.c 	stack_page = (unsigned long)p;
p                 143 arch/h8300/kernel/process.c 	fp = ((struct pt_regs *)p->thread.ksp)->er6;
p                  39 arch/hexagon/include/asm/io.h #define readsw(p, d, l)	__raw_readsw(p, d, l)
p                  40 arch/hexagon/include/asm/io.h #define writesw(p, d, l) __raw_writesw(p, d, l)
p                  42 arch/hexagon/include/asm/io.h #define readsl(p, d, l)   __raw_readsl(p, d, l)
p                  43 arch/hexagon/include/asm/io.h #define writesl(p, d, l)  __raw_writesl(p, d, l)
p                  67 arch/hexagon/include/asm/io.h #define xlate_dev_kmem_ptr(p)    __va(p)
p                  68 arch/hexagon/include/asm/io.h #define xlate_dev_mem_ptr(p)    __va(p)
p                  67 arch/hexagon/include/asm/processor.h extern unsigned long get_wchan(struct task_struct *p);
p                  17 arch/hexagon/include/asm/switch_to.h #define switch_to(p, n, r) do {\
p                  18 arch/hexagon/include/asm/switch_to.h 	r = __switch_to((p), (n), (r));\
p                  54 arch/hexagon/kernel/process.c 		unsigned long arg, struct task_struct *p)
p                  56 arch/hexagon/kernel/process.c 	struct thread_info *ti = task_thread_info(p);
p                  75 arch/hexagon/kernel/process.c 	p->thread.switch_sp = ss;
p                  76 arch/hexagon/kernel/process.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 133 arch/hexagon/kernel/process.c unsigned long get_wchan(struct task_struct *p)
p                 138 arch/hexagon/kernel/process.c 	if (!p || p == current || p->state == TASK_RUNNING)
p                 141 arch/hexagon/kernel/process.c 	stack_page = (unsigned long)task_stack_page(p);
p                 142 arch/hexagon/kernel/process.c 	fp = ((struct hexagon_switch_stack *)p->thread.switch_sp)->fp;
p                  41 arch/hexagon/kernel/setup.c 	char *p = &external_cmdline_buffer;
p                  68 arch/hexagon/kernel/setup.c 	if (p[0] != '\0')
p                  69 arch/hexagon/kernel/setup.c 		strlcpy(boot_command_line, p, COMMAND_LINE_SIZE);
p                 126 arch/hexagon/mm/init.c static int __init early_mem(char *p)
p                 131 arch/hexagon/mm/init.c 	size = memparse(p, &endp);
p                  56 arch/ia64/include/asm/barrier.h #define __smp_store_release(p, v)						\
p                  58 arch/ia64/include/asm/barrier.h 	compiletime_assert_atomic_type(*p);				\
p                  60 arch/ia64/include/asm/barrier.h 	WRITE_ONCE(*p, v);						\
p                  63 arch/ia64/include/asm/barrier.h #define __smp_load_acquire(p)						\
p                  65 arch/ia64/include/asm/barrier.h 	typeof(*p) ___p1 = READ_ONCE(*p);				\
p                  66 arch/ia64/include/asm/barrier.h 	compiletime_assert_atomic_type(*p);				\
p                 238 arch/ia64/include/asm/bitops.h 	__u32 *p = (__u32 *) addr + (nr >> 5);
p                 240 arch/ia64/include/asm/bitops.h 	int oldbitset = (*p & m) != 0;
p                 242 arch/ia64/include/asm/bitops.h 	*p |= m;
p                 283 arch/ia64/include/asm/bitops.h 	__u32 *p = (__u32 *) addr + (nr >> 5);
p                 285 arch/ia64/include/asm/bitops.h 	int oldbitset = (*p & m) != 0;
p                 287 arch/ia64/include/asm/bitops.h 	*p &= ~m;
p                  48 arch/ia64/include/asm/io.h #define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | ((p) & 0xfff))
p                  90 arch/ia64/include/asm/kprobes.h #define flush_insn_slot(p)		do { } while (0)
p                 115 arch/ia64/include/asm/kprobes.h extern void arch_remove_kprobe(struct kprobe *p);
p                 120 arch/ia64/include/asm/mmu_context.h init_new_context (struct task_struct *p, struct mm_struct *mm)
p                 135 arch/ia64/include/asm/page.h 	void *p;
p                 145 arch/ia64/include/asm/page.h #define __va(x)		({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
p                 343 arch/ia64/include/asm/processor.h extern unsigned long get_wchan (struct task_struct *p);
p                  36 arch/ia64/include/asm/sections.h 	void *p;
p                  38 arch/ia64/include/asm/sections.h 	if (!probe_kernel_address(&desc->ip, p))
p                  39 arch/ia64/include/asm/sections.h 		ptr = p;
p                  45 arch/ia64/include/asm/spinlock.h 	int	*p = (int *)&lock->lock, ticket, serve;
p                  47 arch/ia64/include/asm/spinlock.h 	ticket = ia64_fetchadd(1, p, acq);
p                  55 arch/ia64/include/asm/spinlock.h 		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");
p                  74 arch/ia64/include/asm/spinlock.h 	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;
p                  78 arch/ia64/include/asm/spinlock.h 	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
p                  79 arch/ia64/include/asm/spinlock.h 	WRITE_ONCE(*p, (tmp + 2) & ~1);
p                  71 arch/ia64/include/asm/thread_info.h #define setup_thread_stack(p, org)			\
p                  72 arch/ia64/include/asm/thread_info.h 	*task_thread_info(p) = *task_thread_info(org);	\
p                  73 arch/ia64/include/asm/thread_info.h 	task_thread_info(p)->ac_stime = 0;		\
p                  74 arch/ia64/include/asm/thread_info.h 	task_thread_info(p)->ac_utime = 0;		\
p                  75 arch/ia64/include/asm/thread_info.h 	task_thread_info(p)->task = (p);
p                  77 arch/ia64/include/asm/thread_info.h #define setup_thread_stack(p, org) \
p                  78 arch/ia64/include/asm/thread_info.h 	*task_thread_info(p) = *task_thread_info(org); \
p                  79 arch/ia64/include/asm/thread_info.h 	task_thread_info(p)->task = (p);
p                  81 arch/ia64/include/asm/thread_info.h #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
p                  62 arch/ia64/include/asm/uaccess.h static inline int __access_ok(const void __user *p, unsigned long size)
p                  64 arch/ia64/include/asm/uaccess.h 	unsigned long addr = (unsigned long)p;
p                 262 arch/ia64/include/asm/uaccess.h xlate_dev_mem_ptr(phys_addr_t p)
p                 267 arch/ia64/include/asm/uaccess.h 	page = pfn_to_page(p >> PAGE_SHIFT);
p                 269 arch/ia64/include/asm/uaccess.h 		ptr = (void *)p + __IA64_UNCACHED_OFFSET;
p                 271 arch/ia64/include/asm/uaccess.h 		ptr = __va(p);
p                 280 arch/ia64/include/asm/uaccess.h xlate_dev_kmem_ptr(void *p)
p                 285 arch/ia64/include/asm/uaccess.h 	page = virt_to_page((unsigned long)p);
p                 287 arch/ia64/include/asm/uaccess.h 		ptr = (void *)__pa(p) + __IA64_UNCACHED_OFFSET;
p                 289 arch/ia64/include/asm/uaccess.h 		ptr = p;
p                 123 arch/ia64/include/asm/uv/uv_hub.h #define UV_PNODE_TO_NASID(p)		(((p) << 1) | uv_hub_info->gnode_upper)
p                 132 arch/ia64/include/asm/uv/uv_hub.h #define UV_GLOBAL_MMR32_PNODE_BITS(p)	((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))
p                 134 arch/ia64/include/asm/uv/uv_hub.h #define UV_GLOBAL_MMR64_PNODE_BITS(p)					\
p                 135 arch/ia64/include/asm/uv/uv_hub.h 	((unsigned long)(p) << UV_GLOBAL_MMR64_PNODE_SHIFT)
p                  46 arch/ia64/include/asm/uv/uv_mmrs.h 	unsigned long	p        :  1;  /* RO */
p                 274 arch/ia64/include/asm/uv/uv_mmrs.h 	unsigned long	p        :  1;  /* RO */
p                 312 arch/ia64/include/asm/uv/uv_mmrs.h 	unsigned long	p        :  1;  /* RO */
p                 350 arch/ia64/include/asm/uv/uv_mmrs.h 	unsigned long	p        :  1;  /* RO */
p                 388 arch/ia64/include/asm/uv/uv_mmrs.h 	unsigned long	p        :  1;  /* RO */
p                 628 arch/ia64/include/asm/uv/uv_mmrs.h 	unsigned long	p        :  1;  /* RO */
p                 666 arch/ia64/include/asm/uv/uv_mmrs.h 	unsigned long	p        :  1;  /* RO */
p                 704 arch/ia64/include/asm/uv/uv_mmrs.h 	unsigned long	p        :  1;  /* RO */
p                 234 arch/ia64/include/uapi/asm/gcc_intrin.h #define ia64_fetchadd4_acq(p, inc)						\
p                 239 arch/ia64/include/uapi/asm/gcc_intrin.h 				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
p                 245 arch/ia64/include/uapi/asm/gcc_intrin.h #define ia64_fetchadd4_rel(p, inc)						\
p                 249 arch/ia64/include/uapi/asm/gcc_intrin.h 				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
p                 255 arch/ia64/include/uapi/asm/gcc_intrin.h #define ia64_fetchadd8_acq(p, inc)						\
p                 260 arch/ia64/include/uapi/asm/gcc_intrin.h 				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
p                 266 arch/ia64/include/uapi/asm/gcc_intrin.h #define ia64_fetchadd8_rel(p, inc)						\
p                 270 arch/ia64/include/uapi/asm/gcc_intrin.h 				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
p                 246 arch/ia64/kernel/acpi.c 	struct acpi_madt_interrupt_override *p;
p                 248 arch/ia64/kernel/acpi.c 	p = (struct acpi_madt_interrupt_override *)header;
p                 250 arch/ia64/kernel/acpi.c 	if (BAD_MADT_ENTRY(p, end))
p                 253 arch/ia64/kernel/acpi.c 	iosapic_override_isa_irq(p->source_irq, p->global_irq,
p                 254 arch/ia64/kernel/acpi.c 				 ((p->inti_flags & ACPI_MADT_POLARITY_MASK) ==
p                 257 arch/ia64/kernel/acpi.c 				 ((p->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
p                 406 arch/ia64/kernel/acpi.c 	struct node_memblk_s *p, *q, *pend;
p                 428 arch/ia64/kernel/acpi.c 	for (p = &node_memblk[0]; p < pend; p++) {
p                 429 arch/ia64/kernel/acpi.c 		if (paddr < p->start_paddr)
p                 432 arch/ia64/kernel/acpi.c 	if (p < pend) {
p                 433 arch/ia64/kernel/acpi.c 		for (q = pend - 1; q >= p; q--)
p                 436 arch/ia64/kernel/acpi.c 	p->start_paddr = paddr;
p                 437 arch/ia64/kernel/acpi.c 	p->size = size;
p                 438 arch/ia64/kernel/acpi.c 	p->nid = pxm;
p                 354 arch/ia64/kernel/efi.c 	void *efi_map_start, *efi_map_end, *p;
p                 364 arch/ia64/kernel/efi.c 	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
p                 365 arch/ia64/kernel/efi.c 		md = p;
p                 565 arch/ia64/kernel/efi.c 		void *p;
p                 567 arch/ia64/kernel/efi.c 		for (i = 0, p = efi_map_start; p < efi_map_end;
p                 568 arch/ia64/kernel/efi.c 		     ++i, p += efi_desc_size)
p                 574 arch/ia64/kernel/efi.c 			md = p;
p                 607 arch/ia64/kernel/efi.c 	void *efi_map_start, *efi_map_end, *p;
p                 616 arch/ia64/kernel/efi.c 	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
p                 617 arch/ia64/kernel/efi.c 		md = p;
p                 692 arch/ia64/kernel/efi.c 	void *efi_map_start, *efi_map_end, *p;
p                 700 arch/ia64/kernel/efi.c 	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
p                 701 arch/ia64/kernel/efi.c 		md = p;
p                 725 arch/ia64/kernel/efi.c 	void *efi_map_start, *efi_map_end, *p;
p                 733 arch/ia64/kernel/efi.c 	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
p                 734 arch/ia64/kernel/efi.c 		md = p;
p                 745 arch/ia64/kernel/efi.c 	void *efi_map_start, *efi_map_end, *p;
p                 756 arch/ia64/kernel/efi.c 	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
p                 757 arch/ia64/kernel/efi.c 		md = p;
p                 977 arch/ia64/kernel/efi.c 	void *efi_map_start, *efi_map_end, *p, *q;
p                 994 arch/ia64/kernel/efi.c 	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
p                 995 arch/ia64/kernel/efi.c 		md = p;
p                1003 arch/ia64/kernel/efi.c 			for (q = p + efi_desc_size; q < efi_map_end;
p                1037 arch/ia64/kernel/efi.c 	if (p >= efi_map_end)
p                1054 arch/ia64/kernel/efi.c 	void *efi_map_start, *efi_map_end, *p, *q;
p                1065 arch/ia64/kernel/efi.c 	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
p                1066 arch/ia64/kernel/efi.c 		md = p;
p                1082 arch/ia64/kernel/efi.c 			for (q = p + efi_desc_size; q < efi_map_end;
p                1178 arch/ia64/kernel/efi.c 	void *efi_map_start, *efi_map_end, *p;
p                1190 arch/ia64/kernel/efi.c 	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
p                1191 arch/ia64/kernel/efi.c 		md = p;
p                1289 arch/ia64/kernel/efi.c 	void *efi_map_start, *efi_map_end, *p;
p                1297 arch/ia64/kernel/efi.c 	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
p                1298 arch/ia64/kernel/efi.c 		md = p;
p                1330 arch/ia64/kernel/efi.c 	void *efi_map_start, *efi_map_end, *p;
p                1339 arch/ia64/kernel/efi.c 	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
p                1340 arch/ia64/kernel/efi.c 		md = p;
p                  17 arch/ia64/kernel/entry.h # define pLvSys		PASTE(p,PRED_LEAVE_SYSCALL)
p                  18 arch/ia64/kernel/entry.h # define pKStk		PASTE(p,PRED_KERNEL_STACK)
p                  19 arch/ia64/kernel/entry.h # define pUStk		PASTE(p,PRED_USER_STACK)
p                  20 arch/ia64/kernel/entry.h # define pSys		PASTE(p,PRED_SYSCALL)
p                  21 arch/ia64/kernel/entry.h # define pNonSys	PASTE(p,PRED_NON_SYSCALL)
p                  56 arch/ia64/kernel/esi.c 	char *p;
p                  78 arch/ia64/kernel/esi.c 	p = (char *) (systab + 1);
p                  84 arch/ia64/kernel/esi.c 		switch (*p) {
p                  89 arch/ia64/kernel/esi.c 			       "ESI table, ignoring rest of table\n", *p);
p                  93 arch/ia64/kernel/esi.c 		p += ESI_DESC_SIZE(*p);
p                 109 arch/ia64/kernel/esi.c 	char *p;
p                 114 arch/ia64/kernel/esi.c 	p = (char *) (esi_systab + 1);
p                 116 arch/ia64/kernel/esi.c 		if (*p == ESI_DESC_ENTRY_POINT) {
p                 117 arch/ia64/kernel/esi.c 			ia64_esi_desc_entry_point_t *esi = (void *)p;
p                 147 arch/ia64/kernel/esi.c 		p += ESI_DESC_SIZE(*p);
p                 160 arch/ia64/kernel/esi.c 	char *p;
p                 166 arch/ia64/kernel/esi.c 	p = (char *) (esi_systab + 1);
p                 168 arch/ia64/kernel/esi.c 		if (*p == ESI_DESC_ENTRY_POINT) {
p                 169 arch/ia64/kernel/esi.c 			ia64_esi_desc_entry_point_t *esi = (void *)p;
p                 195 arch/ia64/kernel/esi.c 		p += ESI_DESC_SIZE(*p);
p                  47 arch/ia64/kernel/irq.c int arch_show_interrupts(struct seq_file *p, int prec)
p                  49 arch/ia64/kernel/irq.c 	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
p                  88 arch/ia64/kernel/kprobes.c 					      struct kprobe *p)
p                  90 arch/ia64/kernel/kprobes.c 	p->ainsn.inst_flag = 0;
p                  91 arch/ia64/kernel/kprobes.c 	p->ainsn.target_br_reg = 0;
p                  92 arch/ia64/kernel/kprobes.c 	p->ainsn.slot = slot;
p                 101 arch/ia64/kernel/kprobes.c 	 	p->ainsn.inst_flag |= INST_FLAG_BREAK_INST;
p                 108 arch/ia64/kernel/kprobes.c 	 		p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
p                 109 arch/ia64/kernel/kprobes.c 			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
p                 113 arch/ia64/kernel/kprobes.c 			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
p                 116 arch/ia64/kernel/kprobes.c 			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
p                 117 arch/ia64/kernel/kprobes.c 			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
p                 118 arch/ia64/kernel/kprobes.c 			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
p                 124 arch/ia64/kernel/kprobes.c 			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
p                 125 arch/ia64/kernel/kprobes.c 			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
p                 288 arch/ia64/kernel/kprobes.c 					 struct kprobe *p,
p                 292 arch/ia64/kernel/kprobes.c 	bundle_t *bundle = &p->opcode.bundle;
p                 318 arch/ia64/kernel/kprobes.c 	update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
p                 389 arch/ia64/kernel/kprobes.c static void __kprobes set_current_kprobe(struct kprobe *p,
p                 392 arch/ia64/kernel/kprobes.c 	__this_cpu_write(current_kprobe, p);
p                 407 arch/ia64/kernel/kprobes.c int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
p                 545 arch/ia64/kernel/kprobes.c static void __kprobes prepare_booster(struct kprobe *p)
p                 547 arch/ia64/kernel/kprobes.c 	unsigned long addr = (unsigned long)p->addr & ~0xFULL;
p                 548 arch/ia64/kernel/kprobes.c 	unsigned int slot = (unsigned long)p->addr & 0xf;
p                 551 arch/ia64/kernel/kprobes.c 	if (can_boost(&p->ainsn.insn[0].bundle, slot, addr)) {
p                 552 arch/ia64/kernel/kprobes.c 		set_brl_inst(&p->ainsn.insn[1].bundle, (bundle_t *)addr + 1);
p                 553 arch/ia64/kernel/kprobes.c 		p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE;
p                 557 arch/ia64/kernel/kprobes.c 	for (; addr < (unsigned long)p->addr; addr++) {
p                 564 arch/ia64/kernel/kprobes.c int __kprobes arch_prepare_kprobe(struct kprobe *p)
p                 566 arch/ia64/kernel/kprobes.c 	unsigned long addr = (unsigned long) p->addr;
p                 590 arch/ia64/kernel/kprobes.c 	p->ainsn.insn = get_insn_slot();
p                 591 arch/ia64/kernel/kprobes.c 	if (!p->ainsn.insn)
p                 593 arch/ia64/kernel/kprobes.c 	memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t));
p                 594 arch/ia64/kernel/kprobes.c 	memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t));
p                 596 arch/ia64/kernel/kprobes.c 	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);
p                 598 arch/ia64/kernel/kprobes.c 	prepare_booster(p);
p                 603 arch/ia64/kernel/kprobes.c void __kprobes arch_arm_kprobe(struct kprobe *p)
p                 608 arch/ia64/kernel/kprobes.c 	arm_addr = ((unsigned long)p->addr) & ~0xFUL;
p                 610 arch/ia64/kernel/kprobes.c 	src = &p->opcode.bundle;
p                 612 arch/ia64/kernel/kprobes.c 	flush_icache_range((unsigned long)p->ainsn.insn,
p                 613 arch/ia64/kernel/kprobes.c 			   (unsigned long)p->ainsn.insn +
p                 616 arch/ia64/kernel/kprobes.c 	switch (p->ainsn.slot) {
p                 630 arch/ia64/kernel/kprobes.c void __kprobes arch_disarm_kprobe(struct kprobe *p)
p                 635 arch/ia64/kernel/kprobes.c 	arm_addr = ((unsigned long)p->addr) & ~0xFUL;
p                 638 arch/ia64/kernel/kprobes.c 	src = &p->ainsn.insn->bundle;
p                 639 arch/ia64/kernel/kprobes.c 	switch (p->ainsn.slot) {
p                 653 arch/ia64/kernel/kprobes.c void __kprobes arch_remove_kprobe(struct kprobe *p)
p                 655 arch/ia64/kernel/kprobes.c 	if (p->ainsn.insn) {
p                 656 arch/ia64/kernel/kprobes.c 		free_insn_slot(p->ainsn.insn,
p                 657 arch/ia64/kernel/kprobes.c 			       p->ainsn.inst_flag & INST_FLAG_BOOSTABLE);
p                 658 arch/ia64/kernel/kprobes.c 		p->ainsn.insn = NULL;
p                 669 arch/ia64/kernel/kprobes.c static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
p                 671 arch/ia64/kernel/kprobes.c 	unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle);
p                 672 arch/ia64/kernel/kprobes.c 	unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
p                 674 arch/ia64/kernel/kprobes.c 	int slot = ((unsigned long)p->addr & 0xf);
p                 676 arch/ia64/kernel/kprobes.c 	template = p->ainsn.insn->bundle.quad0.template;
p                 681 arch/ia64/kernel/kprobes.c 	if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) {
p                 683 arch/ia64/kernel/kprobes.c 		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
p                 689 arch/ia64/kernel/kprobes.c 		if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) {
p                 695 arch/ia64/kernel/kprobes.c 			switch (p->ainsn.target_br_reg) {
p                 737 arch/ia64/kernel/kprobes.c static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
p                 739 arch/ia64/kernel/kprobes.c 	unsigned long bundle_addr = (unsigned long) &p->ainsn.insn->bundle;
p                 740 arch/ia64/kernel/kprobes.c 	unsigned long slot = (unsigned long)p->addr & 0xf;
p                 743 arch/ia64/kernel/kprobes.c 	if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)
p                 744 arch/ia64/kernel/kprobes.c 		regs->cr_iip = (unsigned long)p->addr & ~0xFULL;
p                 770 arch/ia64/kernel/kprobes.c 	struct kprobe *p;
p                 785 arch/ia64/kernel/kprobes.c 		p = get_kprobe(addr);
p                 786 arch/ia64/kernel/kprobes.c 		if (p) {
p                 788 arch/ia64/kernel/kprobes.c 	 		     (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
p                 799 arch/ia64/kernel/kprobes.c 			set_current_kprobe(p, kcb);
p                 800 arch/ia64/kernel/kprobes.c 			kprobes_inc_nmissed_count(p);
p                 801 arch/ia64/kernel/kprobes.c 			prepare_ss(p, regs);
p                 817 arch/ia64/kernel/kprobes.c 	p = get_kprobe(addr);
p                 818 arch/ia64/kernel/kprobes.c 	if (!p) {
p                 835 arch/ia64/kernel/kprobes.c 	set_current_kprobe(p, kcb);
p                 838 arch/ia64/kernel/kprobes.c 	if (p->pre_handler && p->pre_handler(p, regs)) {
p                 845 arch/ia64/kernel/kprobes.c 	if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
p                 847 arch/ia64/kernel/kprobes.c 		ia64_psr(regs)->ri = p->ainsn.slot;
p                 848 arch/ia64/kernel/kprobes.c 		regs->cr_iip = (unsigned long)&p->ainsn.insn->bundle & ~0xFULL;
p                 857 arch/ia64/kernel/kprobes.c 	prepare_ss(p, regs);
p                 998 arch/ia64/kernel/kprobes.c int __kprobes arch_trampoline_kprobe(struct kprobe *p)
p                1000 arch/ia64/kernel/kprobes.c 	if (p->addr ==
p                 211 arch/ia64/kernel/mca.c 	char *p;
p                 223 arch/ia64/kernel/mca.c 		for (p = temp_buf; *p; p++) {
p                 226 arch/ia64/kernel/mca.c 				mlogbuf[mlogbuf_end] = *p;
p                 246 arch/ia64/kernel/mca.c 	char *p;
p                 254 arch/ia64/kernel/mca.c 		p = temp_buf;
p                 261 arch/ia64/kernel/mca.c 			*p = mlogbuf[index];
p                 263 arch/ia64/kernel/mca.c 			if (!*p)
p                 265 arch/ia64/kernel/mca.c 			p++;
p                 269 arch/ia64/kernel/mca.c 		*p = '\0';
p                 875 arch/ia64/kernel/mca.c 	char *p, comm[sizeof(current->comm)];
p                 881 arch/ia64/kernel/mca.c 		if ((p = strchr(previous_current->comm, ' ')))
p                 882 arch/ia64/kernel/mca.c 			l = p - previous_current->comm;
p                 969 arch/ia64/kernel/mca.c 	char *p;
p                 990 arch/ia64/kernel/mca.c 	if ((p = strchr(current->comm, ' ')))
p                 991 arch/ia64/kernel/mca.c 		*p = '\0';
p                1062 arch/ia64/kernel/mca.c 		va.p = old_bspstore;
p                1084 arch/ia64/kernel/mca.c 	p = (char *)r12 - sizeof(*regs);
p                1085 arch/ia64/kernel/mca.c 	old_regs = (struct pt_regs *)p;
p                1107 arch/ia64/kernel/mca.c 	p -= sizeof(struct switch_stack);
p                1108 arch/ia64/kernel/mca.c 	old_sw = (struct switch_stack *)p;
p                1121 arch/ia64/kernel/mca.c 	previous_current->thread.ksp = (u64)p - 16;
p                1226 arch/ia64/kernel/mca.c 	struct ia64_tr_entry *p;
p                1235 arch/ia64/kernel/mca.c 		p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX;
p                1236 arch/ia64/kernel/mca.c 		if (p->pte & 0x1) {
p                1237 arch/ia64/kernel/mca.c 			old_rr = ia64_get_rr(p->ifa);
p                1238 arch/ia64/kernel/mca.c 			if (old_rr != p->rr) {
p                1239 arch/ia64/kernel/mca.c 				ia64_set_rr(p->ifa, p->rr);
p                1242 arch/ia64/kernel/mca.c 			ia64_ptr(iord, p->ifa, p->itir >> 2);
p                1245 arch/ia64/kernel/mca.c 				ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2);
p                1249 arch/ia64/kernel/mca.c 				ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2);
p                1252 arch/ia64/kernel/mca.c 			if (old_rr != p->rr) {
p                1253 arch/ia64/kernel/mca.c 				ia64_set_rr(p->ifa, old_rr);
p                1809 arch/ia64/kernel/mca.c 	struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
p                1811 arch/ia64/kernel/mca.c 	memset(p, 0, KERNEL_STACK_SIZE);
p                1812 arch/ia64/kernel/mca.c 	ti = task_thread_info(p);
p                1815 arch/ia64/kernel/mca.c 	ti->task = p;
p                1817 arch/ia64/kernel/mca.c 	p->stack = ti;
p                1818 arch/ia64/kernel/mca.c 	p->state = TASK_UNINTERRUPTIBLE;
p                1819 arch/ia64/kernel/mca.c 	cpumask_set_cpu(cpu, &p->cpus_mask);
p                1820 arch/ia64/kernel/mca.c 	INIT_LIST_HEAD(&p->tasks);
p                1821 arch/ia64/kernel/mca.c 	p->parent = p->real_parent = p->group_leader = p;
p                1822 arch/ia64/kernel/mca.c 	INIT_LIST_HEAD(&p->children);
p                1823 arch/ia64/kernel/mca.c 	INIT_LIST_HEAD(&p->sibling);
p                1824 arch/ia64/kernel/mca.c 	strncpy(p->comm, type, sizeof(p->comm)-1);
p                 117 arch/ia64/kernel/mca_drv.c 	struct page *p;
p                 127 arch/ia64/kernel/mca_drv.c 	p = pfn_to_page(paddr>>PAGE_SHIFT);
p                 131 arch/ia64/kernel/mca_drv.c 		if (page_isolate[i] == p)
p                 139 arch/ia64/kernel/mca_drv.c 	if (PageSlab(p) || PageReserved(p))
p                 143 arch/ia64/kernel/mca_drv.c 	get_page(p);
p                 144 arch/ia64/kernel/mca_drv.c 	SetPageReserved(p);
p                 145 arch/ia64/kernel/mca_drv.c 	page_isolate[num_page_isolate++] = p;
p                  43 arch/ia64/kernel/mca_drv.h #define peidx_head(p)   (((p)->info))
p                  44 arch/ia64/kernel/mca_drv.h #define peidx_mid(p)    (((p)->id))
p                  45 arch/ia64/kernel/mca_drv.h #define peidx_bottom(p) (((p)->regs))
p                  47 arch/ia64/kernel/mca_drv.h #define peidx_psp(p)           (&(peidx_head(p)->proc_state_parameter))
p                  48 arch/ia64/kernel/mca_drv.h #define peidx_field_valid(p)   (&(peidx_head(p)->valid))
p                  49 arch/ia64/kernel/mca_drv.h #define peidx_minstate_area(p) (&(peidx_bottom(p)->min_state_area))
p                  51 arch/ia64/kernel/mca_drv.h #define peidx_cache_check_num(p)    (peidx_head(p)->valid.num_cache_check)
p                  52 arch/ia64/kernel/mca_drv.h #define peidx_tlb_check_num(p)      (peidx_head(p)->valid.num_tlb_check)
p                  53 arch/ia64/kernel/mca_drv.h #define peidx_bus_check_num(p)      (peidx_head(p)->valid.num_bus_check)
p                  54 arch/ia64/kernel/mca_drv.h #define peidx_reg_file_check_num(p) (peidx_head(p)->valid.num_reg_file_check)
p                  55 arch/ia64/kernel/mca_drv.h #define peidx_ms_check_num(p)       (peidx_head(p)->valid.num_ms_check)
p                  57 arch/ia64/kernel/mca_drv.h #define peidx_cache_check_idx(p, n)    (n)
p                  58 arch/ia64/kernel/mca_drv.h #define peidx_tlb_check_idx(p, n)      (peidx_cache_check_idx(p, peidx_cache_check_num(p)) + n)
p                  59 arch/ia64/kernel/mca_drv.h #define peidx_bus_check_idx(p, n)      (peidx_tlb_check_idx(p, peidx_tlb_check_num(p)) + n)
p                  60 arch/ia64/kernel/mca_drv.h #define peidx_reg_file_check_idx(p, n) (peidx_bus_check_idx(p, peidx_bus_check_num(p)) + n)
p                  61 arch/ia64/kernel/mca_drv.h #define peidx_ms_check_idx(p, n)       (peidx_reg_file_check_idx(p, peidx_reg_file_check_num(p)) + n)
p                  63 arch/ia64/kernel/mca_drv.h #define peidx_mod_error_info(p, name, n) \
p                  64 arch/ia64/kernel/mca_drv.h ({	int __idx = peidx_##name##_idx(p, n); \
p                  66 arch/ia64/kernel/mca_drv.h 	if (peidx_##name##_num(p) > n) /*BUG*/ \
p                  67 arch/ia64/kernel/mca_drv.h 		__ret = &(peidx_head(p)->info[__idx]); \
p                  70 arch/ia64/kernel/mca_drv.h #define peidx_cache_check(p, n)    peidx_mod_error_info(p, cache_check, n)
p                  71 arch/ia64/kernel/mca_drv.h #define peidx_tlb_check(p, n)      peidx_mod_error_info(p, tlb_check, n)
p                  72 arch/ia64/kernel/mca_drv.h #define peidx_bus_check(p, n)      peidx_mod_error_info(p, bus_check, n)
p                  73 arch/ia64/kernel/mca_drv.h #define peidx_reg_file_check(p, n) peidx_mod_error_info(p, reg_file_check, n)
p                  74 arch/ia64/kernel/mca_drv.h #define peidx_ms_check(p, n)       peidx_mod_error_info(p, ms_check, n)
p                 732 arch/ia64/kernel/palinfo.c 		unsigned long p:1;
p                 800 arch/ia64/kernel/palinfo.c 			   gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma,
p                  32 arch/ia64/kernel/patch.c 	u64 *p = (u64 *) (insn_addr & -16);	/* mask out slot number */
p                  34 arch/ia64/kernel/patch.c 	return ( (p[1] & 0x0800000000000000UL) << 4)  | /*A*/
p                  35 arch/ia64/kernel/patch.c 		((p[1] & 0x00000000007fffffUL) << 40) | /*B*/
p                  36 arch/ia64/kernel/patch.c 		((p[0] & 0xffffc00000000000UL) >> 24) | /*C*/
p                  37 arch/ia64/kernel/patch.c 		((p[1] & 0x0000100000000000UL) >> 23) | /*D*/
p                  38 arch/ia64/kernel/patch.c 		((p[1] & 0x0003e00000000000UL) >> 29) | /*E*/
p                  39 arch/ia64/kernel/patch.c 		((p[1] & 0x07fc000000000000UL) >> 43) | /*F*/
p                  40 arch/ia64/kernel/patch.c 		((p[1] & 0x000007f000000000UL) >> 36);  /*G*/
p                2557 arch/ia64/kernel/perfmon.c 	struct task_struct *p = current;
p                2565 arch/ia64/kernel/perfmon.c 		p = find_get_task_by_vpid(pid);
p                2566 arch/ia64/kernel/perfmon.c 		if (!p)
p                2570 arch/ia64/kernel/perfmon.c 	ret = pfm_task_incompatible(ctx, p);
p                2572 arch/ia64/kernel/perfmon.c 		*task = p;
p                2573 arch/ia64/kernel/perfmon.c 	} else if (p != current) {
p                2574 arch/ia64/kernel/perfmon.c 		pfm_put_task(p);
p                6456 arch/ia64/kernel/perfmon.c 	pmu_config_t **p;
p                6460 arch/ia64/kernel/perfmon.c 	p      = pmu_confs;
p                6462 arch/ia64/kernel/perfmon.c 	while(*p) {
p                6463 arch/ia64/kernel/perfmon.c 		if ((*p)->probe) {
p                6464 arch/ia64/kernel/perfmon.c 			if ((*p)->probe() == 0) goto found;
p                6465 arch/ia64/kernel/perfmon.c 		} else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
p                6468 arch/ia64/kernel/perfmon.c 		p++;
p                6472 arch/ia64/kernel/perfmon.c 	pmu_conf = *p;
p                 337 arch/ia64/kernel/process.c 	     struct task_struct *p)
p                 346 arch/ia64/kernel/process.c 	child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1;
p                 350 arch/ia64/kernel/process.c 	child_rbs = (unsigned long) p + IA64_RBS_OFFSET;
p                 353 arch/ia64/kernel/process.c 	p->thread.ksp = (unsigned long) child_stack - 16;
p                 374 arch/ia64/kernel/process.c 	p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
p                 377 arch/ia64/kernel/process.c 	ia64_drop_fpu(p);	/* don't pick up stale state from a CPU's fph */
p                 379 arch/ia64/kernel/process.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 438 arch/ia64/kernel/process.c 		pfm_inherit(p, child_ptregs);
p                 594 arch/ia64/kernel/process.c get_wchan (struct task_struct *p)
p                 600 arch/ia64/kernel/process.c 	if (!p || p == current || p->state == TASK_RUNNING)
p                 611 arch/ia64/kernel/process.c 	unw_init_from_blocked_task(&info, p);
p                 613 arch/ia64/kernel/process.c 		if (p->state == TASK_RUNNING)
p                 116 arch/ia64/kernel/sal.c sal_desc_entry_point (void *p)
p                 118 arch/ia64/kernel/sal.c 	struct ia64_sal_desc_entry_point *ep = p;
p                 150 arch/ia64/kernel/sal.c sal_desc_platform_feature (void *p)
p                 152 arch/ia64/kernel/sal.c 	struct ia64_sal_desc_platform_feature *pf = p;
p                 178 arch/ia64/kernel/sal.c sal_desc_ap_wakeup (void *p)
p                 180 arch/ia64/kernel/sal.c 	struct ia64_sal_desc_ap_wakeup *ap = p;
p                 214 arch/ia64/kernel/sal.c static void __init sal_desc_ap_wakeup(void *p) { }
p                 303 arch/ia64/kernel/sal.c 	char *p;
p                 326 arch/ia64/kernel/sal.c 	p = (char *) (systab + 1);
p                 332 arch/ia64/kernel/sal.c 		switch (*p) {
p                 334 arch/ia64/kernel/sal.c 			sal_desc_entry_point(p);
p                 337 arch/ia64/kernel/sal.c 			sal_desc_platform_feature(p);
p                 340 arch/ia64/kernel/sal.c 			ia64_ptc_domain_info = (ia64_sal_desc_ptc_t *)p;
p                 343 arch/ia64/kernel/sal.c 			sal_desc_ap_wakeup(p);
p                 346 arch/ia64/kernel/sal.c 		p += SAL_DESC_SIZE(*p);
p                 669 arch/ia64/kernel/unwind.c 	struct unw_reg_state *p, *next;
p                 671 arch/ia64/kernel/unwind.c 	for (p = rs->next; p != NULL; p = next) {
p                 672 arch/ia64/kernel/unwind.c 		next = p->next;
p                 673 arch/ia64/kernel/unwind.c 		free_reg_state(p);
p                1190 arch/ia64/kernel/unwind.c #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg)	desc_spill_reg_p(p,t,a,x,y,arg)
p                1192 arch/ia64/kernel/unwind.c #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg)	desc_spill_psprel_p(p,t,a,o,arg)
p                1194 arch/ia64/kernel/unwind.c #define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg)	desc_spill_sprel_p(p,t,a,o,arg)
p                1196 arch/ia64/kernel/unwind.c #define UNW_DEC_RESTORE_P(f,p,t,a,arg)		desc_restore_p(p,t,a,arg)
p                1875 arch/ia64/kernel/unwind.c unw_valid(const struct unw_frame_info *info, unsigned long* p)
p                1877 arch/ia64/kernel/unwind.c 	unsigned long loc = (unsigned long)p;
p                 422 arch/ia64/mm/tlb.c static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size)
p                 430 arch/ia64/mm/tlb.c 	if (va_rid != RR_TO_RID(p->rr))
p                 432 arch/ia64/mm/tlb.c 	tr_log_size = (p->itir & 0xff) >> 2;
p                 433 arch/ia64/mm/tlb.c 	tr_end = p->ifa + (1<<tr_log_size) - 1;
p                 435 arch/ia64/mm/tlb.c 	if (va > tr_end || p->ifa > va_end)
p                 459 arch/ia64/mm/tlb.c 	struct ia64_tr_entry *p;
p                 472 arch/ia64/mm/tlb.c 		p = ia64_idtrs[cpu];
p                 474 arch/ia64/mm/tlb.c 								i++, p++) {
p                 475 arch/ia64/mm/tlb.c 			if (p->pte & 0x1)
p                 476 arch/ia64/mm/tlb.c 				if (is_tr_overlap(p, va, log_size)) {
p                 484 arch/ia64/mm/tlb.c 		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
p                 486 arch/ia64/mm/tlb.c 								i++, p++) {
p                 487 arch/ia64/mm/tlb.c 			if (p->pte & 0x1)
p                 488 arch/ia64/mm/tlb.c 				if (is_tr_overlap(p, va, log_size)) {
p                 528 arch/ia64/mm/tlb.c 		p = ia64_idtrs[cpu] + i;
p                 529 arch/ia64/mm/tlb.c 		p->ifa = va;
p                 530 arch/ia64/mm/tlb.c 		p->pte = pte;
p                 531 arch/ia64/mm/tlb.c 		p->itir = log_size << 2;
p                 532 arch/ia64/mm/tlb.c 		p->rr = ia64_get_rr(va);
p                 537 arch/ia64/mm/tlb.c 		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
p                 538 arch/ia64/mm/tlb.c 		p->ifa = va;
p                 539 arch/ia64/mm/tlb.c 		p->pte = pte;
p                 540 arch/ia64/mm/tlb.c 		p->itir = log_size << 2;
p                 541 arch/ia64/mm/tlb.c 		p->rr = ia64_get_rr(va);
p                 562 arch/ia64/mm/tlb.c 	struct ia64_tr_entry *p;
p                 568 arch/ia64/mm/tlb.c 		p = ia64_idtrs[cpu] + slot;
p                 569 arch/ia64/mm/tlb.c 		if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
p                 570 arch/ia64/mm/tlb.c 			p->pte = 0;
p                 571 arch/ia64/mm/tlb.c 			ia64_ptr(0x1, p->ifa, p->itir>>2);
p                 577 arch/ia64/mm/tlb.c 		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
p                 578 arch/ia64/mm/tlb.c 		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
p                 579 arch/ia64/mm/tlb.c 			p->pte = 0;
p                 580 arch/ia64/mm/tlb.c 			ia64_ptr(0x2, p->ifa, p->itir>>2);
p                  58 arch/m68k/68000/m68EZ328.c   unsigned char *p;
p                  64 arch/m68k/68000/m68EZ328.c   p = cs8900a_hwaddr = gethwaddr(0);
p                  65 arch/m68k/68000/m68EZ328.c   pr_info("uCsimm hwaddr %pM\n", p);
p                  67 arch/m68k/68000/m68EZ328.c   p = getbenv("APPEND");
p                  68 arch/m68k/68000/m68EZ328.c   if (p) strcpy(p,command);
p                 151 arch/m68k/68000/m68VZ328.c 	char *p;
p                 154 arch/m68k/68000/m68VZ328.c 	p = cs8900a_hwaddr = gethwaddr(0);
p                 155 arch/m68k/68000/m68VZ328.c 	pr_info("uCdimm hwaddr %pM\n", p);
p                 156 arch/m68k/68000/m68VZ328.c 	p = getbenv("APPEND");
p                 157 arch/m68k/68000/m68VZ328.c 	if (p)
p                 158 arch/m68k/68000/m68VZ328.c 		strcpy(p, command);
p                  48 arch/m68k/amiga/chipram.c 	void *p;
p                  55 arch/m68k/amiga/chipram.c 	p = amiga_chip_alloc_res(size, res);
p                  56 arch/m68k/amiga/chipram.c 	if (!p) {
p                  61 arch/m68k/amiga/chipram.c 	return p;
p                 151 arch/m68k/atari/config.c 	char *p;
p                 163 arch/m68k/atari/config.c 	while ((p = strsep(&args, ",")) != NULL) {
p                 164 arch/m68k/atari/config.c 		if (!*p)
p                 167 arch/m68k/atari/config.c 		if (strncmp(p, "ov_", 3) == 0) {
p                 168 arch/m68k/atari/config.c 			p += 3;
p                 172 arch/m68k/atari/config.c 		if (strcmp(p, "ikbd") == 0) {
p                 175 arch/m68k/atari/config.c 		} else if (strcmp(p, "midi") == 0) {
p                 178 arch/m68k/atari/config.c 		} else if (strcmp(p, "snd6") == 0) {
p                 180 arch/m68k/atari/config.c 		} else if (strcmp(p, "snd7") == 0) {
p                  99 arch/m68k/atari/nvram.c 	char *p = buf;
p                 107 arch/m68k/atari/nvram.c 	for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
p                 108 arch/m68k/atari/nvram.c 		*p = __nvram_read_byte(i);
p                 112 arch/m68k/atari/nvram.c 	return p - buf;
p                 117 arch/m68k/atari/nvram.c 	char *p = buf;
p                 125 arch/m68k/atari/nvram.c 	for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
p                 126 arch/m68k/atari/nvram.c 		__nvram_write_byte(*p, i);
p                 131 arch/m68k/atari/nvram.c 	return p - buf;
p                  34 arch/m68k/coldfire/intc-2.c #define MCFSIM_ICR_PRI(p)	(p)		/* Priority p intr */
p                  33 arch/m68k/include/asm/bitops.h 	char *p = (char *)vaddr + (nr ^ 31) / 8;
p                  37 arch/m68k/include/asm/bitops.h 		: "a" (p), "di" (nr & 7)
p                  43 arch/m68k/include/asm/bitops.h 	char *p = (char *)vaddr + (nr ^ 31) / 8;
p                  46 arch/m68k/include/asm/bitops.h 		: "+m" (*p)
p                  73 arch/m68k/include/asm/bitops.h 	char *p = (char *)vaddr + (nr ^ 31) / 8;
p                  77 arch/m68k/include/asm/bitops.h 		: "a" (p), "di" (nr & 7)
p                  83 arch/m68k/include/asm/bitops.h 	char *p = (char *)vaddr + (nr ^ 31) / 8;
p                  86 arch/m68k/include/asm/bitops.h 		: "+m" (*p)
p                 113 arch/m68k/include/asm/bitops.h 	char *p = (char *)vaddr + (nr ^ 31) / 8;
p                 117 arch/m68k/include/asm/bitops.h 		: "a" (p), "di" (nr & 7)
p                 123 arch/m68k/include/asm/bitops.h 	char *p = (char *)vaddr + (nr ^ 31) / 8;
p                 126 arch/m68k/include/asm/bitops.h 		: "+m" (*p)
p                 160 arch/m68k/include/asm/bitops.h 	char *p = (char *)vaddr + (nr ^ 31) / 8;
p                 165 arch/m68k/include/asm/bitops.h 		: "a" (p), "di" (nr & 7)
p                 173 arch/m68k/include/asm/bitops.h 	char *p = (char *)vaddr + (nr ^ 31) / 8;
p                 177 arch/m68k/include/asm/bitops.h 		: "=d" (retval), "+m" (*p)
p                 210 arch/m68k/include/asm/bitops.h 	char *p = (char *)vaddr + (nr ^ 31) / 8;
p                 215 arch/m68k/include/asm/bitops.h 		: "a" (p), "di" (nr & 7)
p                 223 arch/m68k/include/asm/bitops.h 	char *p = (char *)vaddr + (nr ^ 31) / 8;
p                 227 arch/m68k/include/asm/bitops.h 		: "=d" (retval), "+m" (*p)
p                 260 arch/m68k/include/asm/bitops.h 	char *p = (char *)vaddr + (nr ^ 31) / 8;
p                 265 arch/m68k/include/asm/bitops.h 		: "a" (p), "di" (nr & 7)
p                 273 arch/m68k/include/asm/bitops.h 	char *p = (char *)vaddr + (nr ^ 31) / 8;
p                 277 arch/m68k/include/asm/bitops.h 		: "=d" (retval), "+m" (*p)
p                 320 arch/m68k/include/asm/bitops.h 	const unsigned long *p = vaddr;
p                 329 arch/m68k/include/asm/bitops.h 	while (!(num = ~*p++)) {
p                 338 arch/m68k/include/asm/bitops.h 	res += ((long)p - (long)vaddr - 4) * 8;
p                 346 arch/m68k/include/asm/bitops.h 	const unsigned long *p = vaddr + (offset >> 5);
p                 353 arch/m68k/include/asm/bitops.h 		unsigned long num = ~*p++ & (~0UL << bit);
p                 369 arch/m68k/include/asm/bitops.h 	return offset + find_first_zero_bit(p, size - offset);
p                 375 arch/m68k/include/asm/bitops.h 	const unsigned long *p = vaddr;
p                 384 arch/m68k/include/asm/bitops.h 	while (!(num = *p++)) {
p                 393 arch/m68k/include/asm/bitops.h 	res += ((long)p - (long)vaddr - 4) * 8;
p                 401 arch/m68k/include/asm/bitops.h 	const unsigned long *p = vaddr + (offset >> 5);
p                 408 arch/m68k/include/asm/bitops.h 		unsigned long num = *p++ & (~0UL << bit);
p                 424 arch/m68k/include/asm/bitops.h 	return offset + find_first_bit(p, size - offset);
p                  95 arch/m68k/include/asm/cmpxchg.h static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
p                 101 arch/m68k/include/asm/cmpxchg.h 				      : "=d" (old), "=m" (*(char *)p)
p                 102 arch/m68k/include/asm/cmpxchg.h 				      : "d" (new), "0" (old), "m" (*(char *)p));
p                 106 arch/m68k/include/asm/cmpxchg.h 				      : "=d" (old), "=m" (*(short *)p)
p                 107 arch/m68k/include/asm/cmpxchg.h 				      : "d" (new), "0" (old), "m" (*(short *)p));
p                 111 arch/m68k/include/asm/cmpxchg.h 				      : "=d" (old), "=m" (*(int *)p)
p                 112 arch/m68k/include/asm/cmpxchg.h 				      : "d" (new), "0" (old), "m" (*(int *)p));
p                 115 arch/m68k/include/asm/cmpxchg.h 		old = __invalid_cmpxchg_size(p, old, new, size);
p                 210 arch/m68k/include/asm/io_mm.h #define isa_readb(p)       in_8(isa_mtb((unsigned long)(p)))
p                 211 arch/m68k/include/asm/io_mm.h #define isa_readw(p)       \
p                 212 arch/m68k/include/asm/io_mm.h 	(ISA_SEX ? in_be16(isa_mtw((unsigned long)(p)))	\
p                 213 arch/m68k/include/asm/io_mm.h 		 : in_le16(isa_mtw((unsigned long)(p))))
p                 214 arch/m68k/include/asm/io_mm.h #define isa_writeb(val,p)  out_8(isa_mtb((unsigned long)(p)),(val))
p                 215 arch/m68k/include/asm/io_mm.h #define isa_writew(val,p)  \
p                 216 arch/m68k/include/asm/io_mm.h 	(ISA_SEX ? out_be16(isa_mtw((unsigned long)(p)),(val))	\
p                 217 arch/m68k/include/asm/io_mm.h 		 : out_le16(isa_mtw((unsigned long)(p)),(val)))
p                 230 arch/m68k/include/asm/io_mm.h #define isa_rom_readb(p)       rom_in_8(isa_mtb((unsigned long)(p)))
p                 231 arch/m68k/include/asm/io_mm.h #define isa_rom_readw(p)       \
p                 232 arch/m68k/include/asm/io_mm.h 	(ISA_SEX ? rom_in_be16(isa_mtw((unsigned long)(p)))	\
p                 233 arch/m68k/include/asm/io_mm.h 		 : rom_in_le16(isa_mtw((unsigned long)(p))))
p                 234 arch/m68k/include/asm/io_mm.h #define isa_rom_readw_swap(p)       \
p                 235 arch/m68k/include/asm/io_mm.h 	(ISA_SEX ? rom_in_le16(isa_mtw((unsigned long)(p)))	\
p                 236 arch/m68k/include/asm/io_mm.h 		 : rom_in_be16(isa_mtw((unsigned long)(p))))
p                 237 arch/m68k/include/asm/io_mm.h #define isa_rom_readw_raw(p)   rom_in_be16(isa_mtw((unsigned long)(p)))
p                 239 arch/m68k/include/asm/io_mm.h #define isa_rom_writeb(val, p)  rom_out_8(isa_mtb((unsigned long)(p)), (val))
p                 240 arch/m68k/include/asm/io_mm.h #define isa_rom_writew(val, p)  \
p                 241 arch/m68k/include/asm/io_mm.h 	(ISA_SEX ? rom_out_be16(isa_mtw((unsigned long)(p)), (val))	\
p                 242 arch/m68k/include/asm/io_mm.h 		 : rom_out_le16(isa_mtw((unsigned long)(p)), (val)))
p                 243 arch/m68k/include/asm/io_mm.h #define isa_rom_writew_swap(val, p)  \
p                 244 arch/m68k/include/asm/io_mm.h 	(ISA_SEX ? rom_out_le16(isa_mtw((unsigned long)(p)), (val))	\
p                 245 arch/m68k/include/asm/io_mm.h 		 : rom_out_be16(isa_mtw((unsigned long)(p)), (val)))
p                 246 arch/m68k/include/asm/io_mm.h #define isa_rom_writew_raw(val, p)  rom_out_be16(isa_mtw((unsigned long)(p)), (val))
p                 266 arch/m68k/include/asm/io_mm.h #define isa_inb_p(p)      ({u8 v=isa_inb(p);isa_delay();v;})
p                 267 arch/m68k/include/asm/io_mm.h #define isa_outb_p(v,p)   ({isa_outb((v),(p));isa_delay();})
p                 268 arch/m68k/include/asm/io_mm.h #define isa_inw_p(p)      ({u16 v=isa_inw(p);isa_delay();v;})
p                 269 arch/m68k/include/asm/io_mm.h #define isa_outw_p(v,p)   ({isa_outw((v),(p));isa_delay();})
p                 270 arch/m68k/include/asm/io_mm.h #define isa_inl_p(p)      ({u32 v=isa_inl(p);isa_delay();v;})
p                 271 arch/m68k/include/asm/io_mm.h #define isa_outl_p(v,p)   ({isa_outl((v),(p));isa_delay();})
p                 294 arch/m68k/include/asm/io_mm.h #define isa_rom_inb_p(p)	({ u8 _v = isa_rom_inb(p); isa_delay(); _v; })
p                 295 arch/m68k/include/asm/io_mm.h #define isa_rom_inw_p(p)	({ u16 _v = isa_rom_inw(p); isa_delay(); _v; })
p                 296 arch/m68k/include/asm/io_mm.h #define isa_rom_outb_p(v, p)	({ isa_rom_outb((v), (p)); isa_delay(); })
p                 297 arch/m68k/include/asm/io_mm.h #define isa_rom_outw_p(v, p)	({ isa_rom_outw((v), (p)); isa_delay(); })
p                 398 arch/m68k/include/asm/io_mm.h #define xlate_dev_mem_ptr(p)	__va(p)
p                 403 arch/m68k/include/asm/io_mm.h #define xlate_dev_kmem_ptr(p)	p
p                  70 arch/m68k/include/asm/kmap.h static inline void ioport_unmap(void __iomem *p)
p                 283 arch/m68k/include/asm/math-emu.h 	lea	\fp,%a0
p                 128 arch/m68k/include/asm/processor.h unsigned long get_wchan(struct task_struct *p);
p                  35 arch/m68k/kernel/irq.c int arch_show_interrupts(struct seq_file *p, int prec)
p                  37 arch/m68k/kernel/irq.c 	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
p                 123 arch/m68k/kernel/process.c 		 unsigned long arg, struct task_struct *p)
p                 130 arch/m68k/kernel/process.c 	frame = (struct fork_frame *) (task_stack_page(p) + THREAD_SIZE) - 1;
p                 132 arch/m68k/kernel/process.c 	p->thread.ksp = (unsigned long)frame;
p                 133 arch/m68k/kernel/process.c 	p->thread.esp0 = (unsigned long)&frame->regs;
p                 139 arch/m68k/kernel/process.c 	p->thread.fs = get_fs().seg;
p                 141 arch/m68k/kernel/process.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 148 arch/m68k/kernel/process.c 		p->thread.usp = 0;
p                 155 arch/m68k/kernel/process.c 	p->thread.usp = usp ?: rdusp();
p                 158 arch/m68k/kernel/process.c 		task_thread_info(p)->tp_value = frame->regs.d5;
p                 163 arch/m68k/kernel/process.c 		asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");
p                 165 arch/m68k/kernel/process.c 		if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) {
p                 172 arch/m68k/kernel/process.c 					      : "m" (p->thread.fp[0]),
p                 173 arch/m68k/kernel/process.c 						"m" (p->thread.fpcntl[0]),
p                 174 arch/m68k/kernel/process.c 						"m" (p->thread.fpcntl[1]),
p                 175 arch/m68k/kernel/process.c 						"m" (p->thread.fpcntl[2])
p                 181 arch/m68k/kernel/process.c 					      : "m" (p->thread.fp[0]),
p                 182 arch/m68k/kernel/process.c 						"m" (p->thread.fpcntl[0])
p                 188 arch/m68k/kernel/process.c 		asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
p                 247 arch/m68k/kernel/process.c unsigned long get_wchan(struct task_struct *p)
p                 252 arch/m68k/kernel/process.c 	if (!p || p == current || p->state == TASK_RUNNING)
p                 255 arch/m68k/kernel/process.c 	stack_page = (unsigned long)task_stack_page(p);
p                 256 arch/m68k/kernel/process.c 	fp = ((struct switch_stack *)p->thread.ksp)->a6;
p                 940 arch/m68k/kernel/traps.c 	unsigned long *p;
p                 953 arch/m68k/kernel/traps.c 	p = stack;
p                 955 arch/m68k/kernel/traps.c 		if (p + 1 > endstack)
p                 959 arch/m68k/kernel/traps.c 		pr_cont(" %08lx", *p++);
p                  60 arch/m68k/mm/kmap.c 	struct vm_struct **p, *tmp, *area;
p                  66 arch/m68k/mm/kmap.c 	for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
p                  77 arch/m68k/mm/kmap.c 	area->next = *p;
p                  78 arch/m68k/mm/kmap.c 	*p = area;
p                  84 arch/m68k/mm/kmap.c 	struct vm_struct **p, *tmp;
p                  89 arch/m68k/mm/kmap.c 	for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
p                  91 arch/m68k/mm/kmap.c 			*p = tmp->next;
p                  78 arch/m68k/mvme16x/config.c     p_bdid p = &mvme_bdid;
p                  81 arch/m68k/mvme16x/config.c     suf[1] = p->brdsuffix[0];
p                  82 arch/m68k/mvme16x/config.c     suf[2] = p->brdsuffix[1];
p                  86 arch/m68k/mvme16x/config.c     sprintf(model, "Motorola MVME%x%s", be16_to_cpu(p->brdno), suf);
p                 268 arch/m68k/mvme16x/config.c     p_bdid p = &mvme_bdid;
p                 270 arch/m68k/mvme16x/config.c     uint16_t brdno = be16_to_cpu(p->brdno);
p                 282 arch/m68k/mvme16x/config.c     if (strncmp("BDID", p->bdid, 4))
p                 293 arch/m68k/mvme16x/config.c     pr_info("BRD_ID: %s   BUG %x.%x %02x/%02x/%02x\n", id, p->rev >> 4,
p                 294 arch/m68k/mvme16x/config.c 	    p->rev & 0xf, p->yr, p->mth, p->day);
p                  70 arch/m68k/q40/config.c 	const char *p = s;
p                  74 arch/m68k/q40/config.c 			*q40_mem_cptr = *p++;
p                  98 arch/m68k/q40/config.c 	char *p = q40_mem_cptr;
p                 101 arch/m68k/q40/config.c 		*p = *str++;
p                 102 arch/m68k/q40/config.c 		p += 4;
p                 104 arch/m68k/q40/config.c 	q40_mem_cptr = p;
p                  71 arch/m68k/sun3/prom/console.c 	char *p;
p                  98 arch/m68k/sun3/prom/console.c 		p = propb;
p                  99 arch/m68k/sun3/prom/console.c 		while(*p) p++; p -= 2;
p                 100 arch/m68k/sun3/prom/console.c 		if(p[0] == ':') {
p                 101 arch/m68k/sun3/prom/console.c 			if(p[1] == 'a')
p                 103 arch/m68k/sun3/prom/console.c 			else if(p[1] == 'b')
p                 120 arch/m68k/sun3/prom/console.c 	char *p;
p                 150 arch/m68k/sun3/prom/console.c 			p = propb;
p                 151 arch/m68k/sun3/prom/console.c 			while(*p) p++; p -= 2;
p                 152 arch/m68k/sun3/prom/console.c 			if(p[0]==':') {
p                 153 arch/m68k/sun3/prom/console.c 				if(p[1] == 'a')
p                 155 arch/m68k/sun3/prom/console.c 				else if(p[1] == 'b')
p                  43 arch/m68k/tools/amiga/dmesg.c     u_long start = CHIPMEM_START, end = CHIPMEM_END, p;
p                  50 arch/m68k/tools/amiga/dmesg.c     for (p = start; p <= end-sizeof(struct savekmsg); p += 4) {
p                  51 arch/m68k/tools/amiga/dmesg.c 	m = (struct savekmsg *)p;
p                  53 arch/m68k/tools/amiga/dmesg.c 	    (m->magicptr == p)) {
p                  33 arch/microblaze/include/asm/flat.h 	u32 *p = (__force u32 *)rp;
p                  40 arch/microblaze/include/asm/flat.h 		val_hi = get_unaligned(p);
p                  41 arch/microblaze/include/asm/flat.h 		val_lo = get_unaligned(p+1);
p                  47 arch/microblaze/include/asm/flat.h 		*addr = get_unaligned(p);
p                  61 arch/microblaze/include/asm/flat.h 	u32 *p = (__force u32 *)rp;
p                  65 arch/microblaze/include/asm/flat.h 		unsigned long val_hi = get_unaligned(p);
p                  66 arch/microblaze/include/asm/flat.h 		unsigned long val_lo = get_unaligned(p + 1);
p                  73 arch/microblaze/include/asm/flat.h 		put_unaligned(val_hi, p);
p                  74 arch/microblaze/include/asm/flat.h 		put_unaligned(val_lo, p+1);
p                  77 arch/microblaze/include/asm/flat.h 		put_unaligned(addr, p);
p                 397 arch/microblaze/include/asm/pgtable.h static inline unsigned long pte_update(pte_t *p, unsigned long clr,
p                 409 arch/microblaze/include/asm/pgtable.h 			: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set)
p                  66 arch/microblaze/include/asm/processor.h extern unsigned long get_wchan(struct task_struct *p);
p                 110 arch/microblaze/include/asm/processor.h unsigned long get_wchan(struct task_struct *p);
p                  23 arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c #define CI(c, p) { ci->c = PVR_##p(pvr); }
p                  66 arch/microblaze/kernel/cpu/pvr.c void get_pvr(struct pvr_s *p)
p                  68 arch/microblaze/kernel/cpu/pvr.c 	get_single_pvr(0, p->pvr[0]);
p                  69 arch/microblaze/kernel/cpu/pvr.c 	get_single_pvr(1, p->pvr[1]);
p                  70 arch/microblaze/kernel/cpu/pvr.c 	get_single_pvr(2, p->pvr[2]);
p                  71 arch/microblaze/kernel/cpu/pvr.c 	get_single_pvr(3, p->pvr[3]);
p                  72 arch/microblaze/kernel/cpu/pvr.c 	get_single_pvr(4, p->pvr[4]);
p                  73 arch/microblaze/kernel/cpu/pvr.c 	get_single_pvr(5, p->pvr[5]);
p                  74 arch/microblaze/kernel/cpu/pvr.c 	get_single_pvr(6, p->pvr[6]);
p                  75 arch/microblaze/kernel/cpu/pvr.c 	get_single_pvr(7, p->pvr[7]);
p                  76 arch/microblaze/kernel/cpu/pvr.c 	get_single_pvr(8, p->pvr[8]);
p                  77 arch/microblaze/kernel/cpu/pvr.c 	get_single_pvr(9, p->pvr[9]);
p                  78 arch/microblaze/kernel/cpu/pvr.c 	get_single_pvr(10, p->pvr[10]);
p                  79 arch/microblaze/kernel/cpu/pvr.c 	get_single_pvr(11, p->pvr[11]);
p                  94 arch/microblaze/kernel/kgdb.c void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
p                  97 arch/microblaze/kernel/kgdb.c 	unsigned long *pt_regb = (unsigned long *)(p->thread.regs);
p                  58 arch/microblaze/kernel/process.c 		unsigned long arg, struct task_struct *p)
p                  60 arch/microblaze/kernel/process.c 	struct pt_regs *childregs = task_pt_regs(p);
p                  61 arch/microblaze/kernel/process.c 	struct thread_info *ti = task_thread_info(p);
p                  63 arch/microblaze/kernel/process.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 122 arch/microblaze/kernel/process.c unsigned long get_wchan(struct task_struct *p)
p                 232 arch/microblaze/mm/init.c 	char *p = cmd_line;
p                 235 arch/microblaze/mm/init.c 	p = strstr(cmd_line, "mem=");
p                 236 arch/microblaze/mm/init.c 	if (p) {
p                 237 arch/microblaze/mm/init.c 		p += 4;
p                 238 arch/microblaze/mm/init.c 		maxmem = memparse(p, &p);
p                 363 arch/microblaze/mm/init.c 	void *p;
p                 366 arch/microblaze/mm/init.c 		p = kzalloc(size, mask);
p                 368 arch/microblaze/mm/init.c 		p = memblock_alloc(size, SMP_CACHE_BYTES);
p                 369 arch/microblaze/mm/init.c 		if (!p)
p                 374 arch/microblaze/mm/init.c 	return p;
p                  51 arch/microblaze/mm/pgtable.c 	phys_addr_t p;
p                  60 arch/microblaze/mm/pgtable.c 	p = addr & PAGE_MASK;
p                  61 arch/microblaze/mm/pgtable.c 	size = PAGE_ALIGN(addr + size) - p;
p                  71 arch/microblaze/mm/pgtable.c 		p >= memory_start && p < virt_to_phys(high_memory) &&
p                  72 arch/microblaze/mm/pgtable.c 		!(p >= __virt_to_phys((phys_addr_t)__bss_stop) &&
p                  73 arch/microblaze/mm/pgtable.c 		p < __virt_to_phys((phys_addr_t)__bss_stop))) {
p                  75 arch/microblaze/mm/pgtable.c 			(unsigned long)p, __builtin_return_address(0));
p                 110 arch/microblaze/mm/pgtable.c 		err = map_page(v + i, p + i, flags);
p                 161 arch/microblaze/mm/pgtable.c 	unsigned long v, p, s, f;
p                 164 arch/microblaze/mm/pgtable.c 	p = memory_start;
p                 174 arch/microblaze/mm/pgtable.c 		map_page(v, p, f);
p                 176 arch/microblaze/mm/pgtable.c 		p += PAGE_SIZE;
p                 611 arch/microblaze/pci/pci-common.c 	struct resource *p, **pp;
p                 614 arch/microblaze/pci/pci-common.c 	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
p                 615 arch/microblaze/pci/pci-common.c 		if (p->end < res->start)
p                 617 arch/microblaze/pci/pci-common.c 		if (res->end < p->start)
p                 619 arch/microblaze/pci/pci-common.c 		if (p->start < res->start || p->end > res->end)
p                 631 arch/microblaze/pci/pci-common.c 	for (p = res->child; p != NULL; p = p->sibling) {
p                 632 arch/microblaze/pci/pci-common.c 		p->parent = res;
p                 634 arch/microblaze/pci/pci-common.c 			 p->name,
p                 635 arch/microblaze/pci/pci-common.c 			 (unsigned long long)p->start,
p                 636 arch/microblaze/pci/pci-common.c 			 (unsigned long long)p->end, res->name);
p                 203 arch/mips/alchemy/common/dbdma.c 	dbdev_tab_t *p;
p                 205 arch/mips/alchemy/common/dbdma.c 		p = &dbdev_tab[i];
p                 206 arch/mips/alchemy/common/dbdma.c 		if (p->dev_id == id)
p                 207 arch/mips/alchemy/common/dbdma.c 			return p;
p                 221 arch/mips/alchemy/common/dbdma.c 	dbdev_tab_t *p;
p                 224 arch/mips/alchemy/common/dbdma.c 	p = find_dbdev_id(~0);
p                 225 arch/mips/alchemy/common/dbdma.c 	if (NULL != p) {
p                 226 arch/mips/alchemy/common/dbdma.c 		memcpy(p, dev, sizeof(dbdev_tab_t));
p                 227 arch/mips/alchemy/common/dbdma.c 		p->dev_id = DSCR_DEV2CUSTOM_ID(new_id, dev->dev_id);
p                 228 arch/mips/alchemy/common/dbdma.c 		ret = p->dev_id;
p                 232 arch/mips/alchemy/common/dbdma.c 				  p->dev_id, p->dev_flags, p->dev_physaddr);
p                 242 arch/mips/alchemy/common/dbdma.c 	dbdev_tab_t *p = find_dbdev_id(devid);
p                 244 arch/mips/alchemy/common/dbdma.c 	if (p != NULL) {
p                 245 arch/mips/alchemy/common/dbdma.c 		memset(p, 0, sizeof(dbdev_tab_t));
p                 246 arch/mips/alchemy/common/dbdma.c 		p->dev_id = ~0;
p                 566 arch/mips/alchemy/common/irq.c void au1300_set_irq_priority(unsigned int irq, int p)
p                 569 arch/mips/alchemy/common/irq.c 	au1300_gpic_chgcfg(irq, GPIC_CFG_IL_MASK, GPIC_CFG_IL_SET(p));
p                 450 arch/mips/alchemy/devboards/db1000.c 	struct clk *c, *p;
p                 481 arch/mips/alchemy/devboards/db1000.c 		p = clk_get(NULL, "auxpll_clk");
p                 483 arch/mips/alchemy/devboards/db1000.c 		if (!IS_ERR(c) && !IS_ERR(p)) {
p                 484 arch/mips/alchemy/devboards/db1000.c 			clk_set_parent(c, p);
p                 485 arch/mips/alchemy/devboards/db1000.c 			clk_set_rate(c, clk_get_rate(p));
p                 489 arch/mips/alchemy/devboards/db1000.c 		if (!IS_ERR(p))
p                 490 arch/mips/alchemy/devboards/db1000.c 			clk_put(p);
p                 193 arch/mips/ar7/prom.c 	char *s, *p;
p                 200 arch/mips/ar7/prom.c 		baud = simple_strtoul(s, &p, 10);
p                 201 arch/mips/ar7/prom.c 		s = p;
p                  38 arch/mips/bcm47xx/serial.c 		struct plat_serial8250_port *p = &(uart8250_data[i]);
p                  41 arch/mips/bcm47xx/serial.c 		p->mapbase = (unsigned int)ssb_port->regs;
p                  42 arch/mips/bcm47xx/serial.c 		p->membase = (void *)ssb_port->regs;
p                  43 arch/mips/bcm47xx/serial.c 		p->irq = ssb_port->irq + 2;
p                  44 arch/mips/bcm47xx/serial.c 		p->uartclk = ssb_port->baud_base;
p                  45 arch/mips/bcm47xx/serial.c 		p->regshift = ssb_port->reg_shift;
p                  46 arch/mips/bcm47xx/serial.c 		p->iotype = UPIO_MEM;
p                  47 arch/mips/bcm47xx/serial.c 		p->flags = UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
p                  63 arch/mips/bcm47xx/serial.c 		struct plat_serial8250_port *p = &(uart8250_data[i]);
p                  67 arch/mips/bcm47xx/serial.c 		p->mapbase = (unsigned int)bcma_port->regs;
p                  68 arch/mips/bcm47xx/serial.c 		p->membase = (void *)bcma_port->regs;
p                  69 arch/mips/bcm47xx/serial.c 		p->irq = bcma_port->irq;
p                  70 arch/mips/bcm47xx/serial.c 		p->uartclk = bcma_port->baud_base;
p                  71 arch/mips/bcm47xx/serial.c 		p->regshift = bcma_port->reg_shift;
p                  72 arch/mips/bcm47xx/serial.c 		p->iotype = UPIO_MEM;
p                  73 arch/mips/bcm47xx/serial.c 		p->flags = UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
p                  71 arch/mips/bcm63xx/nvram.c 		u8 *p = mac + ETH_ALEN - 1;
p                  74 arch/mips/bcm63xx/nvram.c 			(*p)++;
p                  75 arch/mips/bcm63xx/nvram.c 			if (*p != 0)
p                  77 arch/mips/bcm63xx/nvram.c 			p--;
p                  78 arch/mips/bcm63xx/nvram.c 		} while (p != oui);
p                  80 arch/mips/bcm63xx/nvram.c 		if (p == oui) {
p                 125 arch/mips/bcm63xx/setup.c static void __bcm63xx_machine_reboot(char *p)
p                 186 arch/mips/boot/elf2ecoff.c static void convert_elf_phdrs(Elf32_Phdr * p, int num)
p                 190 arch/mips/boot/elf2ecoff.c 	for (i = 0; i < num; i++, p++) {
p                 191 arch/mips/boot/elf2ecoff.c 		p->p_type = swab32(p->p_type);
p                 192 arch/mips/boot/elf2ecoff.c 		p->p_offset = swab32(p->p_offset);
p                 193 arch/mips/boot/elf2ecoff.c 		p->p_vaddr = swab32(p->p_vaddr);
p                 194 arch/mips/boot/elf2ecoff.c 		p->p_paddr = swab32(p->p_paddr);
p                 195 arch/mips/boot/elf2ecoff.c 		p->p_filesz = swab32(p->p_filesz);
p                 196 arch/mips/boot/elf2ecoff.c 		p->p_memsz = swab32(p->p_memsz);
p                 197 arch/mips/boot/elf2ecoff.c 		p->p_flags = swab32(p->p_flags);
p                 198 arch/mips/boot/elf2ecoff.c 		p->p_align = swab32(p->p_align);
p                 125 arch/mips/cavium-octeon/crypto/octeon-md5.c 	char *p = (char *)mctx->block + offset;
p                 130 arch/mips/cavium-octeon/crypto/octeon-md5.c 	*p++ = 0x80;
p                 136 arch/mips/cavium-octeon/crypto/octeon-md5.c 		memset(p, 0x00, padding + sizeof(u64));
p                 138 arch/mips/cavium-octeon/crypto/octeon-md5.c 		p = (char *)mctx->block;
p                 142 arch/mips/cavium-octeon/crypto/octeon-md5.c 	memset(p, 0, padding);
p                 486 arch/mips/cavium-octeon/octeon-platform.c 	const char *p;
p                 539 arch/mips/cavium-octeon/octeon-platform.c 		p = fdt_getprop(initial_boot_params, phy, "compatible",
p                 541 arch/mips/cavium-octeon/octeon-platform.c 		if (p && current_len >= strlen(new_name))
p                 554 arch/mips/cavium-octeon/octeon-platform.c 	p = fdt_get_name(initial_boot_params, phy, &current_len);
p                 555 arch/mips/cavium-octeon/octeon-platform.c 	if (p && current_len == strlen(new_name))
p                 558 arch/mips/cavium-octeon/octeon-platform.c 		pr_err("Error: could not rename ethernet phy: <%s>", p);
p                 598 arch/mips/cavium-octeon/octeon-platform.c 		int p = fdt_node_offset_by_phandle(initial_boot_params, ph);
p                 600 arch/mips/cavium-octeon/octeon-platform.c 		if (p >= 0)
p                 601 arch/mips/cavium-octeon/octeon-platform.c 			fdt_nop_node(initial_boot_params, p);
p                 645 arch/mips/cavium-octeon/octeon-platform.c static void __init octeon_fdt_pip_port(int iface, int i, int p, int max)
p                 653 arch/mips/cavium-octeon/octeon-platform.c 	snprintf(name_buffer, sizeof(name_buffer), "ethernet@%x", p);
p                 657 arch/mips/cavium-octeon/octeon-platform.c 	if (p > max) {
p                 658 arch/mips/cavium-octeon/octeon-platform.c 		pr_debug("Deleting port %x:%x\n", i, p);
p                 663 arch/mips/cavium-octeon/octeon-platform.c 		ipd_port = (0x100 * i) + (0x10 * p) + 0x800;
p                 665 arch/mips/cavium-octeon/octeon-platform.c 		ipd_port = 16 * i + p;
p                 675 arch/mips/cavium-octeon/octeon-platform.c 	octeon_rx_tx_delay(eth, i, p);
p                 682 arch/mips/cavium-octeon/octeon-platform.c 	int p;
p                 693 arch/mips/cavium-octeon/octeon-platform.c 	for (p = 0; p < 16; p++)
p                 694 arch/mips/cavium-octeon/octeon-platform.c 		octeon_fdt_pip_port(iface, idx, p, count - 1);
p                 742 arch/mips/cavium-octeon/octeon-platform.c 		int p;
p                 749 arch/mips/cavium-octeon/octeon-platform.c 		for (p = 0; p < 16; p++) {
p                 753 arch/mips/cavium-octeon/octeon-platform.c 				 "ethernet@%x", p);
p                 663 arch/mips/cavium-octeon/setup.c 	char *p;
p                 861 arch/mips/cavium-octeon/setup.c 		max_memory = memparse(arg + 4, &p);
p                 864 arch/mips/cavium-octeon/setup.c 		if (*p == '@')
p                 865 arch/mips/cavium-octeon/setup.c 			reserve_low_mem = memparse(p + 1, &p);
p                 875 arch/mips/cavium-octeon/setup.c 			max_memory = memparse(arg + 4, &p);
p                 878 arch/mips/cavium-octeon/setup.c 			if (*p == '@')
p                 879 arch/mips/cavium-octeon/setup.c 				reserve_low_mem = memparse(p + 1, &p);
p                 882 arch/mips/cavium-octeon/setup.c 			crashk_size = memparse(arg+12, &p);
p                 883 arch/mips/cavium-octeon/setup.c 			if (*p == '@')
p                 884 arch/mips/cavium-octeon/setup.c 				crashk_base = memparse(p+1, &p);
p                 335 arch/mips/cavium-octeon/smp.c 		uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
p                 337 arch/mips/cavium-octeon/smp.c 		*p |= mask;
p                 338 arch/mips/cavium-octeon/smp.c 		new_mask = *p;
p                  70 arch/mips/crypto/crc32-mips.c static u32 crc32_mips_le_hw(u32 crc_, const u8 *p, unsigned int len)
p                  76 arch/mips/crypto/crc32-mips.c 		u64 value = get_unaligned_le64(p);
p                  79 arch/mips/crypto/crc32-mips.c 		p += sizeof(u64);
p                  87 arch/mips/crypto/crc32-mips.c 		u32 value = get_unaligned_le32(p);
p                  90 arch/mips/crypto/crc32-mips.c 		p += sizeof(u32);
p                  95 arch/mips/crypto/crc32-mips.c 		u16 value = get_unaligned_le16(p);
p                  98 arch/mips/crypto/crc32-mips.c 		p += sizeof(u16);
p                 102 arch/mips/crypto/crc32-mips.c 		u8 value = *p++;
p                 110 arch/mips/crypto/crc32-mips.c static u32 crc32c_mips_le_hw(u32 crc_, const u8 *p, unsigned int len)
p                 116 arch/mips/crypto/crc32-mips.c 		u64 value = get_unaligned_le64(p);
p                 119 arch/mips/crypto/crc32-mips.c 		p += sizeof(u64);
p                 127 arch/mips/crypto/crc32-mips.c 		u32 value = get_unaligned_le32(p);
p                 130 arch/mips/crypto/crc32-mips.c 		p += sizeof(u32);
p                 135 arch/mips/crypto/crc32-mips.c 		u16 value = get_unaligned_le16(p);
p                 138 arch/mips/crypto/crc32-mips.c 		p += sizeof(u16);
p                 142 arch/mips/crypto/crc32-mips.c 		u8 value = *p++;
p                  98 arch/mips/dec/setup.c 	{ { .i = ~0 }, { .p = dec_intr_unimplemented } },
p                 101 arch/mips/dec/setup.c 	{ { .i = ~0 }, { .p = asic_intr_unimplemented } },
p                 231 arch/mips/dec/setup.c 		{ .p = cpu_all_int } },
p                 306 arch/mips/dec/setup.c 		{ .p = cpu_all_int } },
p                 377 arch/mips/dec/setup.c 		{ .p = kn02_io_int } },
p                 379 arch/mips/dec/setup.c 		{ .p = cpu_all_int } },
p                 396 arch/mips/dec/setup.c 		{ .p = kn02_all_int } },
p                 470 arch/mips/dec/setup.c 		{ .p = kn02xa_io_int } },
p                 478 arch/mips/dec/setup.c 		{ .p = cpu_all_int } },
p                 487 arch/mips/dec/setup.c 		{ .p = asic_dma_int } },
p                 497 arch/mips/dec/setup.c 		{ .p = asic_all_int } },
p                 575 arch/mips/dec/setup.c 		{ .p = kn02xa_io_int } },
p                 577 arch/mips/dec/setup.c 		{ .p = cpu_all_int } },
p                 582 arch/mips/dec/setup.c 		{ .p = asic_dma_int } },
p                 594 arch/mips/dec/setup.c 		{ .p = asic_all_int } },
p                 672 arch/mips/dec/setup.c 		{ .p = kn03_io_int } },
p                 674 arch/mips/dec/setup.c 		{ .p = cpu_all_int } },
p                 679 arch/mips/dec/setup.c 		{ .p = asic_dma_int } },
p                 695 arch/mips/dec/setup.c 		{ .p = asic_all_int } },
p                  92 arch/mips/fw/arc/identify.c 	pcomponent *p;
p                  99 arch/mips/fw/arc/identify.c 	p = ArcGetChild(PROM_NULL_COMPONENT);
p                 100 arch/mips/fw/arc/identify.c 	if (p == NULL) {
p                 109 arch/mips/fw/arc/identify.c 		iname = (char *) (long) p->iname;
p                 122 arch/mips/fw/arc/memory.c 	struct linux_mdesc *p;
p                 128 arch/mips/fw/arc/memory.c 	p = ArcGetMemoryDescriptor(PROM_NULL_MDESC);
p                 129 arch/mips/fw/arc/memory.c 	while(p) {
p                 131 arch/mips/fw/arc/memory.c 		       i, p, p->base, p->pages, mtypes(p->type));
p                 132 arch/mips/fw/arc/memory.c 		p = ArcGetMemoryDescriptor(p);
p                 138 arch/mips/fw/arc/memory.c 	p = PROM_NULL_MDESC;
p                 139 arch/mips/fw/arc/memory.c 	while ((p = ArcGetMemoryDescriptor(p))) {
p                 143 arch/mips/fw/arc/memory.c 		base = p->base << ARC_PAGE_SHIFT;
p                 144 arch/mips/fw/arc/memory.c 		size = p->pages << ARC_PAGE_SHIFT;
p                 145 arch/mips/fw/arc/memory.c 		type = prom_memtype_classify(p->type);
p                  94 arch/mips/fw/arc/tree.c dump_component(pcomponent *p)
p                  97 arch/mips/fw/arc/tree.c 	       p, classes[p->class], types[p->type],
p                  98 arch/mips/fw/arc/tree.c 	       iflags[p->iflags], p->vers, p->rev);
p                 100 arch/mips/fw/arc/tree.c 	       p->key, p->amask, (int)p->cdsize, (int)p->ilen, p->iname);
p                 104 arch/mips/fw/arc/tree.c traverse(pcomponent *p, int op)
p                 106 arch/mips/fw/arc/tree.c 	dump_component(p);
p                 107 arch/mips/fw/arc/tree.c 	if(ArcGetChild(p))
p                 108 arch/mips/fw/arc/tree.c 		traverse(ArcGetChild(p), 1);
p                 109 arch/mips/fw/arc/tree.c 	if(ArcGetPeer(p) && op)
p                 110 arch/mips/fw/arc/tree.c 		traverse(ArcGetPeer(p), 1);
p                 116 arch/mips/fw/arc/tree.c 	pcomponent *p;
p                 118 arch/mips/fw/arc/tree.c 	p = ArcGetChild(PROM_NULL_COMPONENT);
p                 119 arch/mips/fw/arc/tree.c 	dump_component(p);
p                 120 arch/mips/fw/arc/tree.c 	p = ArcGetChild(p);
p                 121 arch/mips/fw/arc/tree.c 	while(p) {
p                 122 arch/mips/fw/arc/tree.c 		dump_component(p);
p                 123 arch/mips/fw/arc/tree.c 		p = ArcGetPeer(p);
p                 126 arch/mips/include/asm/addrspace.h #define PHYS_TO_XKSEG_UNCACHED(p)	PHYS_TO_XKPHYS(K_CALG_UNCACHED, (p))
p                 127 arch/mips/include/asm/addrspace.h #define PHYS_TO_XKSEG_CACHED(p)		PHYS_TO_XKPHYS(K_CALG_COH_SHAREABLE, (p))
p                 128 arch/mips/include/asm/addrspace.h #define XKPHYS_TO_PHYS(p)		((p) & TO_PHYS_MASK)
p                 204 arch/mips/include/asm/asmmacro.h 	.macro	_EXT	rd, rs, p, s
p                 208 arch/mips/include/asm/asmmacro.h 	.macro	_EXT	rd, rs, p, s
p                  72 arch/mips/include/asm/cdmm.h #define mips_cdmm_set_drvdata(d, p)	dev_set_drvdata(&d->dev, p)
p                 103 arch/mips/include/asm/dec/interrupts.h typedef union { int i; void *p; } int_ptr;
p                 407 arch/mips/include/asm/elf.h 	unsigned int p;							\
p                 421 arch/mips/include/asm/elf.h 	p = personality(current->personality);				\
p                 422 arch/mips/include/asm/elf.h 	if (p != PER_LINUX32 && p != PER_LINUX)				\
p                 114 arch/mips/include/asm/emma/emma2rh.h 	volatile u32 *p = (volatile u32 *)0xbfc00000;
p                 115 arch/mips/include/asm/emma/emma2rh.h 	(void)(*p);
p                 396 arch/mips/include/asm/io.h #define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax, p)	\
p                 398 arch/mips/include/asm/io.h static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
p                 418 arch/mips/include/asm/io.h static inline type pfx##in##bwlq##p(unsigned long port)			\
p                 652 arch/mips/include/asm/io.h #define xlate_dev_mem_ptr(p)	__va(p)
p                 657 arch/mips/include/asm/io.h #define xlate_dev_kmem_ptr(p)	p
p                  32 arch/mips/include/asm/kprobes.h #define flush_insn_slot(p)						\
p                  34 arch/mips/include/asm/kprobes.h 	if (p->addr)							\
p                  35 arch/mips/include/asm/kprobes.h 		flush_icache_range((unsigned long)p->addr,		\
p                  36 arch/mips/include/asm/kprobes.h 			   (unsigned long)p->addr +			\
p                  43 arch/mips/include/asm/kprobes.h void arch_remove_kprobe(struct kprobe *p);
p                 878 arch/mips/include/asm/mach-au1x00/au1000.h extern void au1300_set_irq_priority(unsigned int irq, int p);
p                  32 arch/mips/include/asm/mach-cavium-octeon/mangle-port.h static inline bool __should_swizzle_addr(u64 p)
p                  35 arch/mips/include/asm/mach-cavium-octeon/mangle-port.h 	return ((p >> 40) & 0xff) == 0;
p                  48 arch/mips/include/asm/mach-lantiq/xway/xway_dma.h extern void ltq_dma_init_port(int p);
p                 206 arch/mips/include/asm/nile4.h     volatile u32 *p = (volatile u32 *)0xbfc00000;
p                 207 arch/mips/include/asm/nile4.h     (void)(*p);
p                 379 arch/mips/include/asm/processor.h unsigned long get_wchan(struct task_struct *p);
p                  18 arch/mips/include/asm/tlbex.h void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
p                  20 arch/mips/include/asm/tlbex.h void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr);
p                  21 arch/mips/include/asm/tlbex.h void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr);
p                  22 arch/mips/include/asm/tlbex.h void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep);
p                  23 arch/mips/include/asm/tlbex.h void build_tlb_write_entry(u32 **p, struct uasm_label **l,
p                 127 arch/mips/include/asm/uaccess.h static inline int __access_ok(const void __user *p, unsigned long size)
p                 129 arch/mips/include/asm/uaccess.h 	unsigned long addr = (unsigned long)p;
p                 254 arch/mips/include/asm/uasm.h static inline void uasm_i_drotr_safe(u32 **p, unsigned int a1,
p                 258 arch/mips/include/asm/uasm.h 		uasm_i_drotr(p, a1, a2, a3);
p                 260 arch/mips/include/asm/uasm.h 		uasm_i_drotr32(p, a1, a2, a3 - 32);
p                 263 arch/mips/include/asm/uasm.h static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1,
p                 267 arch/mips/include/asm/uasm.h 		uasm_i_dsll(p, a1, a2, a3);
p                 269 arch/mips/include/asm/uasm.h 		uasm_i_dsll32(p, a1, a2, a3 - 32);
p                 272 arch/mips/include/asm/uasm.h static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
p                 276 arch/mips/include/asm/uasm.h 		uasm_i_dsrl(p, a1, a2, a3);
p                 278 arch/mips/include/asm/uasm.h 		uasm_i_dsrl32(p, a1, a2, a3 - 32);
p                 281 arch/mips/include/asm/uasm.h static inline void uasm_i_dsra_safe(u32 **p, unsigned int a1,
p                 285 arch/mips/include/asm/uasm.h 		uasm_i_dsra(p, a1, a2, a3);
p                 287 arch/mips/include/asm/uasm.h 		uasm_i_dsra32(p, a1, a2, a3 - 32);
p                 309 arch/mips/include/asm/uasm.h void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid);
p                 310 arch/mips/include/asm/uasm.h void uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
p                 312 arch/mips/include/asm/uasm.h void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
p                 314 arch/mips/include/asm/uasm.h void uasm_il_beq(u32 **p, struct uasm_reloc **r, unsigned int r1,
p                 316 arch/mips/include/asm/uasm.h void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
p                 317 arch/mips/include/asm/uasm.h void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
p                 318 arch/mips/include/asm/uasm.h void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
p                 319 arch/mips/include/asm/uasm.h void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
p                 320 arch/mips/include/asm/uasm.h void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
p                 321 arch/mips/include/asm/uasm.h void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
p                 323 arch/mips/include/asm/uasm.h void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
p                  48 arch/mips/include/asm/xtalk/xtalk.h #define XIO_PACK(p, o)	((((uint64_t)(p))<<XIO_PORT_SHIFT) | ((o)&XIO_ADDR_BITS))
p                  48 arch/mips/kernel/cpu-bugs64.c 	long p, s, lv1, lv2, lw;
p                 105 arch/mips/kernel/cpu-bugs64.c 	p = m1 * m2;
p                 110 arch/mips/kernel/cpu-bugs64.c 		: "0" (lv2), "r" (p));
p                  41 arch/mips/kernel/irq.c int arch_show_interrupts(struct seq_file *p, int prec)
p                  43 arch/mips/kernel/irq.c 	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
p                 238 arch/mips/kernel/kgdb.c void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
p                 251 arch/mips/kernel/kgdb.c 	*(ptr++) = p->thread.reg16;
p                 252 arch/mips/kernel/kgdb.c 	*(ptr++) = p->thread.reg17;
p                 253 arch/mips/kernel/kgdb.c 	*(ptr++) = p->thread.reg18;
p                 254 arch/mips/kernel/kgdb.c 	*(ptr++) = p->thread.reg19;
p                 255 arch/mips/kernel/kgdb.c 	*(ptr++) = p->thread.reg20;
p                 256 arch/mips/kernel/kgdb.c 	*(ptr++) = p->thread.reg21;
p                 257 arch/mips/kernel/kgdb.c 	*(ptr++) = p->thread.reg22;
p                 258 arch/mips/kernel/kgdb.c 	*(ptr++) = p->thread.reg23;
p                 264 arch/mips/kernel/kgdb.c 	*(ptr++) = (long)p;
p                 265 arch/mips/kernel/kgdb.c 	*(ptr++) = p->thread.reg29;
p                 266 arch/mips/kernel/kgdb.c 	*(ptr++) = p->thread.reg30;
p                 267 arch/mips/kernel/kgdb.c 	*(ptr++) = p->thread.reg31;
p                 269 arch/mips/kernel/kgdb.c 	*(ptr++) = p->thread.cp0_status;
p                 287 arch/mips/kernel/kgdb.c 	*(ptr++) = p->thread.reg31;
p                  74 arch/mips/kernel/kprobes.c int __kprobes arch_prepare_kprobe(struct kprobe *p)
p                  80 arch/mips/kernel/kprobes.c 	insn = p->addr[0];
p                  89 arch/mips/kernel/kprobes.c 	if ((probe_kernel_read(&prev_insn, p->addr - 1,
p                 104 arch/mips/kernel/kprobes.c 	p->ainsn.insn = get_insn_slot();
p                 105 arch/mips/kernel/kprobes.c 	if (!p->ainsn.insn) {
p                 124 arch/mips/kernel/kprobes.c 		memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t));
p                 126 arch/mips/kernel/kprobes.c 		memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
p                 128 arch/mips/kernel/kprobes.c 	p->ainsn.insn[1] = breakpoint2_insn;
p                 129 arch/mips/kernel/kprobes.c 	p->opcode = *p->addr;
p                 135 arch/mips/kernel/kprobes.c void __kprobes arch_arm_kprobe(struct kprobe *p)
p                 137 arch/mips/kernel/kprobes.c 	*p->addr = breakpoint_insn;
p                 138 arch/mips/kernel/kprobes.c 	flush_insn_slot(p);
p                 141 arch/mips/kernel/kprobes.c void __kprobes arch_disarm_kprobe(struct kprobe *p)
p                 143 arch/mips/kernel/kprobes.c 	*p->addr = p->opcode;
p                 144 arch/mips/kernel/kprobes.c 	flush_insn_slot(p);
p                 147 arch/mips/kernel/kprobes.c void __kprobes arch_remove_kprobe(struct kprobe *p)
p                 149 arch/mips/kernel/kprobes.c 	if (p->ainsn.insn) {
p                 150 arch/mips/kernel/kprobes.c 		free_insn_slot(p->ainsn.insn, 0);
p                 151 arch/mips/kernel/kprobes.c 		p->ainsn.insn = NULL;
p                 173 arch/mips/kernel/kprobes.c static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
p                 176 arch/mips/kernel/kprobes.c 	__this_cpu_write(current_kprobe, p);
p                 194 arch/mips/kernel/kprobes.c static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs,
p                 197 arch/mips/kernel/kprobes.c 	union mips_instruction insn = p->opcode;
p                 205 arch/mips/kernel/kprobes.c 	if (p->ainsn.insn->word == 0)
p                 228 arch/mips/kernel/kprobes.c static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
p                 236 arch/mips/kernel/kprobes.c 	if (p->opcode.word == breakpoint_insn.word ||
p                 237 arch/mips/kernel/kprobes.c 	    p->opcode.word == breakpoint2_insn.word)
p                 238 arch/mips/kernel/kprobes.c 		regs->cp0_epc = (unsigned long)p->addr;
p                 239 arch/mips/kernel/kprobes.c 	else if (insn_has_delayslot(p->opcode)) {
p                 240 arch/mips/kernel/kprobes.c 		ret = evaluate_branch_instruction(p, regs, kcb);
p                 246 arch/mips/kernel/kprobes.c 	regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
p                 261 arch/mips/kernel/kprobes.c static void __kprobes resume_execution(struct kprobe *p,
p                 265 arch/mips/kernel/kprobes.c 	if (insn_has_delayslot(p->opcode))
p                 275 arch/mips/kernel/kprobes.c 	struct kprobe *p;
p                 291 arch/mips/kernel/kprobes.c 		p = get_kprobe(addr);
p                 292 arch/mips/kernel/kprobes.c 		if (p) {
p                 294 arch/mips/kernel/kprobes.c 			    p->ainsn.insn->word == breakpoint_insn.word) {
p                 307 arch/mips/kernel/kprobes.c 			set_current_kprobe(p, regs, kcb);
p                 308 arch/mips/kernel/kprobes.c 			kprobes_inc_nmissed_count(p);
p                 309 arch/mips/kernel/kprobes.c 			prepare_singlestep(p, regs, kcb);
p                 312 arch/mips/kernel/kprobes.c 				resume_execution(p, regs, kcb);
p                 328 arch/mips/kernel/kprobes.c 	p = get_kprobe(addr);
p                 329 arch/mips/kernel/kprobes.c 	if (!p) {
p                 344 arch/mips/kernel/kprobes.c 	set_current_kprobe(p, regs, kcb);
p                 347 arch/mips/kernel/kprobes.c 	if (p->pre_handler && p->pre_handler(p, regs)) {
p                 354 arch/mips/kernel/kprobes.c 	prepare_singlestep(p, regs, kcb);
p                 357 arch/mips/kernel/kprobes.c 		if (p->post_handler)
p                 358 arch/mips/kernel/kprobes.c 			p->post_handler(p, regs, 0);
p                 359 arch/mips/kernel/kprobes.c 		resume_execution(p, regs, kcb);
p                 488 arch/mips/kernel/kprobes.c static int __kprobes trampoline_probe_handler(struct kprobe *p,
p                 550 arch/mips/kernel/kprobes.c int __kprobes arch_trampoline_kprobe(struct kprobe *p)
p                 552 arch/mips/kernel/kprobes.c 	if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
p                  90 arch/mips/kernel/linux32.c 	unsigned int p = personality & 0xffffffff;
p                  94 arch/mips/kernel/linux32.c 	    personality(p) == PER_LINUX)
p                  95 arch/mips/kernel/linux32.c 		p = (p & ~PER_MASK) | PER_LINUX32;
p                  96 arch/mips/kernel/linux32.c 	ret = sys_personality(p);
p                  49 arch/mips/kernel/mips-mt-fpaff.c static bool check_same_owner(struct task_struct *p)
p                  55 arch/mips/kernel/mips-mt-fpaff.c 	pcred = __task_cred(p);
p                  70 arch/mips/kernel/mips-mt-fpaff.c 	struct task_struct *p;
p                  82 arch/mips/kernel/mips-mt-fpaff.c 	p = find_process_by_pid(pid);
p                  83 arch/mips/kernel/mips-mt-fpaff.c 	if (!p) {
p                  90 arch/mips/kernel/mips-mt-fpaff.c 	get_task_struct(p);
p                 105 arch/mips/kernel/mips-mt-fpaff.c 	if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) {
p                 110 arch/mips/kernel/mips-mt-fpaff.c 	retval = security_task_setscheduler(p);
p                 115 arch/mips/kernel/mips-mt-fpaff.c 	cpumask_copy(&p->thread.user_cpus_allowed, new_mask);
p                 119 arch/mips/kernel/mips-mt-fpaff.c 	ti = task_thread_info(p);
p                 123 arch/mips/kernel/mips-mt-fpaff.c 		retval = set_cpus_allowed_ptr(p, effective_mask);
p                 127 arch/mips/kernel/mips-mt-fpaff.c 		retval = set_cpus_allowed_ptr(p, new_mask);
p                 131 arch/mips/kernel/mips-mt-fpaff.c 		cpuset_cpus_allowed(p, cpus_allowed);
p                 149 arch/mips/kernel/mips-mt-fpaff.c 	put_task_struct(p);
p                 163 arch/mips/kernel/mips-mt-fpaff.c 	struct task_struct *p;
p                 173 arch/mips/kernel/mips-mt-fpaff.c 	p = find_process_by_pid(pid);
p                 174 arch/mips/kernel/mips-mt-fpaff.c 	if (!p)
p                 176 arch/mips/kernel/mips-mt-fpaff.c 	retval = security_task_getscheduler(p);
p                 180 arch/mips/kernel/mips-mt-fpaff.c 	cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr);
p                 888 arch/mips/kernel/mips-r2-to-r6-emul.c 	const struct r2_decoder_table *p;
p                 891 arch/mips/kernel/mips-r2-to-r6-emul.c 	for (p = table; p->func; p++) {
p                 892 arch/mips/kernel/mips-r2-to-r6-emul.c 		if ((inst & p->mask) == p->code) {
p                 893 arch/mips/kernel/mips-r2-to-r6-emul.c 			err = (p->func)(regs, inst);
p                 346 arch/mips/kernel/pm-cps.c 	u32 *buf, *p;
p                 368 arch/mips/kernel/pm-cps.c 	p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
p                 386 arch/mips/kernel/pm-cps.c 		UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
p                 387 arch/mips/kernel/pm-cps.c 		uasm_i_jalr(&p, v0, t0);
p                 388 arch/mips/kernel/pm-cps.c 		uasm_i_nop(&p);
p                 396 arch/mips/kernel/pm-cps.c 	UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());
p                 400 arch/mips/kernel/pm-cps.c 		uasm_i_sync(&p, STYPE_SYNC_MB);
p                 401 arch/mips/kernel/pm-cps.c 		uasm_build_label(&l, p, lbl_incready);
p                 402 arch/mips/kernel/pm-cps.c 		uasm_i_ll(&p, t1, 0, r_nc_count);
p                 403 arch/mips/kernel/pm-cps.c 		uasm_i_addiu(&p, t2, t1, 1);
p                 404 arch/mips/kernel/pm-cps.c 		uasm_i_sc(&p, t2, 0, r_nc_count);
p                 405 arch/mips/kernel/pm-cps.c 		uasm_il_beqz(&p, &r, t2, lbl_incready);
p                 406 arch/mips/kernel/pm-cps.c 		uasm_i_addiu(&p, t1, t1, 1);
p                 409 arch/mips/kernel/pm-cps.c 		uasm_i_sync(&p, STYPE_SYNC_MB);
p                 415 arch/mips/kernel/pm-cps.c 		uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
p                 416 arch/mips/kernel/pm-cps.c 		uasm_i_nop(&p);
p                 425 arch/mips/kernel/pm-cps.c 			uasm_i_addiu(&p, t1, zero, -1);
p                 426 arch/mips/kernel/pm-cps.c 			uasm_build_label(&l, p, lbl_poll_cont);
p                 427 arch/mips/kernel/pm-cps.c 			uasm_i_lw(&p, t0, 0, r_nc_count);
p                 428 arch/mips/kernel/pm-cps.c 			uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
p                 429 arch/mips/kernel/pm-cps.c 			uasm_i_ehb(&p);
p                 431 arch/mips/kernel/pm-cps.c 				uasm_i_yield(&p, zero, t1);
p                 432 arch/mips/kernel/pm-cps.c 			uasm_il_b(&p, &r, lbl_poll_cont);
p                 433 arch/mips/kernel/pm-cps.c 			uasm_i_nop(&p);
p                 441 arch/mips/kernel/pm-cps.c 				uasm_i_addiu(&p, t0, zero, TCHALT_H);
p                 442 arch/mips/kernel/pm-cps.c 				uasm_i_mtc0(&p, t0, 2, 4);
p                 448 arch/mips/kernel/pm-cps.c 				uasm_i_addiu(&p, t0, zero, 1 << vpe_id);
p                 449 arch/mips/kernel/pm-cps.c 				UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop());
p                 450 arch/mips/kernel/pm-cps.c 				uasm_i_sw(&p, t0, 0, t1);
p                 454 arch/mips/kernel/pm-cps.c 			uasm_build_label(&l, p, lbl_secondary_hang);
p                 455 arch/mips/kernel/pm-cps.c 			uasm_il_b(&p, &r, lbl_secondary_hang);
p                 456 arch/mips/kernel/pm-cps.c 			uasm_i_nop(&p);
p                 465 arch/mips/kernel/pm-cps.c 	uasm_build_label(&l, p, lbl_disable_coherence);
p                 468 arch/mips/kernel/pm-cps.c 	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
p                 472 arch/mips/kernel/pm-cps.c 	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
p                 476 arch/mips/kernel/pm-cps.c 	uasm_i_sync(&p, STYPE_SYNC);
p                 477 arch/mips/kernel/pm-cps.c 	uasm_i_ehb(&p);
p                 485 arch/mips/kernel/pm-cps.c 		uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu]));
p                 486 arch/mips/kernel/pm-cps.c 		uasm_i_sw(&p, t0, 0, r_pcohctl);
p                 487 arch/mips/kernel/pm-cps.c 		uasm_i_lw(&p, t0, 0, r_pcohctl);
p                 490 arch/mips/kernel/pm-cps.c 		uasm_i_sync(&p, STYPE_SYNC);
p                 491 arch/mips/kernel/pm-cps.c 		uasm_i_ehb(&p);
p                 495 arch/mips/kernel/pm-cps.c 	uasm_i_sw(&p, zero, 0, r_pcohctl);
p                 496 arch/mips/kernel/pm-cps.c 	uasm_i_lw(&p, t0, 0, r_pcohctl);
p                 499 arch/mips/kernel/pm-cps.c 		err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
p                 518 arch/mips/kernel/pm-cps.c 		UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
p                 519 arch/mips/kernel/pm-cps.c 		uasm_i_addiu(&p, t1, zero, cpc_cmd);
p                 520 arch/mips/kernel/pm-cps.c 		uasm_i_sw(&p, t1, 0, t0);
p                 524 arch/mips/kernel/pm-cps.c 			uasm_build_label(&l, p, lbl_hang);
p                 525 arch/mips/kernel/pm-cps.c 			uasm_il_b(&p, &r, lbl_hang);
p                 526 arch/mips/kernel/pm-cps.c 			uasm_i_nop(&p);
p                 537 arch/mips/kernel/pm-cps.c 		uasm_i_sync(&p, STYPE_SYNC);
p                 538 arch/mips/kernel/pm-cps.c 		uasm_i_ehb(&p);
p                 548 arch/mips/kernel/pm-cps.c 			cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
p                 556 arch/mips/kernel/pm-cps.c 		uasm_build_label(&l, p, lbl_secondary_cont);
p                 559 arch/mips/kernel/pm-cps.c 		uasm_i_wait(&p, 0);
p                 567 arch/mips/kernel/pm-cps.c 	uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3
p                 571 arch/mips/kernel/pm-cps.c 	uasm_i_sw(&p, t0, 0, r_pcohctl);
p                 572 arch/mips/kernel/pm-cps.c 	uasm_i_lw(&p, t0, 0, r_pcohctl);
p                 575 arch/mips/kernel/pm-cps.c 	uasm_i_sync(&p, STYPE_SYNC);
p                 576 arch/mips/kernel/pm-cps.c 	uasm_i_ehb(&p);
p                 580 arch/mips/kernel/pm-cps.c 		uasm_build_label(&l, p, lbl_decready);
p                 581 arch/mips/kernel/pm-cps.c 		uasm_i_sync(&p, STYPE_SYNC_MB);
p                 582 arch/mips/kernel/pm-cps.c 		uasm_i_ll(&p, t1, 0, r_nc_count);
p                 583 arch/mips/kernel/pm-cps.c 		uasm_i_addiu(&p, t2, t1, -1);
p                 584 arch/mips/kernel/pm-cps.c 		uasm_i_sc(&p, t2, 0, r_nc_count);
p                 585 arch/mips/kernel/pm-cps.c 		uasm_il_beqz(&p, &r, t2, lbl_decready);
p                 586 arch/mips/kernel/pm-cps.c 		uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);
p                 589 arch/mips/kernel/pm-cps.c 		uasm_i_sync(&p, STYPE_SYNC_MB);
p                 598 arch/mips/kernel/pm-cps.c 		cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);
p                 608 arch/mips/kernel/pm-cps.c 		uasm_build_label(&l, p, lbl_secondary_cont);
p                 611 arch/mips/kernel/pm-cps.c 		uasm_i_sync(&p, STYPE_SYNC_MB);
p                 615 arch/mips/kernel/pm-cps.c 	uasm_i_jr(&p, ra);
p                 616 arch/mips/kernel/pm-cps.c 	uasm_i_nop(&p);
p                 620 arch/mips/kernel/pm-cps.c 	BUG_ON((p - buf) > max_instrs);
p                 628 arch/mips/kernel/pm-cps.c 	local_flush_icache_range((unsigned long)buf, (unsigned long)p);
p                 122 arch/mips/kernel/process.c 	unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
p                 124 arch/mips/kernel/process.c 	struct thread_info *ti = task_thread_info(p);
p                 128 arch/mips/kernel/process.c 	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
p                 134 arch/mips/kernel/process.c 	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
p                 135 arch/mips/kernel/process.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 137 arch/mips/kernel/process.c 		unsigned long status = p->thread.cp0_status;
p                 140 arch/mips/kernel/process.c 		p->thread.reg16 = usp; /* fn */
p                 141 arch/mips/kernel/process.c 		p->thread.reg17 = kthread_arg;
p                 142 arch/mips/kernel/process.c 		p->thread.reg29 = childksp;
p                 143 arch/mips/kernel/process.c 		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
p                 162 arch/mips/kernel/process.c 	p->thread.reg29 = (unsigned long) childregs;
p                 163 arch/mips/kernel/process.c 	p->thread.reg31 = (unsigned long) ret_from_fork;
p                 171 arch/mips/kernel/process.c 	clear_tsk_thread_flag(p, TIF_USEDFPU);
p                 172 arch/mips/kernel/process.c 	clear_tsk_thread_flag(p, TIF_USEDMSA);
p                 173 arch/mips/kernel/process.c 	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
p                 176 arch/mips/kernel/process.c 	clear_tsk_thread_flag(p, TIF_FPUBOUND);
p                 179 arch/mips/kernel/process.c 	atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
p                 424 arch/mips/kernel/relocate.c 				       unsigned long v, void *p)
p                  26 arch/mips/kernel/rtlx-cmp.c 	struct rtlx_info **p = vpe_get_shared(aprp_cpu_index());
p                  28 arch/mips/kernel/rtlx-cmp.c 	if (p == NULL || *p == NULL)
p                  31 arch/mips/kernel/rtlx-cmp.c 	info = *p;
p                  93 arch/mips/kernel/rtlx.c 	struct rtlx_info **p;
p                 110 arch/mips/kernel/rtlx.c 		p = vpe_get_shared(aprp_cpu_index());
p                 111 arch/mips/kernel/rtlx.c 		if (p == NULL) {
p                 115 arch/mips/kernel/rtlx.c 					(p = vpe_get_shared(aprp_cpu_index())));
p                 126 arch/mips/kernel/rtlx.c 		if (*p == NULL) {
p                 135 arch/mips/kernel/rtlx.c 					if (*p != NULL)
p                 153 arch/mips/kernel/rtlx.c 		if ((unsigned int)*p < KSEG0) {
p                 155 arch/mips/kernel/rtlx.c 				(int)*p);
p                 160 arch/mips/kernel/rtlx.c 		ret = rtlx_init(*p);
p                 154 arch/mips/kernel/setup.c static int __init rd_start_early(char *p)
p                 156 arch/mips/kernel/setup.c 	unsigned long start = memparse(p, &p);
p                 169 arch/mips/kernel/setup.c static int __init rd_size_early(char *p)
p                 171 arch/mips/kernel/setup.c 	initrd_end += memparse(p, &p);
p                 389 arch/mips/kernel/setup.c static int __init early_parse_mem(char *p)
p                 404 arch/mips/kernel/setup.c 	size = memparse(p, &p);
p                 405 arch/mips/kernel/setup.c 	if (*p == '@')
p                 406 arch/mips/kernel/setup.c 		start = memparse(p + 1, &p);
p                 414 arch/mips/kernel/setup.c static int __init early_parse_memmap(char *p)
p                 419 arch/mips/kernel/setup.c 	if (!p)
p                 422 arch/mips/kernel/setup.c 	if (!strncmp(p, "exactmap", 8)) {
p                 427 arch/mips/kernel/setup.c 	oldp = p;
p                 428 arch/mips/kernel/setup.c 	mem_size = memparse(p, &p);
p                 429 arch/mips/kernel/setup.c 	if (p == oldp)
p                 432 arch/mips/kernel/setup.c 	if (*p == '@') {
p                 433 arch/mips/kernel/setup.c 		start_at = memparse(p+1, &p);
p                 435 arch/mips/kernel/setup.c 	} else if (*p == '#') {
p                 438 arch/mips/kernel/setup.c 	} else if (*p == '$') {
p                 439 arch/mips/kernel/setup.c 		start_at = memparse(p+1, &p);
p                 446 arch/mips/kernel/setup.c 	if (*p == '\0') {
p                 456 arch/mips/kernel/setup.c static int __init early_parse_elfcorehdr(char *p)
p                 460 arch/mips/kernel/setup.c 	setup_elfcorehdr = memparse(p, &p);
p                 119 arch/mips/kernel/traps.c 		unsigned long __user *p =
p                 121 arch/mips/kernel/traps.c 		if (__get_user(addr, p)) {
p                 151 arch/mips/kvm/entry.c static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
p                 155 arch/mips/kvm/entry.c 	UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
p                 156 arch/mips/kvm/entry.c 	UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
p                 160 arch/mips/kvm/entry.c 		UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
p                 161 arch/mips/kvm/entry.c 		UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
p                 165 arch/mips/kvm/entry.c static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
p                 172 arch/mips/kvm/entry.c 	UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
p                 173 arch/mips/kvm/entry.c 	UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
p                 176 arch/mips/kvm/entry.c 		UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
p                 177 arch/mips/kvm/entry.c 		UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
p                 189 arch/mips/kvm/entry.c static inline void build_set_exc_base(u32 **p, unsigned int reg)
p                 193 arch/mips/kvm/entry.c 		uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
p                 194 arch/mips/kvm/entry.c 		UASM_i_MTC0(p, reg, C0_EBASE);
p                 196 arch/mips/kvm/entry.c 		uasm_i_mtc0(p, reg, C0_EBASE);
p                 216 arch/mips/kvm/entry.c 	u32 *p = addr;
p                 225 arch/mips/kvm/entry.c 	UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
p                 229 arch/mips/kvm/entry.c 		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
p                 233 arch/mips/kvm/entry.c 	uasm_i_mfc0(&p, V0, C0_STATUS);
p                 234 arch/mips/kvm/entry.c 	UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);
p                 237 arch/mips/kvm/entry.c 	kvm_mips_build_save_scratch(&p, V1, K1);
p                 240 arch/mips/kvm/entry.c 	UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
p                 243 arch/mips/kvm/entry.c 	UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));
p                 249 arch/mips/kvm/entry.c 	UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
p                 252 arch/mips/kvm/entry.c 	UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
p                 258 arch/mips/kvm/entry.c 	UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
p                 259 arch/mips/kvm/entry.c 	uasm_i_mtc0(&p, K0, C0_STATUS);
p                 260 arch/mips/kvm/entry.c 	uasm_i_ehb(&p);
p                 263 arch/mips/kvm/entry.c 	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
p                 264 arch/mips/kvm/entry.c 	build_set_exc_base(&p, K0);
p                 271 arch/mips/kvm/entry.c 	uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
p                 272 arch/mips/kvm/entry.c 	uasm_i_andi(&p, V0, V0, ST0_IM);
p                 273 arch/mips/kvm/entry.c 	uasm_i_or(&p, K0, K0, V0);
p                 274 arch/mips/kvm/entry.c 	uasm_i_mtc0(&p, K0, C0_STATUS);
p                 275 arch/mips/kvm/entry.c 	uasm_i_ehb(&p);
p                 277 arch/mips/kvm/entry.c 	p = kvm_mips_build_enter_guest(p);
p                 279 arch/mips/kvm/entry.c 	return p;
p                 294 arch/mips/kvm/entry.c 	u32 *p = addr;
p                 305 arch/mips/kvm/entry.c 	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
p                 306 arch/mips/kvm/entry.c 	UASM_i_MTC0(&p, T0, C0_EPC);
p                 310 arch/mips/kvm/entry.c 	UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
p                 311 arch/mips/kvm/entry.c 	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);
p                 321 arch/mips/kvm/entry.c 	UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) -
p                 323 arch/mips/kvm/entry.c 	UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
p                 324 arch/mips/kvm/entry.c 	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
p                 325 arch/mips/kvm/entry.c 	uasm_i_jalr(&p, RA, T9);
p                 328 arch/mips/kvm/entry.c 		UASM_i_MTC0(&p, A0, C0_PWBASE);
p                 330 arch/mips/kvm/entry.c 		uasm_i_nop(&p);
p                 333 arch/mips/kvm/entry.c 	uasm_i_addiu(&p, V1, ZERO, 1);
p                 334 arch/mips/kvm/entry.c 	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
p                 335 arch/mips/kvm/entry.c 	uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1);
p                 336 arch/mips/kvm/entry.c 	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
p                 345 arch/mips/kvm/entry.c 		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
p                 347 arch/mips/kvm/entry.c 		uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
p                 349 arch/mips/kvm/entry.c 		uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
p                 351 arch/mips/kvm/entry.c 		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
p                 360 arch/mips/kvm/entry.c 	UASM_i_MFC0(&p, K0, C0_ENTRYHI);
p                 361 arch/mips/kvm/entry.c 	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
p                 365 arch/mips/kvm/entry.c 	UASM_i_ADDIU(&p, T1, S0,
p                 369 arch/mips/kvm/entry.c 	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
p                 370 arch/mips/kvm/entry.c 	UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
p                 372 arch/mips/kvm/entry.c 	uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
p                 373 arch/mips/kvm/entry.c 	uasm_i_xori(&p, T0, T0, KSU_USER);
p                 374 arch/mips/kvm/entry.c 	uasm_il_bnez(&p, &r, T0, label_kernel_asid);
p                 375 arch/mips/kvm/entry.c 	 UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
p                 378 arch/mips/kvm/entry.c 	UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
p                 380 arch/mips/kvm/entry.c 	uasm_l_kernel_asid(&l, p);
p                 385 arch/mips/kvm/entry.c 	uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
p                 387 arch/mips/kvm/entry.c 	uasm_i_sll(&p, T2, T2, ilog2(sizeof(long)));
p                 388 arch/mips/kvm/entry.c 	UASM_i_ADDU(&p, T3, T1, T2);
p                 389 arch/mips/kvm/entry.c 	UASM_i_LW(&p, K0, 0, T3);
p                 395 arch/mips/kvm/entry.c 	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
p                 396 arch/mips/kvm/entry.c 	uasm_i_mul(&p, T2, T2, T3);
p                 398 arch/mips/kvm/entry.c 	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
p                 399 arch/mips/kvm/entry.c 	UASM_i_ADDU(&p, AT, AT, T2);
p                 400 arch/mips/kvm/entry.c 	UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
p                 401 arch/mips/kvm/entry.c 	uasm_i_and(&p, K0, K0, T2);
p                 403 arch/mips/kvm/entry.c 	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
p                 413 arch/mips/kvm/entry.c 	UASM_i_LW(&p, A0, (int)offsetof(struct mm_struct, pgd) -
p                 416 arch/mips/kvm/entry.c 	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
p                 417 arch/mips/kvm/entry.c 	uasm_i_jalr(&p, RA, T9);
p                 418 arch/mips/kvm/entry.c 	 uasm_i_mtc0(&p, K0, C0_ENTRYHI);
p                 421 arch/mips/kvm/entry.c 	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
p                 424 arch/mips/kvm/entry.c 	uasm_i_ehb(&p);
p                 427 arch/mips/kvm/entry.c 	uasm_i_mtc0(&p, ZERO, C0_HWRENA);
p                 434 arch/mips/kvm/entry.c 		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
p                 439 arch/mips/kvm/entry.c 	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
p                 440 arch/mips/kvm/entry.c 	uasm_i_mthi(&p, K0);
p                 442 arch/mips/kvm/entry.c 	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
p                 443 arch/mips/kvm/entry.c 	uasm_i_mtlo(&p, K0);
p                 447 arch/mips/kvm/entry.c 	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
p                 448 arch/mips/kvm/entry.c 	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
p                 451 arch/mips/kvm/entry.c 	uasm_i_eret(&p);
p                 455 arch/mips/kvm/entry.c 	return p;
p                 469 arch/mips/kvm/entry.c 	u32 *p = addr;
p                 479 arch/mips/kvm/entry.c 	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
p                 482 arch/mips/kvm/entry.c 	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
p                 485 arch/mips/kvm/entry.c 	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
p                 505 arch/mips/kvm/entry.c 	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
p                 507 arch/mips/kvm/entry.c 	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
p                 512 arch/mips/kvm/entry.c 	build_get_ptep(&p, K0, K1);
p                 513 arch/mips/kvm/entry.c 	build_update_entries(&p, K0, K1);
p                 514 arch/mips/kvm/entry.c 	build_tlb_write_entry(&p, &l, &r, tlb_random);
p                 519 arch/mips/kvm/entry.c 	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
p                 522 arch/mips/kvm/entry.c 	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
p                 523 arch/mips/kvm/entry.c 	uasm_i_ehb(&p);
p                 524 arch/mips/kvm/entry.c 	UASM_i_MFC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
p                 527 arch/mips/kvm/entry.c 	uasm_i_eret(&p);
p                 529 arch/mips/kvm/entry.c 	return p;
p                 544 arch/mips/kvm/entry.c 	u32 *p = addr;
p                 554 arch/mips/kvm/entry.c 	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
p                 557 arch/mips/kvm/entry.c 	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
p                 558 arch/mips/kvm/entry.c 	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
p                 561 arch/mips/kvm/entry.c 	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
p                 564 arch/mips/kvm/entry.c 	uasm_il_b(&p, &r, label_exit_common);
p                 565 arch/mips/kvm/entry.c 	 uasm_i_nop(&p);
p                 570 arch/mips/kvm/entry.c 	return p;
p                 586 arch/mips/kvm/entry.c 	u32 *p = addr;
p                 611 arch/mips/kvm/entry.c 		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
p                 616 arch/mips/kvm/entry.c 	uasm_i_mfhi(&p, T0);
p                 617 arch/mips/kvm/entry.c 	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);
p                 619 arch/mips/kvm/entry.c 	uasm_i_mflo(&p, T0);
p                 620 arch/mips/kvm/entry.c 	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
p                 624 arch/mips/kvm/entry.c 	uasm_i_ehb(&p);
p                 625 arch/mips/kvm/entry.c 	UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
p                 626 arch/mips/kvm/entry.c 	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
p                 631 arch/mips/kvm/entry.c 	UASM_i_MFC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
p                 634 arch/mips/kvm/entry.c 	UASM_i_LW(&p, S0, offsetof(struct kvm_vcpu, run), S1);
p                 640 arch/mips/kvm/entry.c 	UASM_i_MFC0(&p, K0, C0_EPC);
p                 641 arch/mips/kvm/entry.c 	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);
p                 643 arch/mips/kvm/entry.c 	UASM_i_MFC0(&p, K0, C0_BADVADDR);
p                 644 arch/mips/kvm/entry.c 	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
p                 647 arch/mips/kvm/entry.c 	uasm_i_mfc0(&p, K0, C0_CAUSE);
p                 648 arch/mips/kvm/entry.c 	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);
p                 651 arch/mips/kvm/entry.c 		uasm_i_mfc0(&p, K0, C0_BADINSTR);
p                 652 arch/mips/kvm/entry.c 		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
p                 657 arch/mips/kvm/entry.c 		uasm_i_mfc0(&p, K0, C0_BADINSTRP);
p                 658 arch/mips/kvm/entry.c 		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
p                 666 arch/mips/kvm/entry.c 	uasm_i_mfc0(&p, V0, C0_STATUS);
p                 668 arch/mips/kvm/entry.c 	uasm_i_lui(&p, AT, ST0_BEV >> 16);
p                 669 arch/mips/kvm/entry.c 	uasm_i_or(&p, K0, V0, AT);
p                 671 arch/mips/kvm/entry.c 	uasm_i_mtc0(&p, K0, C0_STATUS);
p                 672 arch/mips/kvm/entry.c 	uasm_i_ehb(&p);
p                 674 arch/mips/kvm/entry.c 	UASM_i_LA_mostly(&p, K0, (long)&ebase);
p                 675 arch/mips/kvm/entry.c 	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
p                 676 arch/mips/kvm/entry.c 	build_set_exc_base(&p, K0);
p                 683 arch/mips/kvm/entry.c 		uasm_i_lui(&p, AT, ST0_CU1 >> 16);
p                 684 arch/mips/kvm/entry.c 		uasm_i_and(&p, V1, V0, AT);
p                 685 arch/mips/kvm/entry.c 		uasm_il_beqz(&p, &r, V1, label_fpu_1);
p                 686 arch/mips/kvm/entry.c 		 uasm_i_nop(&p);
p                 687 arch/mips/kvm/entry.c 		uasm_i_cfc1(&p, T0, 31);
p                 688 arch/mips/kvm/entry.c 		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
p                 690 arch/mips/kvm/entry.c 		uasm_i_ctc1(&p, ZERO, 31);
p                 691 arch/mips/kvm/entry.c 		uasm_l_fpu_1(&l, p);
p                 699 arch/mips/kvm/entry.c 		uasm_i_mfc0(&p, T0, C0_CONFIG5);
p                 700 arch/mips/kvm/entry.c 		uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
p                 701 arch/mips/kvm/entry.c 		uasm_il_beqz(&p, &r, T0, label_msa_1);
p                 702 arch/mips/kvm/entry.c 		 uasm_i_nop(&p);
p                 703 arch/mips/kvm/entry.c 		uasm_i_cfcmsa(&p, T0, MSA_CSR);
p                 704 arch/mips/kvm/entry.c 		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
p                 706 arch/mips/kvm/entry.c 		uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
p                 707 arch/mips/kvm/entry.c 		uasm_l_msa_1(&l, p);
p                 713 arch/mips/kvm/entry.c 		UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
p                 715 arch/mips/kvm/entry.c 		UASM_i_MTC0(&p, K0, C0_ENTRYHI);
p                 724 arch/mips/kvm/entry.c 	UASM_i_LW(&p, A0,
p                 726 arch/mips/kvm/entry.c 	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
p                 727 arch/mips/kvm/entry.c 	uasm_i_jalr(&p, RA, T9);
p                 730 arch/mips/kvm/entry.c 		UASM_i_MTC0(&p, A0, C0_PWBASE);
p                 732 arch/mips/kvm/entry.c 		uasm_i_nop(&p);
p                 735 arch/mips/kvm/entry.c 	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
p                 736 arch/mips/kvm/entry.c 	uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1);
p                 737 arch/mips/kvm/entry.c 	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
p                 740 arch/mips/kvm/entry.c 	uasm_i_sw(&p, K0,
p                 748 arch/mips/kvm/entry.c 		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
p                 750 arch/mips/kvm/entry.c 		uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT,
p                 752 arch/mips/kvm/entry.c 		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
p                 757 arch/mips/kvm/entry.c 	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
p                 758 arch/mips/kvm/entry.c 	uasm_i_and(&p, V0, V0, AT);
p                 759 arch/mips/kvm/entry.c 	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
p                 760 arch/mips/kvm/entry.c 	uasm_i_or(&p, V0, V0, AT);
p                 762 arch/mips/kvm/entry.c 	uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
p                 764 arch/mips/kvm/entry.c 	uasm_i_mtc0(&p, V0, C0_STATUS);
p                 765 arch/mips/kvm/entry.c 	uasm_i_ehb(&p);
p                 768 arch/mips/kvm/entry.c 	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
p                 771 arch/mips/kvm/entry.c 	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
p                 774 arch/mips/kvm/entry.c 	UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));
p                 782 arch/mips/kvm/entry.c 	kvm_mips_build_restore_scratch(&p, K0, SP);
p                 785 arch/mips/kvm/entry.c 	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
p                 786 arch/mips/kvm/entry.c 	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
p                 787 arch/mips/kvm/entry.c 	uasm_i_mtc0(&p, K0, C0_HWRENA);
p                 795 arch/mips/kvm/entry.c 	uasm_i_move(&p, A0, S0);
p                 796 arch/mips/kvm/entry.c 	uasm_i_move(&p, A1, S1);
p                 797 arch/mips/kvm/entry.c 	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
p                 798 arch/mips/kvm/entry.c 	uasm_i_jalr(&p, RA, T9);
p                 799 arch/mips/kvm/entry.c 	 UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
p                 803 arch/mips/kvm/entry.c 	p = kvm_mips_build_ret_from_exit(p);
p                 805 arch/mips/kvm/entry.c 	return p;
p                 819 arch/mips/kvm/entry.c 	u32 *p = addr;
p                 829 arch/mips/kvm/entry.c 	uasm_i_di(&p, ZERO);
p                 830 arch/mips/kvm/entry.c 	uasm_i_ehb(&p);
p                 838 arch/mips/kvm/entry.c 	uasm_i_move(&p, K1, S1);
p                 839 arch/mips/kvm/entry.c 	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
p                 845 arch/mips/kvm/entry.c 	uasm_i_andi(&p, T0, V0, RESUME_HOST);
p                 846 arch/mips/kvm/entry.c 	uasm_il_bnez(&p, &r, T0, label_return_to_host);
p                 847 arch/mips/kvm/entry.c 	 uasm_i_nop(&p);
p                 849 arch/mips/kvm/entry.c 	p = kvm_mips_build_ret_to_guest(p);
p                 851 arch/mips/kvm/entry.c 	uasm_l_return_to_host(&l, p);
p                 852 arch/mips/kvm/entry.c 	p = kvm_mips_build_ret_to_host(p);
p                 856 arch/mips/kvm/entry.c 	return p;
p                 870 arch/mips/kvm/entry.c 	u32 *p = addr;
p                 873 arch/mips/kvm/entry.c 	UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
p                 876 arch/mips/kvm/entry.c 	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
p                 879 arch/mips/kvm/entry.c 	uasm_i_mfc0(&p, V1, C0_STATUS);
p                 880 arch/mips/kvm/entry.c 	uasm_i_lui(&p, AT, ST0_BEV >> 16);
p                 881 arch/mips/kvm/entry.c 	uasm_i_or(&p, K0, V1, AT);
p                 882 arch/mips/kvm/entry.c 	uasm_i_mtc0(&p, K0, C0_STATUS);
p                 883 arch/mips/kvm/entry.c 	uasm_i_ehb(&p);
p                 884 arch/mips/kvm/entry.c 	build_set_exc_base(&p, T0);
p                 887 arch/mips/kvm/entry.c 	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
p                 888 arch/mips/kvm/entry.c 	UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
p                 889 arch/mips/kvm/entry.c 	uasm_i_and(&p, V1, V1, AT);
p                 890 arch/mips/kvm/entry.c 	uasm_i_mtc0(&p, V1, C0_STATUS);
p                 891 arch/mips/kvm/entry.c 	uasm_i_ehb(&p);
p                 893 arch/mips/kvm/entry.c 	p = kvm_mips_build_enter_guest(p);
p                 895 arch/mips/kvm/entry.c 	return p;
p                 910 arch/mips/kvm/entry.c 	u32 *p = addr;
p                 914 arch/mips/kvm/entry.c 	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
p                 915 arch/mips/kvm/entry.c 	UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));
p                 921 arch/mips/kvm/entry.c 	uasm_i_sra(&p, K0, V0, 2);
p                 922 arch/mips/kvm/entry.c 	uasm_i_move(&p, V0, K0);
p                 928 arch/mips/kvm/entry.c 		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
p                 932 arch/mips/kvm/entry.c 	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
p                 933 arch/mips/kvm/entry.c 	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
p                 934 arch/mips/kvm/entry.c 	uasm_i_mtc0(&p, K0, C0_HWRENA);
p                 937 arch/mips/kvm/entry.c 	UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
p                 938 arch/mips/kvm/entry.c 	uasm_i_jr(&p, RA);
p                 939 arch/mips/kvm/entry.c 	 uasm_i_nop(&p);
p                 941 arch/mips/kvm/entry.c 	return p;
p                 268 arch/mips/kvm/mips.c 	u32 *p;
p                 275 arch/mips/kvm/mips.c 	for (p = start; p < (u32 *)end; ++p)
p                 276 arch/mips/kvm/mips.c 		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);
p                 286 arch/mips/kvm/mips.c 	void *gebase, *p, *handler, *refill_start, *refill_end;
p                 357 arch/mips/kvm/mips.c 	p = handler;
p                 358 arch/mips/kvm/mips.c 	p = kvm_mips_build_exit(p);
p                 361 arch/mips/kvm/mips.c 	vcpu->arch.vcpu_run = p;
p                 362 arch/mips/kvm/mips.c 	p = kvm_mips_build_vcpu_run(p);
p                 368 arch/mips/kvm/mips.c 	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
p                  53 arch/mips/kvm/mmu.c 	void *p;
p                  56 arch/mips/kvm/mmu.c 	p = mc->objects[--mc->nobjs];
p                  57 arch/mips/kvm/mmu.c 	return p;
p                  77 arch/mips/kvm/mmu.c 	unsigned long *p, *end;
p                  86 arch/mips/kvm/mmu.c 	p = (unsigned long *)page;
p                  87 arch/mips/kvm/mmu.c 	end = p + PTRS_PER_PGD;
p                  90 arch/mips/kvm/mmu.c 		p[0] = entry;
p                  91 arch/mips/kvm/mmu.c 		p[1] = entry;
p                  92 arch/mips/kvm/mmu.c 		p[2] = entry;
p                  93 arch/mips/kvm/mmu.c 		p[3] = entry;
p                  94 arch/mips/kvm/mmu.c 		p[4] = entry;
p                  95 arch/mips/kvm/mmu.c 		p += 8;
p                  96 arch/mips/kvm/mmu.c 		p[-3] = entry;
p                  97 arch/mips/kvm/mmu.c 		p[-2] = entry;
p                  98 arch/mips/kvm/mmu.c 		p[-1] = entry;
p                  99 arch/mips/kvm/mmu.c 	} while (p != end);
p                  60 arch/mips/lantiq/prom.c 		char *p = (char *) KSEG1ADDR(argv[i]);
p                  62 arch/mips/lantiq/prom.c 		if (CPHYSADDR(p) && *p) {
p                  63 arch/mips/lantiq/prom.c 			strlcat(arcs_cmdline, p, sizeof(arcs_cmdline));
p                 179 arch/mips/lantiq/xway/dma.c ltq_dma_init_port(int p)
p                 181 arch/mips/lantiq/xway/dma.c 	ltq_dma_w32(p, LTQ_DMA_PS);
p                 182 arch/mips/lantiq/xway/dma.c 	switch (p) {
p                  84 arch/mips/lasat/sysctl.c 	char *p, c;
p                  96 arch/mips/lasat/sysctl.c 		p = buffer;
p                  98 arch/mips/lasat/sysctl.c 			if (get_user(c, p++))
p                  51 arch/mips/loongson32/common/platform.c 	struct plat_serial8250_port *p;
p                  61 arch/mips/loongson32/common/platform.c 	for (p = pdev->dev.platform_data; p->flags != 0; ++p)
p                  62 arch/mips/loongson32/common/platform.c 		p->uartclk = clk_get_rate(clk);
p                  33 arch/mips/loongson64/common/env.c #define parse_even_earlier(res, option, p)				\
p                  37 arch/mips/loongson64/common/env.c 	if (strncmp(option, (char *)p, strlen(option)) == 0)		\
p                  38 arch/mips/loongson64/common/env.c 		tmp = kstrtou32((char *)p + strlen(option"="), 10, &res); \
p                  41 arch/mips/loongson64/common/machtype.c 	char *p, str[MACHTYPE_LEN + 1];
p                  46 arch/mips/loongson64/common/machtype.c 	p = strstr(arcs_cmdline, "machtype=");
p                  47 arch/mips/loongson64/common/machtype.c 	if (!p) {
p                  51 arch/mips/loongson64/common/machtype.c 	p += strlen("machtype=");
p                  52 arch/mips/loongson64/common/machtype.c 	strncpy(str, p, MACHTYPE_LEN);
p                  54 arch/mips/loongson64/common/machtype.c 	p = strstr(str, " ");
p                  55 arch/mips/loongson64/common/machtype.c 	if (p)
p                  56 arch/mips/loongson64/common/machtype.c 		*p = '\0';
p                1418 arch/mips/math-emu/cp1emu.c #define DEF3OP(name, p, f1, f2, f3)					\
p                1419 arch/mips/math-emu/cp1emu.c static union ieee754##p fpemu_##p##_##name(union ieee754##p r,		\
p                1420 arch/mips/math-emu/cp1emu.c 	union ieee754##p s, union ieee754##p t)				\
p                  32 arch/mips/mm/c-r3k.c 	volatile unsigned long *p;
p                  34 arch/mips/mm/c-r3k.c 	p = (volatile unsigned long *) KSEG0;
p                  41 arch/mips/mm/c-r3k.c 	*p = 0xa5a55a5a;
p                  42 arch/mips/mm/c-r3k.c 	dummy = *p;
p                  49 arch/mips/mm/c-r3k.c 			*(p + size) = 0;
p                  50 arch/mips/mm/c-r3k.c 		*p = -1;
p                  52 arch/mips/mm/c-r3k.c 		     (size <= 0x40000) && (*(p + size) == 0);
p                  61 arch/mips/mm/c-r3k.c 	return size * sizeof(*p);
p                  67 arch/mips/mm/c-r3k.c 	volatile unsigned long *p;
p                  69 arch/mips/mm/c-r3k.c 	p = (volatile unsigned long *) KSEG0;
p                  77 arch/mips/mm/c-r3k.c 		*(p + i) = 0;
p                  78 arch/mips/mm/c-r3k.c 	*(volatile unsigned char *)p = 0;
p                  80 arch/mips/mm/c-r3k.c 		*(p + lsize);
p                  86 arch/mips/mm/c-r3k.c 		*(volatile unsigned char *)(p + i) = 0;
p                  90 arch/mips/mm/c-r3k.c 	return lsize * sizeof(*p);
p                 107 arch/mips/mm/c-r3k.c 	volatile unsigned char *p;
p                 114 arch/mips/mm/c-r3k.c 	p = (char *)start;
p                 154 arch/mips/mm/c-r3k.c 			: : "r" (p) );
p                 155 arch/mips/mm/c-r3k.c 		p += 0x080;
p                 164 arch/mips/mm/c-r3k.c 	volatile unsigned char *p;
p                 171 arch/mips/mm/c-r3k.c 	p = (char *)start;
p                 211 arch/mips/mm/c-r3k.c 			: : "r" (p) );
p                 212 arch/mips/mm/c-r3k.c 		p += 0x080;
p                1530 arch/mips/mm/c-r4k.c 		unsigned long *p = (unsigned long *) addr;
p                1531 arch/mips/mm/c-r4k.c 		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
p                 427 arch/mips/mm/cerr-sb1.c 	uint8_t	 p;
p                 430 arch/mips/mm/cerr-sb1.c 	p = 0;
p                 433 arch/mips/mm/cerr-sb1.c 		p <<= 1;
p                 436 arch/mips/mm/cerr-sb1.c 		p ^= (parity[w>>24] ^ parity[(w>>16) & 0xFF]
p                 439 arch/mips/mm/cerr-sb1.c 		p ^= (parity[w>>24] ^ parity[(w>>16) & 0xFF]
p                 442 arch/mips/mm/cerr-sb1.c 	return p;
p                 193 arch/mips/mm/ioremap.c 	struct vm_struct *p;
p                 198 arch/mips/mm/ioremap.c 	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
p                 199 arch/mips/mm/ioremap.c 	if (!p)
p                 202 arch/mips/mm/ioremap.c 	kfree(p);
p                  19 arch/mips/mm/pgtable-32.c 	unsigned long *p = (unsigned long *) page;
p                  23 arch/mips/mm/pgtable-32.c 		p[i + 0] = (unsigned long) invalid_pte_table;
p                  24 arch/mips/mm/pgtable-32.c 		p[i + 1] = (unsigned long) invalid_pte_table;
p                  25 arch/mips/mm/pgtable-32.c 		p[i + 2] = (unsigned long) invalid_pte_table;
p                  26 arch/mips/mm/pgtable-32.c 		p[i + 3] = (unsigned long) invalid_pte_table;
p                  27 arch/mips/mm/pgtable-32.c 		p[i + 4] = (unsigned long) invalid_pte_table;
p                  28 arch/mips/mm/pgtable-32.c 		p[i + 5] = (unsigned long) invalid_pte_table;
p                  29 arch/mips/mm/pgtable-32.c 		p[i + 6] = (unsigned long) invalid_pte_table;
p                  30 arch/mips/mm/pgtable-32.c 		p[i + 7] = (unsigned long) invalid_pte_table;
p                  19 arch/mips/mm/pgtable-64.c 	unsigned long *p, *end;
p                  30 arch/mips/mm/pgtable-64.c 	p = (unsigned long *) page;
p                  31 arch/mips/mm/pgtable-64.c 	end = p + PTRS_PER_PGD;
p                  34 arch/mips/mm/pgtable-64.c 		p[0] = entry;
p                  35 arch/mips/mm/pgtable-64.c 		p[1] = entry;
p                  36 arch/mips/mm/pgtable-64.c 		p[2] = entry;
p                  37 arch/mips/mm/pgtable-64.c 		p[3] = entry;
p                  38 arch/mips/mm/pgtable-64.c 		p[4] = entry;
p                  39 arch/mips/mm/pgtable-64.c 		p += 8;
p                  40 arch/mips/mm/pgtable-64.c 		p[-3] = entry;
p                  41 arch/mips/mm/pgtable-64.c 		p[-2] = entry;
p                  42 arch/mips/mm/pgtable-64.c 		p[-1] = entry;
p                  43 arch/mips/mm/pgtable-64.c 	} while (p != end);
p                  49 arch/mips/mm/pgtable-64.c 	unsigned long *p, *end;
p                  51 arch/mips/mm/pgtable-64.c 	p = (unsigned long *) addr;
p                  52 arch/mips/mm/pgtable-64.c 	end = p + PTRS_PER_PMD;
p                  55 arch/mips/mm/pgtable-64.c 		p[0] = pagetable;
p                  56 arch/mips/mm/pgtable-64.c 		p[1] = pagetable;
p                  57 arch/mips/mm/pgtable-64.c 		p[2] = pagetable;
p                  58 arch/mips/mm/pgtable-64.c 		p[3] = pagetable;
p                  59 arch/mips/mm/pgtable-64.c 		p[4] = pagetable;
p                  60 arch/mips/mm/pgtable-64.c 		p += 8;
p                  61 arch/mips/mm/pgtable-64.c 		p[-3] = pagetable;
p                  62 arch/mips/mm/pgtable-64.c 		p[-2] = pagetable;
p                  63 arch/mips/mm/pgtable-64.c 		p[-1] = pagetable;
p                  64 arch/mips/mm/pgtable-64.c 	} while (p != end);
p                  72 arch/mips/mm/pgtable-64.c 	unsigned long *p, *end;
p                  74 arch/mips/mm/pgtable-64.c 	p = (unsigned long *)addr;
p                  75 arch/mips/mm/pgtable-64.c 	end = p + PTRS_PER_PUD;
p                  78 arch/mips/mm/pgtable-64.c 		p[0] = pagetable;
p                  79 arch/mips/mm/pgtable-64.c 		p[1] = pagetable;
p                  80 arch/mips/mm/pgtable-64.c 		p[2] = pagetable;
p                  81 arch/mips/mm/pgtable-64.c 		p[3] = pagetable;
p                  82 arch/mips/mm/pgtable-64.c 		p[4] = pagetable;
p                  83 arch/mips/mm/pgtable-64.c 		p += 8;
p                  84 arch/mips/mm/pgtable-64.c 		p[-3] = pagetable;
p                  85 arch/mips/mm/pgtable-64.c 		p[-2] = pagetable;
p                  86 arch/mips/mm/pgtable-64.c 		p[-1] = pagetable;
p                  87 arch/mips/mm/pgtable-64.c 	} while (p != end);
p                 202 arch/mips/mm/sc-rm7k.c 		unsigned long *p = (unsigned long *) addr;
p                 203 arch/mips/mm/sc-rm7k.c 		__asm__ __volatile__("nop" : : "r" (*p));
p                 201 arch/mips/mm/tlbex.c static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
p                 205 arch/mips/mm/tlbex.c 		uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
p                 212 arch/mips/mm/tlbex.c static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
p                 216 arch/mips/mm/tlbex.c 		uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
p                 355 arch/mips/mm/tlbex.c static struct work_registers build_get_work_registers(u32 **p)
p                 361 arch/mips/mm/tlbex.c 		UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
p                 370 arch/mips/mm/tlbex.c 		UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG);
p                 371 arch/mips/mm/tlbex.c 		UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT);
p                 374 arch/mips/mm/tlbex.c 		UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
p                 376 arch/mips/mm/tlbex.c 		UASM_i_LA(p, K1, (long)&handler_reg_save);
p                 377 arch/mips/mm/tlbex.c 		UASM_i_ADDU(p, K0, K0, K1);
p                 379 arch/mips/mm/tlbex.c 		UASM_i_LA(p, K0, (long)&handler_reg_save);
p                 382 arch/mips/mm/tlbex.c 	UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
p                 383 arch/mips/mm/tlbex.c 	UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
p                 391 arch/mips/mm/tlbex.c static void build_restore_work_registers(u32 **p)
p                 394 arch/mips/mm/tlbex.c 		uasm_i_ehb(p);
p                 395 arch/mips/mm/tlbex.c 		UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
p                 399 arch/mips/mm/tlbex.c 	UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
p                 400 arch/mips/mm/tlbex.c 	UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
p                 414 arch/mips/mm/tlbex.c 	u32 *p;
p                 417 arch/mips/mm/tlbex.c 	p = tlb_handler;
p                 419 arch/mips/mm/tlbex.c 	uasm_i_mfc0(&p, K0, C0_BADVADDR);
p                 420 arch/mips/mm/tlbex.c 	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
p                 421 arch/mips/mm/tlbex.c 	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
p                 422 arch/mips/mm/tlbex.c 	uasm_i_srl(&p, K0, K0, 22); /* load delay */
p                 423 arch/mips/mm/tlbex.c 	uasm_i_sll(&p, K0, K0, 2);
p                 424 arch/mips/mm/tlbex.c 	uasm_i_addu(&p, K1, K1, K0);
p                 425 arch/mips/mm/tlbex.c 	uasm_i_mfc0(&p, K0, C0_CONTEXT);
p                 426 arch/mips/mm/tlbex.c 	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
p                 427 arch/mips/mm/tlbex.c 	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
p                 428 arch/mips/mm/tlbex.c 	uasm_i_addu(&p, K1, K1, K0);
p                 429 arch/mips/mm/tlbex.c 	uasm_i_lw(&p, K0, 0, K1);
p                 430 arch/mips/mm/tlbex.c 	uasm_i_nop(&p); /* load delay */
p                 431 arch/mips/mm/tlbex.c 	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
p                 432 arch/mips/mm/tlbex.c 	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
p                 433 arch/mips/mm/tlbex.c 	uasm_i_tlbwr(&p); /* cp0 delay */
p                 434 arch/mips/mm/tlbex.c 	uasm_i_jr(&p, K1);
p                 435 arch/mips/mm/tlbex.c 	uasm_i_rfe(&p); /* branch delay */
p                 437 arch/mips/mm/tlbex.c 	if (p > tlb_handler + 32)
p                 441 arch/mips/mm/tlbex.c 		 (unsigned int)(p - tlb_handler));
p                 480 arch/mips/mm/tlbex.c static void __maybe_unused build_tlb_probe_entry(u32 **p)
p                 488 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                 489 arch/mips/mm/tlbex.c 		uasm_i_tlbp(p);
p                 493 arch/mips/mm/tlbex.c 		uasm_i_tlbp(p);
p                 498 arch/mips/mm/tlbex.c void build_tlb_write_entry(u32 **p, struct uasm_label **l,
p                 511 arch/mips/mm/tlbex.c 			uasm_i_ehb(p);
p                 512 arch/mips/mm/tlbex.c 		tlbw(p);
p                 527 arch/mips/mm/tlbex.c 		uasm_bgezl_hazard(p, r, hazard_instance);
p                 528 arch/mips/mm/tlbex.c 		tlbw(p);
p                 529 arch/mips/mm/tlbex.c 		uasm_bgezl_label(l, p, hazard_instance);
p                 531 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                 536 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                 537 arch/mips/mm/tlbex.c 		tlbw(p);
p                 538 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                 543 arch/mips/mm/tlbex.c 		uasm_i_nop(p); /* QED specifies 2 nops hazard */
p                 544 arch/mips/mm/tlbex.c 		uasm_i_nop(p); /* QED specifies 2 nops hazard */
p                 545 arch/mips/mm/tlbex.c 		tlbw(p);
p                 552 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                 553 arch/mips/mm/tlbex.c 		tlbw(p);
p                 578 arch/mips/mm/tlbex.c 			uasm_i_nop(p);
p                 581 arch/mips/mm/tlbex.c 		tlbw(p);
p                 585 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                 586 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                 587 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                 588 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                 589 arch/mips/mm/tlbex.c 		tlbw(p);
p                 597 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                 598 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                 599 arch/mips/mm/tlbex.c 		tlbw(p);
p                 600 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                 601 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                 606 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                 607 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                 608 arch/mips/mm/tlbex.c 		tlbw(p);
p                 612 arch/mips/mm/tlbex.c 		tlbw(p);
p                 613 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                 624 arch/mips/mm/tlbex.c static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
p                 634 arch/mips/mm/tlbex.c 			UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
p                 636 arch/mips/mm/tlbex.c 			UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
p                 637 arch/mips/mm/tlbex.c 			UASM_i_ROTR(p, reg, reg,
p                 642 arch/mips/mm/tlbex.c 		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
p                 644 arch/mips/mm/tlbex.c 		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
p                 651 arch/mips/mm/tlbex.c static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
p                 661 arch/mips/mm/tlbex.c 			uasm_i_ehb(p);
p                 665 arch/mips/mm/tlbex.c 			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
p                 666 arch/mips/mm/tlbex.c 			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
p                 667 arch/mips/mm/tlbex.c 			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
p                 668 arch/mips/mm/tlbex.c 			uasm_il_b(p, r, lid);
p                 670 arch/mips/mm/tlbex.c 			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
p                 671 arch/mips/mm/tlbex.c 			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
p                 672 arch/mips/mm/tlbex.c 			uasm_il_b(p, r, lid);
p                 674 arch/mips/mm/tlbex.c 			uasm_i_mtc0(p, 0, C0_PAGEMASK);
p                 675 arch/mips/mm/tlbex.c 			uasm_il_b(p, r, lid);
p                 678 arch/mips/mm/tlbex.c 			UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
p                 680 arch/mips/mm/tlbex.c 			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
p                 684 arch/mips/mm/tlbex.c 			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
p                 685 arch/mips/mm/tlbex.c 			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
p                 686 arch/mips/mm/tlbex.c 			uasm_il_b(p, r, lid);
p                 687 arch/mips/mm/tlbex.c 			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
p                 689 arch/mips/mm/tlbex.c 			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
p                 690 arch/mips/mm/tlbex.c 			uasm_il_b(p, r, lid);
p                 691 arch/mips/mm/tlbex.c 			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
p                 693 arch/mips/mm/tlbex.c 			uasm_il_b(p, r, lid);
p                 694 arch/mips/mm/tlbex.c 			uasm_i_mtc0(p, 0, C0_PAGEMASK);
p                 699 arch/mips/mm/tlbex.c static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
p                 706 arch/mips/mm/tlbex.c 	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
p                 707 arch/mips/mm/tlbex.c 	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
p                 708 arch/mips/mm/tlbex.c 	uasm_i_mtc0(p, tmp, C0_PAGEMASK);
p                 710 arch/mips/mm/tlbex.c 	build_tlb_write_entry(p, l, r, wmode);
p                 712 arch/mips/mm/tlbex.c 	build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
p                 719 arch/mips/mm/tlbex.c build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
p                 722 arch/mips/mm/tlbex.c 	UASM_i_LW(p, tmp, 0, pmd);
p                 724 arch/mips/mm/tlbex.c 		uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
p                 726 arch/mips/mm/tlbex.c 		uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
p                 727 arch/mips/mm/tlbex.c 		uasm_il_bnez(p, r, tmp, lid);
p                 731 arch/mips/mm/tlbex.c static void build_huge_update_entries(u32 **p, unsigned int pte,
p                 749 arch/mips/mm/tlbex.c 		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
p                 751 arch/mips/mm/tlbex.c 	build_convert_pte_to_entrylo(p, pte);
p                 752 arch/mips/mm/tlbex.c 	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
p                 755 arch/mips/mm/tlbex.c 		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
p                 757 arch/mips/mm/tlbex.c 		UASM_i_ADDU(p, pte, pte, tmp);
p                 759 arch/mips/mm/tlbex.c 	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
p                 762 arch/mips/mm/tlbex.c static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
p                 769 arch/mips/mm/tlbex.c 	UASM_i_SC(p, pte, 0, ptr);
p                 770 arch/mips/mm/tlbex.c 	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
p                 771 arch/mips/mm/tlbex.c 	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
p                 773 arch/mips/mm/tlbex.c 	UASM_i_SW(p, pte, 0, ptr);
p                 778 arch/mips/mm/tlbex.c 		UASM_i_MFC0(p, ptr, C0_ENTRYHI);
p                 779 arch/mips/mm/tlbex.c 		uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
p                 780 arch/mips/mm/tlbex.c 		UASM_i_MTC0(p, ptr, C0_ENTRYHI);
p                 781 arch/mips/mm/tlbex.c 		build_tlb_write_entry(p, l, r, tlb_indexed);
p                 783 arch/mips/mm/tlbex.c 		uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
p                 784 arch/mips/mm/tlbex.c 		UASM_i_MTC0(p, ptr, C0_ENTRYHI);
p                 785 arch/mips/mm/tlbex.c 		build_huge_update_entries(p, pte, ptr);
p                 786 arch/mips/mm/tlbex.c 		build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
p                 791 arch/mips/mm/tlbex.c 	build_huge_update_entries(p, pte, ptr);
p                 792 arch/mips/mm/tlbex.c 	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
p                 801 arch/mips/mm/tlbex.c void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
p                 810 arch/mips/mm/tlbex.c 	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
p                 824 arch/mips/mm/tlbex.c 		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
p                 825 arch/mips/mm/tlbex.c 		uasm_il_bnez(p, r, ptr, label_vmalloc);
p                 827 arch/mips/mm/tlbex.c 		uasm_il_bltz(p, r, tmp, label_vmalloc);
p                 834 arch/mips/mm/tlbex.c 			UASM_i_MFC0(p, ptr, C0_PWBASE);
p                 836 arch/mips/mm/tlbex.c 			UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
p                 842 arch/mips/mm/tlbex.c 		UASM_i_MFC0(p, ptr, C0_CONTEXT);
p                 845 arch/mips/mm/tlbex.c 		uasm_i_dins(p, ptr, 0, 0, 23);
p                 848 arch/mips/mm/tlbex.c 		uasm_i_ori(p, ptr, ptr, 0x540);
p                 849 arch/mips/mm/tlbex.c 		uasm_i_drotr(p, ptr, ptr, 11);
p                 851 arch/mips/mm/tlbex.c 		UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
p                 852 arch/mips/mm/tlbex.c 		uasm_i_dsrl_safe(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
p                 853 arch/mips/mm/tlbex.c 		UASM_i_LA_mostly(p, tmp, pgdc);
p                 854 arch/mips/mm/tlbex.c 		uasm_i_daddu(p, ptr, ptr, tmp);
p                 855 arch/mips/mm/tlbex.c 		uasm_i_dmfc0(p, tmp, C0_BADVADDR);
p                 856 arch/mips/mm/tlbex.c 		uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
p                 858 arch/mips/mm/tlbex.c 		UASM_i_LA_mostly(p, ptr, pgdc);
p                 859 arch/mips/mm/tlbex.c 		uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
p                 863 arch/mips/mm/tlbex.c 	uasm_l_vmalloc_done(l, *p);
p                 866 arch/mips/mm/tlbex.c 	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
p                 868 arch/mips/mm/tlbex.c 	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
p                 869 arch/mips/mm/tlbex.c 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
p                 871 arch/mips/mm/tlbex.c 	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
p                 872 arch/mips/mm/tlbex.c 	uasm_i_ld(p, ptr, 0, ptr); /* get pud pointer */
p                 873 arch/mips/mm/tlbex.c 	uasm_i_dsrl_safe(p, tmp, tmp, PUD_SHIFT - 3); /* get pud offset in bytes */
p                 874 arch/mips/mm/tlbex.c 	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PUD - 1) << 3);
p                 875 arch/mips/mm/tlbex.c 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pud offset */
p                 878 arch/mips/mm/tlbex.c 	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
p                 879 arch/mips/mm/tlbex.c 	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
p                 880 arch/mips/mm/tlbex.c 	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
p                 881 arch/mips/mm/tlbex.c 	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
p                 882 arch/mips/mm/tlbex.c 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
p                 892 arch/mips/mm/tlbex.c build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
p                 902 arch/mips/mm/tlbex.c 	uasm_l_vmalloc(l, *p);
p                 906 arch/mips/mm/tlbex.c 			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
p                 907 arch/mips/mm/tlbex.c 			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
p                 911 arch/mips/mm/tlbex.c 			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
p                 916 arch/mips/mm/tlbex.c 			uasm_il_b(p, r, label_vmalloc_done);
p                 917 arch/mips/mm/tlbex.c 			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
p                 919 arch/mips/mm/tlbex.c 			UASM_i_LA_mostly(p, ptr, swpd);
p                 920 arch/mips/mm/tlbex.c 			uasm_il_b(p, r, label_vmalloc_done);
p                 922 arch/mips/mm/tlbex.c 				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
p                 924 arch/mips/mm/tlbex.c 				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
p                 928 arch/mips/mm/tlbex.c 		uasm_l_large_segbits_fault(l, *p);
p                 931 arch/mips/mm/tlbex.c 			uasm_i_ehb(p);
p                 946 arch/mips/mm/tlbex.c 			uasm_i_sync(p, 0);
p                 947 arch/mips/mm/tlbex.c 		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
p                 948 arch/mips/mm/tlbex.c 		uasm_i_jr(p, ptr);
p                 952 arch/mips/mm/tlbex.c 				UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
p                 954 arch/mips/mm/tlbex.c 				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
p                 956 arch/mips/mm/tlbex.c 			uasm_i_nop(p);
p                 967 arch/mips/mm/tlbex.c void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
p                 971 arch/mips/mm/tlbex.c 		uasm_i_mfc0(p, ptr, c0_kscratch(), pgd_reg);
p                 972 arch/mips/mm/tlbex.c 		uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
p                 978 arch/mips/mm/tlbex.c 		uasm_i_mfc0(p, ptr, SMP_CPUID_REG);
p                 979 arch/mips/mm/tlbex.c 		UASM_i_LA_mostly(p, tmp, pgdc);
p                 980 arch/mips/mm/tlbex.c 		uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
p                 981 arch/mips/mm/tlbex.c 		uasm_i_addu(p, ptr, tmp, ptr);
p                 983 arch/mips/mm/tlbex.c 		UASM_i_LA_mostly(p, ptr, pgdc);
p                 985 arch/mips/mm/tlbex.c 		uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
p                 986 arch/mips/mm/tlbex.c 		uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
p                 988 arch/mips/mm/tlbex.c 	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
p                 989 arch/mips/mm/tlbex.c 	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
p                 990 arch/mips/mm/tlbex.c 	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
p                 996 arch/mips/mm/tlbex.c static void build_adjust_context(u32 **p, unsigned int ctx)
p                1018 arch/mips/mm/tlbex.c 		UASM_i_SRL(p, ctx, ctx, shift);
p                1019 arch/mips/mm/tlbex.c 	uasm_i_andi(p, ctx, ctx, mask);
p                1022 arch/mips/mm/tlbex.c void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
p                1033 arch/mips/mm/tlbex.c 		UASM_i_LW(p, ptr, 0, ptr);
p                1034 arch/mips/mm/tlbex.c 		GET_CONTEXT(p, tmp); /* get context reg */
p                1038 arch/mips/mm/tlbex.c 		GET_CONTEXT(p, tmp); /* get context reg */
p                1039 arch/mips/mm/tlbex.c 		UASM_i_LW(p, ptr, 0, ptr);
p                1043 arch/mips/mm/tlbex.c 	build_adjust_context(p, tmp);
p                1044 arch/mips/mm/tlbex.c 	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
p                1048 arch/mips/mm/tlbex.c void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
p                1060 arch/mips/mm/tlbex.c 		uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
p                1061 arch/mips/mm/tlbex.c 		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
p                1062 arch/mips/mm/tlbex.c 		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
p                1065 arch/mips/mm/tlbex.c 			uasm_i_lw(p, tmp, 0, ptep);
p                1066 arch/mips/mm/tlbex.c 			uasm_i_ext(p, tmp, tmp, 0, 24);
p                1067 arch/mips/mm/tlbex.c 			uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
p                1070 arch/mips/mm/tlbex.c 		uasm_i_lw(p, tmp, pte_off_odd, ptep); /* odd pte */
p                1071 arch/mips/mm/tlbex.c 		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
p                1072 arch/mips/mm/tlbex.c 		UASM_i_MTC0(p, tmp, C0_ENTRYLO1);
p                1075 arch/mips/mm/tlbex.c 			uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
p                1076 arch/mips/mm/tlbex.c 			uasm_i_ext(p, tmp, tmp, 0, 24);
p                1077 arch/mips/mm/tlbex.c 			uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
p                1082 arch/mips/mm/tlbex.c 	UASM_i_LW(p, tmp, pte_off_even, ptep); /* get even pte */
p                1083 arch/mips/mm/tlbex.c 	UASM_i_LW(p, ptep, pte_off_odd, ptep); /* get odd pte */
p                1085 arch/mips/mm/tlbex.c 		build_tlb_probe_entry(p);
p                1086 arch/mips/mm/tlbex.c 	build_convert_pte_to_entrylo(p, tmp);
p                1088 arch/mips/mm/tlbex.c 		UASM_i_MTC0(p, 0, C0_ENTRYLO0);
p                1089 arch/mips/mm/tlbex.c 	UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
p                1090 arch/mips/mm/tlbex.c 	build_convert_pte_to_entrylo(p, ptep);
p                1092 arch/mips/mm/tlbex.c 		uasm_i_mfc0(p, tmp, C0_INDEX);
p                1094 arch/mips/mm/tlbex.c 		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
p                1095 arch/mips/mm/tlbex.c 	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
p                1106 arch/mips/mm/tlbex.c build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
p                1120 arch/mips/mm/tlbex.c 		UASM_i_MFC0(p, tmp, C0_BADVADDR);
p                1123 arch/mips/mm/tlbex.c 			UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
p                1125 arch/mips/mm/tlbex.c 			UASM_i_MFC0(p, ptr, C0_CONTEXT);
p                1128 arch/mips/mm/tlbex.c 			UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
p                1130 arch/mips/mm/tlbex.c 			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
p                1132 arch/mips/mm/tlbex.c 		uasm_i_dsrl_safe(p, scratch, tmp,
p                1134 arch/mips/mm/tlbex.c 		uasm_il_bnez(p, r, scratch, label_vmalloc);
p                1139 arch/mips/mm/tlbex.c 			uasm_i_dins(p, ptr, 0, 0, 23);
p                1143 arch/mips/mm/tlbex.c 			UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
p                1145 arch/mips/mm/tlbex.c 			UASM_i_MFC0(p, ptr, C0_CONTEXT);
p                1147 arch/mips/mm/tlbex.c 		UASM_i_MFC0(p, tmp, C0_BADVADDR);
p                1150 arch/mips/mm/tlbex.c 			UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
p                1152 arch/mips/mm/tlbex.c 			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
p                1156 arch/mips/mm/tlbex.c 			uasm_i_dins(p, ptr, 0, 0, 23);
p                1158 arch/mips/mm/tlbex.c 		uasm_il_bltz(p, r, tmp, label_vmalloc);
p                1164 arch/mips/mm/tlbex.c 		uasm_i_ori(p, ptr, ptr, 0x540);
p                1165 arch/mips/mm/tlbex.c 		uasm_i_drotr(p, ptr, ptr, 11);
p                1176 arch/mips/mm/tlbex.c 		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
p                1178 arch/mips/mm/tlbex.c 	uasm_l_vmalloc_done(l, *p);
p                1188 arch/mips/mm/tlbex.c 		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
p                1191 arch/mips/mm/tlbex.c 	GET_CONTEXT(p, tmp); /* get context reg */
p                1193 arch/mips/mm/tlbex.c 	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);
p                1196 arch/mips/mm/tlbex.c 		UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
p                1198 arch/mips/mm/tlbex.c 		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
p                1199 arch/mips/mm/tlbex.c 		uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
p                1204 arch/mips/mm/tlbex.c 	uasm_i_dsrl_safe(p, scratch, tmp, PUD_SHIFT - 3);
p                1205 arch/mips/mm/tlbex.c 	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PUD - 1) << 3);
p                1208 arch/mips/mm/tlbex.c 		UASM_i_LWX(p, ptr, scratch, ptr);
p                1210 arch/mips/mm/tlbex.c 		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
p                1211 arch/mips/mm/tlbex.c 		UASM_i_LW(p, ptr, 0, ptr);
p                1219 arch/mips/mm/tlbex.c 	uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
p                1220 arch/mips/mm/tlbex.c 	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
p                1221 arch/mips/mm/tlbex.c 	GET_CONTEXT(p, tmp); /* get context reg */
p                1224 arch/mips/mm/tlbex.c 		UASM_i_LWX(p, scratch, scratch, ptr);
p                1226 arch/mips/mm/tlbex.c 		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
p                1227 arch/mips/mm/tlbex.c 		UASM_i_LW(p, scratch, 0, ptr);
p                1231 arch/mips/mm/tlbex.c 	build_adjust_context(p, tmp);
p                1234 arch/mips/mm/tlbex.c 	uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
p                1241 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                1249 arch/mips/mm/tlbex.c 		UASM_i_LWX(p, even, scratch, tmp);
p                1250 arch/mips/mm/tlbex.c 		UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
p                1251 arch/mips/mm/tlbex.c 		UASM_i_LWX(p, odd, scratch, tmp);
p                1253 arch/mips/mm/tlbex.c 		UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
p                1256 arch/mips/mm/tlbex.c 		UASM_i_LW(p, even, 0, ptr); /* get even pte */
p                1257 arch/mips/mm/tlbex.c 		UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
p                1260 arch/mips/mm/tlbex.c 		uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL));
p                1261 arch/mips/mm/tlbex.c 		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
p                1262 arch/mips/mm/tlbex.c 		uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL));
p                1264 arch/mips/mm/tlbex.c 		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
p                1265 arch/mips/mm/tlbex.c 		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
p                1266 arch/mips/mm/tlbex.c 		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
p                1268 arch/mips/mm/tlbex.c 	UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
p                1271 arch/mips/mm/tlbex.c 		uasm_i_ehb(p);
p                1272 arch/mips/mm/tlbex.c 		UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg);
p                1273 arch/mips/mm/tlbex.c 		build_tlb_write_entry(p, l, r, tlb_random);
p                1274 arch/mips/mm/tlbex.c 		uasm_l_leave(l, *p);
p                1277 arch/mips/mm/tlbex.c 		build_tlb_write_entry(p, l, r, tlb_random);
p                1278 arch/mips/mm/tlbex.c 		uasm_l_leave(l, *p);
p                1279 arch/mips/mm/tlbex.c 		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
p                1281 arch/mips/mm/tlbex.c 		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
p                1282 arch/mips/mm/tlbex.c 		build_tlb_write_entry(p, l, r, tlb_random);
p                1283 arch/mips/mm/tlbex.c 		uasm_l_leave(l, *p);
p                1287 arch/mips/mm/tlbex.c 	uasm_i_eret(p); /* return from trap */
p                1302 arch/mips/mm/tlbex.c 	u32 *p = tlb_handler;
p                1316 arch/mips/mm/tlbex.c 		htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
p                1330 arch/mips/mm/tlbex.c 			uasm_i_dmfc0(&p, K0, C0_BADVADDR);
p                1331 arch/mips/mm/tlbex.c 			uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
p                1332 arch/mips/mm/tlbex.c 			uasm_i_xor(&p, K0, K0, K1);
p                1333 arch/mips/mm/tlbex.c 			uasm_i_dsrl_safe(&p, K1, K0, 62);
p                1334 arch/mips/mm/tlbex.c 			uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
p                1335 arch/mips/mm/tlbex.c 			uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
p                1336 arch/mips/mm/tlbex.c 			uasm_i_or(&p, K0, K0, K1);
p                1337 arch/mips/mm/tlbex.c 			uasm_il_bnez(&p, &r, K0, label_leave);
p                1342 arch/mips/mm/tlbex.c 		build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
p                1344 arch/mips/mm/tlbex.c 		build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
p                1348 arch/mips/mm/tlbex.c 		build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
p                1351 arch/mips/mm/tlbex.c 		build_get_ptep(&p, K0, K1);
p                1352 arch/mips/mm/tlbex.c 		build_update_entries(&p, K0, K1);
p                1353 arch/mips/mm/tlbex.c 		build_tlb_write_entry(&p, &l, &r, tlb_random);
p                1354 arch/mips/mm/tlbex.c 		uasm_l_leave(&l, p);
p                1355 arch/mips/mm/tlbex.c 		uasm_i_eret(&p); /* return from trap */
p                1358 arch/mips/mm/tlbex.c 	uasm_l_tlb_huge_update(&l, p);
p                1360 arch/mips/mm/tlbex.c 		UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
p                1361 arch/mips/mm/tlbex.c 	build_huge_update_entries(&p, htlb_info.huge_pte, K1);
p                1362 arch/mips/mm/tlbex.c 	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
p                1367 arch/mips/mm/tlbex.c 	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
p                1382 arch/mips/mm/tlbex.c 			if ((p - tlb_handler) > 64)
p                1389 arch/mips/mm/tlbex.c 			uasm_copy_handler(relocs, labels, tlb_handler, p, f);
p                1390 arch/mips/mm/tlbex.c 			final_len = p - tlb_handler;
p                1393 arch/mips/mm/tlbex.c 			if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
p                1394 arch/mips/mm/tlbex.c 			    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
p                1402 arch/mips/mm/tlbex.c 			if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
p                1404 arch/mips/mm/tlbex.c 				uasm_copy_handler(relocs, labels, tlb_handler, p, f);
p                1405 arch/mips/mm/tlbex.c 				final_len = p - tlb_handler;
p                1425 arch/mips/mm/tlbex.c 				    split < p - MIPS64_REFILL_INSNS)
p                1464 arch/mips/mm/tlbex.c 				uasm_copy_handler(relocs, labels, split, p, final_handler);
p                1466 arch/mips/mm/tlbex.c 					    (p - split);
p                1530 arch/mips/mm/tlbex.c 	u32 *p = tlb_handler;
p                1539 arch/mips/mm/tlbex.c 		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
p                1540 arch/mips/mm/tlbex.c 		uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
p                1541 arch/mips/mm/tlbex.c 		uasm_il_beqz(&p, &r, K1, label_vmalloc);
p                1542 arch/mips/mm/tlbex.c 		uasm_i_nop(&p);
p                1544 arch/mips/mm/tlbex.c 		uasm_il_bgez(&p, &r, K0, label_large_segbits_fault);
p                1545 arch/mips/mm/tlbex.c 		uasm_i_nop(&p);
p                1546 arch/mips/mm/tlbex.c 		uasm_l_vmalloc(&l, p);
p                1549 arch/mips/mm/tlbex.c 	uasm_i_dmfc0(&p, K1, C0_PGD);
p                1551 arch/mips/mm/tlbex.c 	uasm_i_lddir(&p, K0, K1, 3);  /* global page dir */
p                1553 arch/mips/mm/tlbex.c 	uasm_i_lddir(&p, K1, K0, 1);  /* middle page dir */
p                1555 arch/mips/mm/tlbex.c 	uasm_i_ldpte(&p, K1, 0);      /* even */
p                1556 arch/mips/mm/tlbex.c 	uasm_i_ldpte(&p, K1, 1);      /* odd */
p                1557 arch/mips/mm/tlbex.c 	uasm_i_tlbwr(&p);
p                1561 arch/mips/mm/tlbex.c 		uasm_i_lui(&p, K0, PM_DEFAULT_MASK >> 16);
p                1562 arch/mips/mm/tlbex.c 		uasm_i_ori(&p, K0, K0, PM_DEFAULT_MASK & 0xffff);
p                1563 arch/mips/mm/tlbex.c 		uasm_i_mtc0(&p, K0, C0_PAGEMASK);
p                1565 arch/mips/mm/tlbex.c 		uasm_i_ori(&p, K0, 0, PM_DEFAULT_MASK);
p                1566 arch/mips/mm/tlbex.c 		uasm_i_mtc0(&p, K0, C0_PAGEMASK);
p                1568 arch/mips/mm/tlbex.c 		uasm_i_mtc0(&p, 0, C0_PAGEMASK);
p                1571 arch/mips/mm/tlbex.c 	uasm_i_eret(&p);
p                1574 arch/mips/mm/tlbex.c 		uasm_l_large_segbits_fault(&l, p);
p                1575 arch/mips/mm/tlbex.c 		UASM_i_LA(&p, K1, (unsigned long)tlb_do_page_fault_0);
p                1576 arch/mips/mm/tlbex.c 		uasm_i_jr(&p, K1);
p                1577 arch/mips/mm/tlbex.c 		uasm_i_nop(&p);
p                1592 arch/mips/mm/tlbex.c 	u32 *p = (u32 *)msk_isa16_mode((ulong)tlbmiss_handler_setup_pgd);
p                1597 arch/mips/mm/tlbex.c 	memset(p, 0, tlbmiss_handler_setup_pgd_end - (char *)p);
p                1613 arch/mips/mm/tlbex.c 		UASM_i_SRA(&p, a1, a0, 29);
p                1614 arch/mips/mm/tlbex.c 		UASM_i_ADDIU(&p, a1, a1, 4);
p                1615 arch/mips/mm/tlbex.c 		uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
p                1616 arch/mips/mm/tlbex.c 		uasm_i_nop(&p);
p                1617 arch/mips/mm/tlbex.c 		uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
p                1618 arch/mips/mm/tlbex.c 		uasm_l_tlbl_goaround1(&l, p);
p                1619 arch/mips/mm/tlbex.c 		UASM_i_SLL(&p, a0, a0, 11);
p                1620 arch/mips/mm/tlbex.c 		UASM_i_MTC0(&p, a0, C0_CONTEXT);
p                1621 arch/mips/mm/tlbex.c 		uasm_i_jr(&p, 31);
p                1622 arch/mips/mm/tlbex.c 		uasm_i_ehb(&p);
p                1626 arch/mips/mm/tlbex.c 			UASM_i_MTC0(&p, a0, C0_PWBASE);
p                1628 arch/mips/mm/tlbex.c 			UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
p                1629 arch/mips/mm/tlbex.c 		uasm_i_jr(&p, 31);
p                1630 arch/mips/mm/tlbex.c 		uasm_i_ehb(&p);
p                1635 arch/mips/mm/tlbex.c 	UASM_i_CPUID_MFC0(&p, a1, SMP_CPUID_REG);
p                1636 arch/mips/mm/tlbex.c 	UASM_i_SRL_SAFE(&p, a1, a1, SMP_CPUID_PTRSHIFT);
p                1637 arch/mips/mm/tlbex.c 	UASM_i_LA_mostly(&p, a2, pgdc);
p                1638 arch/mips/mm/tlbex.c 	UASM_i_ADDU(&p, a2, a2, a1);
p                1639 arch/mips/mm/tlbex.c 	UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
p                1641 arch/mips/mm/tlbex.c 	UASM_i_LA_mostly(&p, a2, pgdc);
p                1642 arch/mips/mm/tlbex.c 	UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
p                1647 arch/mips/mm/tlbex.c 		UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
p                1648 arch/mips/mm/tlbex.c 		uasm_i_jr(&p, 31);
p                1649 arch/mips/mm/tlbex.c 		uasm_i_ehb(&p);
p                1651 arch/mips/mm/tlbex.c 		uasm_i_jr(&p, 31);
p                1652 arch/mips/mm/tlbex.c 		uasm_i_nop(&p);
p                1655 arch/mips/mm/tlbex.c 	if (p >= (u32 *)tlbmiss_handler_setup_pgd_end)
p                1660 arch/mips/mm/tlbex.c 		 (unsigned int)(p - (u32 *)tlbmiss_handler_setup_pgd));
p                1667 arch/mips/mm/tlbex.c iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
p                1671 arch/mips/mm/tlbex.c 		uasm_i_sync(p, 0);
p                1674 arch/mips/mm/tlbex.c 		uasm_i_lld(p, pte, 0, ptr);
p                1677 arch/mips/mm/tlbex.c 		UASM_i_LL(p, pte, 0, ptr);
p                1681 arch/mips/mm/tlbex.c 		uasm_i_ld(p, pte, 0, ptr);
p                1684 arch/mips/mm/tlbex.c 		UASM_i_LW(p, pte, 0, ptr);
p                1689 arch/mips/mm/tlbex.c iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
p                1696 arch/mips/mm/tlbex.c 		uasm_i_lui(p, scratch, swmode >> 16);
p                1697 arch/mips/mm/tlbex.c 		uasm_i_or(p, pte, pte, scratch);
p                1700 arch/mips/mm/tlbex.c 		uasm_i_ori(p, pte, pte, mode);
p                1706 arch/mips/mm/tlbex.c 		uasm_i_scd(p, pte, 0, ptr);
p                1709 arch/mips/mm/tlbex.c 		UASM_i_SC(p, pte, 0, ptr);
p                1712 arch/mips/mm/tlbex.c 		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
p                1714 arch/mips/mm/tlbex.c 		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
p                1719 arch/mips/mm/tlbex.c 		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
p                1720 arch/mips/mm/tlbex.c 		uasm_i_ori(p, pte, pte, hwmode);
p                1722 arch/mips/mm/tlbex.c 		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
p                1723 arch/mips/mm/tlbex.c 		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
p                1725 arch/mips/mm/tlbex.c 		uasm_i_lw(p, pte, 0, ptr);
p                1727 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                1729 arch/mips/mm/tlbex.c 	uasm_i_nop(p);
p                1734 arch/mips/mm/tlbex.c 		uasm_i_sd(p, pte, 0, ptr);
p                1737 arch/mips/mm/tlbex.c 		UASM_i_SW(p, pte, 0, ptr);
p                1741 arch/mips/mm/tlbex.c 		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
p                1742 arch/mips/mm/tlbex.c 		uasm_i_ori(p, pte, pte, hwmode);
p                1744 arch/mips/mm/tlbex.c 		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
p                1745 arch/mips/mm/tlbex.c 		uasm_i_lw(p, pte, 0, ptr);
p                1757 arch/mips/mm/tlbex.c build_pte_present(u32 **p, struct uasm_reloc **r,
p                1765 arch/mips/mm/tlbex.c 			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
p                1766 arch/mips/mm/tlbex.c 			uasm_i_nop(p);
p                1769 arch/mips/mm/tlbex.c 				uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
p                1772 arch/mips/mm/tlbex.c 			uasm_i_andi(p, t, cur, 1);
p                1773 arch/mips/mm/tlbex.c 			uasm_il_beqz(p, r, t, lid);
p                1776 arch/mips/mm/tlbex.c 				iPTE_LW(p, pte, ptr);
p                1780 arch/mips/mm/tlbex.c 			uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
p                1783 arch/mips/mm/tlbex.c 		uasm_i_andi(p, t, cur,
p                1785 arch/mips/mm/tlbex.c 		uasm_i_xori(p, t, t, _PAGE_PRESENT >> _PAGE_PRESENT_SHIFT);
p                1786 arch/mips/mm/tlbex.c 		uasm_il_bnez(p, r, t, lid);
p                1789 arch/mips/mm/tlbex.c 			iPTE_LW(p, pte, ptr);
p                1795 arch/mips/mm/tlbex.c build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
p                1800 arch/mips/mm/tlbex.c 	iPTE_SW(p, r, pte, ptr, mode, scratch);
p                1808 arch/mips/mm/tlbex.c build_pte_writable(u32 **p, struct uasm_reloc **r,
p                1816 arch/mips/mm/tlbex.c 		uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
p                1819 arch/mips/mm/tlbex.c 	uasm_i_andi(p, t, cur,
p                1821 arch/mips/mm/tlbex.c 	uasm_i_xori(p, t, t,
p                1823 arch/mips/mm/tlbex.c 	uasm_il_bnez(p, r, t, lid);
p                1826 arch/mips/mm/tlbex.c 		iPTE_LW(p, pte, ptr);
p                1828 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                1835 arch/mips/mm/tlbex.c build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
p                1841 arch/mips/mm/tlbex.c 	iPTE_SW(p, r, pte, ptr, mode, scratch);
p                1849 arch/mips/mm/tlbex.c build_pte_modifiable(u32 **p, struct uasm_reloc **r,
p                1854 arch/mips/mm/tlbex.c 		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
p                1855 arch/mips/mm/tlbex.c 		uasm_i_nop(p);
p                1858 arch/mips/mm/tlbex.c 		uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT);
p                1859 arch/mips/mm/tlbex.c 		uasm_i_andi(p, t, t, 1);
p                1860 arch/mips/mm/tlbex.c 		uasm_il_beqz(p, r, t, lid);
p                1863 arch/mips/mm/tlbex.c 			iPTE_LW(p, pte, ptr);
p                1879 arch/mips/mm/tlbex.c build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
p                1881 arch/mips/mm/tlbex.c 	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
p                1882 arch/mips/mm/tlbex.c 	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
p                1883 arch/mips/mm/tlbex.c 	uasm_i_tlbwi(p);
p                1884 arch/mips/mm/tlbex.c 	uasm_i_jr(p, tmp);
p                1885 arch/mips/mm/tlbex.c 	uasm_i_rfe(p); /* branch delay */
p                1895 arch/mips/mm/tlbex.c build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
p                1899 arch/mips/mm/tlbex.c 	uasm_i_mfc0(p, tmp, C0_INDEX);
p                1900 arch/mips/mm/tlbex.c 	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
p                1901 arch/mips/mm/tlbex.c 	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
p                1902 arch/mips/mm/tlbex.c 	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
p                1903 arch/mips/mm/tlbex.c 	uasm_i_tlbwi(p); /* cp0 delay */
p                1904 arch/mips/mm/tlbex.c 	uasm_i_jr(p, tmp);
p                1905 arch/mips/mm/tlbex.c 	uasm_i_rfe(p); /* branch delay */
p                1906 arch/mips/mm/tlbex.c 	uasm_l_r3000_write_probe_fail(l, *p);
p                1907 arch/mips/mm/tlbex.c 	uasm_i_tlbwr(p); /* cp0 delay */
p                1908 arch/mips/mm/tlbex.c 	uasm_i_jr(p, tmp);
p                1909 arch/mips/mm/tlbex.c 	uasm_i_rfe(p); /* branch delay */
p                1913 arch/mips/mm/tlbex.c build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
p                1918 arch/mips/mm/tlbex.c 	uasm_i_mfc0(p, pte, C0_BADVADDR);
p                1919 arch/mips/mm/tlbex.c 	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
p                1920 arch/mips/mm/tlbex.c 	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
p                1921 arch/mips/mm/tlbex.c 	uasm_i_srl(p, pte, pte, 22); /* load delay */
p                1922 arch/mips/mm/tlbex.c 	uasm_i_sll(p, pte, pte, 2);
p                1923 arch/mips/mm/tlbex.c 	uasm_i_addu(p, ptr, ptr, pte);
p                1924 arch/mips/mm/tlbex.c 	uasm_i_mfc0(p, pte, C0_CONTEXT);
p                1925 arch/mips/mm/tlbex.c 	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
p                1926 arch/mips/mm/tlbex.c 	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
p                1927 arch/mips/mm/tlbex.c 	uasm_i_addu(p, ptr, ptr, pte);
p                1928 arch/mips/mm/tlbex.c 	uasm_i_lw(p, pte, 0, ptr);
p                1929 arch/mips/mm/tlbex.c 	uasm_i_tlbp(p); /* load delay */
p                1934 arch/mips/mm/tlbex.c 	u32 *p = (u32 *)handle_tlbl;
p                1938 arch/mips/mm/tlbex.c 	memset(p, 0, handle_tlbl_end - (char *)p);
p                1942 arch/mips/mm/tlbex.c 	build_r3000_tlbchange_handler_head(&p, K0, K1);
p                1943 arch/mips/mm/tlbex.c 	build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
p                1944 arch/mips/mm/tlbex.c 	uasm_i_nop(&p); /* load delay */
p                1945 arch/mips/mm/tlbex.c 	build_make_valid(&p, &r, K0, K1, -1);
p                1946 arch/mips/mm/tlbex.c 	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
p                1948 arch/mips/mm/tlbex.c 	uasm_l_nopage_tlbl(&l, p);
p                1949 arch/mips/mm/tlbex.c 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
p                1950 arch/mips/mm/tlbex.c 	uasm_i_nop(&p);
p                1952 arch/mips/mm/tlbex.c 	if (p >= (u32 *)handle_tlbl_end)
p                1957 arch/mips/mm/tlbex.c 		 (unsigned int)(p - (u32 *)handle_tlbl));
p                1964 arch/mips/mm/tlbex.c 	u32 *p = (u32 *)handle_tlbs;
p                1968 arch/mips/mm/tlbex.c 	memset(p, 0, handle_tlbs_end - (char *)p);
p                1972 arch/mips/mm/tlbex.c 	build_r3000_tlbchange_handler_head(&p, K0, K1);
p                1973 arch/mips/mm/tlbex.c 	build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
p                1974 arch/mips/mm/tlbex.c 	uasm_i_nop(&p); /* load delay */
p                1975 arch/mips/mm/tlbex.c 	build_make_write(&p, &r, K0, K1, -1);
p                1976 arch/mips/mm/tlbex.c 	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
p                1978 arch/mips/mm/tlbex.c 	uasm_l_nopage_tlbs(&l, p);
p                1979 arch/mips/mm/tlbex.c 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
p                1980 arch/mips/mm/tlbex.c 	uasm_i_nop(&p);
p                1982 arch/mips/mm/tlbex.c 	if (p >= (u32 *)handle_tlbs_end)
p                1987 arch/mips/mm/tlbex.c 		 (unsigned int)(p - (u32 *)handle_tlbs));
p                1994 arch/mips/mm/tlbex.c 	u32 *p = (u32 *)handle_tlbm;
p                1998 arch/mips/mm/tlbex.c 	memset(p, 0, handle_tlbm_end - (char *)p);
p                2002 arch/mips/mm/tlbex.c 	build_r3000_tlbchange_handler_head(&p, K0, K1);
p                2003 arch/mips/mm/tlbex.c 	build_pte_modifiable(&p, &r, K0, K1,  -1, label_nopage_tlbm);
p                2004 arch/mips/mm/tlbex.c 	uasm_i_nop(&p); /* load delay */
p                2005 arch/mips/mm/tlbex.c 	build_make_write(&p, &r, K0, K1, -1);
p                2006 arch/mips/mm/tlbex.c 	build_r3000_pte_reload_tlbwi(&p, K0, K1);
p                2008 arch/mips/mm/tlbex.c 	uasm_l_nopage_tlbm(&l, p);
p                2009 arch/mips/mm/tlbex.c 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
p                2010 arch/mips/mm/tlbex.c 	uasm_i_nop(&p);
p                2012 arch/mips/mm/tlbex.c 	if (p >= (u32 *)handle_tlbm_end)
p                2017 arch/mips/mm/tlbex.c 		 (unsigned int)(p - (u32 *)handle_tlbm));
p                2047 arch/mips/mm/tlbex.c build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
p                2050 arch/mips/mm/tlbex.c 	struct work_registers wr = build_get_work_registers(p);
p                2053 arch/mips/mm/tlbex.c 	build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
p                2055 arch/mips/mm/tlbex.c 	build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
p                2064 arch/mips/mm/tlbex.c 	build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
p                2067 arch/mips/mm/tlbex.c 	UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
p                2068 arch/mips/mm/tlbex.c 	UASM_i_LW(p, wr.r2, 0, wr.r2);
p                2069 arch/mips/mm/tlbex.c 	UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
p                2070 arch/mips/mm/tlbex.c 	uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
p                2071 arch/mips/mm/tlbex.c 	UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
p                2074 arch/mips/mm/tlbex.c 	uasm_l_smp_pgtable_change(l, *p);
p                2076 arch/mips/mm/tlbex.c 	iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
p                2078 arch/mips/mm/tlbex.c 		build_tlb_probe_entry(p);
p                2081 arch/mips/mm/tlbex.c 			uasm_i_ehb(p);
p                2082 arch/mips/mm/tlbex.c 			uasm_i_mfc0(p, wr.r3, C0_INDEX);
p                2083 arch/mips/mm/tlbex.c 			uasm_il_bltz(p, r, wr.r3, label_leave);
p                2084 arch/mips/mm/tlbex.c 			uasm_i_nop(p);
p                2091 arch/mips/mm/tlbex.c build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
p                2095 arch/mips/mm/tlbex.c 	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
p                2096 arch/mips/mm/tlbex.c 	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
p                2097 arch/mips/mm/tlbex.c 	build_update_entries(p, tmp, ptr);
p                2098 arch/mips/mm/tlbex.c 	build_tlb_write_entry(p, l, r, tlb_indexed);
p                2099 arch/mips/mm/tlbex.c 	uasm_l_leave(l, *p);
p                2100 arch/mips/mm/tlbex.c 	build_restore_work_registers(p);
p                2101 arch/mips/mm/tlbex.c 	uasm_i_eret(p); /* return from trap */
p                2104 arch/mips/mm/tlbex.c 	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
p                2110 arch/mips/mm/tlbex.c 	u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbl);
p                2115 arch/mips/mm/tlbex.c 	memset(p, 0, handle_tlbl_end - (char *)p);
p                2122 arch/mips/mm/tlbex.c 		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
p                2123 arch/mips/mm/tlbex.c 		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
p                2124 arch/mips/mm/tlbex.c 		uasm_i_xor(&p, K0, K0, K1);
p                2125 arch/mips/mm/tlbex.c 		uasm_i_dsrl_safe(&p, K1, K0, 62);
p                2126 arch/mips/mm/tlbex.c 		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
p                2127 arch/mips/mm/tlbex.c 		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
p                2128 arch/mips/mm/tlbex.c 		uasm_i_or(&p, K0, K0, K1);
p                2129 arch/mips/mm/tlbex.c 		uasm_il_bnez(&p, &r, K0, label_leave);
p                2133 arch/mips/mm/tlbex.c 	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
p                2134 arch/mips/mm/tlbex.c 	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
p                2136 arch/mips/mm/tlbex.c 		build_tlb_probe_entry(&p);
p                2144 arch/mips/mm/tlbex.c 			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
p                2147 arch/mips/mm/tlbex.c 			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
p                2148 arch/mips/mm/tlbex.c 			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
p                2150 arch/mips/mm/tlbex.c 		uasm_i_nop(&p);
p                2160 arch/mips/mm/tlbex.c 		uasm_i_tlbr(&p);
p                2165 arch/mips/mm/tlbex.c 				uasm_i_ehb(&p);
p                2176 arch/mips/mm/tlbex.c 			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
p                2178 arch/mips/mm/tlbex.c 			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
p                2179 arch/mips/mm/tlbex.c 			uasm_i_beqz(&p, wr.r3, 8);
p                2182 arch/mips/mm/tlbex.c 		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
p                2184 arch/mips/mm/tlbex.c 		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
p                2190 arch/mips/mm/tlbex.c 			uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
p                2191 arch/mips/mm/tlbex.c 			uasm_i_nop(&p);
p                2192 arch/mips/mm/tlbex.c 			uasm_l_tlbl_goaround1(&l, p);
p                2194 arch/mips/mm/tlbex.c 			uasm_i_andi(&p, wr.r3, wr.r3, 2);
p                2195 arch/mips/mm/tlbex.c 			uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
p                2196 arch/mips/mm/tlbex.c 			uasm_i_nop(&p);
p                2198 arch/mips/mm/tlbex.c 		uasm_l_tlbl_goaround1(&l, p);
p                2200 arch/mips/mm/tlbex.c 	build_make_valid(&p, &r, wr.r1, wr.r2, wr.r3);
p                2201 arch/mips/mm/tlbex.c 	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
p                2208 arch/mips/mm/tlbex.c 	uasm_l_tlb_huge_update(&l, p);
p                2209 arch/mips/mm/tlbex.c 	iPTE_LW(&p, wr.r1, wr.r2);
p                2210 arch/mips/mm/tlbex.c 	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
p                2211 arch/mips/mm/tlbex.c 	build_tlb_probe_entry(&p);
p                2219 arch/mips/mm/tlbex.c 			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
p                2222 arch/mips/mm/tlbex.c 			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
p                2223 arch/mips/mm/tlbex.c 			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
p                2225 arch/mips/mm/tlbex.c 		uasm_i_nop(&p);
p                2235 arch/mips/mm/tlbex.c 		uasm_i_tlbr(&p);
p                2240 arch/mips/mm/tlbex.c 				uasm_i_ehb(&p);
p                2251 arch/mips/mm/tlbex.c 			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
p                2253 arch/mips/mm/tlbex.c 			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
p                2254 arch/mips/mm/tlbex.c 			uasm_i_beqz(&p, wr.r3, 8);
p                2257 arch/mips/mm/tlbex.c 		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
p                2259 arch/mips/mm/tlbex.c 		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
p                2265 arch/mips/mm/tlbex.c 			uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
p                2267 arch/mips/mm/tlbex.c 			uasm_i_andi(&p, wr.r3, wr.r3, 2);
p                2268 arch/mips/mm/tlbex.c 			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
p                2271 arch/mips/mm/tlbex.c 			uasm_i_nop(&p);
p                2276 arch/mips/mm/tlbex.c 		build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);
p                2278 arch/mips/mm/tlbex.c 		uasm_l_tlbl_goaround2(&l, p);
p                2280 arch/mips/mm/tlbex.c 	uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
p                2281 arch/mips/mm/tlbex.c 	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
p                2284 arch/mips/mm/tlbex.c 	uasm_l_nopage_tlbl(&l, p);
p                2286 arch/mips/mm/tlbex.c 		uasm_i_sync(&p, 0);
p                2287 arch/mips/mm/tlbex.c 	build_restore_work_registers(&p);
p                2290 arch/mips/mm/tlbex.c 		uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
p                2291 arch/mips/mm/tlbex.c 		uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
p                2292 arch/mips/mm/tlbex.c 		uasm_i_jr(&p, K0);
p                2295 arch/mips/mm/tlbex.c 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
p                2296 arch/mips/mm/tlbex.c 	uasm_i_nop(&p);
p                2298 arch/mips/mm/tlbex.c 	if (p >= (u32 *)handle_tlbl_end)
p                2303 arch/mips/mm/tlbex.c 		 (unsigned int)(p - (u32 *)handle_tlbl));
p                2310 arch/mips/mm/tlbex.c 	u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbs);
p                2315 arch/mips/mm/tlbex.c 	memset(p, 0, handle_tlbs_end - (char *)p);
p                2319 arch/mips/mm/tlbex.c 	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
p                2320 arch/mips/mm/tlbex.c 	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
p                2322 arch/mips/mm/tlbex.c 		build_tlb_probe_entry(&p);
p                2323 arch/mips/mm/tlbex.c 	build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
p                2324 arch/mips/mm/tlbex.c 	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
p                2331 arch/mips/mm/tlbex.c 	uasm_l_tlb_huge_update(&l, p);
p                2332 arch/mips/mm/tlbex.c 	iPTE_LW(&p, wr.r1, wr.r2);
p                2333 arch/mips/mm/tlbex.c 	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
p                2334 arch/mips/mm/tlbex.c 	build_tlb_probe_entry(&p);
p                2335 arch/mips/mm/tlbex.c 	uasm_i_ori(&p, wr.r1, wr.r1,
p                2337 arch/mips/mm/tlbex.c 	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
p                2340 arch/mips/mm/tlbex.c 	uasm_l_nopage_tlbs(&l, p);
p                2342 arch/mips/mm/tlbex.c 		uasm_i_sync(&p, 0);
p                2343 arch/mips/mm/tlbex.c 	build_restore_work_registers(&p);
p                2346 arch/mips/mm/tlbex.c 		uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
p                2347 arch/mips/mm/tlbex.c 		uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
p                2348 arch/mips/mm/tlbex.c 		uasm_i_jr(&p, K0);
p                2351 arch/mips/mm/tlbex.c 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
p                2352 arch/mips/mm/tlbex.c 	uasm_i_nop(&p);
p                2354 arch/mips/mm/tlbex.c 	if (p >= (u32 *)handle_tlbs_end)
p                2359 arch/mips/mm/tlbex.c 		 (unsigned int)(p - (u32 *)handle_tlbs));
p                2366 arch/mips/mm/tlbex.c 	u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbm);
p                2371 arch/mips/mm/tlbex.c 	memset(p, 0, handle_tlbm_end - (char *)p);
p                2375 arch/mips/mm/tlbex.c 	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
p                2376 arch/mips/mm/tlbex.c 	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
p                2378 arch/mips/mm/tlbex.c 		build_tlb_probe_entry(&p);
p                2380 arch/mips/mm/tlbex.c 	build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
p                2381 arch/mips/mm/tlbex.c 	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
p                2388 arch/mips/mm/tlbex.c 	uasm_l_tlb_huge_update(&l, p);
p                2389 arch/mips/mm/tlbex.c 	iPTE_LW(&p, wr.r1, wr.r2);
p                2390 arch/mips/mm/tlbex.c 	build_pte_modifiable(&p, &r, wr.r1, wr.r2,  wr.r3, label_nopage_tlbm);
p                2391 arch/mips/mm/tlbex.c 	build_tlb_probe_entry(&p);
p                2392 arch/mips/mm/tlbex.c 	uasm_i_ori(&p, wr.r1, wr.r1,
p                2394 arch/mips/mm/tlbex.c 	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
p                2397 arch/mips/mm/tlbex.c 	uasm_l_nopage_tlbm(&l, p);
p                2399 arch/mips/mm/tlbex.c 		uasm_i_sync(&p, 0);
p                2400 arch/mips/mm/tlbex.c 	build_restore_work_registers(&p);
p                2403 arch/mips/mm/tlbex.c 		uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
p                2404 arch/mips/mm/tlbex.c 		uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
p                2405 arch/mips/mm/tlbex.c 		uasm_i_jr(&p, K0);
p                2408 arch/mips/mm/tlbex.c 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
p                2409 arch/mips/mm/tlbex.c 	uasm_i_nop(&p);
p                2411 arch/mips/mm/tlbex.c 	if (p >= (u32 *)handle_tlbm_end)
p                2416 arch/mips/mm/tlbex.c 		 (unsigned int)(p - (u32 *)handle_tlbm));
p                 558 arch/mips/mm/uasm.c void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg,
p                 561 arch/mips/mm/uasm.c 	uasm_r_mips_pc16(r, *p, lid);
p                 562 arch/mips/mm/uasm.c 	uasm_i_bltz(p, reg, 0);
p                 566 arch/mips/mm/uasm.c void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
p                 568 arch/mips/mm/uasm.c 	uasm_r_mips_pc16(r, *p, lid);
p                 569 arch/mips/mm/uasm.c 	uasm_i_b(p, 0);
p                 573 arch/mips/mm/uasm.c void uasm_il_beq(u32 **p, struct uasm_reloc **r, unsigned int r1,
p                 576 arch/mips/mm/uasm.c 	uasm_r_mips_pc16(r, *p, lid);
p                 577 arch/mips/mm/uasm.c 	uasm_i_beq(p, r1, r2, 0);
p                 581 arch/mips/mm/uasm.c void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg,
p                 584 arch/mips/mm/uasm.c 	uasm_r_mips_pc16(r, *p, lid);
p                 585 arch/mips/mm/uasm.c 	uasm_i_beqz(p, reg, 0);
p                 589 arch/mips/mm/uasm.c void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg,
p                 592 arch/mips/mm/uasm.c 	uasm_r_mips_pc16(r, *p, lid);
p                 593 arch/mips/mm/uasm.c 	uasm_i_beqzl(p, reg, 0);
p                 597 arch/mips/mm/uasm.c void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
p                 600 arch/mips/mm/uasm.c 	uasm_r_mips_pc16(r, *p, lid);
p                 601 arch/mips/mm/uasm.c 	uasm_i_bne(p, reg1, reg2, 0);
p                 605 arch/mips/mm/uasm.c void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg,
p                 608 arch/mips/mm/uasm.c 	uasm_r_mips_pc16(r, *p, lid);
p                 609 arch/mips/mm/uasm.c 	uasm_i_bnez(p, reg, 0);
p                 613 arch/mips/mm/uasm.c void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg,
p                 616 arch/mips/mm/uasm.c 	uasm_r_mips_pc16(r, *p, lid);
p                 617 arch/mips/mm/uasm.c 	uasm_i_bgezl(p, reg, 0);
p                 621 arch/mips/mm/uasm.c void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg,
p                 624 arch/mips/mm/uasm.c 	uasm_r_mips_pc16(r, *p, lid);
p                 625 arch/mips/mm/uasm.c 	uasm_i_bgez(p, reg, 0);
p                 629 arch/mips/mm/uasm.c void uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
p                 632 arch/mips/mm/uasm.c 	uasm_r_mips_pc16(r, *p, lid);
p                 633 arch/mips/mm/uasm.c 	uasm_i_bbit0(p, reg, bit, 0);
p                 637 arch/mips/mm/uasm.c void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
p                 640 arch/mips/mm/uasm.c 	uasm_r_mips_pc16(r, *p, lid);
p                 641 arch/mips/mm/uasm.c 	uasm_i_bbit1(p, reg, bit, 0);
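
The uasm_il_* helpers listed above (uasm.c:558-641) all follow the same two-step pattern: record a PC16 relocation against a numeric label id, then emit the branch with a zero displacement to be patched later. The sketch below shows, under stated assumptions, how the tlbex.c call sites earlier in this listing pair those emitters with label binding and relocation fixup. The buffer and control flow are invented for illustration; uasm_il_beqz(), uasm_i_nop(), uasm_i_eret(), uasm_l_leave(), K0 and label_leave appear in the listing, and uasm_resolve_relocs() is assumed to be the matching fixup entry point of the same uasm API.

	/*
	 * Illustrative sketch only -- not a real kernel handler.  It shows the
	 * emit/bind/resolve cycle used by the tlbex.c builders above: forward
	 * branches are emitted with a 0 offset plus a relocation record, labels
	 * are bound to addresses as code is emitted, and the recorded branches
	 * are patched at the end.
	 */
	static u32 example_buf[32];

	static void build_example_handler(void)
	{
		struct uasm_label labels[4], *l = labels;
		struct uasm_reloc relocs[4], *r = relocs;
		u32 *p = example_buf;

		memset(labels, 0, sizeof(labels));
		memset(relocs, 0, sizeof(relocs));

		/* beqz K0, label_leave: target unknown yet, recorded in relocs[] */
		uasm_il_beqz(&p, &r, K0, label_leave);
		uasm_i_nop(&p);				/* branch delay slot */

		/* ... whatever work the handler does ... */

		uasm_l_leave(&l, p);			/* bind label_leave to this address */
		uasm_i_eret(&p);

		uasm_resolve_relocs(relocs, labels);	/* patch the recorded branch */
	}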
p                 129 arch/mips/net/ebpf_jit.c 		u32 *p = &(ctx)->target[ctx->idx];		\
p                 131 arch/mips/net/ebpf_jit.c 			uasm_i_##func64(&p, ##__VA_ARGS__);	\
p                 133 arch/mips/net/ebpf_jit.c 			uasm_i_##func32(&p, ##__VA_ARGS__);	\
p                1790 arch/mips/net/ebpf_jit.c 	u32 *p;
p                1793 arch/mips/net/ebpf_jit.c 	for (p = area; size >= sizeof(u32); size -= sizeof(u32))
p                1794 arch/mips/net/ebpf_jit.c 		uasm_i_break(&p, BRK_BUG); /* Increments p */
p                  26 arch/mips/netlogic/xlr/platform.c static unsigned int nlm_xlr_uart_in(struct uart_port *p, int offset)
p                  32 arch/mips/netlogic/xlr/platform.c 	uartbase = (uint64_t)(long)p->membase;
p                  44 arch/mips/netlogic/xlr/platform.c static void nlm_xlr_uart_out(struct uart_port *p, int offset, int value)
p                  49 arch/mips/netlogic/xlr/platform.c 	uartbase = (uint64_t)(long)p->membase;
p                 181 arch/mips/pci/pci-xlp.c 	struct pci_bus *bus, *p;
p                 187 arch/mips/pci/pci-xlp.c 		for (p = bus->parent; p && p->parent && p->parent->number != 0;
p                 188 arch/mips/pci/pci-xlp.c 				p = p->parent)
p                 189 arch/mips/pci/pci-xlp.c 			bus = p;
p                 190 arch/mips/pci/pci-xlp.c 		return (p && p->parent) ? bus->self : NULL;
p                 193 arch/mips/pci/pci-xlp.c 		for (p = bus->parent; p && p->number != 0; p = p->parent)
p                 194 arch/mips/pci/pci-xlp.c 			bus = p;
p                 196 arch/mips/pci/pci-xlp.c 		return p ? bus->self : NULL;
p                 168 arch/mips/pci/pci-xlr.c 	struct pci_bus *bus, *p;
p                 172 arch/mips/pci/pci-xlr.c 	for (p = bus->parent; p && p->number != 0; p = p->parent)
p                 173 arch/mips/pci/pci-xlr.c 		bus = p;
p                 175 arch/mips/pci/pci-xlr.c 	return p ? bus->self : NULL;
p                 346 arch/mips/pmcs-msp71xx/msp_prom.c 	struct prom_pmemblock *p;
p                 348 arch/mips/pmcs-msp71xx/msp_prom.c 	p = prom_getmdesc();
p                 350 arch/mips/pmcs-msp71xx/msp_prom.c 	while (p->size) {
p                 354 arch/mips/pmcs-msp71xx/msp_prom.c 		type = prom_memtype_classify(p->type);
p                 355 arch/mips/pmcs-msp71xx/msp_prom.c 		base = p->base;
p                 356 arch/mips/pmcs-msp71xx/msp_prom.c 		size = p->size;
p                 359 arch/mips/pmcs-msp71xx/msp_prom.c 		p++;
p                  46 arch/mips/pmcs-msp71xx/msp_serial.c static void msp_serial_out(struct uart_port *p, int offset, int value)
p                  48 arch/mips/pmcs-msp71xx/msp_serial.c 	struct msp_uart_data *d = p->private_data;
p                  53 arch/mips/pmcs-msp71xx/msp_serial.c 	offset <<= p->regshift;
p                  54 arch/mips/pmcs-msp71xx/msp_serial.c 	writeb(value, p->membase + offset);
p                  57 arch/mips/pmcs-msp71xx/msp_serial.c static unsigned int msp_serial_in(struct uart_port *p, int offset)
p                  59 arch/mips/pmcs-msp71xx/msp_serial.c 	offset <<= p->regshift;
p                  61 arch/mips/pmcs-msp71xx/msp_serial.c 	return readb(p->membase + offset);
p                  64 arch/mips/pmcs-msp71xx/msp_serial.c static int msp_serial_handle_irq(struct uart_port *p)
p                  66 arch/mips/pmcs-msp71xx/msp_serial.c 	struct msp_uart_data *d = p->private_data;
p                  67 arch/mips/pmcs-msp71xx/msp_serial.c 	unsigned int iir = readb(p->membase + (UART_IIR << p->regshift));
p                  69 arch/mips/pmcs-msp71xx/msp_serial.c 	if (serial8250_handle_irq(p, iir)) {
p                  82 arch/mips/pmcs-msp71xx/msp_serial.c 		(void)readb(p->membase + 0xc0);
p                  83 arch/mips/pmcs-msp71xx/msp_serial.c 		writeb(d->last_lcr, p->membase + (UART_LCR << p->regshift));
p                  51 arch/mips/ralink/prom.c 		char *p = (char *) KSEG1ADDR(argv[i]);
p                  53 arch/mips/ralink/prom.c 		if (CPHYSADDR(p) && *p) {
p                  54 arch/mips/ralink/prom.c 			pr_debug("argv[%d]: %s\n", i, p);
p                  56 arch/mips/ralink/prom.c 			strlcat(arcs_cmdline, p, sizeof(arcs_cmdline));
p                 128 arch/mips/sgi-ip27/ip27-init.c 	u64 p, e, n_mode;
p                 139 arch/mips/sgi-ip27/ip27-init.c 	p = LOCAL_HUB_L(PI_CPU_PRESENT_A) & 1;
p                 142 arch/mips/sgi-ip27/ip27-init.c 	       p ? "a" : "no",
p                 145 arch/mips/sgi-ip27/ip27-init.c 	p = LOCAL_HUB_L(PI_CPU_PRESENT_B) & 1;
p                 148 arch/mips/sgi-ip27/ip27-init.c 	       p ? "a" : "no",
p                 197 arch/mips/sibyte/common/sb_tbprof.c 		u64 *p = sbp.sbprof_tbbuf[sbp.next_tb_sample++];
p                 206 arch/mips/sibyte/common/sb_tbprof.c 			p[i - 1] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
p                 208 arch/mips/sibyte/common/sb_tbprof.c 			p[i - 2] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
p                 210 arch/mips/sibyte/common/sb_tbprof.c 			p[i - 3] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
p                 212 arch/mips/sibyte/common/sb_tbprof.c 			p[i - 4] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
p                 214 arch/mips/sibyte/common/sb_tbprof.c 			p[i - 5] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
p                 216 arch/mips/sibyte/common/sb_tbprof.c 			p[i - 6] = __raw_readq(IOADDR(A_SCD_TRACE_READ));
p                  30 arch/mips/sni/irq.c static irqreturn_t sni_isa_irq_handler(int dummy, void *p)
p                  55 arch/mips/sni/pcimt.c 	char *p = boardtype;
p                  60 arch/mips/sni/pcimt.c 	p += sprintf(p, "%s PCI", (csmsr & 0x80) ? "RM200" : "RM300");
p                  62 arch/mips/sni/pcimt.c 		p += sprintf(p, ", board revision %s",
p                  66 arch/mips/sni/pcimt.c 	p += sprintf(p, ", ASIC PCI Rev %s", asic ? "1.0" : "1.1");
p                 380 arch/mips/sni/rm200.c static irqreturn_t sni_rm200_i8259A_irq_handler(int dummy, void *p)
p                  89 arch/nds32/include/asm/pmu.h #define to_nds32_pmu(p)			(container_of(p, struct nds32_pmu, pmu))
p                  86 arch/nds32/include/asm/processor.h unsigned long get_wchan(struct task_struct *p);
p                 153 arch/nds32/kernel/process.c 		unsigned long stk_sz, struct task_struct *p)
p                 155 arch/nds32/kernel/process.c 	struct pt_regs *childregs = task_pt_regs(p);
p                 157 arch/nds32/kernel/process.c 	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
p                 159 arch/nds32/kernel/process.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 162 arch/nds32/kernel/process.c 		p->thread.cpu_context.r6 = stack_start;
p                 164 arch/nds32/kernel/process.c 		p->thread.cpu_context.r7 = stk_sz;
p                 176 arch/nds32/kernel/process.c 	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
p                 177 arch/nds32/kernel/process.c 	p->thread.cpu_context.sp = (unsigned long)childregs;
p                 189 arch/nds32/kernel/process.c 		p->thread.fpu = current->thread.fpu;
p                 190 arch/nds32/kernel/process.c 		clear_fpu(task_pt_regs(p));
p                 191 arch/nds32/kernel/process.c 		set_stopped_child_used_math(p);
p                 236 arch/nds32/kernel/process.c unsigned long get_wchan(struct task_struct *p)
p                 242 arch/nds32/kernel/process.c 	if (!p || p == current || p->state == TASK_RUNNING)
p                 246 arch/nds32/kernel/process.c 		stack_start = (unsigned long)end_of_stack(p);
p                 247 arch/nds32/kernel/process.c 		stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE;
p                 249 arch/nds32/kernel/process.c 		fp = thread_saved_fp(p);
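
The nds32 get_wchan() fragments above (process.c:236-249) set up the classic frame-pointer walk: bound the search to the task's own stack, start from the frame pointer saved at context-switch time, and follow saved frames until a return address outside the scheduler is found. A minimal sketch of that walk follows; the frame layout it reads (saved return address at fp[-1], previous frame pointer at fp[-2]) is an assumption for illustration, not the real nds32 layout.

	/*
	 * Sketch of a frame-pointer based get_wchan().  The frame layout used
	 * here ([-1] = saved return address, [-2] = previous fp) is assumed
	 * purely for illustration.
	 */
	unsigned long example_get_wchan(struct task_struct *p)
	{
		unsigned long fp, lr, stack_start, stack_end;
		int count = 0;

		if (!p || p == current || p->state == TASK_RUNNING)
			return 0;

		stack_start = (unsigned long)end_of_stack(p);
		stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE;
		fp = thread_saved_fp(p);

		do {
			if (fp < stack_start || fp > stack_end)
				return 0;
			lr = ((unsigned long *)fp)[-1];	/* assumed layout */
			if (!in_sched_functions(lr))
				return lr;		/* task is blocked here */
			fp = ((unsigned long *)fp)[-2];	/* assumed layout */
		} while (count++ < 16);

		return 0;
	}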
p                  92 arch/nds32/kernel/setup.c 	int i, p = 0;
p                  97 arch/nds32/kernel/setup.c 			sprintf(str + p, "%s ", hwcap_str[i]);
p                  98 arch/nds32/kernel/setup.c 			p += strlen(hwcap_str[i]) + 1;
p                  42 arch/nds32/kernel/traps.c 		unsigned long p;
p                  48 arch/nds32/kernel/traps.c 		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
p                  49 arch/nds32/kernel/traps.c 			if (p >= bottom && p < top) {
p                  51 arch/nds32/kernel/traps.c 				if (__get_user(val, (unsigned long *)p) == 0)
p                  69 arch/nds32/kernel/traps.c 	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
p                  88 arch/nds32/kernel/traps.c 			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
p                  90 arch/nds32/kernel/traps.c 			p += sprintf(p, "bad PC value");
p                  18 arch/nds32/math-emu/fpuemu.c #define DEF3OP(name, p, f1, f2) \
p                  19 arch/nds32/math-emu/fpuemu.c void fpemu_##name##p(void *ft, void *fa, void *fb) \
p                  25 arch/nds32/math-emu/fpuemu.c #define DEF3OPNEG(name, p, f1, f2, f3) \
p                  26 arch/nds32/math-emu/fpuemu.c void fpemu_##name##p(void *ft, void *fa, void *fb) \
p                  55 arch/nds32/mm/init.c 	unsigned long v, p, e;
p                  64 arch/nds32/mm/init.c 	p = (u32) memblock_start_of_DRAM() & PAGE_MASK;
p                  67 arch/nds32/mm/init.c 	v = (u32) __va(p);
p                  70 arch/nds32/mm/init.c 	while (p < e) {
p                  88 arch/nds32/mm/init.c 		for (j = 0; p < e && j < PTRS_PER_PTE;
p                  89 arch/nds32/mm/init.c 		     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
p                  92 arch/nds32/mm/init.c 			set_pte(pte, __pte(p + pgprot_val(PAGE_KERNEL)));
p                  72 arch/nios2/include/asm/processor.h extern unsigned long get_wchan(struct task_struct *p);
p                  74 arch/nios2/include/asm/processor.h #define task_pt_regs(p) \
p                  75 arch/nios2/include/asm/processor.h 	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
p                  97 arch/nios2/kernel/kgdb.c void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
p                 100 arch/nios2/kernel/kgdb.c 	gdb_regs[GDB_SP] = p->thread.kregs->sp;
p                 101 arch/nios2/kernel/kgdb.c 	gdb_regs[GDB_PC] = p->thread.kregs->ea;
p                  53 arch/nios2/kernel/misaligned.c 	u8 *p = ((u8 *)fp) + reg_offsets[reg];
p                  54 arch/nios2/kernel/misaligned.c 	return *(u32 *)p;
p                  59 arch/nios2/kernel/misaligned.c 	u8 *p = ((u8 *)fp) + reg_offsets[reg];
p                  60 arch/nios2/kernel/misaligned.c 	*(u32 *)p = val;
p                 104 arch/nios2/kernel/process.c 		unsigned long usp, unsigned long arg, struct task_struct *p)
p                 106 arch/nios2/kernel/process.c 	struct pt_regs *childregs = task_pt_regs(p);
p                 112 arch/nios2/kernel/process.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 122 arch/nios2/kernel/process.c 		p->thread.ksp = (unsigned long) childstack;
p                 123 arch/nios2/kernel/process.c 		p->thread.kregs = childregs;
p                 135 arch/nios2/kernel/process.c 	p->thread.kregs = childregs;
p                 136 arch/nios2/kernel/process.c 	p->thread.ksp = (unsigned long) childstack;
p                 220 arch/nios2/kernel/process.c unsigned long get_wchan(struct task_struct *p)
p                 226 arch/nios2/kernel/process.c 	if (!p || p == current || p->state == TASK_RUNNING)
p                 229 arch/nios2/kernel/process.c 	stack_page = (unsigned long)p;
p                 230 arch/nios2/kernel/process.c 	fp = ((struct switch_stack *)p->thread.ksp)->fp;	/* ;dgt2 */
p                 177 arch/nios2/mm/ioremap.c 	struct vm_struct *p;
p                 182 arch/nios2/mm/ioremap.c 	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
p                 183 arch/nios2/mm/ioremap.c 	if (!p)
p                 185 arch/nios2/mm/ioremap.c 	kfree(p);
p                  39 arch/nios2/mm/pgtable.c 	unsigned long *p = (unsigned long *) pgd;
p                  43 arch/nios2/mm/pgtable.c 		p[i + 0] = (unsigned long) invalid_pte_table;
p                  44 arch/nios2/mm/pgtable.c 		p[i + 1] = (unsigned long) invalid_pte_table;
p                  45 arch/nios2/mm/pgtable.c 		p[i + 2] = (unsigned long) invalid_pte_table;
p                  46 arch/nios2/mm/pgtable.c 		p[i + 3] = (unsigned long) invalid_pte_table;
p                  47 arch/nios2/mm/pgtable.c 		p[i + 4] = (unsigned long) invalid_pte_table;
p                  48 arch/nios2/mm/pgtable.c 		p[i + 5] = (unsigned long) invalid_pte_table;
p                  49 arch/nios2/mm/pgtable.c 		p[i + 6] = (unsigned long) invalid_pte_table;
p                  50 arch/nios2/mm/pgtable.c 		p[i + 7] = (unsigned long) invalid_pte_table;
p                  15 arch/openrisc/include/asm/bitops/atomic.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  25 arch/openrisc/include/asm/bitops/atomic.h 		: "r"(p), "r"(mask)
p                  32 arch/openrisc/include/asm/bitops/atomic.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  42 arch/openrisc/include/asm/bitops/atomic.h 		: "r"(p), "r"(~mask)
p                  49 arch/openrisc/include/asm/bitops/atomic.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  59 arch/openrisc/include/asm/bitops/atomic.h 		: "r"(p), "r"(mask)
p                  66 arch/openrisc/include/asm/bitops/atomic.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  77 arch/openrisc/include/asm/bitops/atomic.h 		: "r"(p), "r"(mask)
p                  86 arch/openrisc/include/asm/bitops/atomic.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  97 arch/openrisc/include/asm/bitops/atomic.h 		: "r"(p), "r"(~mask)
p                 106 arch/openrisc/include/asm/bitops/atomic.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                 117 arch/openrisc/include/asm/bitops/atomic.h 		: "r"(p), "r"(mask)
p                  63 arch/openrisc/include/asm/cmpxchg.h 	volatile u32 *p = ptr - off;
p                  73 arch/openrisc/include/asm/cmpxchg.h 	load32 = READ_ONCE(*p);
p                  84 arch/openrisc/include/asm/cmpxchg.h 		load32 = cmpxchg_u32(p, old32, new32);
p                  95 arch/openrisc/include/asm/cmpxchg.h 	volatile u32 *p = ptr - off;
p                 106 arch/openrisc/include/asm/cmpxchg.h 		oldv = READ_ONCE(*p);
p                 109 arch/openrisc/include/asm/cmpxchg.h 	} while (cmpxchg_u32(p, oldv, newv) != oldv);
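Note: the arch/openrisc/include/asm/cmpxchg.h lines above (and the powerpc cmpxchg.h lines later in this listing) emulate 1- and 2-byte exchange/compare-exchange on top of a 32-bit compare-and-swap of the word that contains the narrow value. A minimal user-space sketch of that technique follows; the helper name cmpxchg_u8, the test buffer and the use of C11 <stdatomic.h> are choices made for this sketch, not taken from the kernel sources, and the shift computation assumes a little-endian layout.

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Emulate an 8-bit compare-and-swap with a 32-bit CAS on the aligned
	 * word containing the byte.  Returns the byte's previous value. */
	static uint8_t cmpxchg_u8(uint8_t *ptr, uint8_t old, uint8_t new)
	{
		uintptr_t addr = (uintptr_t)ptr;
		_Atomic uint32_t *word = (_Atomic uint32_t *)(addr & ~(uintptr_t)3);
		unsigned int shift = (addr & 3) * 8;	/* little-endian byte position */
		uint32_t mask = 0xffu << shift;
		uint32_t cur = atomic_load(word);

		for (;;) {
			uint8_t cur_byte = (cur & mask) >> shift;
			if (cur_byte != old)
				return cur_byte;	/* comparison failed, nothing stored */
			uint32_t next = (cur & ~mask) | ((uint32_t)new << shift);
			/* on failure 'cur' is refreshed with the current word value */
			if (atomic_compare_exchange_weak(word, &cur, next))
				return old;
		}
	}

	int main(void)
	{
		_Alignas(4) uint8_t buf[4] = { 0x11, 0x22, 0x33, 0x44 };
		uint8_t prev = cmpxchg_u8(&buf[1], 0x22, 0xaa);

		printf("prev=%#x buf[1]=%#x\n", prev, buf[1]);	/* prev=0x22 buf[1]=0xaa */
		return 0;
	}

The kernel versions derive the shift from the CPU's byte order rather than assuming little-endian, and sit on top of the architecture's own 32-bit primitive (the cmpxchg_u32() loop is visible in the openrisc lines above); only the word-containment idea is shown here.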
p                  76 arch/openrisc/include/asm/processor.h unsigned long get_wchan(struct task_struct *p);
p                 152 arch/openrisc/kernel/process.c 	    unsigned long arg, struct task_struct *p)
p                 156 arch/openrisc/kernel/process.c 	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
p                 171 arch/openrisc/kernel/process.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 205 arch/openrisc/kernel/process.c 	task_thread_info(p)->ksp = (unsigned long)kregs;
p                 275 arch/openrisc/kernel/process.c unsigned long get_wchan(struct task_struct *p)
p                  71 arch/openrisc/mm/init.c 	unsigned long v, p, e;
p                  85 arch/openrisc/mm/init.c 		p = (u32) region->base & PAGE_MASK;
p                  86 arch/openrisc/mm/init.c 		e = p + (u32) region->size;
p                  88 arch/openrisc/mm/init.c 		v = (u32) __va(p);
p                  91 arch/openrisc/mm/init.c 		while (p < e) {
p                 110 arch/openrisc/mm/init.c 			for (j = 0; p < e && j < PTRS_PER_PTE;
p                 111 arch/openrisc/mm/init.c 			     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
p                 118 arch/openrisc/mm/init.c 				set_pte(pte, mk_pte_phys(p, prot));
p                  39 arch/openrisc/mm/ioremap.c 	phys_addr_t p;
p                  53 arch/openrisc/mm/ioremap.c 	p = addr & PAGE_MASK;
p                  54 arch/openrisc/mm/ioremap.c 	size = PAGE_ALIGN(last_addr + 1) - p;
p                  68 arch/openrisc/mm/ioremap.c 	if (ioremap_page_range(v, v + size, p,
p                 327 arch/parisc/include/asm/io.h #define xlate_dev_mem_ptr(p)	__va(p)
p                 332 arch/parisc/include/asm/io.h #define xlate_dev_kmem_ptr(p)	p
p                  27 arch/parisc/include/asm/kprobes.h void arch_remove_kprobe(struct kprobe *p);
p                  29 arch/parisc/include/asm/kprobes.h #define flush_insn_slot(p) \
p                  30 arch/parisc/include/asm/kprobes.h 	flush_icache_range((unsigned long)&(p)->ainsn.insn[0], \
p                  31 arch/parisc/include/asm/kprobes.h 			   (unsigned long)&(p)->ainsn.insn[0] + \
p                  53 arch/parisc/include/asm/parisc-device.h parisc_set_drvdata(struct parisc_device *d, void *p)
p                  55 arch/parisc/include/asm/parisc-device.h 	dev_set_drvdata(&d->dev, p);
p                 285 arch/parisc/include/asm/processor.h extern unsigned long get_wchan(struct task_struct *p);
p                  91 arch/parisc/include/asm/psw.h 	unsigned int p:1;
p                 123 arch/parisc/include/asm/ropes.h #define IKE_IOC_OFFSET(p)	((p+2) * SBA_FUNC_SIZE)
p                 920 arch/parisc/kernel/drivers.c 	unsigned long *p;
p                 933 arch/parisc/kernel/drivers.c 		p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
p                 952 arch/parisc/kernel/drivers.c 	p = (unsigned long *) &cache_info;
p                 959 arch/parisc/kernel/drivers.c 			num?", ":"", *p++);
p                 146 arch/parisc/kernel/firmware.c 	unsigned int *p = (unsigned int *)addr;
p                 150 arch/parisc/kernel/firmware.c 			addr[i] = p[i];
p                 209 arch/parisc/kernel/ftrace.c 	struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip);
p                 211 arch/parisc/kernel/ftrace.c 	if (unlikely(!p) || kprobe_disabled(p))
p                 215 arch/parisc/kernel/ftrace.c 		kprobes_inc_nmissed_count(p);
p                 219 arch/parisc/kernel/ftrace.c 	__this_cpu_write(current_kprobe, p);
p                 227 arch/parisc/kernel/ftrace.c 	if (!p->pre_handler || !p->pre_handler(p, regs)) {
p                 231 arch/parisc/kernel/ftrace.c 		if (unlikely(p->post_handler)) {
p                 233 arch/parisc/kernel/ftrace.c 			p->post_handler(p, regs, 0);
p                 240 arch/parisc/kernel/ftrace.c int arch_prepare_kprobe_ftrace(struct kprobe *p)
p                 242 arch/parisc/kernel/ftrace.c 	p->ainsn.insn = NULL;
p                 151 arch/parisc/kernel/irq.c int arch_show_interrupts(struct seq_file *p, int prec)
p                 156 arch/parisc/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "STK");
p                 158 arch/parisc/kernel/irq.c 		seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
p                 159 arch/parisc/kernel/irq.c 	seq_puts(p, "  Kernel stack usage\n");
p                 161 arch/parisc/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "IST");
p                 163 arch/parisc/kernel/irq.c 		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
p                 164 arch/parisc/kernel/irq.c 	seq_puts(p, "  Interrupt stack usage\n");
p                 169 arch/parisc/kernel/irq.c 		seq_printf(p, "%*s: ", prec, "RES");
p                 171 arch/parisc/kernel/irq.c 			seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
p                 172 arch/parisc/kernel/irq.c 		seq_puts(p, "  Rescheduling interrupts\n");
p                 173 arch/parisc/kernel/irq.c 		seq_printf(p, "%*s: ", prec, "CAL");
p                 175 arch/parisc/kernel/irq.c 			seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
p                 176 arch/parisc/kernel/irq.c 		seq_puts(p, "  Function call interrupts\n");
p                 179 arch/parisc/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "UAH");
p                 181 arch/parisc/kernel/irq.c 		seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
p                 182 arch/parisc/kernel/irq.c 	seq_puts(p, "  Unaligned access handler traps\n");
p                 183 arch/parisc/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "FPA");
p                 185 arch/parisc/kernel/irq.c 		seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
p                 186 arch/parisc/kernel/irq.c 	seq_puts(p, "  Floating point assist traps\n");
p                 187 arch/parisc/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "TLB");
p                 189 arch/parisc/kernel/irq.c 		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
p                 190 arch/parisc/kernel/irq.c 	seq_puts(p, "  TLB shootdowns\n");
p                 194 arch/parisc/kernel/irq.c int show_interrupts(struct seq_file *p, void *v)
p                 200 arch/parisc/kernel/irq.c 		seq_puts(p, "    ");
p                 202 arch/parisc/kernel/irq.c 			seq_printf(p, "       CPU%d", j);
p                 205 arch/parisc/kernel/irq.c 		seq_printf(p, " [min/avg/max] (CPU cycle counts)");
p                 207 arch/parisc/kernel/irq.c 		seq_putc(p, '\n');
p                 218 arch/parisc/kernel/irq.c 		seq_printf(p, "%3d: ", i);
p                 221 arch/parisc/kernel/irq.c 			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
p                 223 arch/parisc/kernel/irq.c 		seq_printf(p, "%10u ", kstat_irqs(i));
p                 226 arch/parisc/kernel/irq.c 		seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
p                 228 arch/parisc/kernel/irq.c 		seq_printf(p, "  %s", action->name);
p                 231 arch/parisc/kernel/irq.c 			seq_printf(p, ", %s", action->name);
p                 251 arch/parisc/kernel/irq.c 			seq_printf(p, " %s[%d/%d/%d]", action->name,
p                 256 arch/parisc/kernel/irq.c 		seq_putc(p, '\n');
p                 262 arch/parisc/kernel/irq.c 		arch_show_interrupts(p, 3);
p                 178 arch/parisc/kernel/kgdb.c 	char *p = inbuf + 1;
p                 187 arch/parisc/kernel/kgdb.c 		if (kgdb_hex2long(&p, &addr))
p                 195 arch/parisc/kernel/kgdb.c 		if (kgdb_hex2long(&p, &addr)) {
p                  19 arch/parisc/kernel/kprobes.c int __kprobes arch_prepare_kprobe(struct kprobe *p)
p                  21 arch/parisc/kernel/kprobes.c 	if ((unsigned long)p->addr & 3UL)
p                  24 arch/parisc/kernel/kprobes.c 	p->ainsn.insn = get_insn_slot();
p                  25 arch/parisc/kernel/kprobes.c 	if (!p->ainsn.insn)
p                  28 arch/parisc/kernel/kprobes.c 	memcpy(p->ainsn.insn, p->addr,
p                  30 arch/parisc/kernel/kprobes.c 	p->opcode = *p->addr;
p                  31 arch/parisc/kernel/kprobes.c 	flush_insn_slot(p);
p                  35 arch/parisc/kernel/kprobes.c void __kprobes arch_remove_kprobe(struct kprobe *p)
p                  37 arch/parisc/kernel/kprobes.c 	if (!p->ainsn.insn)
p                  40 arch/parisc/kernel/kprobes.c 	free_insn_slot(p->ainsn.insn, 0);
p                  41 arch/parisc/kernel/kprobes.c 	p->ainsn.insn = NULL;
p                  44 arch/parisc/kernel/kprobes.c void __kprobes arch_arm_kprobe(struct kprobe *p)
p                  46 arch/parisc/kernel/kprobes.c 	patch_text(p->addr, PARISC_KPROBES_BREAK_INSN);
p                  49 arch/parisc/kernel/kprobes.c void __kprobes arch_disarm_kprobe(struct kprobe *p)
p                  51 arch/parisc/kernel/kprobes.c 	patch_text(p->addr, p->opcode);
p                  66 arch/parisc/kernel/kprobes.c static inline void __kprobes set_current_kprobe(struct kprobe *p)
p                  68 arch/parisc/kernel/kprobes.c 	__this_cpu_write(current_kprobe, p);
p                  71 arch/parisc/kernel/kprobes.c static void __kprobes setup_singlestep(struct kprobe *p,
p                  76 arch/parisc/kernel/kprobes.c 	regs->iaoq[0] = (unsigned long)p->ainsn.insn;
p                  83 arch/parisc/kernel/kprobes.c 	struct kprobe *p;
p                  89 arch/parisc/kernel/kprobes.c 	p = get_kprobe((unsigned long *)regs->iaoq[0]);
p                  91 arch/parisc/kernel/kprobes.c 	if (!p) {
p                 105 arch/parisc/kernel/kprobes.c 		set_current_kprobe(p);
p                 106 arch/parisc/kernel/kprobes.c 		kprobes_inc_nmissed_count(p);
p                 107 arch/parisc/kernel/kprobes.c 		setup_singlestep(p, kcb, regs);
p                 112 arch/parisc/kernel/kprobes.c 	set_current_kprobe(p);
p                 121 arch/parisc/kernel/kprobes.c 	if (!p->pre_handler || !p->pre_handler(p, regs)) {
p                 122 arch/parisc/kernel/kprobes.c 		setup_singlestep(p, kcb, regs);
p                 134 arch/parisc/kernel/kprobes.c 	struct kprobe *p = kprobe_running();
p                 136 arch/parisc/kernel/kprobes.c 	if (!p)
p                 139 arch/parisc/kernel/kprobes.c 	if (regs->iaoq[0] != (unsigned long)p->ainsn.insn+4)
p                 154 arch/parisc/kernel/kprobes.c 	if (p->post_handler)
p                 155 arch/parisc/kernel/kprobes.c 		p->post_handler(p, regs, 0);
p                 184 arch/parisc/kernel/kprobes.c static int __kprobes trampoline_probe_handler(struct kprobe *p,
p                 191 arch/parisc/kernel/kprobes.c static int __kprobes trampoline_probe_handler(struct kprobe *p,
p                 280 arch/parisc/kernel/kprobes.c int __kprobes arch_trampoline_kprobe(struct kprobe *p)
p                 282 arch/parisc/kernel/kprobes.c 	return p->addr == trampoline_p.addr;
p                  66 arch/parisc/kernel/patch.c 	u32 *p, *fixmap;
p                  73 arch/parisc/kernel/patch.c 	p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped);
p                  76 arch/parisc/kernel/patch.c 		*p++ = *insn++;
p                  85 arch/parisc/kernel/patch.c 						(p-fixmap) * sizeof(*p));
p                  88 arch/parisc/kernel/patch.c 			p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags,
p                  93 arch/parisc/kernel/patch.c 	flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p));
p                 212 arch/parisc/kernel/process.c 	    unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
p                 214 arch/parisc/kernel/process.c 	struct pt_regs *cregs = &(p->thread.regs);
p                 215 arch/parisc/kernel/process.c 	void *stack = task_stack_page(p);
p                 223 arch/parisc/kernel/process.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 266 arch/parisc/kernel/process.c get_wchan(struct task_struct *p)
p                 272 arch/parisc/kernel/process.c 	if (!p || p == current || p->state == TASK_RUNNING)
p                 279 arch/parisc/kernel/process.c 	unwind_frame_init_from_blocked_task(&info, p);
p                 294 arch/parisc/kernel/process.c 	void *p;
p                 296 arch/parisc/kernel/process.c 	if (!probe_kernel_address(&desc->addr, p))
p                 297 arch/parisc/kernel/process.c 		ptr = p;
p                  83 arch/parisc/kernel/processor.c 	struct cpuinfo_parisc *p;
p                 159 arch/parisc/kernel/processor.c 	p = &per_cpu(cpu_data, cpuid);
p                 164 arch/parisc/kernel/processor.c 		memset(p, 0, sizeof(struct cpuinfo_parisc));
p                 166 arch/parisc/kernel/processor.c 	p->loops_per_jiffy = loops_per_jiffy;
p                 167 arch/parisc/kernel/processor.c 	p->dev = dev;		/* Save IODC data in case we need it */
p                 168 arch/parisc/kernel/processor.c 	p->hpa = dev->hpa.start;	/* save CPU hpa */
p                 169 arch/parisc/kernel/processor.c 	p->cpuid = cpuid;	/* save CPU id */
p                 170 arch/parisc/kernel/processor.c 	p->txn_addr = txn_addr;	/* save CPU IRQ address */
p                 171 arch/parisc/kernel/processor.c 	p->cpu_num = cpu_info.cpu_num;
p                 172 arch/parisc/kernel/processor.c 	p->cpu_loc = cpu_info.cpu_loc;
p                 245 arch/parisc/kernel/processor.c 			p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
p                 122 arch/parisc/kernel/smp.c 	struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
p                 129 arch/parisc/kernel/smp.c 		ops = p->pending_ipi;
p                 130 arch/parisc/kernel/smp.c 		p->pending_ipi = 0;
p                 190 arch/parisc/kernel/smp.c 	struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
p                 195 arch/parisc/kernel/smp.c 	p->pending_ipi |= 1 << op;
p                 196 arch/parisc/kernel/smp.c 	gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
p                 322 arch/parisc/kernel/smp.c 	const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
p                 339 arch/parisc/kernel/smp.c 	printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);
p                 350 arch/parisc/kernel/smp.c 	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
p                  66 arch/parisc/kernel/topology.c 	struct cpuinfo_parisc *p;
p                  78 arch/parisc/kernel/topology.c 	p = &per_cpu(cpu_data, cpuid);
p                  85 arch/parisc/kernel/topology.c 		if (cpuinfo->cpu_loc == p->cpu_loc) {
p                  87 arch/parisc/kernel/topology.c 			if (p->cpu_loc) {
p                 129 arch/parisc/lib/io.c 	unsigned char *p;
p                 131 arch/parisc/lib/io.c 	p = (unsigned char *)dst;
p                 133 arch/parisc/lib/io.c 	while (((unsigned long)p) & 0x3) {
p                 137 arch/parisc/lib/io.c 		*p = inb(port);
p                 138 arch/parisc/lib/io.c 		p++;
p                 148 arch/parisc/lib/io.c 		*(unsigned int *) p = w;
p                 149 arch/parisc/lib/io.c 		p += 4;
p                 154 arch/parisc/lib/io.c 		*p = inb(port);
p                 155 arch/parisc/lib/io.c 		p++;
p                 170 arch/parisc/lib/io.c 	unsigned char *p;
p                 172 arch/parisc/lib/io.c 	p = (unsigned char *)dst;
p                 177 arch/parisc/lib/io.c 	switch (((unsigned long)p) & 0x3)
p                 185 arch/parisc/lib/io.c 			*(unsigned int *)p = l;
p                 186 arch/parisc/lib/io.c 			p += 4;
p                 189 arch/parisc/lib/io.c 			*(unsigned short *)p = cpu_to_le16(inw(port));
p                 194 arch/parisc/lib/io.c 		*(unsigned short *)p = cpu_to_le16(inw(port));
p                 195 arch/parisc/lib/io.c 		p += 2;
p                 202 arch/parisc/lib/io.c 			*(unsigned int *)p = l;
p                 203 arch/parisc/lib/io.c 			p += 4;
p                 206 arch/parisc/lib/io.c 			*(unsigned short *)p = cpu_to_le16(inw(port));
p                 217 arch/parisc/lib/io.c 		*p = l >> 8;
p                 218 arch/parisc/lib/io.c 		p++;
p                 222 arch/parisc/lib/io.c 			*(unsigned short *)p = (l & 0xff) << 8 | (l2 >> 8);
p                 223 arch/parisc/lib/io.c 			p += 2;
p                 226 arch/parisc/lib/io.c 		*p = l & 0xff;
p                 242 arch/parisc/lib/io.c 	unsigned char *p;
p                 244 arch/parisc/lib/io.c 	p = (unsigned char *)dst;
p                 254 arch/parisc/lib/io.c 			*(unsigned int *)p = cpu_to_le32(inl(port));
p                 255 arch/parisc/lib/io.c 			p += 4;
p                 263 arch/parisc/lib/io.c 		*(unsigned short *)p = l >> 16;
p                 264 arch/parisc/lib/io.c 		p += 2;
p                 269 arch/parisc/lib/io.c 			*(unsigned int *)p = (l & 0xffff) << 16 | (l2 >> 16);
p                 270 arch/parisc/lib/io.c 			p += 4;
p                 273 arch/parisc/lib/io.c 		*(unsigned short *)p = l & 0xffff;
p                 279 arch/parisc/lib/io.c 		*(unsigned char *)p = l >> 24;
p                 280 arch/parisc/lib/io.c 		p++;
p                 281 arch/parisc/lib/io.c 		*(unsigned short *)p = (l >> 8) & 0xffff;
p                 282 arch/parisc/lib/io.c 		p += 2;
p                 286 arch/parisc/lib/io.c 			*(unsigned int *)p = (l & 0xff) << 24 | (l2 >> 8);
p                 287 arch/parisc/lib/io.c 			p += 4;
p                 290 arch/parisc/lib/io.c 		*p = l & 0xff;
p                 296 arch/parisc/lib/io.c 		*p = l >> 24;
p                 297 arch/parisc/lib/io.c 		p++;
p                 301 arch/parisc/lib/io.c 			*(unsigned int *)p = (l & 0xffffff) << 8 | l2 >> 24;
p                 302 arch/parisc/lib/io.c 			p += 4;
p                 305 arch/parisc/lib/io.c 		*(unsigned short *)p = (l >> 8) & 0xffff;
p                 306 arch/parisc/lib/io.c 		p += 2;
p                 307 arch/parisc/lib/io.c 		*p = l & 0xff;
p                 321 arch/parisc/lib/io.c 	const unsigned char *p;
p                 323 arch/parisc/lib/io.c 	p = (const unsigned char *)src;
p                 326 arch/parisc/lib/io.c 		outb(*p, port);
p                 327 arch/parisc/lib/io.c 		p++;
p                 340 arch/parisc/lib/io.c 	const unsigned char *p;
p                 342 arch/parisc/lib/io.c 	p = (const unsigned char *)src;
p                 347 arch/parisc/lib/io.c 	switch (((unsigned long)p) & 0x3)
p                 352 arch/parisc/lib/io.c 			l = *(unsigned int *)p;
p                 353 arch/parisc/lib/io.c 			p += 4;
p                 358 arch/parisc/lib/io.c 			outw(le16_to_cpu(*(unsigned short*)p), port);
p                 364 arch/parisc/lib/io.c 		outw(le16_to_cpu(*(unsigned short*)p), port);
p                 365 arch/parisc/lib/io.c 		p += 2;
p                 370 arch/parisc/lib/io.c 			l = *(unsigned int *)p;
p                 371 arch/parisc/lib/io.c 			p += 4;
p                 376 arch/parisc/lib/io.c 			outw(le16_to_cpu(*(unsigned short *)p), port);
p                 384 arch/parisc/lib/io.c 		l  = *p << 8;
p                 385 arch/parisc/lib/io.c 		p++;
p                 390 arch/parisc/lib/io.c 			l2 = *(unsigned short *)p;
p                 391 arch/parisc/lib/io.c 			p += 2;
p                 395 arch/parisc/lib/io.c 		l2 = *(unsigned char *)p;
p                 412 arch/parisc/lib/io.c 	const unsigned char *p;
p                 414 arch/parisc/lib/io.c 	p = (const unsigned char *)src;
p                 419 arch/parisc/lib/io.c 	switch (((unsigned long)p) & 0x3)
p                 424 arch/parisc/lib/io.c 			outl(le32_to_cpu(*(unsigned int *)p), port);
p                 425 arch/parisc/lib/io.c 			p += 4;
p                 432 arch/parisc/lib/io.c 		l = *(unsigned short *)p;
p                 433 arch/parisc/lib/io.c 		p += 2;
p                 437 arch/parisc/lib/io.c 			l2 = *(unsigned int *)p;
p                 438 arch/parisc/lib/io.c 			p += 4;
p                 442 arch/parisc/lib/io.c 		l2 = *(unsigned short *)p;
p                 448 arch/parisc/lib/io.c 		l = *p << 24;
p                 449 arch/parisc/lib/io.c 		p++;
p                 450 arch/parisc/lib/io.c 		l |= *(unsigned short *)p << 8;
p                 451 arch/parisc/lib/io.c 		p += 2;
p                 455 arch/parisc/lib/io.c 			l2 = *(unsigned int *)p;
p                 456 arch/parisc/lib/io.c 			p += 4;
p                 460 arch/parisc/lib/io.c 		l2 = *p;
p                 466 arch/parisc/lib/io.c 		l = *p << 24;
p                 467 arch/parisc/lib/io.c 		p++;
p                 471 arch/parisc/lib/io.c 			l2 = *(unsigned int *)p;
p                 472 arch/parisc/lib/io.c 			p += 4;
p                 476 arch/parisc/lib/io.c 		l2 = *(unsigned short *)p << 16;
p                 477 arch/parisc/lib/io.c 		p += 2;
p                 478 arch/parisc/lib/io.c 		l2 |= *p;
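Note: the long arch/parisc/lib/io.c run above is the port-I/O string helpers (insb/insw/insl and their outs* counterparts), which always access the port at its natural width but widen the memory side: the buffer is consumed byte- or halfword-wise until it is 32-bit aligned, the bulk is moved a word at a time, and the remainder is finished off narrow. A small, self-contained sketch of the insb() pattern follows; read_port_byte() is a stand-in for inb(port) invented so the sketch can run anywhere, and insb_sketch is not the kernel function.

	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>
	#include <stdio.h>

	/* Stand-in for a byte-wide port read; just produces a counter here. */
	static uint8_t read_port_byte(void)
	{
		static uint8_t next;
		return next++;
	}

	/* Fill 'dst' with 'count' bytes from the port: byte stores until the
	 * destination is 32-bit aligned, then four port reads per word store,
	 * then the trailing bytes.  The port itself is always read byte-wise. */
	static void insb_sketch(uint8_t *dst, size_t count)
	{
		while (count && ((uintptr_t)dst & 3)) {
			*dst++ = read_port_byte();
			count--;
		}
		while (count >= 4) {
			uint8_t w[4];

			w[0] = read_port_byte();
			w[1] = read_port_byte();
			w[2] = read_port_byte();
			w[3] = read_port_byte();
			/* copies the four assembled bytes; compilers typically
			 * emit a single aligned word store for this */
			memcpy(dst, w, 4);
			dst += 4;
			count -= 4;
		}
		while (count--)
			*dst++ = read_port_byte();
	}

	int main(void)
	{
		uint8_t buf[11];
		int i;

		insb_sketch(buf + 1, 10);	/* deliberately misaligned destination */
		for (i = 0; i < 10; i++)
			printf("%02x ", buf[1 + i]);	/* 00 01 ... 09, in port order */
		printf("\n");
		return 0;
	}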
p                 308 arch/powerpc/boot/4xx.c 	u32 *p = ranges;
p                 316 arch/powerpc/boot/4xx.c 			*p++ = i;
p                 317 arch/powerpc/boot/4xx.c 			*p++ = 0;
p                 318 arch/powerpc/boot/4xx.c 			*p++ = bxcr & EBC_BXCR_BAS;
p                 319 arch/powerpc/boot/4xx.c 			*p++ = EBC_BXCR_BANK_SIZE(bxcr);
p                 327 arch/powerpc/boot/4xx.c 	setprop(devp, "ranges", ranges, (p - ranges) * sizeof(u32));
p                  70 arch/powerpc/boot/libfdt-wrapper.c 	const void *p;
p                  73 arch/powerpc/boot/libfdt-wrapper.c 	p = fdt_getprop(fdt, devp_offset(devp), name, &len);
p                  74 arch/powerpc/boot/libfdt-wrapper.c 	if (!p)
p                  76 arch/powerpc/boot/libfdt-wrapper.c 	memcpy(buf, p, min(len, buflen));
p                 179 arch/powerpc/boot/oflib.c 	void *p;
p                 189 arch/powerpc/boot/oflib.c 	p = malloc(size);
p                 190 arch/powerpc/boot/oflib.c 	if (!p)
p                 193 arch/powerpc/boot/oflib.c 	return p;
p                  40 arch/powerpc/boot/simple_alloc.c 	struct alloc_info *p = alloc_tbl;
p                  47 arch/powerpc/boot/simple_alloc.c 	for (i=0; i<tbl_entries; i++, p++)
p                  48 arch/powerpc/boot/simple_alloc.c 		if (!(p->flags & ENTRY_BEEN_USED)) { /* never been used */
p                  50 arch/powerpc/boot/simple_alloc.c 				p->base = next_base;
p                  51 arch/powerpc/boot/simple_alloc.c 				p->size = size;
p                  52 arch/powerpc/boot/simple_alloc.c 				p->flags = ENTRY_BEEN_USED | ENTRY_IN_USE;
p                  55 arch/powerpc/boot/simple_alloc.c 				return (void *)p->base;
p                  60 arch/powerpc/boot/simple_alloc.c 		else if (!(p->flags & ENTRY_IN_USE) && (size <= p->size)) {
p                  61 arch/powerpc/boot/simple_alloc.c 			p->flags |= ENTRY_IN_USE;
p                  62 arch/powerpc/boot/simple_alloc.c 			return (void *)p->base;
p                  71 arch/powerpc/boot/simple_alloc.c 	struct alloc_info *p = alloc_tbl;
p                  73 arch/powerpc/boot/simple_alloc.c 	for (i=0; i<tbl_entries; i++,p++) {
p                  74 arch/powerpc/boot/simple_alloc.c 		if (!(p->flags & ENTRY_BEEN_USED))
p                  76 arch/powerpc/boot/simple_alloc.c 		if ((p->flags & ENTRY_IN_USE) &&
p                  77 arch/powerpc/boot/simple_alloc.c 		    (p->base == (unsigned long)ptr))
p                  78 arch/powerpc/boot/simple_alloc.c 			return p;
p                  85 arch/powerpc/boot/simple_alloc.c 	struct alloc_info *p = simple_find_entry(ptr);
p                  87 arch/powerpc/boot/simple_alloc.c 	if (p != NULL)
p                  88 arch/powerpc/boot/simple_alloc.c 		p->flags &= ~ENTRY_IN_USE;
p                  99 arch/powerpc/boot/simple_alloc.c 	struct alloc_info *p;
p                 110 arch/powerpc/boot/simple_alloc.c 	p = simple_find_entry(ptr);
p                 111 arch/powerpc/boot/simple_alloc.c 	if (p == NULL) /* ptr not from simple_malloc/simple_realloc */
p                 113 arch/powerpc/boot/simple_alloc.c 	if (size <= p->size) /* fits in current block */
p                 117 arch/powerpc/boot/simple_alloc.c 	memcpy(new, ptr, p->size);
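Note: the arch/powerpc/boot/simple_alloc.c lines above are a tiny boot-wrapper allocator: a fixed table of (base, size, flags) records, fresh blocks carved from a bump pointer, and freed blocks reused when a later request fits in them. The sketch below reproduces that structure in ordinary user-space C; the heap array, HEAP_SIZE and TBL_ENTRIES are invented for the sketch (the real code is given its region and table size at setup time), while the ENTRY_BEEN_USED/ENTRY_IN_USE flags mirror the listing.

	#include <stddef.h>
	#include <stdio.h>

	#define HEAP_SIZE	4096
	#define TBL_ENTRIES	32

	#define ENTRY_BEEN_USED	0x1
	#define ENTRY_IN_USE	0x2

	struct alloc_info {
		void		*base;
		size_t		size;
		unsigned int	flags;
	};

	static unsigned char heap[HEAP_SIZE];
	static unsigned char *next_base = heap;
	static struct alloc_info alloc_tbl[TBL_ENTRIES];

	static void *simple_malloc(size_t size)
	{
		struct alloc_info *p = alloc_tbl;
		int i;

		if (!size)
			return NULL;

		for (i = 0; i < TBL_ENTRIES; i++, p++) {
			if (!(p->flags & ENTRY_BEEN_USED)) {
				/* first never-used entry: carve a fresh block */
				if (next_base + size > heap + HEAP_SIZE)
					return NULL;
				p->base = next_base;
				p->size = size;
				p->flags = ENTRY_BEEN_USED | ENTRY_IN_USE;
				next_base += size;
				return p->base;
			}
			if (!(p->flags & ENTRY_IN_USE) && size <= p->size) {
				/* reuse a freed block that is big enough */
				p->flags |= ENTRY_IN_USE;
				return p->base;
			}
		}
		return NULL;	/* table full */
	}

	static void simple_free(void *ptr)
	{
		struct alloc_info *p = alloc_tbl;
		int i;

		for (i = 0; i < TBL_ENTRIES; i++, p++) {
			if ((p->flags & ENTRY_IN_USE) && p->base == ptr) {
				/* block stays reserved for reuse, only marked free */
				p->flags &= ~ENTRY_IN_USE;
				return;
			}
		}
	}

	int main(void)
	{
		void *a = simple_malloc(100);
		void *b;

		simple_free(a);
		b = simple_malloc(64);	/* fits in a's freed 100-byte slot */
		printf("a=%p b=%p same slot=%d\n", a, b, a == b);
		return 0;
	}

Freed blocks never shrink or merge, so memory is only reclaimed when a later allocation happens to fit in an old slot; that is acceptable for a boot wrapper that allocates a handful of buffers and then jumps to the kernel.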
p                  14 arch/powerpc/boot/xz_config.h static inline uint32_t swab32p(void *p)
p                  16 arch/powerpc/boot/xz_config.h 	uint32_t *q = p;
p                  22 arch/powerpc/boot/xz_config.h #define get_le32(p) (*((uint32_t *) (p)))
p                  24 arch/powerpc/boot/xz_config.h static inline u32 be32_to_cpup(const u32 *p)
p                  26 arch/powerpc/boot/xz_config.h 	return swab32p((u32 *)p);
p                  29 arch/powerpc/boot/xz_config.h #define get_le32(p) swab32p(p)
p                  31 arch/powerpc/boot/xz_config.h static inline u32 be32_to_cpup(const u32 *p)
p                  33 arch/powerpc/boot/xz_config.h 	return *p;
p                  37 arch/powerpc/boot/xz_config.h static inline uint32_t get_unaligned_be32(const void *p)
p                  39 arch/powerpc/boot/xz_config.h 	return be32_to_cpup(p);
p                  42 arch/powerpc/boot/xz_config.h static inline void put_unaligned_be32(u32 val, void *p)
p                  44 arch/powerpc/boot/xz_config.h 	*((u32 *)p) = cpu_to_be32(val);
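Note: the arch/powerpc/boot/xz_config.h lines above give the XZ decompressor its byte-order helpers, selecting between a byte-swapping and a pass-through be32_to_cpup depending on the wrapper's endianness and reading 32-bit values through plain pointer casts. A portable formulation of the same two unaligned helpers is sketched below using explicit shifts, which works regardless of host byte order and alignment; the function bodies here are the generic textbook versions, not the wrapper's cast-based ones.

	#include <stdint.h>
	#include <stdio.h>

	/* Read/write a 32-bit big-endian value at an address with no alignment
	 * guarantee; shift-based, so independent of the host's byte order. */
	static uint32_t get_unaligned_be32(const void *p)
	{
		const uint8_t *b = p;

		return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
		       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
	}

	static void put_unaligned_be32(uint32_t val, void *p)
	{
		uint8_t *b = p;

		b[0] = val >> 24;
		b[1] = val >> 16;
		b[2] = val >> 8;
		b[3] = val;
	}

	int main(void)
	{
		uint8_t buf[5] = { 0x00, 0x12, 0x34, 0x56, 0x78 };

		/* odd offset on purpose: still 0x12345678 on any host */
		printf("%#x\n", get_unaligned_be32(buf + 1));
		put_unaligned_be32(0xdeadbeef, buf + 1);
		printf("%#x\n", get_unaligned_be32(buf + 1));
		return 0;
	}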
p                  21 arch/powerpc/crypto/crc32c-vpmsum_glue.c u32 __crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len);
p                  23 arch/powerpc/crypto/crc32c-vpmsum_glue.c static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
p                  29 arch/powerpc/crypto/crc32c-vpmsum_glue.c 		return __crc32c_le(crc, p, len);
p                  31 arch/powerpc/crypto/crc32c-vpmsum_glue.c 	if ((unsigned long)p & VMX_ALIGN_MASK) {
p                  32 arch/powerpc/crypto/crc32c-vpmsum_glue.c 		prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK);
p                  33 arch/powerpc/crypto/crc32c-vpmsum_glue.c 		crc = __crc32c_le(crc, p, prealign);
p                  35 arch/powerpc/crypto/crc32c-vpmsum_glue.c 		p += prealign;
p                  42 arch/powerpc/crypto/crc32c-vpmsum_glue.c 		crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
p                  50 arch/powerpc/crypto/crc32c-vpmsum_glue.c 		p += len & ~VMX_ALIGN_MASK;
p                  51 arch/powerpc/crypto/crc32c-vpmsum_glue.c 		crc = __crc32c_le(crc, p, tail);
p                  25 arch/powerpc/crypto/crct10dif-vpmsum_glue.c u32 __crct10dif_vpmsum(u32 crc, unsigned char const *p, size_t len);
p                  27 arch/powerpc/crypto/crct10dif-vpmsum_glue.c static u16 crct10dif_vpmsum(u16 crci, unsigned char const *p, size_t len)
p                  34 arch/powerpc/crypto/crct10dif-vpmsum_glue.c 		return crc_t10dif_generic(crc, p, len);
p                  36 arch/powerpc/crypto/crct10dif-vpmsum_glue.c 	if ((unsigned long)p & VMX_ALIGN_MASK) {
p                  37 arch/powerpc/crypto/crct10dif-vpmsum_glue.c 		prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK);
p                  38 arch/powerpc/crypto/crct10dif-vpmsum_glue.c 		crc = crc_t10dif_generic(crc, p, prealign);
p                  40 arch/powerpc/crypto/crct10dif-vpmsum_glue.c 		p += prealign;
p                  48 arch/powerpc/crypto/crct10dif-vpmsum_glue.c 		crc = __crct10dif_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
p                  57 arch/powerpc/crypto/crct10dif-vpmsum_glue.c 		p += len & ~VMX_ALIGN_MASK;
p                  58 arch/powerpc/crypto/crct10dif-vpmsum_glue.c 		crc = crc_t10dif_generic(crc, p, tail);
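Note: the two crypto glue files above (crc32c-vpmsum_glue.c and crct10dif-vpmsum_glue.c) share one pattern: the VMX-accelerated routine only handles 16-byte-aligned data in 16-byte multiples, so the glue feeds any unaligned head and the short tail to the generic byte-wise routine and hands only the aligned middle to the fast path. The sketch below shows just that splitting logic; csum_generic/csum_fast and the trivial additive "checksum" are stand-ins invented for the sketch (the real code calls __crc32c_le/__crc32c_vpmsum and crc_t10dif_generic/__crct10dif_vpmsum), and VEC_ALIGN plays the role of VMX_ALIGN.

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define VEC_ALIGN	16
	#define VEC_ALIGN_MASK	(VEC_ALIGN - 1)

	/* byte-at-a-time fallback; the checksum math is deliberately trivial,
	 * the point of the sketch is the buffer splitting */
	static uint32_t csum_generic(uint32_t sum, const unsigned char *p, size_t len)
	{
		while (len--)
			sum += *p++;
		return sum;
	}

	/* stand-in for the accelerated routine: may assume 'p' is 16-byte
	 * aligned and 'len' is a multiple of 16 */
	static uint32_t csum_fast(uint32_t sum, const unsigned char *p, size_t len)
	{
		return csum_generic(sum, p, len);
	}

	static uint32_t csum(uint32_t sum, const unsigned char *p, size_t len)
	{
		size_t body, tail;

		if (len < VEC_ALIGN + VEC_ALIGN_MASK)	/* too short to bother */
			return csum_generic(sum, p, len);

		if ((uintptr_t)p & VEC_ALIGN_MASK) {
			size_t prealign = VEC_ALIGN - ((uintptr_t)p & VEC_ALIGN_MASK);

			sum = csum_generic(sum, p, prealign);	/* unaligned head */
			p += prealign;
			len -= prealign;
		}

		body = len & ~(size_t)VEC_ALIGN_MASK;
		tail = len & VEC_ALIGN_MASK;
		sum = csum_fast(sum, p, body);			/* aligned middle */
		if (tail)
			sum = csum_generic(sum, p + body, tail);	/* leftover bytes */
		return sum;
	}

	int main(void)
	{
		unsigned char buf[100];
		int i;

		for (i = 0; i < 100; i++)
			buf[i] = (unsigned char)i;
		/* same answer as the pure generic routine, whatever the alignment */
		printf("%u %u\n", csum(0, buf + 1, 97), csum_generic(0, buf + 1, 97));
		return 0;
	}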
p                  81 arch/powerpc/crypto/md5-glue.c 	u8 *p = (u8 *)src + offset;
p                  86 arch/powerpc/crypto/md5-glue.c 	*p++ = 0x80;
p                  89 arch/powerpc/crypto/md5-glue.c 		memset(p, 0x00, padlen + sizeof (u64));
p                  91 arch/powerpc/crypto/md5-glue.c 		p = (char *)sctx->block;
p                  95 arch/powerpc/crypto/md5-glue.c 	memset(p, 0, padlen);
p                 121 arch/powerpc/crypto/sha1-spe-glue.c 	char *p = (char *)sctx->buffer + offset;
p                 127 arch/powerpc/crypto/sha1-spe-glue.c 	*p++ = 0x80;
p                 132 arch/powerpc/crypto/sha1-spe-glue.c 		memset(p, 0x00, padlen + sizeof (u64));
p                 134 arch/powerpc/crypto/sha1-spe-glue.c 		p = (char *)sctx->buffer;
p                 138 arch/powerpc/crypto/sha1-spe-glue.c 	memset(p, 0, padlen);
p                 143 arch/powerpc/crypto/sha256-spe-glue.c 	char *p = (char *)sctx->buf + offset;
p                 149 arch/powerpc/crypto/sha256-spe-glue.c 	*p++ = 0x80;
p                 154 arch/powerpc/crypto/sha256-spe-glue.c 		memset(p, 0x00, padlen + sizeof (u64));
p                 156 arch/powerpc/crypto/sha256-spe-glue.c 		p = (char *)sctx->buf;
p                 160 arch/powerpc/crypto/sha256-spe-glue.c 	memset(p, 0, padlen);
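Note: the md5-glue.c, sha1-spe-glue.c and sha256-spe-glue.c lines above are all the same finalization step: append a 0x80 byte, pad with zeros until the message length is 56 mod 64, then append the original length in bits as a 64-bit field, processing an extra block when the padding does not fit in the current one. The helper below only computes that trailer for a given message length; md_padding and its big_endian_length parameter are constructs of this sketch (MD5 stores the length little-endian, the SHA family big-endian), not functions from the glue files.

	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>
	#include <stdio.h>

	/* Build the MD5/SHA-1/SHA-256 style trailer: 0x80, zero padding up to
	 * 56 mod 64, then the bit length as 8 bytes.  Returns the trailer size
	 * (9..72 bytes), which always rounds the message up to 64-byte blocks. */
	static size_t md_padding(uint64_t msg_bytes, unsigned char pad[72],
				 int big_endian_length)
	{
		unsigned int offset = msg_bytes & 0x3f;
		size_t padlen = (offset < 56) ? 56 - offset : 120 - offset;
		uint64_t bits = msg_bytes * 8;
		int i;

		memset(pad, 0, padlen);
		pad[0] = 0x80;
		for (i = 0; i < 8; i++) {
			int shift = big_endian_length ? 8 * (7 - i) : 8 * i;

			pad[padlen + i] = (unsigned char)(bits >> shift);
		}
		return padlen + 8;
	}

	int main(void)
	{
		unsigned char pad[72];
		/* a 3-byte message ("abc") pads out to exactly one 64-byte block */
		size_t n = md_padding(3, pad, 1);

		printf("trailer %zu bytes, padded total %zu\n", n, (size_t)3 + n);
		return 0;
	}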
p                  66 arch/powerpc/include/asm/barrier.h #define __smp_store_release(p, v)						\
p                  68 arch/powerpc/include/asm/barrier.h 	compiletime_assert_atomic_type(*p);				\
p                  70 arch/powerpc/include/asm/barrier.h 	WRITE_ONCE(*p, v);						\
p                  73 arch/powerpc/include/asm/barrier.h #define __smp_load_acquire(p)						\
p                  75 arch/powerpc/include/asm/barrier.h 	typeof(*p) ___p1 = READ_ONCE(*p);				\
p                  76 arch/powerpc/include/asm/barrier.h 	compiletime_assert_atomic_type(*p);				\
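Note: the arch/powerpc/include/asm/barrier.h lines above define store-release and load-acquire: a barrier sits before the plain store and after the plain load, so anything written before the release store is visible to a reader whose acquire load observes the stored value. The canonical message-passing use of that pair is sketched below with C11 atomics (build with -pthread); the variable names and the spin-wait are choices of the sketch, not kernel code.

	#include <stdatomic.h>
	#include <pthread.h>
	#include <stdio.h>

	static int payload;		/* plain data, published via 'ready' */
	static _Atomic int ready;

	static void *producer(void *arg)
	{
		(void)arg;
		payload = 42;		/* ordinary store... */
		/* ...made visible by the release store on the flag */
		atomic_store_explicit(&ready, 1, memory_order_release);
		return NULL;
	}

	static void *consumer(void *arg)
	{
		(void)arg;
		/* acquire load: once it reads 1, the payload store is visible */
		while (!atomic_load_explicit(&ready, memory_order_acquire))
			;
		printf("payload=%d\n", payload);	/* always 42 */
		return NULL;
	}

	int main(void)
	{
		pthread_t c, p;

		pthread_create(&c, NULL, consumer, NULL);
		pthread_create(&p, NULL, producer, NULL);
		pthread_join(c, NULL);
		pthread_join(p, NULL);
		return 0;
	}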
p                  71 arch/powerpc/include/asm/bitops.h 	unsigned long *p = (unsigned long *)_p;	\
p                  79 arch/powerpc/include/asm/bitops.h 	: "=&r" (old), "+m" (*p)		\
p                  80 arch/powerpc/include/asm/bitops.h 	: "r" (mask), "r" (p)			\
p                 117 arch/powerpc/include/asm/bitops.h 	unsigned long *p = (unsigned long *)_p;		\
p                 127 arch/powerpc/include/asm/bitops.h 	: "r" (mask), "r" (p)				\
p                 171 arch/powerpc/include/asm/bitops.h 	unsigned long *p = (unsigned long *)addr + BIT_WORD(nr);
p                 182 arch/powerpc/include/asm/bitops.h 	: "r" (mask), "r" (p)
p                  55 arch/powerpc/include/asm/book3s/32/kup.h 	stw	\gpr2, STACK_REGS_KUAP(\sp)
p                  66 arch/powerpc/include/asm/book3s/32/kup.h 	lwz	\gpr2, STACK_REGS_KUAP(\sp)
p                 258 arch/powerpc/include/asm/book3s/32/pgtable.h static inline unsigned long pte_update(pte_t *p,
p                 270 arch/powerpc/include/asm/book3s/32/pgtable.h 	: "=&r" (old), "=&r" (tmp), "=m" (*p)
p                 271 arch/powerpc/include/asm/book3s/32/pgtable.h 	: "r" (p), "r" (clr), "r" (set), "m" (*p)
p                 277 arch/powerpc/include/asm/book3s/32/pgtable.h static inline unsigned long long pte_update(pte_t *p,
p                 291 arch/powerpc/include/asm/book3s/32/pgtable.h 	: "=&r" (old), "=&r" (tmp), "=m" (*p)
p                 292 arch/powerpc/include/asm/book3s/32/pgtable.h 	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
p                 334 arch/powerpc/include/asm/book3s/64/pgtable.h #define __real_pte(e, p, o)		((real_pte_t){(e)})
p                  70 arch/powerpc/include/asm/book3s/64/tlbflush-radix.h #define radix__flush_tlb_page_psize(mm,addr,p) radix__local_flush_tlb_page_psize(mm,addr,p)
p                  18 arch/powerpc/include/asm/cmpxchg.h static inline u32 __xchg_##type##sfx(volatile void *p, u32 val)	\
p                  22 arch/powerpc/include/asm/cmpxchg.h 	off = (unsigned long)p % sizeof(u32);			\
p                  24 arch/powerpc/include/asm/cmpxchg.h 	p -= off;						\
p                  35 arch/powerpc/include/asm/cmpxchg.h 	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)		\
p                  36 arch/powerpc/include/asm/cmpxchg.h 	: "r" (p), "r" (val), "r" (prev_mask)			\
p                  44 arch/powerpc/include/asm/cmpxchg.h u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new)	\
p                  48 arch/powerpc/include/asm/cmpxchg.h 	off = (unsigned long)p % sizeof(u32);			\
p                  50 arch/powerpc/include/asm/cmpxchg.h 	p -= off;						\
p                  69 arch/powerpc/include/asm/cmpxchg.h 	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)		\
p                  70 arch/powerpc/include/asm/cmpxchg.h 	: "r" (p), "r" (old), "r" (new), "r" (prev_mask)	\
p                  89 arch/powerpc/include/asm/cmpxchg.h __xchg_u32_local(volatile void *p, unsigned long val)
p                  98 arch/powerpc/include/asm/cmpxchg.h 	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
p                  99 arch/powerpc/include/asm/cmpxchg.h 	: "r" (p), "r" (val)
p                 106 arch/powerpc/include/asm/cmpxchg.h __xchg_u32_relaxed(u32 *p, unsigned long val)
p                 115 arch/powerpc/include/asm/cmpxchg.h 	: "=&r" (prev), "+m" (*p)
p                 116 arch/powerpc/include/asm/cmpxchg.h 	: "r" (p), "r" (val)
p                 124 arch/powerpc/include/asm/cmpxchg.h __xchg_u64_local(volatile void *p, unsigned long val)
p                 133 arch/powerpc/include/asm/cmpxchg.h 	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
p                 134 arch/powerpc/include/asm/cmpxchg.h 	: "r" (p), "r" (val)
p                 141 arch/powerpc/include/asm/cmpxchg.h __xchg_u64_relaxed(u64 *p, unsigned long val)
p                 150 arch/powerpc/include/asm/cmpxchg.h 	: "=&r" (prev), "+m" (*p)
p                 151 arch/powerpc/include/asm/cmpxchg.h 	: "r" (p), "r" (val)
p                 223 arch/powerpc/include/asm/cmpxchg.h __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
p                 238 arch/powerpc/include/asm/cmpxchg.h 	: "=&r" (prev), "+m" (*p)
p                 239 arch/powerpc/include/asm/cmpxchg.h 	: "r" (p), "r" (old), "r" (new)
p                 246 arch/powerpc/include/asm/cmpxchg.h __cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
p                 260 arch/powerpc/include/asm/cmpxchg.h 	: "=&r" (prev), "+m" (*p)
p                 261 arch/powerpc/include/asm/cmpxchg.h 	: "r" (p), "r" (old), "r" (new)
p                 268 arch/powerpc/include/asm/cmpxchg.h __cmpxchg_u32_relaxed(u32 *p, unsigned long old, unsigned long new)
p                 280 arch/powerpc/include/asm/cmpxchg.h 	: "=&r" (prev), "+m" (*p)
p                 281 arch/powerpc/include/asm/cmpxchg.h 	: "r" (p), "r" (old), "r" (new)
p                 296 arch/powerpc/include/asm/cmpxchg.h __cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new)
p                 310 arch/powerpc/include/asm/cmpxchg.h 	: "=&r" (prev), "+m" (*p)
p                 311 arch/powerpc/include/asm/cmpxchg.h 	: "r" (p), "r" (old), "r" (new)
p                 319 arch/powerpc/include/asm/cmpxchg.h __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
p                 333 arch/powerpc/include/asm/cmpxchg.h 	: "=&r" (prev), "+m" (*p)
p                 334 arch/powerpc/include/asm/cmpxchg.h 	: "r" (p), "r" (old), "r" (new)
p                 341 arch/powerpc/include/asm/cmpxchg.h __cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
p                 354 arch/powerpc/include/asm/cmpxchg.h 	: "=&r" (prev), "+m" (*p)
p                 355 arch/powerpc/include/asm/cmpxchg.h 	: "r" (p), "r" (old), "r" (new)
p                 362 arch/powerpc/include/asm/cmpxchg.h __cmpxchg_u64_relaxed(u64 *p, unsigned long old, unsigned long new)
p                 373 arch/powerpc/include/asm/cmpxchg.h 	: "=&r" (prev), "+m" (*p)
p                 374 arch/powerpc/include/asm/cmpxchg.h 	: "r" (p), "r" (old), "r" (new)
p                 381 arch/powerpc/include/asm/cmpxchg.h __cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new)
p                 394 arch/powerpc/include/asm/cmpxchg.h 	: "=&r" (prev), "+m" (*p)
p                 395 arch/powerpc/include/asm/cmpxchg.h 	: "r" (p), "r" (old), "r" (new)
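Note: the bulk of the arch/powerpc/include/asm/cmpxchg.h lines above are the 32- and 64-bit exchange and compare-and-swap primitives in their relaxed, acquire, local and full-barrier flavours. What callers build on top of them is almost always the same retry loop: load the old value, compute the new one, and try again if another CPU intervened. A small example of that loop, written with C11 atomics rather than the kernel macros, follows; atomic_max is invented for the illustration and does not appear in the listing.

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Atomically raise *v to at least 'new'; returns the value observed
	 * before the update.  A failed compare-exchange refreshes 'old', so
	 * the loop re-evaluates against whatever another thread stored. */
	static uint64_t atomic_max(_Atomic uint64_t *v, uint64_t new)
	{
		uint64_t old = atomic_load(v);

		while (old < new &&
		       !atomic_compare_exchange_weak(v, &old, new))
			;
		return old;
	}

	int main(void)
	{
		_Atomic uint64_t hwm = 10;

		atomic_max(&hwm, 7);	/* 10 >= 7: no store at all */
		atomic_max(&hwm, 25);	/* raises the high-water mark */
		printf("%llu\n", (unsigned long long)atomic_load(&hwm));	/* 25 */
		return 0;
	}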
p                 507 arch/powerpc/include/asm/cpm1.h #define TM_CMD_PERIOD(p) ((p)&0xFFFF)	/* Timer Period */
p                 292 arch/powerpc/include/asm/epapr_hcalls.h 	const uint32_t *p = (const uint32_t *) buffer;
p                 297 arch/powerpc/include/asm/epapr_hcalls.h 	r5 = be32_to_cpu(p[0]);
p                 298 arch/powerpc/include/asm/epapr_hcalls.h 	r6 = be32_to_cpu(p[1]);
p                 299 arch/powerpc/include/asm/epapr_hcalls.h 	r7 = be32_to_cpu(p[2]);
p                 300 arch/powerpc/include/asm/epapr_hcalls.h 	r8 = be32_to_cpu(p[3]);
p                 335 arch/powerpc/include/asm/epapr_hcalls.h 	uint32_t *p = (uint32_t *) buffer;
p                 348 arch/powerpc/include/asm/epapr_hcalls.h 	p[0] = cpu_to_be32(r5);
p                 349 arch/powerpc/include/asm/epapr_hcalls.h 	p[1] = cpu_to_be32(r6);
p                 350 arch/powerpc/include/asm/epapr_hcalls.h 	p[2] = cpu_to_be32(r7);
p                 351 arch/powerpc/include/asm/epapr_hcalls.h 	p[3] = cpu_to_be32(r8);
p                  13 arch/powerpc/include/asm/ide.h #define __ide_mm_insw(p, a, c)	readsw((void __iomem *)(p), (a), (c))
p                  14 arch/powerpc/include/asm/ide.h #define __ide_mm_insl(p, a, c)	readsl((void __iomem *)(p), (a), (c))
p                  15 arch/powerpc/include/asm/ide.h #define __ide_mm_outsw(p, a, c)	writesw((void __iomem *)(p), (a), (c))
p                  16 arch/powerpc/include/asm/ide.h #define __ide_mm_outsl(p, a, c)	writesl((void __iomem *)(p), (a), (c))
p                  43 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(insb, (unsigned long p, void *b, unsigned long c),
p                  44 arch/powerpc/include/asm/io-defs.h 		 (p, b, c), pio, p)
p                  45 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(insw, (unsigned long p, void *b, unsigned long c),
p                  46 arch/powerpc/include/asm/io-defs.h 		 (p, b, c), pio, p)
p                  47 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(insl, (unsigned long p, void *b, unsigned long c),
p                  48 arch/powerpc/include/asm/io-defs.h 		 (p, b, c), pio, p)
p                  49 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(outsb, (unsigned long p, const void *b, unsigned long c),
p                  50 arch/powerpc/include/asm/io-defs.h 		 (p, b, c), pio, p)
p                  51 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(outsw, (unsigned long p, const void *b, unsigned long c),
p                  52 arch/powerpc/include/asm/io-defs.h 		 (p, b, c), pio, p)
p                  53 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(outsl, (unsigned long p, const void *b, unsigned long c),
p                  54 arch/powerpc/include/asm/io-defs.h 		 (p, b, c), pio, p)
p                 542 arch/powerpc/include/asm/io.h #define __do_insb(p, b, n)	readsb((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
p                 543 arch/powerpc/include/asm/io.h #define __do_insw(p, b, n)	readsw((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
p                 544 arch/powerpc/include/asm/io.h #define __do_insl(p, b, n)	readsl((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
p                 545 arch/powerpc/include/asm/io.h #define __do_outsb(p, b, n)	writesb((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
p                 546 arch/powerpc/include/asm/io.h #define __do_outsw(p, b, n)	writesw((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
p                 547 arch/powerpc/include/asm/io.h #define __do_outsl(p, b, n)	writesl((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
p                 622 arch/powerpc/include/asm/io.h #define xlate_dev_mem_ptr(p)	__va(p)
p                 627 arch/powerpc/include/asm/io.h #define xlate_dev_kmem_ptr(p)	p
p                 203 arch/powerpc/include/asm/keylargo.h #define KL3_IT_PORT_WAKEUP_ENABLE(p)		(0x00080000 << ((p)<<3))
p                 204 arch/powerpc/include/asm/keylargo.h #define KL3_IT_PORT_RESUME_WAKE_EN(p)		(0x00040000 << ((p)<<3))
p                 205 arch/powerpc/include/asm/keylargo.h #define KL3_IT_PORT_CONNECT_WAKE_EN(p)		(0x00020000 << ((p)<<3))
p                 206 arch/powerpc/include/asm/keylargo.h #define KL3_IT_PORT_DISCONNECT_WAKE_EN(p)	(0x00010000 << ((p)<<3))
p                 207 arch/powerpc/include/asm/keylargo.h #define KL3_IT_PORT_RESUME_STAT(p)		(0x00300000 << ((p)<<3))
p                 208 arch/powerpc/include/asm/keylargo.h #define KL3_IT_PORT_CONNECT_STAT(p)		(0x00200000 << ((p)<<3))
p                 209 arch/powerpc/include/asm/keylargo.h #define KL3_IT_PORT_DISCONNECT_STAT(p)		(0x00100000 << ((p)<<3))
p                 212 arch/powerpc/include/asm/keylargo.h #define KL4_PORT_WAKEUP_ENABLE(p)	(0x00000008 << ((p)<<3))
p                 213 arch/powerpc/include/asm/keylargo.h #define KL4_PORT_RESUME_WAKE_EN(p)	(0x00000004 << ((p)<<3))
p                 214 arch/powerpc/include/asm/keylargo.h #define KL4_PORT_CONNECT_WAKE_EN(p)	(0x00000002 << ((p)<<3))
p                 215 arch/powerpc/include/asm/keylargo.h #define KL4_PORT_DISCONNECT_WAKE_EN(p)	(0x00000001 << ((p)<<3))
p                 216 arch/powerpc/include/asm/keylargo.h #define KL4_PORT_RESUME_STAT(p)		(0x00000040 << ((p)<<3))
p                 217 arch/powerpc/include/asm/keylargo.h #define KL4_PORT_CONNECT_STAT(p)	(0x00000020 << ((p)<<3))
p                 218 arch/powerpc/include/asm/keylargo.h #define KL4_PORT_DISCONNECT_STAT(p)	(0x00000010 << ((p)<<3))
p                  51 arch/powerpc/include/asm/kprobes.h #define flush_insn_slot(p)	do { } while (0)
p                  55 arch/powerpc/include/asm/kprobes.h extern void arch_remove_kprobe(struct kprobe *p);
p                  30 arch/powerpc/include/asm/livepatch.h static inline void klp_init_thread_info(struct task_struct *p)
p                  33 arch/powerpc/include/asm/livepatch.h 	task_thread_info(p)->livepatch_sp = end_of_stack(p) + 1;
p                  36 arch/powerpc/include/asm/livepatch.h static inline void klp_init_thread_info(struct task_struct *p) { }
p                  15 arch/powerpc/include/asm/nohash/32/kup-8xx.h 	stw	\gpr1, STACK_REGS_KUAP(\sp)
p                  19 arch/powerpc/include/asm/nohash/32/kup-8xx.h 	lwz	\gpr1, STACK_REGS_KUAP(\sp)
p                 221 arch/powerpc/include/asm/nohash/32/pgtable.h static inline unsigned long pte_update(pte_t *p,
p                 235 arch/powerpc/include/asm/nohash/32/pgtable.h 	: "=&r" (old), "=&r" (tmp), "=m" (*p)
p                 236 arch/powerpc/include/asm/nohash/32/pgtable.h 	: "r" (p), "r" (clr), "r" (set), "m" (*p)
p                 239 arch/powerpc/include/asm/nohash/32/pgtable.h 	unsigned long old = pte_val(*p);
p                 243 arch/powerpc/include/asm/nohash/32/pgtable.h 	p->pte = p->pte1 = p->pte2 = p->pte3 = new;
p                 245 arch/powerpc/include/asm/nohash/32/pgtable.h 	*p = __pte(new);
p                 256 arch/powerpc/include/asm/nohash/32/pgtable.h static inline unsigned long long pte_update(pte_t *p,
p                 272 arch/powerpc/include/asm/nohash/32/pgtable.h 	: "=&r" (old), "=&r" (tmp), "=m" (*p)
p                 273 arch/powerpc/include/asm/nohash/32/pgtable.h 	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
p                 276 arch/powerpc/include/asm/nohash/32/pgtable.h 	unsigned long long old = pte_val(*p);
p                 277 arch/powerpc/include/asm/nohash/32/pgtable.h 	*p = __pte((old & ~(unsigned long long)clr) | set);
p                  50 arch/powerpc/include/asm/nohash/tlbflush.h #define __flush_tlb_page(mm,addr,p,i)	__local_flush_tlb_page(mm,addr,p,i)
p                 323 arch/powerpc/include/asm/page.h 		struct page *p);
p                  83 arch/powerpc/include/asm/pgtable-be-types.h 	unsigned long *p = (unsigned long *)ptep;
p                  87 arch/powerpc/include/asm/pgtable-be-types.h 	prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old),
p                  95 arch/powerpc/include/asm/pgtable-be-types.h 	unsigned long *p = (unsigned long *)pmdp;
p                  98 arch/powerpc/include/asm/pgtable-be-types.h 	prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pmd_raw(old),
p                  63 arch/powerpc/include/asm/pgtable-types.h 	unsigned long *p = (unsigned long *)ptep;
p                  66 arch/powerpc/include/asm/pgtable-types.h 	return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
p                 113 arch/powerpc/include/asm/pgtable.h static inline void pte_frag_set(mm_context_t *ctx, void *p)
p                 115 arch/powerpc/include/asm/pgtable.h 	ctx->pte_frag = p;
p                 127 arch/powerpc/include/asm/pgtable.h static inline void pte_frag_set(mm_context_t *ctx, void *p)
p                 323 arch/powerpc/include/asm/plpar_wrappers.h static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
p                 330 arch/powerpc/include/asm/plpar_wrappers.h 		p->character = retbuf[0];
p                 331 arch/powerpc/include/asm/plpar_wrappers.h 		p->behaviour = retbuf[1];
p                  27 arch/powerpc/include/asm/pmac_pfunc.h 		u32 *p;
p                 303 arch/powerpc/include/asm/processor.h unsigned long get_wchan(struct task_struct *p);
p                 367 arch/powerpc/include/asm/processor.h int validate_sp(unsigned long sp, struct task_struct *p,
p                  86 arch/powerpc/include/asm/sections.h 	void *p;
p                  88 arch/powerpc/include/asm/sections.h 	if (!probe_kernel_address(&desc->funcaddr, p))
p                  89 arch/powerpc/include/asm/sections.h 		ptr = p;
p                  63 arch/powerpc/include/asm/string.h static inline void *memset16(uint16_t *p, uint16_t v, __kernel_size_t n)
p                  65 arch/powerpc/include/asm/string.h 	return __memset16(p, v, n * 2);
p                  68 arch/powerpc/include/asm/string.h static inline void *memset32(uint32_t *p, uint32_t v, __kernel_size_t n)
p                  70 arch/powerpc/include/asm/string.h 	return __memset32(p, v, n * 4);
p                  73 arch/powerpc/include/asm/string.h static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
p                  75 arch/powerpc/include/asm/string.h 	return __memset64(p, v, n * 8);
p                 116 arch/powerpc/kernel/align.c 	unsigned char __user *p, *addr;
p                 171 arch/powerpc/kernel/align.c 		p = addr;
p                 175 arch/powerpc/kernel/align.c 			ret |= __get_user_inatomic(temp.v[0], p++);
p                 176 arch/powerpc/kernel/align.c 			ret |= __get_user_inatomic(temp.v[1], p++);
p                 177 arch/powerpc/kernel/align.c 			ret |= __get_user_inatomic(temp.v[2], p++);
p                 178 arch/powerpc/kernel/align.c 			ret |= __get_user_inatomic(temp.v[3], p++);
p                 181 arch/powerpc/kernel/align.c 			ret |= __get_user_inatomic(temp.v[4], p++);
p                 182 arch/powerpc/kernel/align.c 			ret |= __get_user_inatomic(temp.v[5], p++);
p                 185 arch/powerpc/kernel/align.c 			ret |= __get_user_inatomic(temp.v[6], p++);
p                 186 arch/powerpc/kernel/align.c 			ret |= __get_user_inatomic(temp.v[7], p++);
p                 257 arch/powerpc/kernel/align.c 		p = addr;
p                 260 arch/powerpc/kernel/align.c 			ret |= __put_user_inatomic(data.v[0], p++);
p                 261 arch/powerpc/kernel/align.c 			ret |= __put_user_inatomic(data.v[1], p++);
p                 262 arch/powerpc/kernel/align.c 			ret |= __put_user_inatomic(data.v[2], p++);
p                 263 arch/powerpc/kernel/align.c 			ret |= __put_user_inatomic(data.v[3], p++);
p                 266 arch/powerpc/kernel/align.c 			ret |= __put_user_inatomic(data.v[4], p++);
p                 267 arch/powerpc/kernel/align.c 			ret |= __put_user_inatomic(data.v[5], p++);
p                 270 arch/powerpc/kernel/align.c 			ret |= __put_user_inatomic(data.v[6], p++);
p                 271 arch/powerpc/kernel/align.c 			ret |= __put_user_inatomic(data.v[7], p++);
p                  37 arch/powerpc/kernel/crash_dump.c 	unsigned int *p = (unsigned int *)addr;
p                  47 arch/powerpc/kernel/crash_dump.c 	patch_instruction(p, PPC_INST_NOP);
p                  48 arch/powerpc/kernel/crash_dump.c 	patch_branch(++p, addr + PHYSICAL_START, 0);
p                 778 arch/powerpc/kernel/dt_cpu_ftrs.c 	const char *p;
p                 785 arch/powerpc/kernel/dt_cpu_ftrs.c 	p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
p                 786 arch/powerpc/kernel/dt_cpu_ftrs.c 	if (!p)
p                 789 arch/powerpc/kernel/dt_cpu_ftrs.c 	if (strstr(p, "dt_cpu_ftrs=off"))
p                 121 arch/powerpc/kernel/eeh_cache.c 	struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node;
p                 126 arch/powerpc/kernel/eeh_cache.c 	while (*p) {
p                 127 arch/powerpc/kernel/eeh_cache.c 		parent = *p;
p                 130 arch/powerpc/kernel/eeh_cache.c 			p = &parent->rb_left;
p                 132 arch/powerpc/kernel/eeh_cache.c 			p = &parent->rb_right;
p                 154 arch/powerpc/kernel/eeh_cache.c 	rb_link_node(&piar->rb_node, parent, p);
p                 564 arch/powerpc/kernel/fadump.c static int __init early_fadump_param(char *p)
p                 566 arch/powerpc/kernel/fadump.c 	if (!p)
p                 569 arch/powerpc/kernel/fadump.c 	if (strncmp(p, "on", 2) == 0)
p                 571 arch/powerpc/kernel/fadump.c 	else if (strncmp(p, "off", 3) == 0)
p                 573 arch/powerpc/kernel/fadump.c 	else if (strncmp(p, "nocma", 5) == 0) {
p                 587 arch/powerpc/kernel/fadump.c static int __init early_fadump_reserve_mem(char *p)
p                 589 arch/powerpc/kernel/fadump.c 	if (p)
p                 590 arch/powerpc/kernel/fadump.c 		fw_dump.reserve_bootvar = memparse(p, &p);
p                 148 arch/powerpc/kernel/ima_kexec.c static int write_number(void *p, u64 value, int cells)
p                 157 arch/powerpc/kernel/ima_kexec.c 		memcpy(p, &tmp, sizeof(tmp));
p                 162 arch/powerpc/kernel/ima_kexec.c 		memcpy(p, &tmp, sizeof(tmp));
p                 125 arch/powerpc/kernel/io.c 	void *p = (void __force *)addr;
p                 131 arch/powerpc/kernel/io.c 	while(n && !IO_CHECK_ALIGN(p, 4)) {
p                 132 arch/powerpc/kernel/io.c 		*((volatile u8 *)p) = c;
p                 133 arch/powerpc/kernel/io.c 		p++;
p                 137 arch/powerpc/kernel/io.c 		*((volatile u32 *)p) = lc;
p                 138 arch/powerpc/kernel/io.c 		p += 4;
p                 142 arch/powerpc/kernel/io.c 		*((volatile u8 *)p) = c;
p                 143 arch/powerpc/kernel/io.c 		p++;
p                 367 arch/powerpc/kernel/iommu.c 	struct iommu_pool *p;
p                 372 arch/powerpc/kernel/iommu.c 		p = &tbl->large_pool;
p                 377 arch/powerpc/kernel/iommu.c 		p = &tbl->pools[pool_nr];
p                 380 arch/powerpc/kernel/iommu.c 	return p;
p                 689 arch/powerpc/kernel/iommu.c 	struct iommu_pool *p;
p                 714 arch/powerpc/kernel/iommu.c 		p = &tbl->pools[i];
p                 715 arch/powerpc/kernel/iommu.c 		spin_lock_init(&(p->lock));
p                 716 arch/powerpc/kernel/iommu.c 		p->start = tbl->poolsize * i;
p                 717 arch/powerpc/kernel/iommu.c 		p->hint = p->start;
p                 718 arch/powerpc/kernel/iommu.c 		p->end = p->start + tbl->poolsize;
p                 721 arch/powerpc/kernel/iommu.c 	p = &tbl->large_pool;
p                 722 arch/powerpc/kernel/iommu.c 	spin_lock_init(&(p->lock));
p                 723 arch/powerpc/kernel/iommu.c 	p->start = tbl->poolsize * i;
p                 724 arch/powerpc/kernel/iommu.c 	p->hint = p->start;
p                 725 arch/powerpc/kernel/iommu.c 	p->end = tbl->it_size;
p                 500 arch/powerpc/kernel/irq.c int arch_show_interrupts(struct seq_file *p, int prec)
p                 506 arch/powerpc/kernel/irq.c 		seq_printf(p, "%*s: ", prec, "TAU");
p                 508 arch/powerpc/kernel/irq.c 			seq_printf(p, "%10u ", tau_interrupts(j));
p                 509 arch/powerpc/kernel/irq.c 		seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
p                 513 arch/powerpc/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "LOC");
p                 515 arch/powerpc/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
p                 516 arch/powerpc/kernel/irq.c         seq_printf(p, "  Local timer interrupts for timer event device\n");
p                 518 arch/powerpc/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "BCT");
p                 520 arch/powerpc/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event);
p                 521 arch/powerpc/kernel/irq.c 	seq_printf(p, "  Broadcast timer interrupts for timer event device\n");
p                 523 arch/powerpc/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "LOC");
p                 525 arch/powerpc/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
p                 526 arch/powerpc/kernel/irq.c         seq_printf(p, "  Local timer interrupts for others\n");
p                 528 arch/powerpc/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "SPU");
p                 530 arch/powerpc/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
p                 531 arch/powerpc/kernel/irq.c 	seq_printf(p, "  Spurious interrupts\n");
p                 533 arch/powerpc/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "PMI");
p                 535 arch/powerpc/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
p                 536 arch/powerpc/kernel/irq.c 	seq_printf(p, "  Performance monitoring interrupts\n");
p                 538 arch/powerpc/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "MCE");
p                 540 arch/powerpc/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
p                 541 arch/powerpc/kernel/irq.c 	seq_printf(p, "  Machine check exceptions\n");
p                 544 arch/powerpc/kernel/irq.c 		seq_printf(p, "%*s: ", prec, "HMI");
p                 546 arch/powerpc/kernel/irq.c 			seq_printf(p, "%10u ",
p                 548 arch/powerpc/kernel/irq.c 		seq_printf(p, "  Hypervisor Maintenance Interrupts\n");
p                 551 arch/powerpc/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "NMI");
p                 553 arch/powerpc/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
p                 554 arch/powerpc/kernel/irq.c 	seq_printf(p, "  System Reset interrupts\n");
p                 557 arch/powerpc/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "WDG");
p                 559 arch/powerpc/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
p                 560 arch/powerpc/kernel/irq.c 	seq_printf(p, "  Watchdog soft-NMI interrupts\n");
p                 565 arch/powerpc/kernel/irq.c 		seq_printf(p, "%*s: ", prec, "DBL");
p                 567 arch/powerpc/kernel/irq.c 			seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
p                 568 arch/powerpc/kernel/irq.c 		seq_printf(p, "  Doorbell interrupts\n");
p                 193 arch/powerpc/kernel/kgdb.c void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
p                 195 arch/powerpc/kernel/kgdb.c 	struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp +
p                 216 arch/powerpc/kernel/kgdb.c 		PACK64(ptr, p->thread.evr[reg]);
p                  19 arch/powerpc/kernel/kprobes-ftrace.c 	struct kprobe *p;
p                  22 arch/powerpc/kernel/kprobes-ftrace.c 	p = get_kprobe((kprobe_opcode_t *)nip);
p                  23 arch/powerpc/kernel/kprobes-ftrace.c 	if (unlikely(!p) || kprobe_disabled(p))
p                  28 arch/powerpc/kernel/kprobes-ftrace.c 		kprobes_inc_nmissed_count(p);
p                  36 arch/powerpc/kernel/kprobes-ftrace.c 		__this_cpu_write(current_kprobe, p);
p                  38 arch/powerpc/kernel/kprobes-ftrace.c 		if (!p->pre_handler || !p->pre_handler(p, regs)) {
p                  44 arch/powerpc/kernel/kprobes-ftrace.c 			if (unlikely(p->post_handler)) {
p                  46 arch/powerpc/kernel/kprobes-ftrace.c 				p->post_handler(p, regs, 0);
p                  58 arch/powerpc/kernel/kprobes-ftrace.c int arch_prepare_kprobe_ftrace(struct kprobe *p)
p                  60 arch/powerpc/kernel/kprobes-ftrace.c 	p->ainsn.insn = NULL;
p                  61 arch/powerpc/kernel/kprobes-ftrace.c 	p->ainsn.boostable = -1;
p                 105 arch/powerpc/kernel/kprobes.c int arch_prepare_kprobe(struct kprobe *p)
p                 108 arch/powerpc/kernel/kprobes.c 	kprobe_opcode_t insn = *p->addr;
p                 110 arch/powerpc/kernel/kprobes.c 	if ((unsigned long)p->addr & 0x03) {
p                 121 arch/powerpc/kernel/kprobes.c 		p->ainsn.insn = get_insn_slot();
p                 122 arch/powerpc/kernel/kprobes.c 		if (!p->ainsn.insn)
p                 127 arch/powerpc/kernel/kprobes.c 		memcpy(p->ainsn.insn, p->addr,
p                 129 arch/powerpc/kernel/kprobes.c 		p->opcode = *p->addr;
p                 130 arch/powerpc/kernel/kprobes.c 		flush_icache_range((unsigned long)p->ainsn.insn,
p                 131 arch/powerpc/kernel/kprobes.c 			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
p                 134 arch/powerpc/kernel/kprobes.c 	p->ainsn.boostable = 0;
p                 139 arch/powerpc/kernel/kprobes.c void arch_arm_kprobe(struct kprobe *p)
p                 141 arch/powerpc/kernel/kprobes.c 	patch_instruction(p->addr, BREAKPOINT_INSTRUCTION);
p                 145 arch/powerpc/kernel/kprobes.c void arch_disarm_kprobe(struct kprobe *p)
p                 147 arch/powerpc/kernel/kprobes.c 	patch_instruction(p->addr, p->opcode);
p                 151 arch/powerpc/kernel/kprobes.c void arch_remove_kprobe(struct kprobe *p)
p                 153 arch/powerpc/kernel/kprobes.c 	if (p->ainsn.insn) {
p                 154 arch/powerpc/kernel/kprobes.c 		free_insn_slot(p->ainsn.insn, 0);
p                 155 arch/powerpc/kernel/kprobes.c 		p->ainsn.insn = NULL;
p                 160 arch/powerpc/kernel/kprobes.c static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
p                 170 arch/powerpc/kernel/kprobes.c 	regs->nip = (unsigned long)p->ainsn.insn;
p                 187 arch/powerpc/kernel/kprobes.c static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
p                 190 arch/powerpc/kernel/kprobes.c 	__this_cpu_write(current_kprobe, p);
p                 216 arch/powerpc/kernel/kprobes.c static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
p                 219 arch/powerpc/kernel/kprobes.c 	unsigned int insn = *p->ainsn.insn;
p                 228 arch/powerpc/kernel/kprobes.c 		if (unlikely(p->ainsn.boostable == 0))
p                 229 arch/powerpc/kernel/kprobes.c 			p->ainsn.boostable = 1;
p                 249 arch/powerpc/kernel/kprobes.c 		if (unlikely(p->ainsn.boostable != 1))
p                 250 arch/powerpc/kernel/kprobes.c 			p->ainsn.boostable = -1;
p                 259 arch/powerpc/kernel/kprobes.c 	struct kprobe *p;
p                 279 arch/powerpc/kernel/kprobes.c 		p = get_kprobe(addr);
p                 280 arch/powerpc/kernel/kprobes.c 		if (p) {
p                 281 arch/powerpc/kernel/kprobes.c 			kprobe_opcode_t insn = *p->ainsn.insn;
p                 296 arch/powerpc/kernel/kprobes.c 			set_current_kprobe(p, regs, kcb);
p                 297 arch/powerpc/kernel/kprobes.c 			kprobes_inc_nmissed_count(p);
p                 299 arch/powerpc/kernel/kprobes.c 			if (p->ainsn.boostable >= 0) {
p                 300 arch/powerpc/kernel/kprobes.c 				ret = try_to_emulate(p, regs);
p                 308 arch/powerpc/kernel/kprobes.c 			prepare_singlestep(p, regs);
p                 325 arch/powerpc/kernel/kprobes.c 	p = get_kprobe(addr);
p                 326 arch/powerpc/kernel/kprobes.c 	if (!p) {
p                 350 arch/powerpc/kernel/kprobes.c 	set_current_kprobe(p, regs, kcb);
p                 351 arch/powerpc/kernel/kprobes.c 	if (p->pre_handler && p->pre_handler(p, regs)) {
p                 358 arch/powerpc/kernel/kprobes.c 	if (p->ainsn.boostable >= 0) {
p                 359 arch/powerpc/kernel/kprobes.c 		ret = try_to_emulate(p, regs);
p                 362 arch/powerpc/kernel/kprobes.c 			if (p->post_handler)
p                 363 arch/powerpc/kernel/kprobes.c 				p->post_handler(p, regs, 0);
p                 371 arch/powerpc/kernel/kprobes.c 	prepare_singlestep(p, regs);
p                 397 arch/powerpc/kernel/kprobes.c static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
p                 608 arch/powerpc/kernel/kprobes.c int arch_trampoline_kprobe(struct kprobe *p)
p                 610 arch/powerpc/kernel/kprobes.c 	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
p                 134 arch/powerpc/kernel/kvm.c 	u32 *p;
p                 143 arch/powerpc/kernel/kvm.c 	p = (void*)&kvm_tmp[kvm_tmp_index];
p                 146 arch/powerpc/kernel/kvm.c 	return p;
p                 157 arch/powerpc/kernel/kvm.c 	u32 *p;
p                 162 arch/powerpc/kernel/kvm.c 	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
p                 163 arch/powerpc/kernel/kvm.c 	if (!p)
p                 167 arch/powerpc/kernel/kvm.c 	distance_start = (ulong)p - (ulong)inst;
p                 169 arch/powerpc/kernel/kvm.c 	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];
p                 178 arch/powerpc/kernel/kvm.c 	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
p                 179 arch/powerpc/kernel/kvm.c 	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
p                 182 arch/powerpc/kernel/kvm.c 		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
p                 186 arch/powerpc/kernel/kvm.c 		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
p                 190 arch/powerpc/kernel/kvm.c 		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
p                 194 arch/powerpc/kernel/kvm.c 	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
p                 195 arch/powerpc/kernel/kvm.c 	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);
p                 210 arch/powerpc/kernel/kvm.c 	u32 *p;
p                 215 arch/powerpc/kernel/kvm.c 	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
p                 216 arch/powerpc/kernel/kvm.c 	if (!p)
p                 220 arch/powerpc/kernel/kvm.c 	distance_start = (ulong)p - (ulong)inst;
p                 222 arch/powerpc/kernel/kvm.c 	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];
p                 231 arch/powerpc/kernel/kvm.c 	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
p                 232 arch/powerpc/kernel/kvm.c 	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;
p                 237 arch/powerpc/kernel/kvm.c 		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
p                 239 arch/powerpc/kernel/kvm.c 		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
p                 243 arch/powerpc/kernel/kvm.c 		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
p                 245 arch/powerpc/kernel/kvm.c 		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
p                 249 arch/powerpc/kernel/kvm.c 		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
p                 250 arch/powerpc/kernel/kvm.c 		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
p                 254 arch/powerpc/kernel/kvm.c 	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
p                 255 arch/powerpc/kernel/kvm.c 	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);
p                 271 arch/powerpc/kernel/kvm.c 	u32 *p;
p                 276 arch/powerpc/kernel/kvm.c 	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
p                 277 arch/powerpc/kernel/kvm.c 	if (!p)
p                 281 arch/powerpc/kernel/kvm.c 	distance_start = (ulong)p - (ulong)inst;
p                 283 arch/powerpc/kernel/kvm.c 	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];
p                 292 arch/powerpc/kernel/kvm.c 	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
p                 293 arch/powerpc/kernel/kvm.c 	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;
p                 296 arch/powerpc/kernel/kvm.c 		p[kvm_emulate_wrtee_reg_offs] =
p                 302 arch/powerpc/kernel/kvm.c 			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
p                 306 arch/powerpc/kernel/kvm.c 			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
p                 310 arch/powerpc/kernel/kvm.c 			p[kvm_emulate_wrtee_reg_offs] |= rt;
p                 315 arch/powerpc/kernel/kvm.c 	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
p                 316 arch/powerpc/kernel/kvm.c 	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);
p                 328 arch/powerpc/kernel/kvm.c 	u32 *p;
p                 333 arch/powerpc/kernel/kvm.c 	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
p                 334 arch/powerpc/kernel/kvm.c 	if (!p)
p                 338 arch/powerpc/kernel/kvm.c 	distance_start = (ulong)p - (ulong)inst;
p                 340 arch/powerpc/kernel/kvm.c 	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];
p                 348 arch/powerpc/kernel/kvm.c 	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
p                 349 arch/powerpc/kernel/kvm.c 	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
p                 350 arch/powerpc/kernel/kvm.c 	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);
p                 369 arch/powerpc/kernel/kvm.c 	u32 *p;
p                 374 arch/powerpc/kernel/kvm.c 	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
p                 375 arch/powerpc/kernel/kvm.c 	if (!p)
p                 379 arch/powerpc/kernel/kvm.c 	distance_start = (ulong)p - (ulong)inst;
p                 381 arch/powerpc/kernel/kvm.c 	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];
p                 390 arch/powerpc/kernel/kvm.c 	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
p                 391 arch/powerpc/kernel/kvm.c 	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
p                 392 arch/powerpc/kernel/kvm.c 	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
p                 393 arch/powerpc/kernel/kvm.c 	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
p                 394 arch/powerpc/kernel/kvm.c 	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
p                 395 arch/powerpc/kernel/kvm.c 	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);
p                 664 arch/powerpc/kernel/kvm.c 	u32 *p;
p                 688 arch/powerpc/kernel/kvm.c 	for (p = start; p < end; p++) {
p                 690 arch/powerpc/kernel/kvm.c 		if (p >= kvm_template_start && p < kvm_template_end) {
p                 691 arch/powerpc/kernel/kvm.c 			p = kvm_template_end - 1;
p                 694 arch/powerpc/kernel/kvm.c 		kvm_check_ins(p, features);
p                  55 arch/powerpc/kernel/legacy_serial.c static unsigned int tsi_serial_in(struct uart_port *p, int offset)
p                  58 arch/powerpc/kernel/legacy_serial.c 	offset = offset << p->regshift;
p                  60 arch/powerpc/kernel/legacy_serial.c 		tmp = readl(p->membase + (UART_IIR & ~3));
p                  63 arch/powerpc/kernel/legacy_serial.c 		return readb(p->membase + offset);
p                  66 arch/powerpc/kernel/legacy_serial.c static void tsi_serial_out(struct uart_port *p, int offset, int value)
p                  68 arch/powerpc/kernel/legacy_serial.c 	offset = offset << p->regshift;
p                  70 arch/powerpc/kernel/legacy_serial.c 		writeb(value, p->membase + offset);
p                 347 arch/powerpc/kernel/module_64.c 		char *p;
p                 360 arch/powerpc/kernel/module_64.c 		while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init")))
p                 361 arch/powerpc/kernel/module_64.c 			p[0] = '_';
p                 276 arch/powerpc/kernel/nvram_64.c 	loff_t p;
p                 280 arch/powerpc/kernel/nvram_64.c 	p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size);
p                 283 arch/powerpc/kernel/nvram_64.c 	if (p && size < part->min_size) {
p                 287 arch/powerpc/kernel/nvram_64.c 		p = 0;
p                 291 arch/powerpc/kernel/nvram_64.c 	if (!p) {
p                 292 arch/powerpc/kernel/nvram_64.c 		p = nvram_create_partition(part->name, NVRAM_SIG_OS,
p                 294 arch/powerpc/kernel/nvram_64.c 		if (p == -ENOSPC) {
p                 300 arch/powerpc/kernel/nvram_64.c 			p = nvram_create_partition(part->name, NVRAM_SIG_OS,
p                 305 arch/powerpc/kernel/nvram_64.c 	if (p <= 0) {
p                 307 arch/powerpc/kernel/nvram_64.c 		       " partition, err %d\n", part->name, (int)p);
p                 311 arch/powerpc/kernel/nvram_64.c 	part->index = p;
p                 312 arch/powerpc/kernel/nvram_64.c 	part->size = nvram_get_partition_size(p) - sizeof(struct err_log_info);
p                 428 arch/powerpc/kernel/nvram_64.c 	loff_t p;
p                 476 arch/powerpc/kernel/nvram_64.c 		p = nvram_find_partition(part->name, sig, &size);
p                 477 arch/powerpc/kernel/nvram_64.c 		if (p <= 0) {
p                 479 arch/powerpc/kernel/nvram_64.c 				"err %d\n", part->name, (int)p);
p                 482 arch/powerpc/kernel/nvram_64.c 		part->index = p;
p                 740 arch/powerpc/kernel/nvram_64.c static unsigned char __init nvram_checksum(struct nvram_header *p)
p                 743 arch/powerpc/kernel/nvram_64.c 	unsigned short *sp = (unsigned short *)p->name; /* assume 6 shorts */
p                 744 arch/powerpc/kernel/nvram_64.c 	c_sum = p->signature + p->length + sp[0] + sp[1] + sp[2] + sp[3] + sp[4] + sp[5];
p                 970 arch/powerpc/kernel/nvram_64.c 	struct nvram_partition *p;
p                 972 arch/powerpc/kernel/nvram_64.c 	list_for_each_entry(p, &nvram_partitions, partition) {
p                 973 arch/powerpc/kernel/nvram_64.c 		if (p->header.signature == sig &&
p                 974 arch/powerpc/kernel/nvram_64.c 		    (!name || !strncmp(p->header.name, name, 12))) {
p                 976 arch/powerpc/kernel/nvram_64.c 				*out_size = (p->header.length - 1) *
p                 978 arch/powerpc/kernel/nvram_64.c 			return p->index + NVRAM_HEADER_LEN;
p                  63 arch/powerpc/kernel/optprobes.c static unsigned long can_optimize(struct kprobe *p)
p                  74 arch/powerpc/kernel/optprobes.c 	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
p                  75 arch/powerpc/kernel/optprobes.c 		return (unsigned long)p->addr + sizeof(kprobe_opcode_t);
p                  83 arch/powerpc/kernel/optprobes.c 	if (!is_kernel_addr((unsigned long)p->addr))
p                  87 arch/powerpc/kernel/optprobes.c 	regs.nip = (unsigned long)p->addr;
p                 102 arch/powerpc/kernel/optprobes.c 	if (!is_conditional_branch(*p->ainsn.insn) &&
p                 103 arch/powerpc/kernel/optprobes.c 			analyse_instr(&op, &regs, *p->ainsn.insn) == 1) {
p                 190 arch/powerpc/kernel/optprobes.c int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
p                 200 arch/powerpc/kernel/optprobes.c 	nip = can_optimize(p);
p                 218 arch/powerpc/kernel/optprobes.c 	b_offset = (unsigned long)buff - (unsigned long)p->addr;
p                 271 arch/powerpc/kernel/optprobes.c 	patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX);
p                1098 arch/powerpc/kernel/pci-common.c 	struct resource *p, **pp;
p                1101 arch/powerpc/kernel/pci-common.c 	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
p                1102 arch/powerpc/kernel/pci-common.c 		if (p->end < res->start)
p                1104 arch/powerpc/kernel/pci-common.c 		if (res->end < p->start)
p                1106 arch/powerpc/kernel/pci-common.c 		if (p->start < res->start || p->end > res->end)
p                1118 arch/powerpc/kernel/pci-common.c 	for (p = res->child; p != NULL; p = p->sibling) {
p                1119 arch/powerpc/kernel/pci-common.c 		p->parent = res;
p                1121 arch/powerpc/kernel/pci-common.c 			 p->name, p, res->name);
p                1563 arch/powerpc/kernel/process.c static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
p                1579 arch/powerpc/kernel/process.c 	p->thread.ksp_vsid = sp_vsid;
p                1591 arch/powerpc/kernel/process.c 		unsigned long kthread_arg, struct task_struct *p,
p                1598 arch/powerpc/kernel/process.c 	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
p                1599 arch/powerpc/kernel/process.c 	struct thread_info *ti = task_thread_info(p);
p                1601 arch/powerpc/kernel/process.c 	klp_init_thread_info(p);
p                1606 arch/powerpc/kernel/process.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                1614 arch/powerpc/kernel/process.c 		clear_tsk_thread_flag(p, TIF_32BIT);
p                1618 arch/powerpc/kernel/process.c 		p->thread.regs = NULL;	/* no user register state */
p                1628 arch/powerpc/kernel/process.c 		p->thread.regs = childregs;
p                1656 arch/powerpc/kernel/process.c 	p->thread.ksp = sp;
p                1658 arch/powerpc/kernel/process.c 	p->thread.ksp_limit = (unsigned long)end_of_stack(p);
p                1661 arch/powerpc/kernel/process.c 	p->thread.ptrace_bps[0] = NULL;
p                1664 arch/powerpc/kernel/process.c 	p->thread.fp_save_area = NULL;
p                1666 arch/powerpc/kernel/process.c 	p->thread.vr_save_area = NULL;
p                1669 arch/powerpc/kernel/process.c 	setup_ksp_vsid(p, sp);
p                1673 arch/powerpc/kernel/process.c 		p->thread.dscr_inherit = current->thread.dscr_inherit;
p                1674 arch/powerpc/kernel/process.c 		p->thread.dscr = mfspr(SPRN_DSCR);
p                1679 arch/powerpc/kernel/process.c 	p->thread.tidr = 0;
p                1956 arch/powerpc/kernel/process.c static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
p                1960 arch/powerpc/kernel/process.c 	unsigned long cpu = task_cpu(p);
p                1973 arch/powerpc/kernel/process.c int validate_sp(unsigned long sp, struct task_struct *p,
p                1976 arch/powerpc/kernel/process.c 	unsigned long stack_page = (unsigned long)task_stack_page(p);
p                1984 arch/powerpc/kernel/process.c 	return valid_irq_stack(sp, p, nbytes);
p                1989 arch/powerpc/kernel/process.c static unsigned long __get_wchan(struct task_struct *p)
p                1994 arch/powerpc/kernel/process.c 	if (!p || p == current || p->state == TASK_RUNNING)
p                1997 arch/powerpc/kernel/process.c 	sp = p->thread.ksp;
p                1998 arch/powerpc/kernel/process.c 	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
p                2003 arch/powerpc/kernel/process.c 		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
p                2004 arch/powerpc/kernel/process.c 		    p->state == TASK_RUNNING)
p                2015 arch/powerpc/kernel/process.c unsigned long get_wchan(struct task_struct *p)
p                2019 arch/powerpc/kernel/process.c 	if (!try_get_task_stack(p))
p                2022 arch/powerpc/kernel/process.c 	ret = __get_wchan(p);
p                2024 arch/powerpc/kernel/process.c 	put_task_stack(p);
p                  77 arch/powerpc/kernel/prom.c static int __init early_parse_mem(char *p)
p                  79 arch/powerpc/kernel/prom.c 	if (!p)
p                  82 arch/powerpc/kernel/prom.c 	memory_limit = PAGE_ALIGN(memparse(p, &p));
p                 116 arch/powerpc/kernel/prom.c 	void *p;
p                 126 arch/powerpc/kernel/prom.c 		p = memblock_alloc_raw(size, PAGE_SIZE);
p                 127 arch/powerpc/kernel/prom.c 		if (!p)
p                 130 arch/powerpc/kernel/prom.c 		memcpy(p, initial_boot_params, size);
p                 131 arch/powerpc/kernel/prom.c 		initial_boot_params = p;
p                 132 arch/powerpc/kernel/prom.c 		DBG("Moved device tree to 0x%px\n", p);
p                 224 arch/powerpc/kernel/prom_init.c #define PHANDLE_VALID(p)	((p) != 0 && (p) != PROM_ERROR)
p                 417 arch/powerpc/kernel/prom_init.c 	const char *p, *q;
p                 422 arch/powerpc/kernel/prom_init.c 	for (p = msg; *p != 0; p = q) {
p                 423 arch/powerpc/kernel/prom_init.c 		for (q = p; *q != 0 && *q != '\n'; ++q)
p                 425 arch/powerpc/kernel/prom_init.c 		if (q > p)
p                 426 arch/powerpc/kernel/prom_init.c 			call_prom("write", 3, 1, prom.stdout, p, q - p);
p                 475 arch/powerpc/kernel/prom_init.c 	const char *p, *q, *s;
p                 482 arch/powerpc/kernel/prom_init.c 	for (p = format; *p != 0; p = q) {
p                 483 arch/powerpc/kernel/prom_init.c 		for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
p                 485 arch/powerpc/kernel/prom_init.c 		if (q > p)
p                 486 arch/powerpc/kernel/prom_init.c 			call_prom("write", 3, 1, prom.stdout, p, q - p);
p                 647 arch/powerpc/kernel/prom_init.c 	char *p = *str;
p                 650 arch/powerpc/kernel/prom_init.c 		*p++ = *q++;
p                 651 arch/powerpc/kernel/prom_init.c 	*p++ = ' ';
p                 652 arch/powerpc/kernel/prom_init.c 	*str = p;
p                 674 arch/powerpc/kernel/prom_init.c 	char cmd[256], *p;
p                 681 arch/powerpc/kernel/prom_init.c 	p = cmd;
p                 682 arch/powerpc/kernel/prom_init.c 	add_string(&p, "dev");
p                 683 arch/powerpc/kernel/prom_init.c 	add_string(&p, nodename);
p                 684 arch/powerpc/kernel/prom_init.c 	add_string(&p, tohex((u32)(unsigned long) value));
p                 685 arch/powerpc/kernel/prom_init.c 	add_string(&p, tohex(valuelen));
p                 686 arch/powerpc/kernel/prom_init.c 	add_string(&p, tohex(ADDR(pname)));
p                 687 arch/powerpc/kernel/prom_init.c 	add_string(&p, tohex(prom_strlen(pname)));
p                 688 arch/powerpc/kernel/prom_init.c 	add_string(&p, "property");
p                 689 arch/powerpc/kernel/prom_init.c 	*p = 0;
p                 762 arch/powerpc/kernel/prom_init.c 	char *p;
p                 766 arch/powerpc/kernel/prom_init.c 	p = prom_cmd_line;
p                 768 arch/powerpc/kernel/prom_init.c 		l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
p                 769 arch/powerpc/kernel/prom_init.c 	if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && (l <= 0 || p[0] == '\0')) /* dbl check */
p                1538 arch/powerpc/kernel/prom_init.c 	cell_t *p = *cellp;
p                1543 arch/powerpc/kernel/prom_init.c 		p++;
p                1546 arch/powerpc/kernel/prom_init.c 	r = be32_to_cpu(*p++);
p                1550 arch/powerpc/kernel/prom_init.c 		r |= be32_to_cpu(*(p++));
p                1553 arch/powerpc/kernel/prom_init.c 	*cellp = p;
p                1597 arch/powerpc/kernel/prom_init.c 	cell_t *p, *endp;
p                1636 arch/powerpc/kernel/prom_init.c 		p = regbuf;
p                1637 arch/powerpc/kernel/prom_init.c 		endp = p + (plen / sizeof(cell_t));
p                1646 arch/powerpc/kernel/prom_init.c 		while ((endp - p) >= (rac + rsc)) {
p                1649 arch/powerpc/kernel/prom_init.c 			base = prom_next_cell(rac, &p);
p                1650 arch/powerpc/kernel/prom_init.c 			size = prom_next_cell(rsc, &p);
p                2257 arch/powerpc/kernel/prom_init.c 			char *p = &compat[i];
p                2258 arch/powerpc/kernel/prom_init.c 			int sl = prom_strlen(p);
p                2261 arch/powerpc/kernel/prom_init.c 			if (prom_strstr(p, "Power Macintosh") ||
p                2262 arch/powerpc/kernel/prom_init.c 			    prom_strstr(p, "MacRISC"))
p                2269 arch/powerpc/kernel/prom_init.c 			if (prom_strstr(p, "IBM,CBEA") ||
p                2270 arch/powerpc/kernel/prom_init.c 			    prom_strstr(p, "IBM,CPBW-1.0"))
p                2519 arch/powerpc/kernel/prom_init.c 	char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
p                2546 arch/powerpc/kernel/prom_init.c 		for (lp = p = namep, ep = namep + l; p < ep; p++) {
p                2547 arch/powerpc/kernel/prom_init.c 			if (*p == '/')
p                2549 arch/powerpc/kernel/prom_init.c 			else if (*p != 0)
p                2550 arch/powerpc/kernel/prom_init.c 				*lp++ = *p;
p                 259 arch/powerpc/kernel/rtas-proc.c static int parse_number(const char __user *p, size_t count, u64 *val)
p                 267 arch/powerpc/kernel/rtas-proc.c 	if (copy_from_user(buf, p, count))
p                 405 arch/powerpc/kernel/rtas-proc.c 		struct individual_sensor *p = &sensors.sensor[i];
p                 410 arch/powerpc/kernel/rtas-proc.c 		sprintf (rstr, SENSOR_PREFIX"%04d", p->token);
p                 414 arch/powerpc/kernel/rtas-proc.c 		for (j = 0, offs = 0; j <= p->quant; j++) {
p                 416 arch/powerpc/kernel/rtas-proc.c 				  	  p->token, j);
p                 418 arch/powerpc/kernel/rtas-proc.c 			ppc_rtas_process_sensor(m, p, state, error, loc);
p                 200 arch/powerpc/kernel/rtas.c 	const __be32 *p;
p                 215 arch/powerpc/kernel/rtas.c 			if ((p = of_get_property(root,
p                 217 arch/powerpc/kernel/rtas.c 				display_width = be32_to_cpu(*p);
p                 218 arch/powerpc/kernel/rtas.c 			if ((p = of_get_property(root,
p                 220 arch/powerpc/kernel/rtas.c 				form_feed = be32_to_cpu(*p);
p                 221 arch/powerpc/kernel/rtas.c 			if ((p = of_get_property(root,
p                 223 arch/powerpc/kernel/rtas.c 				display_lines = be32_to_cpu(*p);
p                1037 arch/powerpc/kernel/rtas.c 	unsigned char *p, *log_end;
p                1049 arch/powerpc/kernel/rtas.c 	p = ext_log->vendor_log;
p                1051 arch/powerpc/kernel/rtas.c 	while (p < log_end) {
p                1052 arch/powerpc/kernel/rtas.c 		sect = (struct pseries_errorlog *)p;
p                1055 arch/powerpc/kernel/rtas.c 		p += pseries_errorlog_length(sect);
p                 314 arch/powerpc/kernel/rtas_flash.c 	char *p;
p                 348 arch/powerpc/kernel/rtas_flash.c 	p = kmem_cache_zalloc(flash_block_cache, GFP_KERNEL);
p                 349 arch/powerpc/kernel/rtas_flash.c 	if (!p)
p                 352 arch/powerpc/kernel/rtas_flash.c 	if(copy_from_user(p, buffer, count)) {
p                 353 arch/powerpc/kernel/rtas_flash.c 		kmem_cache_free(flash_block_cache, p);
p                 357 arch/powerpc/kernel/rtas_flash.c 	fl->blocks[next_free].data = p;
p                  65 arch/powerpc/kernel/security.c static int __init handle_nospectre_v1(char *p)
p                 119 arch/powerpc/kernel/security.c static int __init handle_nospectre_v2(char *p)
p                 250 arch/powerpc/kernel/security.c static int __init handle_no_stf_barrier(char *p)
p                 260 arch/powerpc/kernel/security.c static int __init handle_ssbd(char *p)
p                 262 arch/powerpc/kernel/security.c 	if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) {
p                 265 arch/powerpc/kernel/security.c 	} else if (strncmp(p, "off", 3) == 0) {
p                 276 arch/powerpc/kernel/security.c static int __init handle_no_ssbd(char *p)
p                 173 arch/powerpc/kernel/setup_64.c static int __init early_smt_enabled(char *p)
p                 175 arch/powerpc/kernel/setup_64.c 	smt_enabled_cmdline = p;
p                 864 arch/powerpc/kernel/setup_64.c static int __init handle_no_rfi_flush(char *p)
p                 876 arch/powerpc/kernel/setup_64.c static int __init handle_no_pti(char *p)
p                  99 arch/powerpc/kernel/signal_32.c #define to_user_ptr(p)		ptr_to_compat(p)
p                 100 arch/powerpc/kernel/signal_32.c #define from_user_ptr(p)	compat_ptr(p)
p                 156 arch/powerpc/kernel/signal_32.c #define to_user_ptr(p)		((unsigned long)(p))
p                 157 arch/powerpc/kernel/signal_32.c #define from_user_ptr(p)	((void __user *)(p))
p                 243 arch/powerpc/kernel/stacktrace.c 		struct paca_struct *p = paca_ptrs[cpu];
p                 248 arch/powerpc/kernel/stacktrace.c 		if (!virt_addr_valid(p)) {
p                 249 arch/powerpc/kernel/stacktrace.c 			pr_warn("paca pointer appears corrupt? (%px)\n", p);
p                 254 arch/powerpc/kernel/stacktrace.c 			p->irq_soft_mask, p->in_mce, p->in_nmi);
p                 256 arch/powerpc/kernel/stacktrace.c 		if (virt_addr_valid(p->__current))
p                 257 arch/powerpc/kernel/stacktrace.c 			pr_cont(" current: %d (%s)\n", p->__current->pid,
p                 258 arch/powerpc/kernel/stacktrace.c 				p->__current->comm);
p                 260 arch/powerpc/kernel/stacktrace.c 			pr_cont(" current pointer corrupt? (%px)\n", p->__current);
p                 262 arch/powerpc/kernel/stacktrace.c 		pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
p                 263 arch/powerpc/kernel/stacktrace.c 		show_stack(p->__current, (unsigned long *)p->saved_r1);
p                 125 arch/powerpc/kvm/book3s_64_mmu.c 	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
p                 127 arch/powerpc/kvm/book3s_64_mmu.c 	return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
p                 171 arch/powerpc/kvm/book3s_64_mmu.c 	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
p                 175 arch/powerpc/kvm/book3s_64_mmu.c 	avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);
p                 177 arch/powerpc/kvm/book3s_64_mmu.c 	if (p < 16)
p                 178 arch/powerpc/kvm/book3s_64_mmu.c 		avpn >>= ((80 - p) - 56) - 8;	/* 16 - p */
p                 180 arch/powerpc/kvm/book3s_64_mmu.c 		avpn <<= p - 16;
p                2030 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct debugfs_htab_state *p;
p                2032 arch/powerpc/kvm/book3s_64_mmu_hv.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                2033 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (!p)
p                2037 arch/powerpc/kvm/book3s_64_mmu_hv.c 	p->kvm = kvm;
p                2038 arch/powerpc/kvm/book3s_64_mmu_hv.c 	mutex_init(&p->mutex);
p                2039 arch/powerpc/kvm/book3s_64_mmu_hv.c 	file->private_data = p;
p                2046 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct debugfs_htab_state *p = file->private_data;
p                2048 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvm_put_kvm(p->kvm);
p                2049 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kfree(p);
p                2056 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct debugfs_htab_state *p = file->private_data;
p                2063 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvm = p->kvm;
p                2067 arch/powerpc/kvm/book3s_64_mmu_hv.c 	ret = mutex_lock_interruptible(&p->mutex);
p                2071 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (p->chars_left) {
p                2072 arch/powerpc/kvm/book3s_64_mmu_hv.c 		n = p->chars_left;
p                2075 arch/powerpc/kvm/book3s_64_mmu_hv.c 		r = copy_to_user(buf, p->buf + p->buf_index, n);
p                2077 arch/powerpc/kvm/book3s_64_mmu_hv.c 		p->chars_left -= n;
p                2078 arch/powerpc/kvm/book3s_64_mmu_hv.c 		p->buf_index += n;
p                2089 arch/powerpc/kvm/book3s_64_mmu_hv.c 	i = p->hpt_index;
p                2109 arch/powerpc/kvm/book3s_64_mmu_hv.c 		n = scnprintf(p->buf, sizeof(p->buf),
p                2112 arch/powerpc/kvm/book3s_64_mmu_hv.c 		p->chars_left = n;
p                2115 arch/powerpc/kvm/book3s_64_mmu_hv.c 		r = copy_to_user(buf, p->buf, n);
p                2117 arch/powerpc/kvm/book3s_64_mmu_hv.c 		p->chars_left -= n;
p                2118 arch/powerpc/kvm/book3s_64_mmu_hv.c 		p->buf_index = n;
p                2128 arch/powerpc/kvm/book3s_64_mmu_hv.c 	p->hpt_index = i;
p                2131 arch/powerpc/kvm/book3s_64_mmu_hv.c 	mutex_unlock(&p->mutex);
p                 430 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pte_t *p = pte;
p                 433 arch/powerpc/kvm/book3s_64_mmu_radix.c 		for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
p                 434 arch/powerpc/kvm/book3s_64_mmu_radix.c 			if (pte_val(*p) == 0)
p                 437 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvmppc_unmap_pte(kvm, p,
p                 438 arch/powerpc/kvm/book3s_64_mmu_radix.c 					 pte_pfn(*p) << PAGE_SHIFT,
p                 450 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pmd_t *p = pmd;
p                 452 arch/powerpc/kvm/book3s_64_mmu_radix.c 	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
p                 453 arch/powerpc/kvm/book3s_64_mmu_radix.c 		if (!pmd_present(*p))
p                 455 arch/powerpc/kvm/book3s_64_mmu_radix.c 		if (pmd_is_leaf(*p)) {
p                 457 arch/powerpc/kvm/book3s_64_mmu_radix.c 				pmd_clear(p);
p                 460 arch/powerpc/kvm/book3s_64_mmu_radix.c 				kvmppc_unmap_pte(kvm, (pte_t *)p,
p                 461 arch/powerpc/kvm/book3s_64_mmu_radix.c 					 pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
p                 467 arch/powerpc/kvm/book3s_64_mmu_radix.c 			pte = pte_offset_map(p, 0);
p                 469 arch/powerpc/kvm/book3s_64_mmu_radix.c 			pmd_clear(p);
p                 479 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pud_t *p = pud;
p                 481 arch/powerpc/kvm/book3s_64_mmu_radix.c 	for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
p                 482 arch/powerpc/kvm/book3s_64_mmu_radix.c 		if (!pud_present(*p))
p                 484 arch/powerpc/kvm/book3s_64_mmu_radix.c 		if (pud_is_leaf(*p)) {
p                 485 arch/powerpc/kvm/book3s_64_mmu_radix.c 			pud_clear(p);
p                 489 arch/powerpc/kvm/book3s_64_mmu_radix.c 			pmd = pmd_offset(p, 0);
p                 491 arch/powerpc/kvm/book3s_64_mmu_radix.c 			pud_clear(p);
p                1166 arch/powerpc/kvm/book3s_64_mmu_radix.c 	struct debugfs_radix_state *p;
p                1168 arch/powerpc/kvm/book3s_64_mmu_radix.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                1169 arch/powerpc/kvm/book3s_64_mmu_radix.c 	if (!p)
p                1173 arch/powerpc/kvm/book3s_64_mmu_radix.c 	p->kvm = kvm;
p                1174 arch/powerpc/kvm/book3s_64_mmu_radix.c 	mutex_init(&p->mutex);
p                1175 arch/powerpc/kvm/book3s_64_mmu_radix.c 	file->private_data = p;
p                1182 arch/powerpc/kvm/book3s_64_mmu_radix.c 	struct debugfs_radix_state *p = file->private_data;
p                1184 arch/powerpc/kvm/book3s_64_mmu_radix.c 	kvm_put_kvm(p->kvm);
p                1185 arch/powerpc/kvm/book3s_64_mmu_radix.c 	kfree(p);
p                1192 arch/powerpc/kvm/book3s_64_mmu_radix.c 	struct debugfs_radix_state *p = file->private_data;
p                1206 arch/powerpc/kvm/book3s_64_mmu_radix.c 	kvm = p->kvm;
p                1210 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ret = mutex_lock_interruptible(&p->mutex);
p                1214 arch/powerpc/kvm/book3s_64_mmu_radix.c 	if (p->chars_left) {
p                1215 arch/powerpc/kvm/book3s_64_mmu_radix.c 		n = p->chars_left;
p                1218 arch/powerpc/kvm/book3s_64_mmu_radix.c 		r = copy_to_user(buf, p->buf + p->buf_index, n);
p                1220 arch/powerpc/kvm/book3s_64_mmu_radix.c 		p->chars_left -= n;
p                1221 arch/powerpc/kvm/book3s_64_mmu_radix.c 		p->buf_index += n;
p                1232 arch/powerpc/kvm/book3s_64_mmu_radix.c 	gpa = p->gpa;
p                1235 arch/powerpc/kvm/book3s_64_mmu_radix.c 	while (len != 0 && p->lpid >= 0) {
p                1243 arch/powerpc/kvm/book3s_64_mmu_radix.c 			p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid);
p                1244 arch/powerpc/kvm/book3s_64_mmu_radix.c 			p->hdr = 0;
p                1245 arch/powerpc/kvm/book3s_64_mmu_radix.c 			if (p->lpid < 0)
p                1249 arch/powerpc/kvm/book3s_64_mmu_radix.c 			if (p->lpid == 0) {
p                1252 arch/powerpc/kvm/book3s_64_mmu_radix.c 				nested = kvmhv_get_nested(kvm, p->lpid, false);
p                1261 arch/powerpc/kvm/book3s_64_mmu_radix.c 		if (!p->hdr) {
p                1262 arch/powerpc/kvm/book3s_64_mmu_radix.c 			if (p->lpid > 0)
p                1263 arch/powerpc/kvm/book3s_64_mmu_radix.c 				n = scnprintf(p->buf, sizeof(p->buf),
p                1264 arch/powerpc/kvm/book3s_64_mmu_radix.c 					      "\nNested LPID %d: ", p->lpid);
p                1265 arch/powerpc/kvm/book3s_64_mmu_radix.c 			n += scnprintf(p->buf + n, sizeof(p->buf) - n,
p                1267 arch/powerpc/kvm/book3s_64_mmu_radix.c 			p->hdr = 1;
p                1310 arch/powerpc/kvm/book3s_64_mmu_radix.c 		n = scnprintf(p->buf, sizeof(p->buf),
p                1314 arch/powerpc/kvm/book3s_64_mmu_radix.c 		p->chars_left = n;
p                1317 arch/powerpc/kvm/book3s_64_mmu_radix.c 		r = copy_to_user(buf, p->buf, n);
p                1319 arch/powerpc/kvm/book3s_64_mmu_radix.c 		p->chars_left -= n;
p                1320 arch/powerpc/kvm/book3s_64_mmu_radix.c 		p->buf_index = n;
p                1330 arch/powerpc/kvm/book3s_64_mmu_radix.c 	p->gpa = gpa;
p                1335 arch/powerpc/kvm/book3s_64_mmu_radix.c 	mutex_unlock(&p->mutex);
p                 686 arch/powerpc/kvm/book3s_hv.c 	u64 p;
p                 690 arch/powerpc/kvm/book3s_hv.c 	p = vc->stolen_tb;
p                 693 arch/powerpc/kvm/book3s_hv.c 		p += now - vc->preempt_tb;
p                 695 arch/powerpc/kvm/book3s_hv.c 	return p;
p                2135 arch/powerpc/kvm/book3s_hv.c 	struct debugfs_timings_state *p;
p                2137 arch/powerpc/kvm/book3s_hv.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                2138 arch/powerpc/kvm/book3s_hv.c 	if (!p)
p                2142 arch/powerpc/kvm/book3s_hv.c 	p->vcpu = vcpu;
p                2143 arch/powerpc/kvm/book3s_hv.c 	file->private_data = p;
p                2150 arch/powerpc/kvm/book3s_hv.c 	struct debugfs_timings_state *p = file->private_data;
p                2152 arch/powerpc/kvm/book3s_hv.c 	kvm_put_kvm(p->vcpu->kvm);
p                2153 arch/powerpc/kvm/book3s_hv.c 	kfree(p);
p                2160 arch/powerpc/kvm/book3s_hv.c 	struct debugfs_timings_state *p = file->private_data;
p                2161 arch/powerpc/kvm/book3s_hv.c 	struct kvm_vcpu *vcpu = p->vcpu;
p                2170 arch/powerpc/kvm/book3s_hv.c 	if (!p->buflen) {
p                2171 arch/powerpc/kvm/book3s_hv.c 		s = p->buf;
p                2172 arch/powerpc/kvm/book3s_hv.c 		buf_end = s + sizeof(p->buf);
p                2204 arch/powerpc/kvm/book3s_hv.c 		p->buflen = s - p->buf;
p                2208 arch/powerpc/kvm/book3s_hv.c 	if (pos >= p->buflen)
p                2210 arch/powerpc/kvm/book3s_hv.c 	if (len > p->buflen - pos)
p                2211 arch/powerpc/kvm/book3s_hv.c 		len = p->buflen - pos;
p                2212 arch/powerpc/kvm/book3s_hv.c 	n = copy_to_user(buf, p->buf + pos, len);
p                4399 arch/powerpc/kvm/book3s_hv.c 	unsigned long *buf, *p;
p                4435 arch/powerpc/kvm/book3s_hv.c 	p = memslot->dirty_bitmap;
p                4437 arch/powerpc/kvm/book3s_hv.c 		buf[i] |= xchg(&p[i], 0);
p                  63 arch/powerpc/kvm/book3s_hv_builtin.c static int __init early_parse_kvm_cma_resv(char *p)
p                  65 arch/powerpc/kvm/book3s_hv_builtin.c 	pr_debug("%s(%s)\n", __func__, p);
p                  66 arch/powerpc/kvm/book3s_hv_builtin.c 	if (!p)
p                  68 arch/powerpc/kvm/book3s_hv_builtin.c 	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
p                 688 arch/powerpc/kvm/book3s_hv_builtin.c #define ALL(p)		(((p) << 24) | ((p) << 16) | ((p) << 8) | (p))
p                  29 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	pte_t *p;
p                  35 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	p = find_init_mm_pte(addr, NULL);
p                  36 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (!p || !pte_present(*p))
p                  38 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
p                1717 arch/powerpc/kvm/book3s_pr.c 	unsigned long p;
p                1740 arch/powerpc/kvm/book3s_pr.c 	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
p                1741 arch/powerpc/kvm/book3s_pr.c 	if (!p)
p                1743 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.shared = (void *)p;
p                 201 arch/powerpc/kvm/book3s_xive_template.c 				int p = atomic_xchg(&q->pending_count, 0);
p                 202 arch/powerpc/kvm/book3s_xive_template.c 				if (p) {
p                 204 arch/powerpc/kvm/book3s_xive_template.c 					WARN_ON(p > atomic_read(&q->count));
p                 206 arch/powerpc/kvm/book3s_xive_template.c 					atomic_sub(p, &q->count);
p                  12 arch/powerpc/lib/alloc.c 	void *p;
p                  15 arch/powerpc/lib/alloc.c 		p = kzalloc(size, mask);
p                  17 arch/powerpc/lib/alloc.c 		p = memblock_alloc(size, SMP_CACHE_BYTES);
p                  18 arch/powerpc/lib/alloc.c 		if (!p)
p                  22 arch/powerpc/lib/alloc.c 	return p;
p                 564 arch/powerpc/lib/code-patching.c 	unsigned int *p, *q;
p                 573 arch/powerpc/lib/code-patching.c 	p = buf;
p                 574 arch/powerpc/lib/code-patching.c 	addr = (unsigned long)p;
p                 575 arch/powerpc/lib/code-patching.c 	patch_branch(p, addr, 0);
p                 576 arch/powerpc/lib/code-patching.c 	check(instr_is_branch_to_addr(p, addr));
p                 577 arch/powerpc/lib/code-patching.c 	q = p + 1;
p                 578 arch/powerpc/lib/code-patching.c 	patch_instruction(q, translate_branch(q, p));
p                 582 arch/powerpc/lib/code-patching.c 	p = buf;
p                 583 arch/powerpc/lib/code-patching.c 	addr = (unsigned long)p;
p                 584 arch/powerpc/lib/code-patching.c 	patch_branch(p, addr, 0);
p                 586 arch/powerpc/lib/code-patching.c 	patch_instruction(q, translate_branch(q, p));
p                 587 arch/powerpc/lib/code-patching.c 	check(instr_is_branch_to_addr(p, addr));
p                 592 arch/powerpc/lib/code-patching.c 	p = buf + 0x2000000;
p                 593 arch/powerpc/lib/code-patching.c 	addr = (unsigned long)p;
p                 594 arch/powerpc/lib/code-patching.c 	patch_branch(p, addr, 0);
p                 596 arch/powerpc/lib/code-patching.c 	patch_instruction(q, translate_branch(q, p));
p                 597 arch/powerpc/lib/code-patching.c 	check(instr_is_branch_to_addr(p, addr));
p                 602 arch/powerpc/lib/code-patching.c 	p = buf;
p                 604 arch/powerpc/lib/code-patching.c 	patch_branch(p, addr, BRANCH_SET_LINK);
p                 606 arch/powerpc/lib/code-patching.c 	patch_instruction(q, translate_branch(q, p));
p                 607 arch/powerpc/lib/code-patching.c 	check(instr_is_branch_to_addr(p, addr));
p                 611 arch/powerpc/lib/code-patching.c 	p = buf + 0x1000000;
p                 613 arch/powerpc/lib/code-patching.c 	patch_branch(p, addr, 0);
p                 615 arch/powerpc/lib/code-patching.c 	patch_instruction(q, translate_branch(q, p));
p                 616 arch/powerpc/lib/code-patching.c 	check(instr_is_branch_to_addr(p, addr));
p                 623 arch/powerpc/lib/code-patching.c 	p = buf;
p                 624 arch/powerpc/lib/code-patching.c 	addr = (unsigned long)p;
p                 625 arch/powerpc/lib/code-patching.c 	patch_instruction(p, create_cond_branch(p, addr, 0));
p                 626 arch/powerpc/lib/code-patching.c 	check(instr_is_branch_to_addr(p, addr));
p                 627 arch/powerpc/lib/code-patching.c 	q = p + 1;
p                 628 arch/powerpc/lib/code-patching.c 	patch_instruction(q, translate_branch(q, p));
p                 632 arch/powerpc/lib/code-patching.c 	p = buf;
p                 633 arch/powerpc/lib/code-patching.c 	addr = (unsigned long)p;
p                 634 arch/powerpc/lib/code-patching.c 	patch_instruction(p, create_cond_branch(p, addr, 0xFFFFFFFC));
p                 636 arch/powerpc/lib/code-patching.c 	patch_instruction(q, translate_branch(q, p));
p                 637 arch/powerpc/lib/code-patching.c 	check(instr_is_branch_to_addr(p, addr));
p                 642 arch/powerpc/lib/code-patching.c 	p = buf + 0x8000;
p                 643 arch/powerpc/lib/code-patching.c 	addr = (unsigned long)p;
p                 644 arch/powerpc/lib/code-patching.c 	patch_instruction(p, create_cond_branch(p, addr, 0xFFFFFFFC));
p                 646 arch/powerpc/lib/code-patching.c 	patch_instruction(q, translate_branch(q, p));
p                 647 arch/powerpc/lib/code-patching.c 	check(instr_is_branch_to_addr(p, addr));
p                 652 arch/powerpc/lib/code-patching.c 	p = buf;
p                 654 arch/powerpc/lib/code-patching.c 	patch_instruction(p, create_cond_branch(p, addr, BRANCH_SET_LINK));
p                 656 arch/powerpc/lib/code-patching.c 	patch_instruction(q, translate_branch(q, p));
p                 657 arch/powerpc/lib/code-patching.c 	check(instr_is_branch_to_addr(p, addr));
p                 661 arch/powerpc/lib/code-patching.c 	p = buf + 0x2000;
p                 663 arch/powerpc/lib/code-patching.c 	patch_instruction(p, create_cond_branch(p, addr, 0));
p                 665 arch/powerpc/lib/code-patching.c 	patch_instruction(q, translate_branch(q, p));
p                 666 arch/powerpc/lib/code-patching.c 	check(instr_is_branch_to_addr(p, addr));
p                 481 arch/powerpc/lib/feature-fixups.c static long calc_offset(struct fixup_entry *entry, unsigned int *p)
p                 483 arch/powerpc/lib/feature-fixups.c 	return (unsigned long)p - (unsigned long)entry;
p                  37 arch/powerpc/lib/sstep.c extern void get_fpr(int rn, double *p);
p                  38 arch/powerpc/lib/sstep.c extern void put_fpr(int rn, const double *p);
p                  39 arch/powerpc/lib/sstep.c extern void get_vr(int rn, __vector128 *p);
p                  40 arch/powerpc/lib/sstep.c extern void put_vr(int rn, __vector128 *p);
p                  41 arch/powerpc/lib/sstep.c extern void load_vsrn(int vsr, const void *p);
p                  42 arch/powerpc/lib/sstep.c extern void store_vsrn(int vsr, void *p);
p                 347 arch/powerpc/mm/book3s64/hash_utils.c static int __init parse_disable_1tb_segments(char *p)
p                  82 arch/powerpc/mm/book3s64/slb.c 	struct slb_shadow *p = get_slb_shadow();
p                  89 arch/powerpc/mm/book3s64/slb.c 	WRITE_ONCE(p->save_area[index].esid, 0);
p                  90 arch/powerpc/mm/book3s64/slb.c 	WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
p                  91 arch/powerpc/mm/book3s64/slb.c 	WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
p                 123 arch/powerpc/mm/book3s64/slb.c 	struct slb_shadow *p = get_slb_shadow();
p                 129 arch/powerpc/mm/book3s64/slb.c 		     : "r" (be64_to_cpu(p->save_area[index].vsid)),
p                 130 arch/powerpc/mm/book3s64/slb.c 		       "r" (be64_to_cpu(p->save_area[index].esid)));
p                 162 arch/powerpc/mm/book3s64/slb.c 	struct slb_shadow *p = get_slb_shadow();
p                 178 arch/powerpc/mm/book3s64/slb.c 		     :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
p                 179 arch/powerpc/mm/book3s64/slb.c 			"r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
p                 456 arch/powerpc/mm/book3s64/slb.c 			struct slb_shadow *p = get_slb_shadow();
p                 458 arch/powerpc/mm/book3s64/slb.c 				be64_to_cpu(p->save_area[KSTACK_INDEX].esid);
p                 460 arch/powerpc/mm/book3s64/slb.c 				be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);
p                  26 arch/powerpc/mm/book3s64/subpage_prot.c 	u32 **p;
p                  39 arch/powerpc/mm/book3s64/subpage_prot.c 		p = spt->protptrs[i];
p                  40 arch/powerpc/mm/book3s64/subpage_prot.c 		if (!p)
p                  45 arch/powerpc/mm/book3s64/subpage_prot.c 			if (p[j])
p                  46 arch/powerpc/mm/book3s64/subpage_prot.c 				free_page((unsigned long)p[j]);
p                  47 arch/powerpc/mm/book3s64/subpage_prot.c 		free_page((unsigned long)p);
p                  67 arch/powerpc/mm/drmem.c 	u32 *p;
p                  73 arch/powerpc/mm/drmem.c 	p = new_prop->value;
p                  74 arch/powerpc/mm/drmem.c 	*p++ = cpu_to_be32(drmem_info->n_lmbs);
p                  76 arch/powerpc/mm/drmem.c 	dr_cell = (struct of_drconf_cell_v1 *)p;
p                 107 arch/powerpc/mm/drmem.c 	u32 *p;
p                 131 arch/powerpc/mm/drmem.c 	p = new_prop->value;
p                 132 arch/powerpc/mm/drmem.c 	*p++ = cpu_to_be32(lmb_sets);
p                 134 arch/powerpc/mm/drmem.c 	dr_cell = (struct of_drconf_cell_v2 *)p;
p                 195 arch/powerpc/mm/drmem.c 	const __be32 *p = *prop;
p                 197 arch/powerpc/mm/drmem.c 	lmb->base_addr = dt_mem_next_cell(dt_root_addr_cells, &p);
p                 198 arch/powerpc/mm/drmem.c 	lmb->drc_index = of_read_number(p++, 1);
p                 200 arch/powerpc/mm/drmem.c 	p++; /* skip reserved field */
p                 202 arch/powerpc/mm/drmem.c 	lmb->aa_index = of_read_number(p++, 1);
p                 203 arch/powerpc/mm/drmem.c 	lmb->flags = of_read_number(p++, 1);
p                 205 arch/powerpc/mm/drmem.c 	*prop = p;
p                 227 arch/powerpc/mm/drmem.c 	const __be32 *p = *prop;
p                 229 arch/powerpc/mm/drmem.c 	dr_cell->seq_lmbs = of_read_number(p++, 1);
p                 230 arch/powerpc/mm/drmem.c 	dr_cell->base_addr = dt_mem_next_cell(dt_root_addr_cells, &p);
p                 231 arch/powerpc/mm/drmem.c 	dr_cell->drc_index = of_read_number(p++, 1);
p                 232 arch/powerpc/mm/drmem.c 	dr_cell->aa_index = of_read_number(p++, 1);
p                 233 arch/powerpc/mm/drmem.c 	dr_cell->flags = of_read_number(p++, 1);
p                 235 arch/powerpc/mm/drmem.c 	*prop = p;
p                 375 arch/powerpc/mm/drmem.c 	const __be32 *p;
p                 384 arch/powerpc/mm/drmem.c 	p = prop;
p                 386 arch/powerpc/mm/drmem.c 		read_drconf_v2_cell(&dr_cell, &p);
p                 397 arch/powerpc/mm/drmem.c 	p = prop;
p                 400 arch/powerpc/mm/drmem.c 		read_drconf_v2_cell(&dr_cell, &p);
p                  27 arch/powerpc/mm/init-common.c static int __init parse_nosmep(char *p)
p                  35 arch/powerpc/mm/init-common.c static int __init parse_nosmap(char *p)
p                 201 arch/powerpc/mm/init_64.c 		void *p = NULL;
p                 213 arch/powerpc/mm/init_64.c 			p = altmap_alloc_block_buf(page_size, altmap);
p                 214 arch/powerpc/mm/init_64.c 			if (!p)
p                 217 arch/powerpc/mm/init_64.c 		if (!p)
p                 218 arch/powerpc/mm/init_64.c 			p = vmemmap_alloc_block_buf(page_size, node);
p                 219 arch/powerpc/mm/init_64.c 		if (!p)
p                 222 arch/powerpc/mm/init_64.c 		vmemmap_list_populate(__pa(p), start, node);
p                 225 arch/powerpc/mm/init_64.c 			 start, start + page_size, p);
p                 227 arch/powerpc/mm/init_64.c 		rc = vmemmap_create_mapping(start, page_size, __pa(p));
p                 340 arch/powerpc/mm/init_64.c static int __init parse_disable_radix(char *p)
p                 344 arch/powerpc/mm/init_64.c 	if (!p)
p                 346 arch/powerpc/mm/init_64.c 	else if (kstrtobool(p, &val))
p                  21 arch/powerpc/mm/ioremap_32.c 	phys_addr_t p, offset;
p                  30 arch/powerpc/mm/ioremap_32.c 	p = addr & PAGE_MASK;
p                  32 arch/powerpc/mm/ioremap_32.c 	size = PAGE_ALIGN(addr + size) - p;
p                  38 arch/powerpc/mm/ioremap_32.c 	if (p < 16 * 1024 * 1024)
p                  39 arch/powerpc/mm/ioremap_32.c 		p += _ISA_MEM_BASE;
p                  46 arch/powerpc/mm/ioremap_32.c 	if (slab_is_available() && p <= virt_to_phys(high_memory - 1) &&
p                  47 arch/powerpc/mm/ioremap_32.c 	    page_is_ram(__phys_to_pfn(p))) {
p                  49 arch/powerpc/mm/ioremap_32.c 			(unsigned long long)p, __builtin_return_address(0));
p                  61 arch/powerpc/mm/ioremap_32.c 	v = p_block_mapped(p);
p                  66 arch/powerpc/mm/ioremap_32.c 		return do_ioremap(p, offset, size, prot, caller);
p                  72 arch/powerpc/mm/ioremap_32.c 	err = early_ioremap_range(ioremap_bot - size, p, size, prot);
p                 516 arch/powerpc/mm/mem.c void __flush_dcache_icache(void *p)
p                 518 arch/powerpc/mm/mem.c 	unsigned long addr = (unsigned long)p;
p                  94 arch/powerpc/mm/nohash/40x.c 	phys_addr_t p;
p                  97 arch/powerpc/mm/nohash/40x.c 	p = 0;
p                 105 arch/powerpc/mm/nohash/40x.c 		unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_HWWRITE;
p                 114 arch/powerpc/mm/nohash/40x.c 		p += LARGE_PAGE_SIZE_16M;
p                 120 arch/powerpc/mm/nohash/40x.c 		unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_HWWRITE;
p                 126 arch/powerpc/mm/nohash/40x.c 		p += LARGE_PAGE_SIZE_4M;
p                  29 arch/powerpc/mm/nohash/8xx.c 	unsigned long p = PHYS_IMMR_BASE;
p                  34 arch/powerpc/mm/nohash/8xx.c 		return p + va - VIRT_IMMR_BASE;
p                  45 arch/powerpc/mm/nohash/8xx.c 	unsigned long p = PHYS_IMMR_BASE;
p                  49 arch/powerpc/mm/nohash/8xx.c 	if (pa >= p && pa < p + IMMR_SIZE)
p                  50 arch/powerpc/mm/nohash/8xx.c 		return VIRT_IMMR_BASE + pa - p;
p                  84 arch/powerpc/mm/nohash/8xx.c 	unsigned long p = PHYS_IMMR_BASE;
p                  89 arch/powerpc/mm/nohash/8xx.c 		map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
p                 241 arch/powerpc/mm/nohash/tlb.c 	struct tlb_flush_param *p = param;
p                 243 arch/powerpc/mm/nohash/tlb.c 	_tlbil_pid(p ? p->pid : 0);
p                 248 arch/powerpc/mm/nohash/tlb.c 	struct tlb_flush_param *p = param;
p                 250 arch/powerpc/mm/nohash/tlb.c 	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
p                 279 arch/powerpc/mm/nohash/tlb.c 		struct tlb_flush_param p = { .pid = pid };
p                 282 arch/powerpc/mm/nohash/tlb.c 				       do_flush_tlb_mm_ipi, &p, 1);
p                 319 arch/powerpc/mm/nohash/tlb.c 			struct tlb_flush_param p = {
p                 327 arch/powerpc/mm/nohash/tlb.c 					       do_flush_tlb_page_ipi, &p, 1);
p                  89 arch/powerpc/mm/numa.c 	char *p = cmdline;
p                 104 arch/powerpc/mm/numa.c 	if (!p)
p                 107 arch/powerpc/mm/numa.c 	mem = memparse(p, &p);
p                 120 arch/powerpc/mm/numa.c 		while (*p == ',' || *p == ' ' || *p == '\t')
p                 121 arch/powerpc/mm/numa.c 			p++;
p                 123 arch/powerpc/mm/numa.c 		cmdline = p;
p                 911 arch/powerpc/mm/numa.c static int __init early_numa(char *p)
p                 913 arch/powerpc/mm/numa.c 	if (!p)
p                 916 arch/powerpc/mm/numa.c 	if (strstr(p, "off"))
p                 919 arch/powerpc/mm/numa.c 	if (strstr(p, "debug"))
p                 922 arch/powerpc/mm/numa.c 	p = strstr(p, "fake=");
p                 923 arch/powerpc/mm/numa.c 	if (p)
p                 924 arch/powerpc/mm/numa.c 		cmdline = p + strlen("fake=");
p                 938 arch/powerpc/mm/numa.c static int __init early_topology_updates(char *p)
p                 940 arch/powerpc/mm/numa.c 	if (!p)
p                 943 arch/powerpc/mm/numa.c 	if (!strcmp(p, "on")) {
p                  90 arch/powerpc/mm/pgtable_32.c 	phys_addr_t p;
p                  95 arch/powerpc/mm/pgtable_32.c 	p = memstart_addr + s;
p                  99 arch/powerpc/mm/pgtable_32.c 		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
p                 105 arch/powerpc/mm/pgtable_32.c 		p += PAGE_SIZE;
p                  29 arch/powerpc/oprofile/backtrace.c 	void __user *p = compat_ptr(sp);
p                  31 arch/powerpc/oprofile/backtrace.c 	if (!access_ok(p, sizeof(stack_frame)))
p                  39 arch/powerpc/oprofile/backtrace.c 	if (__copy_from_user_inatomic(stack_frame, p, sizeof(stack_frame)))
p                 284 arch/powerpc/oprofile/op_model_cell.c 	struct pm_signal *p;
p                 292 arch/powerpc/oprofile/op_model_cell.c 		p = &(pm_signal[ctr]);
p                 293 arch/powerpc/oprofile/op_model_cell.c 		p->signal_group = PPU_CYCLES_GRP_NUM;
p                 294 arch/powerpc/oprofile/op_model_cell.c 		p->bus_word = 1;
p                 295 arch/powerpc/oprofile/op_model_cell.c 		p->sub_unit = 0;
p                 296 arch/powerpc/oprofile/op_model_cell.c 		p->bit = 0;
p                 309 arch/powerpc/oprofile/op_model_cell.c 	p = &(pm_signal[ctr]);
p                 311 arch/powerpc/oprofile/op_model_cell.c 	p->signal_group = event / 100;
p                 312 arch/powerpc/oprofile/op_model_cell.c 	p->bus_word = bus_word;
p                 313 arch/powerpc/oprofile/op_model_cell.c 	p->sub_unit = GET_SUB_UNIT(unit_mask);
p                 338 arch/powerpc/oprofile/op_model_cell.c 		if ((bus_type == 0) && p->signal_group >= 60)
p                 340 arch/powerpc/oprofile/op_model_cell.c 		if ((bus_type == 1) && p->signal_group >= 50)
p                 346 arch/powerpc/oprofile/op_model_cell.c 		p->bit = signal_bit;
p                  16 arch/powerpc/perf/hv-common.c 	struct p arg = {
p                  43 arch/powerpc/platforms/44x/idle.c static int __init idle_param(char *p)
p                  46 arch/powerpc/platforms/44x/idle.c 	if (!strcmp("spin", p)) {
p                 147 arch/powerpc/platforms/4xx/cpm.c 	char *p;
p                 150 arch/powerpc/platforms/4xx/cpm.c 	p = memchr(buf, '\n', n);
p                 151 arch/powerpc/platforms/4xx/cpm.c 	len = p ? p - buf : n;
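
The 4xx/cpm.c entries trim the optional trailing newline a sysfs write carries before parsing the buffer. The same trick as a standalone helper:

#include <stddef.h>
#include <string.h>

/* Return the length of buf up to (not including) the first '\n', or n
 * when no newline is present, so callers can ignore the newline a
 * sysfs store usually appends. */
static size_t store_len(const char *buf, size_t n)
{
	const char *p = memchr(buf, '\n', n);

	return p ? (size_t)(p - buf) : n;
}
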
p                 356 arch/powerpc/platforms/52xx/mpc52xx_gpt.c mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *p, struct device_node *np) { }
p                2363 arch/powerpc/platforms/cell/spufs/file.c 	struct switch_log_entry *p;
p                2365 arch/powerpc/platforms/cell/spufs/file.c 	p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;
p                2368 arch/powerpc/platforms/cell/spufs/file.c 			(unsigned long long) p->tstamp.tv_sec,
p                2369 arch/powerpc/platforms/cell/spufs/file.c 			(unsigned int) p->tstamp.tv_nsec,
p                2370 arch/powerpc/platforms/cell/spufs/file.c 			p->spu_id,
p                2371 arch/powerpc/platforms/cell/spufs/file.c 			(unsigned int) p->type,
p                2372 arch/powerpc/platforms/cell/spufs/file.c 			(unsigned int) p->val,
p                2373 arch/powerpc/platforms/cell/spufs/file.c 			(unsigned long long) p->timebase);
p                2488 arch/powerpc/platforms/cell/spufs/file.c 		struct switch_log_entry *p;
p                2490 arch/powerpc/platforms/cell/spufs/file.c 		p = ctx->switch_log->log + ctx->switch_log->head;
p                2491 arch/powerpc/platforms/cell/spufs/file.c 		ktime_get_ts64(&p->tstamp);
p                2492 arch/powerpc/platforms/cell/spufs/file.c 		p->timebase = get_tb();
p                2493 arch/powerpc/platforms/cell/spufs/file.c 		p->spu_id = spu ? spu->number : -1;
p                2494 arch/powerpc/platforms/cell/spufs/file.c 		p->type = type;
p                2495 arch/powerpc/platforms/cell/spufs/file.c 		p->val = val;
p                  68 arch/powerpc/platforms/cell/spufs/inode.c spufs_init_once(void *p)
p                  70 arch/powerpc/platforms/cell/spufs/inode.c 	struct spufs_inode_info *ei = p;
p                  24 arch/powerpc/platforms/cell/spufs/lscsa_alloc.c 	unsigned char *p;
p                  32 arch/powerpc/platforms/cell/spufs/lscsa_alloc.c 	for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
p                  33 arch/powerpc/platforms/cell/spufs/lscsa_alloc.c 		SetPageReserved(vmalloc_to_page(p));
p                  41 arch/powerpc/platforms/cell/spufs/lscsa_alloc.c 	unsigned char *p;
p                  46 arch/powerpc/platforms/cell/spufs/lscsa_alloc.c 	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
p                  47 arch/powerpc/platforms/cell/spufs/lscsa_alloc.c 		ClearPageReserved(vmalloc_to_page(p));
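
The lscsa_alloc.c entries walk a vmalloc'ed area page by page to set and later clear PG_reserved. A sketch of that walk, assuming a page-aligned buffer and size:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Toggle PG_reserved on every page backing a vmalloc'ed buffer, the
 * same loop shape as the lscsa_alloc.c entries above. */
static void set_area_reserved(void *buf, size_t size, bool reserve)
{
	unsigned char *p;

	for (p = buf; p < (unsigned char *)buf + size; p += PAGE_SIZE) {
		if (reserve)
			SetPageReserved(vmalloc_to_page(p));
		else
			ClearPageReserved(vmalloc_to_page(p));
	}
}
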
p                 277 arch/powerpc/platforms/chrp/pci.c 			void __iomem *p = ioremap(GG2_PCI_CONFIG_BASE, 0x80000);
p                 279 arch/powerpc/platforms/chrp/pci.c 			hose->cfg_data = p;
p                 280 arch/powerpc/platforms/chrp/pci.c 			gg2_pci_config_base = p;
p                  59 arch/powerpc/platforms/embedded6xx/wii.c 	struct memblock_region *p = memblock.memory.regions;
p                  62 arch/powerpc/platforms/embedded6xx/wii.c 	BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base));
p                 468 arch/powerpc/platforms/pasemi/dma_lib.c static void *map_onedev(struct pci_dev *p, int index)
p                 473 arch/powerpc/platforms/pasemi/dma_lib.c 	dn = pci_device_to_OF_node(p);
p                 487 arch/powerpc/platforms/pasemi/dma_lib.c 	return ioremap(0xe0000000 + (p->devfn << 12), 0x2000);
p                  80 arch/powerpc/platforms/pasemi/idle.c static int __init idle_param(char *p)
p                  84 arch/powerpc/platforms/pasemi/idle.c 		if (!strcmp(modes[i].name, p)) {
p                  46 arch/powerpc/platforms/powermac/bootx_init.c 	const char *p, *q, *s;
p                  51 arch/powerpc/platforms/powermac/bootx_init.c 	for (p = format; *p != 0; p = q) {
p                  52 arch/powerpc/platforms/powermac/bootx_init.c 		for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
p                  54 arch/powerpc/platforms/powermac/bootx_init.c 		if (q > p)
p                  55 arch/powerpc/platforms/powermac/bootx_init.c 			btext_drawtext(p, q - p);
p                 280 arch/powerpc/platforms/powermac/bootx_init.c 	char *namep, *p, *ep, *lp;
p                 299 arch/powerpc/platforms/powermac/bootx_init.c 	for (lp = p = namep, ep = namep + l; p < ep; p++) {
p                 300 arch/powerpc/platforms/powermac/bootx_init.c 		if (*p == '/')
p                 302 arch/powerpc/platforms/powermac/bootx_init.c 		else if (*p != 0)
p                 303 arch/powerpc/platforms/powermac/bootx_init.c 			*lp++ = *p;
p                 957 arch/powerpc/platforms/powermac/low_i2c.c 	struct device_node *p = of_node_get(node);
p                 961 arch/powerpc/platforms/powermac/low_i2c.c 	while(p) {
p                 963 arch/powerpc/platforms/powermac/low_i2c.c 			if (p == bus->busnode) {
p                 973 arch/powerpc/platforms/powermac/low_i2c.c 				of_node_put(p);
p                 979 arch/powerpc/platforms/powermac/low_i2c.c 		prev = p;
p                 980 arch/powerpc/platforms/powermac/low_i2c.c 		p = of_get_parent(p);
p                1198 arch/powerpc/platforms/powermac/low_i2c.c 			struct whitelist_ent *p;
p                1203 arch/powerpc/platforms/powermac/low_i2c.c 			for (p = whitelist; p->name != NULL; p++) {
p                1204 arch/powerpc/platforms/powermac/low_i2c.c 				if (!of_node_name_eq(np, p->name))
p                1206 arch/powerpc/platforms/powermac/low_i2c.c 				if (p->compatible &&
p                1207 arch/powerpc/platforms/powermac/low_i2c.c 				    !of_device_is_compatible(np, p->compatible))
p                1209 arch/powerpc/platforms/powermac/low_i2c.c 				if (p->quirks & pmac_i2c_quirk_skip)
p                1211 arch/powerpc/platforms/powermac/low_i2c.c 				callback(np, p->quirks);
p                1376 arch/powerpc/platforms/powermac/low_i2c.c 	if (!args || !args->count || !args->u[0].p)
p                1386 arch/powerpc/platforms/powermac/low_i2c.c 	*args->u[0].p = match;
p                  72 arch/powerpc/platforms/powermac/pfunc_base.c 	if (args == NULL || args->count == 0 || args->u[0].p == NULL)
p                  76 arch/powerpc/platforms/powermac/pfunc_base.c 	*args->u[0].p = ((value & mask) >> rshift) ^ xor;
p                 160 arch/powerpc/platforms/powermac/pfunc_base.c 	if (args == NULL || args->count == 0 || args->u[0].p == NULL)
p                 163 arch/powerpc/platforms/powermac/pfunc_base.c 	*args->u[0].p = MACIO_IN32(offset);
p                 183 arch/powerpc/platforms/powermac/pfunc_base.c 	if (args == NULL || args->count == 0 || args->u[0].p == NULL)
p                 186 arch/powerpc/platforms/powermac/pfunc_base.c 	*((u8 *)(args->u[0].p)) = MACIO_IN8(offset);
p                 196 arch/powerpc/platforms/powermac/pfunc_base.c 	if (args == NULL || args->count == 0 || args->u[0].p == NULL)
p                 199 arch/powerpc/platforms/powermac/pfunc_base.c 	*args->u[0].p = ((MACIO_IN32(offset) & mask) >> shift) ^ xor;
p                 209 arch/powerpc/platforms/powermac/pfunc_base.c 	if (args == NULL || args->count == 0 || args->u[0].p == NULL)
p                 212 arch/powerpc/platforms/powermac/pfunc_base.c 	*((u8 *)(args->u[0].p)) = ((MACIO_IN8(offset) & mask) >> shift) ^ xor;
p                 124 arch/powerpc/platforms/powermac/pfunc_core.c #define PMF_PARSE_CALL(name, cmd, handlers, p...) \
p                 132 arch/powerpc/platforms/powermac/pfunc_core.c 					      cmd->args, p);	      \
p                 359 arch/powerpc/platforms/powermac/setup.c 	char *p;
p                 366 arch/powerpc/platforms/powermac/setup.c 	p = strstr(boot_command_line, "root=");
p                 367 arch/powerpc/platforms/powermac/setup.c 	if (p != NULL && (p == boot_command_line || p[-1] == ' '))
p                 569 arch/powerpc/platforms/powermac/smp.c 	struct device_node *p;
p                 576 arch/powerpc/platforms/powermac/smp.c 		p = of_get_parent(cc);
p                 577 arch/powerpc/platforms/powermac/smp.c 		ok = p && of_device_is_compatible(p, "uni-n-i2c");
p                 578 arch/powerpc/platforms/powermac/smp.c 		of_node_put(p);
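
Both the low_i2c.c and powermac/smp.c entries climb the device-tree parent chain with of_get_parent(), test compatibility, and drop each reference with of_node_put(). A sketch using a hypothetical compatible string:

#include <linux/of.h>

/* Return true if any ancestor of np is compatible with "vendor,bus"
 * (a made-up compatible string), releasing each node reference as the
 * walk moves up the tree. */
static bool has_compatible_ancestor(struct device_node *np)
{
	struct device_node *p = of_get_parent(np);

	while (p) {
		struct device_node *parent;

		if (of_device_is_compatible(p, "vendor,bus")) {
			of_node_put(p);
			return true;
		}
		parent = of_get_parent(p);
		of_node_put(p);
		p = parent;
	}
	return false;
}
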
p                1334 arch/powerpc/platforms/powernv/idle.c 		struct paca_struct *p = paca_ptrs[cpu];
p                1336 arch/powerpc/platforms/powernv/idle.c 		p->idle_state = 0;
p                1338 arch/powerpc/platforms/powernv/idle.c 			p->idle_state = (1 << threads_per_core) - 1;
p                1342 arch/powerpc/platforms/powernv/idle.c 			p->thread_idle_state = PNV_THREAD_RUNNING;
p                1346 arch/powerpc/platforms/powernv/idle.c 			p->requested_psscr = 0;
p                1347 arch/powerpc/platforms/powernv/idle.c 			atomic_set(&p->dont_stop, 0);
p                 116 arch/powerpc/platforms/powernv/opal-lpc.c static void opal_lpc_insb(unsigned long p, void *b, unsigned long c)
p                 121 arch/powerpc/platforms/powernv/opal-lpc.c 		*(ptr++) = opal_lpc_inb(p);
p                 124 arch/powerpc/platforms/powernv/opal-lpc.c static void opal_lpc_insw(unsigned long p, void *b, unsigned long c)
p                 129 arch/powerpc/platforms/powernv/opal-lpc.c 		*(ptr++) = __opal_lpc_inw(p);
p                 132 arch/powerpc/platforms/powernv/opal-lpc.c static void opal_lpc_insl(unsigned long p, void *b, unsigned long c)
p                 137 arch/powerpc/platforms/powernv/opal-lpc.c 		*(ptr++) = __opal_lpc_inl(p);
p                 140 arch/powerpc/platforms/powernv/opal-lpc.c static void opal_lpc_outsb(unsigned long p, const void *b, unsigned long c)
p                 145 arch/powerpc/platforms/powernv/opal-lpc.c 		opal_lpc_outb(*(ptr++), p);
p                 148 arch/powerpc/platforms/powernv/opal-lpc.c static void opal_lpc_outsw(unsigned long p, const void *b, unsigned long c)
p                 153 arch/powerpc/platforms/powernv/opal-lpc.c 		__opal_lpc_outw(*(ptr++), p);
p                 156 arch/powerpc/platforms/powernv/opal-lpc.c static void opal_lpc_outsl(unsigned long p, const void *b, unsigned long c)
p                 161 arch/powerpc/platforms/powernv/opal-lpc.c 		__opal_lpc_outl(*(ptr++), p);
p                 120 arch/powerpc/platforms/ps3/device-init.c 	} *p;
p                 127 arch/powerpc/platforms/ps3/device-init.c 	p = kzalloc(sizeof(struct layout), GFP_KERNEL);
p                 129 arch/powerpc/platforms/ps3/device-init.c 	if (!p) {
p                 134 arch/powerpc/platforms/ps3/device-init.c 	p->dev.match_id = PS3_MATCH_ID_GELIC;
p                 135 arch/powerpc/platforms/ps3/device-init.c 	p->dev.dev_type = PS3_DEVICE_TYPE_SB;
p                 136 arch/powerpc/platforms/ps3/device-init.c 	p->dev.bus_id = repo->bus_id;
p                 137 arch/powerpc/platforms/ps3/device-init.c 	p->dev.dev_id = repo->dev_id;
p                 138 arch/powerpc/platforms/ps3/device-init.c 	p->dev.d_region = &p->d_region;
p                 141 arch/powerpc/platforms/ps3/device-init.c 		PS3_INTERRUPT_TYPE_EVENT_PORT, &p->dev.interrupt_id);
p                 149 arch/powerpc/platforms/ps3/device-init.c 	BUG_ON(p->dev.interrupt_id != 0);
p                 151 arch/powerpc/platforms/ps3/device-init.c 	result = ps3_dma_region_init(&p->dev, p->dev.d_region, PS3_DMA_64K,
p                 160 arch/powerpc/platforms/ps3/device-init.c 	result = ps3_system_bus_device_register(&p->dev);
p                 174 arch/powerpc/platforms/ps3/device-init.c 	kfree(p);
p                 189 arch/powerpc/platforms/ps3/device-init.c 	} *p;
p                 198 arch/powerpc/platforms/ps3/device-init.c 	p = kzalloc(sizeof(struct layout), GFP_KERNEL);
p                 200 arch/powerpc/platforms/ps3/device-init.c 	if (!p) {
p                 205 arch/powerpc/platforms/ps3/device-init.c 	p->dev.match_id = match_id;
p                 206 arch/powerpc/platforms/ps3/device-init.c 	p->dev.dev_type = PS3_DEVICE_TYPE_SB;
p                 207 arch/powerpc/platforms/ps3/device-init.c 	p->dev.bus_id = repo->bus_id;
p                 208 arch/powerpc/platforms/ps3/device-init.c 	p->dev.dev_id = repo->dev_id;
p                 209 arch/powerpc/platforms/ps3/device-init.c 	p->dev.d_region = &p->d_region;
p                 210 arch/powerpc/platforms/ps3/device-init.c 	p->dev.m_region = &p->m_region;
p                 213 arch/powerpc/platforms/ps3/device-init.c 		interrupt_type, &p->dev.interrupt_id);
p                 230 arch/powerpc/platforms/ps3/device-init.c 	result = ps3_dma_region_init(&p->dev, p->dev.d_region, PS3_DMA_64K,
p                 239 arch/powerpc/platforms/ps3/device-init.c 	result = ps3_mmio_region_init(&p->dev, p->dev.m_region, bus_addr, len,
p                 248 arch/powerpc/platforms/ps3/device-init.c 	result = ps3_system_bus_device_register(&p->dev);
p                 264 arch/powerpc/platforms/ps3/device-init.c 	kfree(p);
p                 290 arch/powerpc/platforms/ps3/device-init.c 	} *p;
p                 295 arch/powerpc/platforms/ps3/device-init.c 	p = kzalloc(sizeof(struct layout), GFP_KERNEL);
p                 297 arch/powerpc/platforms/ps3/device-init.c 	if (!p)
p                 300 arch/powerpc/platforms/ps3/device-init.c 	p->dev.match_id = match_id;
p                 301 arch/powerpc/platforms/ps3/device-init.c 	p->dev.dev_type = PS3_DEVICE_TYPE_VUART;
p                 302 arch/powerpc/platforms/ps3/device-init.c 	p->dev.port_number = port_number;
p                 304 arch/powerpc/platforms/ps3/device-init.c 	result = ps3_system_bus_device_register(&p->dev);
p                 315 arch/powerpc/platforms/ps3/device-init.c 	kfree(p);
p                 324 arch/powerpc/platforms/ps3/device-init.c 	struct ps3_storage_device *p;
p                 345 arch/powerpc/platforms/ps3/device-init.c 	p = kzalloc(struct_size(p, regions, num_regions), GFP_KERNEL);
p                 346 arch/powerpc/platforms/ps3/device-init.c 	if (!p) {
p                 351 arch/powerpc/platforms/ps3/device-init.c 	p->sbd.match_id = match_id;
p                 352 arch/powerpc/platforms/ps3/device-init.c 	p->sbd.dev_type = PS3_DEVICE_TYPE_SB;
p                 353 arch/powerpc/platforms/ps3/device-init.c 	p->sbd.bus_id = repo->bus_id;
p                 354 arch/powerpc/platforms/ps3/device-init.c 	p->sbd.dev_id = repo->dev_id;
p                 355 arch/powerpc/platforms/ps3/device-init.c 	p->sbd.d_region = &p->dma_region;
p                 356 arch/powerpc/platforms/ps3/device-init.c 	p->blk_size = blk_size;
p                 357 arch/powerpc/platforms/ps3/device-init.c 	p->num_regions = num_regions;
p                 361 arch/powerpc/platforms/ps3/device-init.c 					       &p->sbd.interrupt_id);
p                 387 arch/powerpc/platforms/ps3/device-init.c 		p->regions[i].id = id;
p                 388 arch/powerpc/platforms/ps3/device-init.c 		p->regions[i].start = start;
p                 389 arch/powerpc/platforms/ps3/device-init.c 		p->regions[i].size = size;
p                 392 arch/powerpc/platforms/ps3/device-init.c 	result = ps3_system_bus_device_register(&p->sbd);
p                 405 arch/powerpc/platforms/ps3/device-init.c 	kfree(p);
p                 444 arch/powerpc/platforms/ps3/device-init.c 	} *p;
p                 448 arch/powerpc/platforms/ps3/device-init.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 449 arch/powerpc/platforms/ps3/device-init.c 	if (!p)
p                 452 arch/powerpc/platforms/ps3/device-init.c 	p->dev.match_id = PS3_MATCH_ID_SOUND;
p                 453 arch/powerpc/platforms/ps3/device-init.c 	p->dev.dev_type = PS3_DEVICE_TYPE_IOC0;
p                 454 arch/powerpc/platforms/ps3/device-init.c 	p->dev.d_region = &p->d_region;
p                 455 arch/powerpc/platforms/ps3/device-init.c 	p->dev.m_region = &p->m_region;
p                 457 arch/powerpc/platforms/ps3/device-init.c 	result = ps3_system_bus_device_register(&p->dev);
p                 468 arch/powerpc/platforms/ps3/device-init.c 	kfree(p);
p                 478 arch/powerpc/platforms/ps3/device-init.c 	} *p;
p                 482 arch/powerpc/platforms/ps3/device-init.c 	p = kzalloc(sizeof(struct layout), GFP_KERNEL);
p                 484 arch/powerpc/platforms/ps3/device-init.c 	if (!p)
p                 487 arch/powerpc/platforms/ps3/device-init.c 	p->dev.match_id = PS3_MATCH_ID_GPU;
p                 488 arch/powerpc/platforms/ps3/device-init.c 	p->dev.match_sub_id = PS3_MATCH_SUB_ID_GPU_FB;
p                 489 arch/powerpc/platforms/ps3/device-init.c 	p->dev.dev_type = PS3_DEVICE_TYPE_IOC0;
p                 491 arch/powerpc/platforms/ps3/device-init.c 	result = ps3_system_bus_device_register(&p->dev);
p                 503 arch/powerpc/platforms/ps3/device-init.c 	kfree(p);
p                 513 arch/powerpc/platforms/ps3/device-init.c 	} *p;
p                 517 arch/powerpc/platforms/ps3/device-init.c 	p = kzalloc(sizeof(struct layout), GFP_KERNEL);
p                 519 arch/powerpc/platforms/ps3/device-init.c 	if (!p)
p                 522 arch/powerpc/platforms/ps3/device-init.c 	p->dev.match_id = PS3_MATCH_ID_GPU;
p                 523 arch/powerpc/platforms/ps3/device-init.c 	p->dev.match_sub_id = PS3_MATCH_SUB_ID_GPU_RAMDISK;
p                 524 arch/powerpc/platforms/ps3/device-init.c 	p->dev.dev_type = PS3_DEVICE_TYPE_IOC0;
p                 526 arch/powerpc/platforms/ps3/device-init.c 	result = ps3_system_bus_device_register(&p->dev);
p                 538 arch/powerpc/platforms/ps3/device-init.c 	kfree(p);
p                 194 arch/powerpc/platforms/ps3/gelic_udbg.c 	u16 *p;
p                 205 arch/powerpc/platforms/ps3/gelic_udbg.c 	p = (u16 *)h_ip;
p                 207 arch/powerpc/platforms/ps3/gelic_udbg.c 		sum += *p++;
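
The gelic_udbg.c entries accumulate the IP header as 16-bit words; the carry fold and final complement are not shown in the index but complete the classic checksum. A standalone sketch, assuming an even length:

#include <stddef.h>
#include <stdint.h>

/* One's-complement sum of len bytes (len assumed even), folded to 16
 * bits: the usual IPv4 header checksum the gelic_udbg.c lines start
 * to compute. */
static uint16_t ip_checksum(const void *data, size_t len)
{
	const uint16_t *p = data;
	uint32_t sum = 0;

	while (len > 1) {
		sum += *p++;
		len -= 2;
	}
	while (sum >> 16)		/* fold carries back into the low word */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;
}
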
p                 617 arch/powerpc/platforms/ps3/interrupt.c static void _dump_64_bmp(const char *header, const u64 *p, unsigned cpu,
p                 622 arch/powerpc/platforms/ps3/interrupt.c 		*p >> 48, (*p >> 32) & 0xffff, (*p >> 16) & 0xffff,
p                 623 arch/powerpc/platforms/ps3/interrupt.c 		*p & 0xffff);
p                 627 arch/powerpc/platforms/ps3/interrupt.c 	const u64 *p, unsigned cpu, const char* func, int line)
p                 630 arch/powerpc/platforms/ps3/interrupt.c 		func, line, header, cpu, p[0], p[1], p[2], p[3]);
p                 327 arch/powerpc/platforms/ps3/os-area.c static void _dump_params(const struct os_area_params *p, const char *func,
p                 330 arch/powerpc/platforms/ps3/os-area.c 	pr_debug("%s:%d: p.boot_flag:       %u\n", func, line, p->boot_flag);
p                 331 arch/powerpc/platforms/ps3/os-area.c 	pr_debug("%s:%d: p.num_params:      %u\n", func, line, p->num_params);
p                 332 arch/powerpc/platforms/ps3/os-area.c 	pr_debug("%s:%d: p.rtc_diff         %lld\n", func, line, p->rtc_diff);
p                 333 arch/powerpc/platforms/ps3/os-area.c 	pr_debug("%s:%d: p.av_multi_out     %u\n", func, line, p->av_multi_out);
p                 334 arch/powerpc/platforms/ps3/os-area.c 	pr_debug("%s:%d: p.ctrl_button:     %u\n", func, line, p->ctrl_button);
p                 336 arch/powerpc/platforms/ps3/os-area.c 		p->static_ip_addr[0], p->static_ip_addr[1],
p                 337 arch/powerpc/platforms/ps3/os-area.c 		p->static_ip_addr[2], p->static_ip_addr[3]);
p                 339 arch/powerpc/platforms/ps3/os-area.c 		p->network_mask[0], p->network_mask[1],
p                 340 arch/powerpc/platforms/ps3/os-area.c 		p->network_mask[2], p->network_mask[3]);
p                 342 arch/powerpc/platforms/ps3/os-area.c 		p->default_gateway[0], p->default_gateway[1],
p                 343 arch/powerpc/platforms/ps3/os-area.c 		p->default_gateway[2], p->default_gateway[3]);
p                 345 arch/powerpc/platforms/ps3/os-area.c 		p->dns_primary[0], p->dns_primary[1],
p                 346 arch/powerpc/platforms/ps3/os-area.c 		p->dns_primary[2], p->dns_primary[3]);
p                 348 arch/powerpc/platforms/ps3/os-area.c 		p->dns_secondary[0], p->dns_secondary[1],
p                 349 arch/powerpc/platforms/ps3/os-area.c 		p->dns_secondary[2], p->dns_secondary[3]);
p                 112 arch/powerpc/platforms/ps3/setup.c static void __init prealloc(struct ps3_prealloc *p)
p                 114 arch/powerpc/platforms/ps3/setup.c 	if (!p->size)
p                 117 arch/powerpc/platforms/ps3/setup.c 	p->address = memblock_alloc(p->size, p->align);
p                 118 arch/powerpc/platforms/ps3/setup.c 	if (!p->address)
p                 120 arch/powerpc/platforms/ps3/setup.c 		      __func__, p->size, p->align);
p                 122 arch/powerpc/platforms/ps3/setup.c 	printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size,
p                 123 arch/powerpc/platforms/ps3/setup.c 	       p->address);
p                 136 arch/powerpc/platforms/ps3/setup.c static int __init early_parse_ps3fb(char *p)
p                 138 arch/powerpc/platforms/ps3/setup.c 	if (!p)
p                 141 arch/powerpc/platforms/ps3/setup.c 	ps3fb_videomemory.size = _ALIGN_UP(memparse(p, &p),
p                 159 arch/powerpc/platforms/ps3/setup.c static int __init early_parse_ps3flash(char *p)
p                 161 arch/powerpc/platforms/ps3/setup.c 	if (!p)
p                 164 arch/powerpc/platforms/ps3/setup.c 	if (!strcmp(p, "off"))
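
The ps3/setup.c prealloc() entries reserve an early boot buffer with memblock_alloc() and report the result. A sketch of that early-allocation pattern; the panic on failure is an assumption made for this sketch, not something the index lines confirm:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>

/* Grab a zeroed, aligned buffer from memblock before the page
 * allocator is available; size and align are whatever the caller
 * needs. */
static void * __init early_buffer_alloc(unsigned long size, unsigned long align)
{
	void *addr = memblock_alloc(size, align);

	if (!addr)
		panic("%s: failed to allocate %lu bytes\n", __func__, size);

	pr_info("early buffer: %lu bytes at %p\n", size, addr);
	return addr;
}
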
p                 981 arch/powerpc/platforms/pseries/hotplug-memory.c 	__be32 *p;
p                 994 arch/powerpc/platforms/pseries/hotplug-memory.c 	p = (__be32 *) pr->old_prop->value;
p                 995 arch/powerpc/platforms/pseries/hotplug-memory.c 	if (!p)
p                1003 arch/powerpc/platforms/pseries/hotplug-memory.c 	entries = be32_to_cpu(*p++);
p                1004 arch/powerpc/platforms/pseries/hotplug-memory.c 	old_drmem = (struct of_drconf_cell_v1 *)p;
p                1006 arch/powerpc/platforms/pseries/hotplug-memory.c 	p = (__be32 *)pr->prop->value;
p                1007 arch/powerpc/platforms/pseries/hotplug-memory.c 	p++;
p                1008 arch/powerpc/platforms/pseries/hotplug-memory.c 	new_drmem = (struct of_drconf_cell_v1 *)p;
p                  42 arch/powerpc/platforms/pseries/hvCall_inst.c static void *hc_next(struct seq_file *m, void *p, loff_t * pos)
p                  49 arch/powerpc/platforms/pseries/hvCall_inst.c static void hc_stop(struct seq_file *m, void *p)
p                  53 arch/powerpc/platforms/pseries/hvCall_inst.c static int hc_show(struct seq_file *m, void *p)
p                  55 arch/powerpc/platforms/pseries/hvCall_inst.c 	unsigned long h_num = (unsigned long)p;
p                 496 arch/powerpc/platforms/pseries/lpar.c static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
p                 507 arch/powerpc/platforms/pseries/lpar.c 	if (copy_from_user(buf, p, count))
p                 553 arch/powerpc/platforms/pseries/lpar.c static int vcpudispatch_stats_display(struct seq_file *p, void *v)
p                 559 arch/powerpc/platforms/pseries/lpar.c 		seq_puts(p, "off\n");
p                 565 arch/powerpc/platforms/pseries/lpar.c 		seq_printf(p, "cpu%d", cpu);
p                 566 arch/powerpc/platforms/pseries/lpar.c 		seq_put_decimal_ull(p, " ", disp->total_disp);
p                 567 arch/powerpc/platforms/pseries/lpar.c 		seq_put_decimal_ull(p, " ", disp->same_cpu_disp);
p                 568 arch/powerpc/platforms/pseries/lpar.c 		seq_put_decimal_ull(p, " ", disp->same_chip_disp);
p                 569 arch/powerpc/platforms/pseries/lpar.c 		seq_put_decimal_ull(p, " ", disp->diff_chip_disp);
p                 570 arch/powerpc/platforms/pseries/lpar.c 		seq_put_decimal_ull(p, " ", disp->far_chip_disp);
p                 571 arch/powerpc/platforms/pseries/lpar.c 		seq_put_decimal_ull(p, " ", disp->numa_home_disp);
p                 572 arch/powerpc/platforms/pseries/lpar.c 		seq_put_decimal_ull(p, " ", disp->numa_remote_disp);
p                 573 arch/powerpc/platforms/pseries/lpar.c 		seq_put_decimal_ull(p, " ", disp->numa_far_disp);
p                 574 arch/powerpc/platforms/pseries/lpar.c 		seq_puts(p, "\n");
p                 594 arch/powerpc/platforms/pseries/lpar.c 		const char __user *p, size_t count, loff_t *ppos)
p                 602 arch/powerpc/platforms/pseries/lpar.c 	if (copy_from_user(buf, p, count))
p                 618 arch/powerpc/platforms/pseries/lpar.c static int vcpudispatch_stats_freq_display(struct seq_file *p, void *v)
p                 620 arch/powerpc/platforms/pseries/lpar.c 	seq_printf(p, "%d\n", vcpudispatch_stats_freq);
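
The vcpudispatch_stats_display() entries are a typical seq_file show routine. A minimal sketch with a hypothetical per-CPU counter:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/seq_file.h>

static DEFINE_PER_CPU(u64, example_disp_count);	/* hypothetical counter */

/* One line per online CPU, decimal counters appended with
 * seq_put_decimal_ull(), in the style of the lpar.c entries above. */
static int example_stats_show(struct seq_file *p, void *v)
{
	int cpu;

	for_each_online_cpu(cpu) {
		seq_printf(p, "cpu%d", cpu);
		seq_put_decimal_ull(p, " ", per_cpu(example_disp_count, cpu));
		seq_puts(p, "\n");
	}
	return 0;
}
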
p                 130 arch/powerpc/platforms/pseries/msi.c 	const __be32 *p;
p                 135 arch/powerpc/platforms/pseries/msi.c 	p = of_get_property(dn, prop_name, NULL);
p                 136 arch/powerpc/platforms/pseries/msi.c 	if (!p) {
p                 141 arch/powerpc/platforms/pseries/msi.c 	req_msi = be32_to_cpup(p);
p                 169 arch/powerpc/platforms/pseries/msi.c 	const __be32 *p;
p                 173 arch/powerpc/platforms/pseries/msi.c 		p = of_get_property(dn, "ibm,pe-total-#msi", NULL);
p                 174 arch/powerpc/platforms/pseries/msi.c 		if (p) {
p                 177 arch/powerpc/platforms/pseries/msi.c 			*total = be32_to_cpup(p);
p                 231 arch/powerpc/platforms/pseries/msi.c 	const __be32 *p;
p                 236 arch/powerpc/platforms/pseries/msi.c 	p = of_get_property(dn, "class-code", NULL);
p                 237 arch/powerpc/platforms/pseries/msi.c 	class = p ? be32_to_cpup(p) : 0;
p                 248 arch/powerpc/platforms/pseries/msi.c 	const __be32 *p;
p                 257 arch/powerpc/platforms/pseries/msi.c 		p = of_get_property(dn, "ibm,req#msi", NULL);
p                 258 arch/powerpc/platforms/pseries/msi.c 		if (p)
p                 259 arch/powerpc/platforms/pseries/msi.c 			req = be32_to_cpup(p);
p                 261 arch/powerpc/platforms/pseries/msi.c 		p = of_get_property(dn, "ibm,req#msi-x", NULL);
p                 262 arch/powerpc/platforms/pseries/msi.c 		if (p)
p                 263 arch/powerpc/platforms/pseries/msi.c 			req = max(req, (int)be32_to_cpup(p));
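
The pseries/msi.c entries read optional big-endian u32 properties with of_get_property() and be32_to_cpup(). A sketch with a hypothetical property name and default:

#include <linux/of.h>

/* Read an optional big-endian u32 property ("vendor,max-msi" is a
 * made-up name) and fall back to a default when it is absent. */
static u32 read_max_msi(struct device_node *dn)
{
	const __be32 *p = of_get_property(dn, "vendor,max-msi", NULL);

	return p ? be32_to_cpup(p) : 1;
}
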
p                  43 arch/powerpc/platforms/pseries/nvram.c 	char *p = buf;
p                  69 arch/powerpc/platforms/pseries/nvram.c 		memcpy(p, nvram_buf, len);
p                  71 arch/powerpc/platforms/pseries/nvram.c 		p += len;
p                  78 arch/powerpc/platforms/pseries/nvram.c 	return p - buf;
p                  87 arch/powerpc/platforms/pseries/nvram.c 	const char *p = buf;
p                 106 arch/powerpc/platforms/pseries/nvram.c 		memcpy(nvram_buf, p, len);
p                 114 arch/powerpc/platforms/pseries/nvram.c 		p += len;
p                 120 arch/powerpc/platforms/pseries/nvram.c 	return p - buf;
p                  48 arch/powerpc/platforms/pseries/of_helpers.c 	const char *p;
p                  55 arch/powerpc/platforms/pseries/of_helpers.c 	p = data->drc_type = (char*) (*curval);
p                  56 arch/powerpc/platforms/pseries/of_helpers.c 	p = of_prop_next_string(*prop, p);
p                  57 arch/powerpc/platforms/pseries/of_helpers.c 	if (!p)
p                  61 arch/powerpc/platforms/pseries/of_helpers.c 	data->drc_name_prefix = (char *)p;
p                  62 arch/powerpc/platforms/pseries/of_helpers.c 	p = of_prop_next_string(*prop, p);
p                  63 arch/powerpc/platforms/pseries/of_helpers.c 	if (!p)
p                  67 arch/powerpc/platforms/pseries/of_helpers.c 	p2 = (const __be32 *)p;
p                  44 arch/powerpc/platforms/pseries/papr_scm.c static int drc_pmem_bind(struct papr_scm_priv *p)
p                  60 arch/powerpc/platforms/pseries/papr_scm.c 		rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
p                  61 arch/powerpc/platforms/pseries/papr_scm.c 				p->blocks, BIND_ANY_ADDR, token);
p                  71 arch/powerpc/platforms/pseries/papr_scm.c 	p->bound_addr = saved;
p                  72 arch/powerpc/platforms/pseries/papr_scm.c 	dev_dbg(&p->pdev->dev, "bound drc 0x%x to %pR\n", p->drc_index, &p->res);
p                  76 arch/powerpc/platforms/pseries/papr_scm.c static void drc_pmem_unbind(struct papr_scm_priv *p)
p                  82 arch/powerpc/platforms/pseries/papr_scm.c 	dev_dbg(&p->pdev->dev, "unbind drc 0x%x\n", p->drc_index);
p                  89 arch/powerpc/platforms/pseries/papr_scm.c 				 p->drc_index, token);
p                 103 arch/powerpc/platforms/pseries/papr_scm.c 		dev_err(&p->pdev->dev, "unbind error: %lld\n", rc);
p                 105 arch/powerpc/platforms/pseries/papr_scm.c 		dev_dbg(&p->pdev->dev, "unbind drc 0x%x complete\n",
p                 106 arch/powerpc/platforms/pseries/papr_scm.c 			p->drc_index);
p                 111 arch/powerpc/platforms/pseries/papr_scm.c static int drc_pmem_query_n_bind(struct papr_scm_priv *p)
p                 120 arch/powerpc/platforms/pseries/papr_scm.c 			 p->drc_index, 0);
p                 127 arch/powerpc/platforms/pseries/papr_scm.c 			 p->drc_index, p->blocks - 1);
p                 132 arch/powerpc/platforms/pseries/papr_scm.c 	if ((end_addr - start_addr) != ((p->blocks - 1) * p->block_size))
p                 135 arch/powerpc/platforms/pseries/papr_scm.c 	p->bound_addr = start_addr;
p                 136 arch/powerpc/platforms/pseries/papr_scm.c 	dev_dbg(&p->pdev->dev, "bound drc 0x%x to %pR\n", p->drc_index, &p->res);
p                 140 arch/powerpc/platforms/pseries/papr_scm.c 	dev_info(&p->pdev->dev,
p                 142 arch/powerpc/platforms/pseries/papr_scm.c 	drc_pmem_unbind(p);
p                 143 arch/powerpc/platforms/pseries/papr_scm.c 	return drc_pmem_bind(p);
p                 147 arch/powerpc/platforms/pseries/papr_scm.c static int papr_scm_meta_get(struct papr_scm_priv *p,
p                 155 arch/powerpc/platforms/pseries/papr_scm.c 	if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
p                 172 arch/powerpc/platforms/pseries/papr_scm.c 		ret = plpar_hcall(H_SCM_READ_METADATA, data, p->drc_index,
p                 200 arch/powerpc/platforms/pseries/papr_scm.c static int papr_scm_meta_set(struct papr_scm_priv *p,
p                 209 arch/powerpc/platforms/pseries/papr_scm.c 	if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
p                 237 arch/powerpc/platforms/pseries/papr_scm.c 		ret = plpar_hcall_norets(H_SCM_WRITE_METADATA, p->drc_index,
p                 252 arch/powerpc/platforms/pseries/papr_scm.c 	struct papr_scm_priv *p;
p                 258 arch/powerpc/platforms/pseries/papr_scm.c 	p = nvdimm_provider_data(nvdimm);
p                 266 arch/powerpc/platforms/pseries/papr_scm.c 		get_size_hdr->config_size = p->metadata_size;
p                 271 arch/powerpc/platforms/pseries/papr_scm.c 		*cmd_rc = papr_scm_meta_get(p, buf);
p                 275 arch/powerpc/platforms/pseries/papr_scm.c 		*cmd_rc = papr_scm_meta_set(p, buf);
p                 282 arch/powerpc/platforms/pseries/papr_scm.c 	dev_dbg(&p->pdev->dev, "returned with cmd_rc = %d\n", *cmd_rc);
p                 325 arch/powerpc/platforms/pseries/papr_scm.c static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
p                 327 arch/powerpc/platforms/pseries/papr_scm.c 	struct device *dev = &p->pdev->dev;
p                 333 arch/powerpc/platforms/pseries/papr_scm.c 	p->bus_desc.ndctl = papr_scm_ndctl;
p                 334 arch/powerpc/platforms/pseries/papr_scm.c 	p->bus_desc.module = THIS_MODULE;
p                 335 arch/powerpc/platforms/pseries/papr_scm.c 	p->bus_desc.of_node = p->pdev->dev.of_node;
p                 336 arch/powerpc/platforms/pseries/papr_scm.c 	p->bus_desc.attr_groups = bus_attr_groups;
p                 337 arch/powerpc/platforms/pseries/papr_scm.c 	p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);
p                 339 arch/powerpc/platforms/pseries/papr_scm.c 	if (!p->bus_desc.provider_name)
p                 342 arch/powerpc/platforms/pseries/papr_scm.c 	p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
p                 343 arch/powerpc/platforms/pseries/papr_scm.c 	if (!p->bus) {
p                 344 arch/powerpc/platforms/pseries/papr_scm.c 		dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
p                 345 arch/powerpc/platforms/pseries/papr_scm.c 		kfree(p->bus_desc.provider_name);
p                 352 arch/powerpc/platforms/pseries/papr_scm.c 	p->nvdimm = nvdimm_create(p->bus, p, papr_scm_dimm_groups,
p                 354 arch/powerpc/platforms/pseries/papr_scm.c 	if (!p->nvdimm) {
p                 355 arch/powerpc/platforms/pseries/papr_scm.c 		dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn);
p                 359 arch/powerpc/platforms/pseries/papr_scm.c 	if (nvdimm_bus_check_dimm_count(p->bus, 1))
p                 365 arch/powerpc/platforms/pseries/papr_scm.c 	mapping.nvdimm = p->nvdimm;
p                 367 arch/powerpc/platforms/pseries/papr_scm.c 	mapping.size = p->blocks * p->block_size; // XXX: potential overflow?
p                 371 arch/powerpc/platforms/pseries/papr_scm.c 	target_nid = dev_to_node(&p->pdev->dev);
p                 375 arch/powerpc/platforms/pseries/papr_scm.c 	ndr_desc.res = &p->res;
p                 376 arch/powerpc/platforms/pseries/papr_scm.c 	ndr_desc.of_node = p->dn;
p                 377 arch/powerpc/platforms/pseries/papr_scm.c 	ndr_desc.provider_data = p;
p                 380 arch/powerpc/platforms/pseries/papr_scm.c 	ndr_desc.nd_set = &p->nd_set;
p                 383 arch/powerpc/platforms/pseries/papr_scm.c 	if (p->is_volatile)
p                 384 arch/powerpc/platforms/pseries/papr_scm.c 		p->region = nvdimm_volatile_region_create(p->bus, &ndr_desc);
p                 386 arch/powerpc/platforms/pseries/papr_scm.c 		p->region = nvdimm_pmem_region_create(p->bus, &ndr_desc);
p                 387 arch/powerpc/platforms/pseries/papr_scm.c 	if (!p->region) {
p                 389 arch/powerpc/platforms/pseries/papr_scm.c 				ndr_desc.res, p->dn);
p                 398 arch/powerpc/platforms/pseries/papr_scm.c err:	nvdimm_bus_unregister(p->bus);
p                 399 arch/powerpc/platforms/pseries/papr_scm.c 	kfree(p->bus_desc.provider_name);
p                 408 arch/powerpc/platforms/pseries/papr_scm.c 	struct papr_scm_priv *p;
p                 435 arch/powerpc/platforms/pseries/papr_scm.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 436 arch/powerpc/platforms/pseries/papr_scm.c 	if (!p)
p                 442 arch/powerpc/platforms/pseries/papr_scm.c 	p->dn = dn;
p                 443 arch/powerpc/platforms/pseries/papr_scm.c 	p->drc_index = drc_index;
p                 444 arch/powerpc/platforms/pseries/papr_scm.c 	p->block_size = block_size;
p                 445 arch/powerpc/platforms/pseries/papr_scm.c 	p->blocks = blocks;
p                 446 arch/powerpc/platforms/pseries/papr_scm.c 	p->is_volatile = !of_property_read_bool(dn, "ibm,cache-flush-required");
p                 457 arch/powerpc/platforms/pseries/papr_scm.c 	p->nd_set.cookie1 = cpu_to_le64(uuid[0]);
p                 458 arch/powerpc/platforms/pseries/papr_scm.c 	p->nd_set.cookie2 = cpu_to_le64(uuid[1]);
p                 461 arch/powerpc/platforms/pseries/papr_scm.c 	p->metadata_size = metadata_size;
p                 462 arch/powerpc/platforms/pseries/papr_scm.c 	p->pdev = pdev;
p                 465 arch/powerpc/platforms/pseries/papr_scm.c 	rc = drc_pmem_bind(p);
p                 469 arch/powerpc/platforms/pseries/papr_scm.c 		rc = drc_pmem_query_n_bind(p);
p                 472 arch/powerpc/platforms/pseries/papr_scm.c 		dev_err(&p->pdev->dev, "bind err: %d\n", rc);
p                 478 arch/powerpc/platforms/pseries/papr_scm.c 	p->res.start = p->bound_addr;
p                 479 arch/powerpc/platforms/pseries/papr_scm.c 	p->res.end   = p->bound_addr + p->blocks * p->block_size - 1;
p                 480 arch/powerpc/platforms/pseries/papr_scm.c 	p->res.name  = pdev->name;
p                 481 arch/powerpc/platforms/pseries/papr_scm.c 	p->res.flags = IORESOURCE_MEM;
p                 483 arch/powerpc/platforms/pseries/papr_scm.c 	rc = papr_scm_nvdimm_init(p);
p                 487 arch/powerpc/platforms/pseries/papr_scm.c 	platform_set_drvdata(pdev, p);
p                 491 arch/powerpc/platforms/pseries/papr_scm.c err2:	drc_pmem_unbind(p);
p                 492 arch/powerpc/platforms/pseries/papr_scm.c err:	kfree(p);
p                 498 arch/powerpc/platforms/pseries/papr_scm.c 	struct papr_scm_priv *p = platform_get_drvdata(pdev);
p                 500 arch/powerpc/platforms/pseries/papr_scm.c 	nvdimm_bus_unregister(p->bus);
p                 501 arch/powerpc/platforms/pseries/papr_scm.c 	drc_pmem_unbind(p);
p                 502 arch/powerpc/platforms/pseries/papr_scm.c 	kfree(p->bus_desc.provider_name);
p                 503 arch/powerpc/platforms/pseries/papr_scm.c 	kfree(p);
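
The papr_scm.c probe/remove entries follow the standard platform-driver lifecycle: allocate private state, bind resources, publish the pointer with platform_set_drvdata(), and unwind in reverse on remove. A stripped-down sketch with a hypothetical private struct and the resource binding omitted:

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_priv {			/* hypothetical private state */
	struct platform_device *pdev;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return -ENOMEM;

	p->pdev = pdev;
	platform_set_drvdata(pdev, p);	/* retrieved again in remove() */
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	struct example_priv *p = platform_get_drvdata(pdev);

	kfree(p);
	return 0;
}
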
p                  18 arch/powerpc/sysdev/dcr.c 	const u32 *p;
p                  23 arch/powerpc/sysdev/dcr.c 		p = of_get_property(par, "dcr-parent", NULL);
p                  25 arch/powerpc/sysdev/dcr.c 		if (p == NULL)
p                  28 arch/powerpc/sysdev/dcr.c 			par = of_find_node_by_phandle(*p);
p                 147 arch/powerpc/sysdev/dcr.c 	const u32 *p;
p                 156 arch/powerpc/sysdev/dcr.c 	p = of_get_property(dp, "dcr-mmio-stride", NULL);
p                 157 arch/powerpc/sysdev/dcr.c 	stride = (p == NULL) ? 0x10 : *p;
p                 160 arch/powerpc/sysdev/dcr.c 	p = of_get_property(dp, "dcr-mmio-range", NULL);
p                 161 arch/powerpc/sysdev/dcr.c 	if (p == NULL)
p                 162 arch/powerpc/sysdev/dcr.c 		p = of_get_property(dp, "dcr-mmio-space", NULL);
p                 163 arch/powerpc/sysdev/dcr.c 	if (p == NULL)
p                 167 arch/powerpc/sysdev/dcr.c 	ret = of_translate_address(dp, p);
p                  65 arch/powerpc/sysdev/fsl_msi.c static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p)
p                  74 arch/powerpc/sysdev/fsl_msi.c 	seq_printf(p, " fsl-msi-%d", cascade_virq);
p                 402 arch/powerpc/sysdev/fsl_msi.c 	const u32 *p;
p                 483 arch/powerpc/sysdev/fsl_msi.c 	p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);
p                 489 arch/powerpc/sysdev/fsl_msi.c 		if (p)
p                 507 arch/powerpc/sysdev/fsl_msi.c 		if (p && len % (2 * sizeof(u32)) != 0) {
p                 514 arch/powerpc/sysdev/fsl_msi.c 		if (!p) {
p                 515 arch/powerpc/sysdev/fsl_msi.c 			p = all_avail;
p                 520 arch/powerpc/sysdev/fsl_msi.c 			if (p[i * 2] % IRQS_PER_MSI_REG ||
p                 521 arch/powerpc/sysdev/fsl_msi.c 			    p[i * 2 + 1] % IRQS_PER_MSI_REG) {
p                 524 arch/powerpc/sysdev/fsl_msi.c 				       p[i * 2 + 1], p[i * 2]);
p                 529 arch/powerpc/sysdev/fsl_msi.c 			offset = p[i * 2] / IRQS_PER_MSI_REG;
p                 530 arch/powerpc/sysdev/fsl_msi.c 			count = p[i * 2 + 1] / IRQS_PER_MSI_REG;
p                 341 arch/powerpc/sysdev/mpic.c #define mpic_map(m,p,b,o,s)	_mpic_map_mmio(m,p,b,o,s)
p                 418 arch/powerpc/sysdev/mpic_timer.c 	const u32 *p;
p                 428 arch/powerpc/sysdev/mpic_timer.c 	p = of_get_property(np, "fsl,available-ranges", &len);
p                 429 arch/powerpc/sysdev/mpic_timer.c 	if (p && len % (2 * sizeof(u32)) != 0) {
p                 434 arch/powerpc/sysdev/mpic_timer.c 	if (!p) {
p                 435 arch/powerpc/sysdev/mpic_timer.c 		p = all_timer;
p                 442 arch/powerpc/sysdev/mpic_timer.c 		offset = p[i * 2];
p                 443 arch/powerpc/sysdev/mpic_timer.c 		count = p[i * 2 + 1];
p                  77 arch/powerpc/sysdev/msi_bitmap.c 	const u32 *p;
p                  82 arch/powerpc/sysdev/msi_bitmap.c 	p = of_get_property(bmp->of_node, "msi-available-ranges", &len);
p                  83 arch/powerpc/sysdev/msi_bitmap.c 	if (!p) {
p                 101 arch/powerpc/sysdev/msi_bitmap.c 	for (i = 0; i < len; i++, p += 2) {
p                 102 arch/powerpc/sysdev/msi_bitmap.c 		for (j = 0; j < *(p + 1); j++)
p                 103 arch/powerpc/sysdev/msi_bitmap.c 			bitmap_release_region(bmp->bitmap, *p + j, 0);
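
The msi_bitmap.c entries walk "msi-available-ranges" as <start count> pairs and release each described hwirq from an initially fully reserved bitmap. A sketch of that release loop:

#include <linux/bitmap.h>
#include <linux/types.h>

/* p points at an array of <start, count> u32 pairs; mark every
 * interrupt they describe as available in a bitmap that starts out
 * fully reserved. */
static void release_ranges(unsigned long *bitmap, const u32 *p, int pairs)
{
	int i;
	u32 j;

	for (i = 0; i < pairs; i++, p += 2)
		for (j = 0; j < p[1]; j++)
			bitmap_release_region(bitmap, p[0] + j, 0);
}
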
p                 169 arch/powerpc/sysdev/xive/common.c 			int p = atomic_xchg(&q->pending_count, 0);
p                 170 arch/powerpc/sysdev/xive/common.c 			if (p) {
p                 171 arch/powerpc/sysdev/xive/common.c 				WARN_ON(p > atomic_read(&q->count));
p                 172 arch/powerpc/sysdev/xive/common.c 				atomic_sub(p, &q->count);
p                 556 arch/powerpc/sysdev/xive/native.c 	const __be32 *p;
p                 587 arch/powerpc/sysdev/xive/native.c 	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
p                 635 arch/powerpc/sysdev/xive/native.c 	void *p;
p                 644 arch/powerpc/sysdev/xive/native.c 		p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
p                 645 arch/powerpc/sysdev/xive/native.c 		if (!p) {
p                 649 arch/powerpc/sysdev/xive/native.c 		opal_xive_donate_page(chip, __pa(p));
p                  41 arch/powerpc/xmon/nonstdio.c 	const char *p = ptr, *q;
p                  51 arch/powerpc/xmon/nonstdio.c 		while (paginating && (q = strchr(p, '\n'))) {
p                  52 arch/powerpc/xmon/nonstdio.c 			rv += udbg_write(p, q - p + 1);
p                  53 arch/powerpc/xmon/nonstdio.c 			p = q + 1;
p                  80 arch/powerpc/xmon/nonstdio.c 	return rv + udbg_write(p, nb - (p - ptr));
p                 149 arch/powerpc/xmon/nonstdio.c 	char *p;
p                 152 arch/powerpc/xmon/nonstdio.c 	for (p = str; p < str + nb - 1; ) {
p                 155 arch/powerpc/xmon/nonstdio.c 			if (p == str)
p                 159 arch/powerpc/xmon/nonstdio.c 		*p++ = c;
p                 163 arch/powerpc/xmon/nonstdio.c 	*p = 0;
p                 328 arch/powerpc/xmon/xmon.c static inline void store_inst(void *p)
p                 330 arch/powerpc/xmon/xmon.c 	asm volatile ("dcbst 0,%0; sync; icbi 0,%0; isync" : : "r" (p));
p                 333 arch/powerpc/xmon/xmon.c static inline void cflush(void *p)
p                 335 arch/powerpc/xmon/xmon.c 	asm volatile ("dcbf 0,%0; icbi 0,%0" : : "r" (p));
p                 338 arch/powerpc/xmon/xmon.c static inline void cinval(void *p)
p                 340 arch/powerpc/xmon/xmon.c 	asm volatile ("dcbi 0,%0; icbi 0,%0" : : "r" (p));
p                2045 arch/powerpc/xmon/xmon.c 	char *p, *q;
p                2051 arch/powerpc/xmon/xmon.c 		p = (char *)adrs;
p                2055 arch/powerpc/xmon/xmon.c 			*(u16 *)q = *(u16 *)p;
p                2058 arch/powerpc/xmon/xmon.c 			*(u32 *)q = *(u32 *)p;
p                2061 arch/powerpc/xmon/xmon.c 			*(u64 *)q = *(u64 *)p;
p                2065 arch/powerpc/xmon/xmon.c 				*q++ = *p++;
p                2082 arch/powerpc/xmon/xmon.c 	char *p, *q;
p                2094 arch/powerpc/xmon/xmon.c 		p = (char *) adrs;
p                2098 arch/powerpc/xmon/xmon.c 			*(u16 *)p = *(u16 *)q;
p                2101 arch/powerpc/xmon/xmon.c 			*(u32 *)p = *(u32 *)q;
p                2104 arch/powerpc/xmon/xmon.c 			*(u64 *)p = *(u64 *)q;
p                2108 arch/powerpc/xmon/xmon.c 				*p++ = *q++;
p                2412 arch/powerpc/xmon/xmon.c 	struct paca_struct *p;
p                2425 arch/powerpc/xmon/xmon.c 	p = paca_ptrs[cpu];
p                2427 arch/powerpc/xmon/xmon.c 	printf("paca for cpu 0x%x @ %px:\n", cpu, p);
p                2437 arch/powerpc/xmon/xmon.c 	DUMP(p, lock_token, "%#-*x");
p                2438 arch/powerpc/xmon/xmon.c 	DUMP(p, paca_index, "%#-*x");
p                2439 arch/powerpc/xmon/xmon.c 	DUMP(p, kernel_toc, "%#-*llx");
p                2440 arch/powerpc/xmon/xmon.c 	DUMP(p, kernelbase, "%#-*llx");
p                2441 arch/powerpc/xmon/xmon.c 	DUMP(p, kernel_msr, "%#-*llx");
p                2442 arch/powerpc/xmon/xmon.c 	DUMP(p, emergency_sp, "%-*px");
p                2444 arch/powerpc/xmon/xmon.c 	DUMP(p, nmi_emergency_sp, "%-*px");
p                2445 arch/powerpc/xmon/xmon.c 	DUMP(p, mc_emergency_sp, "%-*px");
p                2446 arch/powerpc/xmon/xmon.c 	DUMP(p, in_nmi, "%#-*x");
p                2447 arch/powerpc/xmon/xmon.c 	DUMP(p, in_mce, "%#-*x");
p                2448 arch/powerpc/xmon/xmon.c 	DUMP(p, hmi_event_available, "%#-*x");
p                2450 arch/powerpc/xmon/xmon.c 	DUMP(p, data_offset, "%#-*llx");
p                2451 arch/powerpc/xmon/xmon.c 	DUMP(p, hw_cpu_id, "%#-*x");
p                2452 arch/powerpc/xmon/xmon.c 	DUMP(p, cpu_start, "%#-*x");
p                2453 arch/powerpc/xmon/xmon.c 	DUMP(p, kexec_state, "%#-*x");
p                2459 arch/powerpc/xmon/xmon.c 			if (!p->slb_shadow_ptr)
p                2462 arch/powerpc/xmon/xmon.c 			esid = be64_to_cpu(p->slb_shadow_ptr->save_area[i].esid);
p                2463 arch/powerpc/xmon/xmon.c 			vsid = be64_to_cpu(p->slb_shadow_ptr->save_area[i].vsid);
p                2470 arch/powerpc/xmon/xmon.c 		DUMP(p, vmalloc_sllp, "%#-*x");
p                2471 arch/powerpc/xmon/xmon.c 		DUMP(p, stab_rr, "%#-*x");
p                2472 arch/powerpc/xmon/xmon.c 		DUMP(p, slb_used_bitmap, "%#-*x");
p                2473 arch/powerpc/xmon/xmon.c 		DUMP(p, slb_kern_bitmap, "%#-*x");
p                2476 arch/powerpc/xmon/xmon.c 			DUMP(p, slb_cache_ptr, "%#-*x");
p                2479 arch/powerpc/xmon/xmon.c 				       22, "slb_cache", i, p->slb_cache[i]);
p                2483 arch/powerpc/xmon/xmon.c 	DUMP(p, rfi_flush_fallback_area, "%-*px");
p                2485 arch/powerpc/xmon/xmon.c 	DUMP(p, dscr_default, "%#-*llx");
p                2487 arch/powerpc/xmon/xmon.c 	DUMP(p, pgd, "%-*px");
p                2488 arch/powerpc/xmon/xmon.c 	DUMP(p, kernel_pgd, "%-*px");
p                2489 arch/powerpc/xmon/xmon.c 	DUMP(p, tcd_ptr, "%-*px");
p                2490 arch/powerpc/xmon/xmon.c 	DUMP(p, mc_kstack, "%-*px");
p                2491 arch/powerpc/xmon/xmon.c 	DUMP(p, crit_kstack, "%-*px");
p                2492 arch/powerpc/xmon/xmon.c 	DUMP(p, dbg_kstack, "%-*px");
p                2494 arch/powerpc/xmon/xmon.c 	DUMP(p, __current, "%-*px");
p                2495 arch/powerpc/xmon/xmon.c 	DUMP(p, kstack, "%#-*llx");
p                2496 arch/powerpc/xmon/xmon.c 	printf(" %-*s = 0x%016llx\n", 25, "kstack_base", p->kstack & ~(THREAD_SIZE - 1));
p                2498 arch/powerpc/xmon/xmon.c 	DUMP(p, canary, "%#-*lx");
p                2500 arch/powerpc/xmon/xmon.c 	DUMP(p, saved_r1, "%#-*llx");
p                2502 arch/powerpc/xmon/xmon.c 	DUMP(p, trap_save, "%#-*x");
p                2504 arch/powerpc/xmon/xmon.c 	DUMP(p, irq_soft_mask, "%#-*x");
p                2505 arch/powerpc/xmon/xmon.c 	DUMP(p, irq_happened, "%#-*x");
p                2507 arch/powerpc/xmon/xmon.c 	DUMP(p, mmiowb_state.nesting_count, "%#-*x");
p                2508 arch/powerpc/xmon/xmon.c 	DUMP(p, mmiowb_state.mmiowb_pending, "%#-*x");
p                2510 arch/powerpc/xmon/xmon.c 	DUMP(p, irq_work_pending, "%#-*x");
p                2511 arch/powerpc/xmon/xmon.c 	DUMP(p, sprg_vdso, "%#-*llx");
p                2514 arch/powerpc/xmon/xmon.c 	DUMP(p, tm_scratch, "%#-*llx");
p                2518 arch/powerpc/xmon/xmon.c 	DUMP(p, idle_state, "%#-*lx");
p                2520 arch/powerpc/xmon/xmon.c 		DUMP(p, thread_idle_state, "%#-*x");
p                2521 arch/powerpc/xmon/xmon.c 		DUMP(p, subcore_sibling_mask, "%#-*x");
p                2524 arch/powerpc/xmon/xmon.c 		DUMP(p, requested_psscr, "%#-*llx");
p                2525 arch/powerpc/xmon/xmon.c 		DUMP(p, dont_stop.counter, "%#-*x");
p                2530 arch/powerpc/xmon/xmon.c 	DUMP(p, accounting.utime, "%#-*lx");
p                2531 arch/powerpc/xmon/xmon.c 	DUMP(p, accounting.stime, "%#-*lx");
p                2533 arch/powerpc/xmon/xmon.c 	DUMP(p, accounting.utime_scaled, "%#-*lx");
p                2535 arch/powerpc/xmon/xmon.c 	DUMP(p, accounting.starttime, "%#-*lx");
p                2536 arch/powerpc/xmon/xmon.c 	DUMP(p, accounting.starttime_user, "%#-*lx");
p                2538 arch/powerpc/xmon/xmon.c 	DUMP(p, accounting.startspurr, "%#-*lx");
p                2539 arch/powerpc/xmon/xmon.c 	DUMP(p, accounting.utime_sspurr, "%#-*lx");
p                2541 arch/powerpc/xmon/xmon.c 	DUMP(p, accounting.steal_time, "%#-*lx");
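
dump_one_paca() drives a DUMP() macro that stringifies the member name and prints it with a per-field format and fixed column widths. An illustrative stand-in, not xmon's actual definition:

#include <stdio.h>

/* Print "<name> = <value>" for a named struct member, stringifying the
 * field with # and taking the column widths (25/18, arbitrary here) as
 * printf '*' arguments, matching the "%#-*llx"-style formats above. */
#define DUMP(ptr, field, fmt) \
	printf(" %-*s = " fmt "\n", 25, #field, 18, (ptr)->field)

struct demo {				/* hypothetical structure */
	unsigned long kstack;
	unsigned int hw_cpu_id;
};

int main(void)
{
	struct demo d = { .kstack = 0xc000000000001000UL, .hw_cpu_id = 4 };

	DUMP(&d, kstack, "%#-*lx");
	DUMP(&d, hw_cpu_id, "%#-*x");
	return 0;
}
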
p                3895 arch/powerpc/xmon/xmon.c static int __init early_parse_xmon(char *p)
p                3901 arch/powerpc/xmon/xmon.c 	} else if (!p || strncmp(p, "early", 5) == 0) {
p                3906 arch/powerpc/xmon/xmon.c 	} else if (strncmp(p, "on", 2) == 0) {
p                3909 arch/powerpc/xmon/xmon.c 	} else if (strncmp(p, "rw", 2) == 0) {
p                3913 arch/powerpc/xmon/xmon.c 	} else if (strncmp(p, "ro", 2) == 0) {
p                3917 arch/powerpc/xmon/xmon.c 	} else if (strncmp(p, "off", 3) == 0)
p                 214 arch/riscv/include/asm/atomic.h 		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
p                 235 arch/riscv/include/asm/atomic.h 		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
p                 327 arch/riscv/include/asm/atomic.h 		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
p                 349 arch/riscv/include/asm/atomic.h 		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
p                  17 arch/riscv/include/asm/barrier.h #define RISCV_FENCE(p, s) \
p                  18 arch/riscv/include/asm/barrier.h 	__asm__ __volatile__ ("fence " #p "," #s : : : "memory")
p                  30 arch/riscv/include/asm/barrier.h #define __smp_store_release(p, v)					\
p                  32 arch/riscv/include/asm/barrier.h 	compiletime_assert_atomic_type(*p);				\
p                  34 arch/riscv/include/asm/barrier.h 	WRITE_ONCE(*p, v);						\
p                  37 arch/riscv/include/asm/barrier.h #define __smp_load_acquire(p)						\
p                  39 arch/riscv/include/asm/barrier.h 	typeof(*p) ___p1 = READ_ONCE(*p);				\
p                  40 arch/riscv/include/asm/barrier.h 	compiletime_assert_atomic_type(*p);				\
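
The riscv barrier.h entries define store-release/load-acquire. Their intended pairing, sketched with portable C11 atomics rather than the kernel macros:

#include <stdatomic.h>
#include <stdbool.h>

static int payload;			/* data published by the producer */
static atomic_bool ready;

/* Producer: write the data, then publish the flag with release order,
 * the role __smp_store_release() plays in the entries above. */
static void publish(int value)
{
	payload = value;
	atomic_store_explicit(&ready, true, memory_order_release);
}

/* Consumer: an acquire load of the flag guarantees the payload write
 * is visible once the flag is seen set (__smp_load_acquire()'s role). */
static bool consume(int *out)
{
	if (!atomic_load_explicit(&ready, memory_order_acquire))
		return false;
	*out = payload;
	return true;
}
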
p                  58 arch/riscv/include/asm/processor.h extern unsigned long get_wchan(struct task_struct *p);
p                  26 arch/riscv/include/asm/smp.h void show_ipi_stats(struct seq_file *p, int prec);
p                  48 arch/riscv/include/asm/smp.h static inline void show_ipi_stats(struct seq_file *p, int prec)
p                  21 arch/riscv/kernel/irq.c int arch_show_interrupts(struct seq_file *p, int prec)
p                  23 arch/riscv/kernel/irq.c 	show_ipi_stats(p, prec);
p                 103 arch/riscv/kernel/process.c 	unsigned long arg, struct task_struct *p, unsigned long tls)
p                 105 arch/riscv/kernel/process.c 	struct pt_regs *childregs = task_pt_regs(p);
p                 108 arch/riscv/kernel/process.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 115 arch/riscv/kernel/process.c 		p->thread.ra = (unsigned long)ret_from_kernel_thread;
p                 116 arch/riscv/kernel/process.c 		p->thread.s[0] = usp; /* fn */
p                 117 arch/riscv/kernel/process.c 		p->thread.s[1] = arg;
p                 125 arch/riscv/kernel/process.c 		p->thread.ra = (unsigned long)ret_from_fork;
p                 127 arch/riscv/kernel/process.c 	p->thread.sp = (unsigned long)childregs; /* kernel sp */
p                 159 arch/riscv/kernel/smp.c void show_ipi_stats(struct seq_file *p, int prec)
p                 164 arch/riscv/kernel/smp.c 		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
p                 167 arch/riscv/kernel/smp.c 			seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
p                 168 arch/riscv/kernel/smp.c 		seq_printf(p, " %s\n", ipi_names[i]);
p                 115 arch/riscv/kernel/stacktrace.c 		unsigned long *p = arg;
p                 116 arch/riscv/kernel/stacktrace.c 		*p = pc;
p                  32 arch/s390/boot/pgm_check_info.c 	char *p;
p                  38 arch/s390/boot/pgm_check_info.c 	p = add_str(buf, "Kernel fault: interruption code ");
p                  39 arch/s390/boot/pgm_check_info.c 	p = add_val_as_hex(buf + strlen(buf), S390_lowcore.pgm_code);
p                  40 arch/s390/boot/pgm_check_info.c 	p = add_str(p, " ilc:");
p                  41 arch/s390/boot/pgm_check_info.c 	*p++ = hex_asc_lo(ilc);
p                  42 arch/s390/boot/pgm_check_info.c 	add_str(p, "\n");
p                  45 arch/s390/boot/pgm_check_info.c 	p = add_str(buf, "PSW : ");
p                  46 arch/s390/boot/pgm_check_info.c 	p = add_val_as_hex(p, S390_lowcore.psw_save_area.mask);
p                  47 arch/s390/boot/pgm_check_info.c 	p = add_str(p, " ");
p                  48 arch/s390/boot/pgm_check_info.c 	p = add_val_as_hex(p, S390_lowcore.psw_save_area.addr);
p                  49 arch/s390/boot/pgm_check_info.c 	add_str(p, "\n");
p                  52 arch/s390/boot/pgm_check_info.c 	p = add_str(buf, "      R:");
p                  53 arch/s390/boot/pgm_check_info.c 	*p++ = hex_asc_lo(psw->per);
p                  54 arch/s390/boot/pgm_check_info.c 	p = add_str(p, " T:");
p                  55 arch/s390/boot/pgm_check_info.c 	*p++ = hex_asc_lo(psw->dat);
p                  56 arch/s390/boot/pgm_check_info.c 	p = add_str(p, " IO:");
p                  57 arch/s390/boot/pgm_check_info.c 	*p++ = hex_asc_lo(psw->io);
p                  58 arch/s390/boot/pgm_check_info.c 	p = add_str(p, " EX:");
p                  59 arch/s390/boot/pgm_check_info.c 	*p++ = hex_asc_lo(psw->ext);
p                  60 arch/s390/boot/pgm_check_info.c 	p = add_str(p, " Key:");
p                  61 arch/s390/boot/pgm_check_info.c 	*p++ = hex_asc_lo(psw->key);
p                  62 arch/s390/boot/pgm_check_info.c 	p = add_str(p, " M:");
p                  63 arch/s390/boot/pgm_check_info.c 	*p++ = hex_asc_lo(psw->mcheck);
p                  64 arch/s390/boot/pgm_check_info.c 	p = add_str(p, " W:");
p                  65 arch/s390/boot/pgm_check_info.c 	*p++ = hex_asc_lo(psw->wait);
p                  66 arch/s390/boot/pgm_check_info.c 	p = add_str(p, " P:");
p                  67 arch/s390/boot/pgm_check_info.c 	*p++ = hex_asc_lo(psw->pstate);
p                  68 arch/s390/boot/pgm_check_info.c 	p = add_str(p, " AS:");
p                  69 arch/s390/boot/pgm_check_info.c 	*p++ = hex_asc_lo(psw->as);
p                  70 arch/s390/boot/pgm_check_info.c 	p = add_str(p, " CC:");
p                  71 arch/s390/boot/pgm_check_info.c 	*p++ = hex_asc_lo(psw->cc);
p                  72 arch/s390/boot/pgm_check_info.c 	p = add_str(p, " PM:");
p                  73 arch/s390/boot/pgm_check_info.c 	*p++ = hex_asc_lo(psw->pm);
p                  74 arch/s390/boot/pgm_check_info.c 	p = add_str(p, " RI:");
p                  75 arch/s390/boot/pgm_check_info.c 	*p++ = hex_asc_lo(psw->ri);
p                  76 arch/s390/boot/pgm_check_info.c 	p = add_str(p, " EA:");
p                  77 arch/s390/boot/pgm_check_info.c 	*p++ = hex_asc_lo(psw->eaba);
p                  78 arch/s390/boot/pgm_check_info.c 	add_str(p, "\n");
p                  82 arch/s390/boot/pgm_check_info.c 		p = add_str(buf, row == 0 ? "GPRS:" : "     ");
p                  84 arch/s390/boot/pgm_check_info.c 			p = add_str(p, " ");
p                  85 arch/s390/boot/pgm_check_info.c 			p = add_val_as_hex(p, S390_lowcore.gpregs_save_area[row * 4 + col]);
p                  87 arch/s390/boot/pgm_check_info.c 		add_str(p, "\n");
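
The s390 pgm_check_info.c entries build the fault message by chaining add_str(), which evidently copies a string and returns the new end of the buffer. A hedged sketch of such a helper, not the s390 implementation:

#include <string.h>

/* Append src to dst and return a pointer just past the copied text so
 * calls can be chained; the caller is assumed to have sized the buffer. */
static char *add_str(char *dst, const char *src)
{
	size_t len = strlen(src);

	memcpy(dst, src, len + 1);	/* include the terminating NUL */
	return dst + len;
}
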
p                 166 arch/s390/crypto/prng.c 			u64 *p = (u64 *)(pg + offset);
p                 167 arch/s390/crypto/prng.c 			*p ^= get_tod_clock_fast();
p                 594 arch/s390/crypto/prng.c 	u8 *p;
p                 623 arch/s390/crypto/prng.c 			p = prng_data->buf + prng_chunk_size - prng_data->rest;
p                 629 arch/s390/crypto/prng.c 			p = prng_data->buf;
p                 630 arch/s390/crypto/prng.c 			n = prng_sha512_generate(p, prng_chunk_size);
p                 643 arch/s390/crypto/prng.c 		if (copy_to_user(ubuf, p, n)) {
p                 647 arch/s390/crypto/prng.c 		memzero_explicit(p, n);
p                  34 arch/s390/include/asm/barrier.h #define __smp_store_release(p, v)					\
p                  36 arch/s390/include/asm/barrier.h 	compiletime_assert_atomic_type(*p);				\
p                  38 arch/s390/include/asm/barrier.h 	WRITE_ONCE(*p, v);						\
p                  41 arch/s390/include/asm/barrier.h #define __smp_load_acquire(p)						\
p                  43 arch/s390/include/asm/barrier.h 	typeof(*p) ___p1 = READ_ONCE(*p);				\
p                  44 arch/s390/include/asm/barrier.h 	compiletime_assert_atomic_type(*p);				\
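The barrier.h lines index s390's __smp_store_release()/__smp_load_acquire(), which reduce to WRITE_ONCE()/READ_ONCE() plus a compile-time type check because the architecture's ordering is already strong enough. A user-space sketch of the same publish/consume pattern with C11 atomics (illustrative only; the kernel macros themselves cannot be used outside the kernel):

	#include <stdatomic.h>
	#include <stdio.h>

	static int payload;		/* data the writer publishes           */
	static atomic_int ready;	/* flag carrying release/acquire order */

	static void writer(void)
	{
		payload = 42;					/* plain store     */
		atomic_store_explicit(&ready, 1,
				      memory_order_release);	/* "store release" */
	}

	static void reader(void)
	{
		if (atomic_load_explicit(&ready,
					 memory_order_acquire))	/* "load acquire" */
			printf("payload=%d\n", payload);	/* sees 42        */
	}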
p                  25 arch/s390/include/asm/io.h #define xlate_dev_kmem_ptr(p)	p
p                  41 arch/s390/include/asm/io.h static inline void ioport_unmap(void __iomem *p)
p                  73 arch/s390/include/asm/kprobes.h void arch_remove_kprobe(struct kprobe *p);
p                  80 arch/s390/include/asm/kprobes.h #define flush_insn_slot(p)	do { } while (0)
p                  32 arch/s390/include/asm/preempt.h #define init_task_preempt_count(p)	do { } while (0)
p                  34 arch/s390/include/asm/preempt.h #define init_idle_preempt_count(p, cpu)	do { \
p                  91 arch/s390/include/asm/preempt.h #define init_task_preempt_count(p)	do { } while (0)
p                  93 arch/s390/include/asm/preempt.h #define init_idle_preempt_count(p, cpu)	do { \
p                 201 arch/s390/include/asm/processor.h unsigned long get_wchan(struct task_struct *p);
p                  18 arch/s390/include/asm/sysinfo.h 	unsigned char p:1;
p                 121 arch/s390/kernel/irq.c static void show_msi_interrupt(struct seq_file *p, int irq)
p                 133 arch/s390/kernel/irq.c 	seq_printf(p, "%3d: ", irq);
p                 135 arch/s390/kernel/irq.c 		seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
p                 138 arch/s390/kernel/irq.c 		seq_printf(p, " %8s", desc->irq_data.chip->name);
p                 141 arch/s390/kernel/irq.c 		seq_printf(p, "  %s", desc->action->name);
p                 143 arch/s390/kernel/irq.c 	seq_putc(p, '\n');
p                 152 arch/s390/kernel/irq.c int show_interrupts(struct seq_file *p, void *v)
p                 159 arch/s390/kernel/irq.c 		seq_puts(p, "           ");
p                 161 arch/s390/kernel/irq.c 			seq_printf(p, "CPU%-8d", cpu);
p                 162 arch/s390/kernel/irq.c 		seq_putc(p, '\n');
p                 165 arch/s390/kernel/irq.c 		seq_printf(p, "%s: ", irqclass_main_desc[index].name);
p                 168 arch/s390/kernel/irq.c 			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
p                 169 arch/s390/kernel/irq.c 		seq_putc(p, '\n');
p                 173 arch/s390/kernel/irq.c 		show_msi_interrupt(p, index);
p                 177 arch/s390/kernel/irq.c 		seq_printf(p, "%s: ", irqclass_sub_desc[index].name);
p                 180 arch/s390/kernel/irq.c 			seq_printf(p, "%10u ",
p                 183 arch/s390/kernel/irq.c 			seq_printf(p, "  %s", irqclass_sub_desc[index].desc);
p                 184 arch/s390/kernel/irq.c 		seq_putc(p, '\n');
p                 239 arch/s390/kernel/irq.c 	struct ext_int_info *p;
p                 243 arch/s390/kernel/irq.c 	p = kmalloc(sizeof(*p), GFP_ATOMIC);
p                 244 arch/s390/kernel/irq.c 	if (!p)
p                 246 arch/s390/kernel/irq.c 	p->code = code;
p                 247 arch/s390/kernel/irq.c 	p->handler = handler;
p                 251 arch/s390/kernel/irq.c 	hlist_add_head_rcu(&p->entry, &ext_int_hash[index]);
p                 259 arch/s390/kernel/irq.c 	struct ext_int_info *p;
p                 264 arch/s390/kernel/irq.c 	hlist_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
p                 265 arch/s390/kernel/irq.c 		if (p->code == code && p->handler == handler) {
p                 266 arch/s390/kernel/irq.c 			hlist_del_rcu(&p->entry);
p                 267 arch/s390/kernel/irq.c 			kfree_rcu(p, rcu);
p                 279 arch/s390/kernel/irq.c 	struct ext_int_info *p;
p                 288 arch/s390/kernel/irq.c 	hlist_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
p                 289 arch/s390/kernel/irq.c 		if (unlikely(p->code != ext_code.code))
p                 291 arch/s390/kernel/irq.c 		p->handler(ext_code, regs->int_parm, regs->int_parm_long);
p                  57 arch/s390/kernel/kprobes.c static void copy_instruction(struct kprobe *p)
p                  59 arch/s390/kernel/kprobes.c 	unsigned long ip = (unsigned long) p->addr;
p                  70 arch/s390/kernel/kprobes.c 		ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
p                  71 arch/s390/kernel/kprobes.c 		p->ainsn.is_ftrace_insn = 1;
p                  73 arch/s390/kernel/kprobes.c 		memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
p                  74 arch/s390/kernel/kprobes.c 	p->opcode = p->ainsn.insn[0];
p                  75 arch/s390/kernel/kprobes.c 	if (!probe_is_insn_relative_long(p->ainsn.insn))
p                  84 arch/s390/kernel/kprobes.c 	disp = *(s32 *)&p->ainsn.insn[1];
p                  85 arch/s390/kernel/kprobes.c 	addr = (u64)(unsigned long)p->addr;
p                  86 arch/s390/kernel/kprobes.c 	new_addr = (u64)(unsigned long)p->ainsn.insn;
p                  88 arch/s390/kernel/kprobes.c 	*(s32 *)&p->ainsn.insn[1] = new_disp;
p                  97 arch/s390/kernel/kprobes.c static int s390_get_insn_slot(struct kprobe *p)
p                 104 arch/s390/kernel/kprobes.c 	p->ainsn.insn = NULL;
p                 105 arch/s390/kernel/kprobes.c 	if (is_kernel_addr(p->addr))
p                 106 arch/s390/kernel/kprobes.c 		p->ainsn.insn = get_s390_insn_slot();
p                 107 arch/s390/kernel/kprobes.c 	else if (is_module_addr(p->addr))
p                 108 arch/s390/kernel/kprobes.c 		p->ainsn.insn = get_insn_slot();
p                 109 arch/s390/kernel/kprobes.c 	return p->ainsn.insn ? 0 : -ENOMEM;
p                 113 arch/s390/kernel/kprobes.c static void s390_free_insn_slot(struct kprobe *p)
p                 115 arch/s390/kernel/kprobes.c 	if (!p->ainsn.insn)
p                 117 arch/s390/kernel/kprobes.c 	if (is_kernel_addr(p->addr))
p                 118 arch/s390/kernel/kprobes.c 		free_s390_insn_slot(p->ainsn.insn, 0);
p                 120 arch/s390/kernel/kprobes.c 		free_insn_slot(p->ainsn.insn, 0);
p                 121 arch/s390/kernel/kprobes.c 	p->ainsn.insn = NULL;
p                 125 arch/s390/kernel/kprobes.c int arch_prepare_kprobe(struct kprobe *p)
p                 127 arch/s390/kernel/kprobes.c 	if ((unsigned long) p->addr & 0x01)
p                 130 arch/s390/kernel/kprobes.c 	if (probe_is_prohibited_opcode(p->addr))
p                 132 arch/s390/kernel/kprobes.c 	if (s390_get_insn_slot(p))
p                 134 arch/s390/kernel/kprobes.c 	copy_instruction(p);
p                 139 arch/s390/kernel/kprobes.c int arch_check_ftrace_location(struct kprobe *p)
p                 145 arch/s390/kernel/kprobes.c 	struct kprobe *p;
p                 153 arch/s390/kernel/kprobes.c 	struct kprobe *p = args->p;
p                 156 arch/s390/kernel/kprobes.c 	new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
p                 158 arch/s390/kernel/kprobes.c 	if (!p->ainsn.is_ftrace_insn)
p                 161 arch/s390/kernel/kprobes.c 	insn = (struct ftrace_insn *) p->addr;
p                 168 arch/s390/kernel/kprobes.c 		ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
p                 173 arch/s390/kernel/kprobes.c 	s390_kernel_write(p->addr, &new_insn, len);
p                 178 arch/s390/kernel/kprobes.c void arch_arm_kprobe(struct kprobe *p)
p                 180 arch/s390/kernel/kprobes.c 	struct swap_insn_args args = {.p = p, .arm_kprobe = 1};
p                 186 arch/s390/kernel/kprobes.c void arch_disarm_kprobe(struct kprobe *p)
p                 188 arch/s390/kernel/kprobes.c 	struct swap_insn_args args = {.p = p, .arm_kprobe = 0};
p                 194 arch/s390/kernel/kprobes.c void arch_remove_kprobe(struct kprobe *p)
p                 196 arch/s390/kernel/kprobes.c 	s390_free_insn_slot(p);
p                 241 arch/s390/kernel/kprobes.c static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
p                 245 arch/s390/kernel/kprobes.c 	__this_cpu_write(current_kprobe, p);
p                 270 arch/s390/kernel/kprobes.c static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
p                 275 arch/s390/kernel/kprobes.c 		kprobes_inc_nmissed_count(p);
p                 286 arch/s390/kernel/kprobes.c 		dump_kprobe(p);
p                 295 arch/s390/kernel/kprobes.c 	struct kprobe *p;
p                 304 arch/s390/kernel/kprobes.c 	p = get_kprobe((void *)(regs->psw.addr - 2));
p                 306 arch/s390/kernel/kprobes.c 	if (p) {
p                 317 arch/s390/kernel/kprobes.c 			kprobe_reenter_check(kcb, p);
p                 318 arch/s390/kernel/kprobes.c 			push_kprobe(kcb, p);
p                 328 arch/s390/kernel/kprobes.c 			push_kprobe(kcb, p);
p                 330 arch/s390/kernel/kprobes.c 			if (p->pre_handler && p->pre_handler(p, regs)) {
p                 337 arch/s390/kernel/kprobes.c 		enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
p                 365 arch/s390/kernel/kprobes.c static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
p                 461 arch/s390/kernel/kprobes.c static void resume_execution(struct kprobe *p, struct pt_regs *regs)
p                 465 arch/s390/kernel/kprobes.c 	int fixup = probe_get_fixup_type(p->ainsn.insn);
p                 468 arch/s390/kernel/kprobes.c 	if (p->ainsn.is_ftrace_insn) {
p                 469 arch/s390/kernel/kprobes.c 		struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
p                 472 arch/s390/kernel/kprobes.c 		ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
p                 481 arch/s390/kernel/kprobes.c 			regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
p                 486 arch/s390/kernel/kprobes.c 		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
p                 489 arch/s390/kernel/kprobes.c 		int ilen = insn_length(p->ainsn.insn[0] >> 8);
p                 490 arch/s390/kernel/kprobes.c 		if (ip - (unsigned long) p->ainsn.insn == ilen)
p                 491 arch/s390/kernel/kprobes.c 			ip = (unsigned long) p->addr + ilen;
p                 495 arch/s390/kernel/kprobes.c 		int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
p                 496 arch/s390/kernel/kprobes.c 		regs->gprs[reg] += (unsigned long) p->addr -
p                 497 arch/s390/kernel/kprobes.c 				   (unsigned long) p->ainsn.insn;
p                 507 arch/s390/kernel/kprobes.c 	struct kprobe *p = kprobe_running();
p                 509 arch/s390/kernel/kprobes.c 	if (!p)
p                 512 arch/s390/kernel/kprobes.c 	if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
p                 514 arch/s390/kernel/kprobes.c 		p->post_handler(p, regs, 0);
p                 517 arch/s390/kernel/kprobes.c 	resume_execution(p, regs);
p                 536 arch/s390/kernel/kprobes.c 	struct kprobe *p = kprobe_running();
p                 549 arch/s390/kernel/kprobes.c 		disable_singlestep(kcb, regs, (unsigned long) p->addr);
p                 560 arch/s390/kernel/kprobes.c 		kprobes_inc_nmissed_count(p);
p                 569 arch/s390/kernel/kprobes.c 		if (p->fault_handler && p->fault_handler(p, regs, trapnr))
p                 655 arch/s390/kernel/kprobes.c int arch_trampoline_kprobe(struct kprobe *p)
p                 657 arch/s390/kernel/kprobes.c 	return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
p                  36 arch/s390/kernel/module.c 	void *p;
p                  40 arch/s390/kernel/module.c 	p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
p                  43 arch/s390/kernel/module.c 	if (p && (kasan_module_alloc(p, size) < 0)) {
p                  44 arch/s390/kernel/module.c 		vfree(p);
p                  47 arch/s390/kernel/module.c 	return p;
p                2143 arch/s390/kernel/perf_cpum_sf.c #define param_check_sfb_size(name, p) __param_check(name, p, void)
p                  83 arch/s390/kernel/process.c 		    unsigned long arg, struct task_struct *p, unsigned long tls)
p                  91 arch/s390/kernel/process.c 	frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
p                  92 arch/s390/kernel/process.c 	p->thread.ksp = (unsigned long) frame;
p                  94 arch/s390/kernel/process.c 	save_access_regs(&p->thread.acrs[0]);
p                  96 arch/s390/kernel/process.c 	p->thread.mm_segment = get_fs();
p                  98 arch/s390/kernel/process.c 	memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
p                  99 arch/s390/kernel/process.c 	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
p                 100 arch/s390/kernel/process.c 	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
p                 101 arch/s390/kernel/process.c 	p->thread.per_flags = 0;
p                 103 arch/s390/kernel/process.c 	p->thread.user_timer = 0;
p                 104 arch/s390/kernel/process.c 	p->thread.guest_timer = 0;
p                 105 arch/s390/kernel/process.c 	p->thread.system_timer = 0;
p                 106 arch/s390/kernel/process.c 	p->thread.hardirq_timer = 0;
p                 107 arch/s390/kernel/process.c 	p->thread.softirq_timer = 0;
p                 108 arch/s390/kernel/process.c 	p->thread.last_break = 1;
p                 117 arch/s390/kernel/process.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 138 arch/s390/kernel/process.c 	p->thread.ri_cb = NULL;
p                 141 arch/s390/kernel/process.c 	p->thread.gs_cb = NULL;
p                 142 arch/s390/kernel/process.c 	p->thread.gs_bc_cb = NULL;
p                 147 arch/s390/kernel/process.c 			p->thread.acrs[0] = (unsigned int)tls;
p                 149 arch/s390/kernel/process.c 			p->thread.acrs[0] = (unsigned int)(tls >> 32);
p                 150 arch/s390/kernel/process.c 			p->thread.acrs[1] = (unsigned int)tls;
p                 180 arch/s390/kernel/process.c unsigned long get_wchan(struct task_struct *p)
p                 186 arch/s390/kernel/process.c 	if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
p                 189 arch/s390/kernel/process.c 	if (!try_get_task_stack(p))
p                 192 arch/s390/kernel/process.c 	low = task_stack_page(p);
p                 193 arch/s390/kernel/process.c 	high = (struct stack_frame *) task_pt_regs(p);
p                 194 arch/s390/kernel/process.c 	sf = (struct stack_frame *) p->thread.ksp;
p                 210 arch/s390/kernel/process.c 	put_task_stack(p);
p                 130 arch/s390/kernel/sysinfo.c 	if (info->p) {
p                 355 arch/s390/kernel/sysinfo.c static void *service_level_next(struct seq_file *m, void *p, loff_t *pos)
p                 357 arch/s390/kernel/sysinfo.c 	return seq_list_next(p, &service_level_list, pos);
p                 360 arch/s390/kernel/sysinfo.c static void service_level_stop(struct seq_file *m, void *p)
p                 365 arch/s390/kernel/sysinfo.c static int service_level_show(struct seq_file *m, void *p)
p                 369 arch/s390/kernel/sysinfo.c 	slr = list_entry(p, struct service_level, list);
p                 507 arch/s390/kernel/time.c static int __init early_parse_stp(char *p)
p                 509 arch/s390/kernel/time.c 	return kstrtobool(p, &stp_online);
p                 114 arch/s390/kernel/vtime.c static void account_system_index_scaled(struct task_struct *p, u64 cputime,
p                 117 arch/s390/kernel/vtime.c 	p->stimescaled += cputime_to_nsecs(scale_vtime(cputime));
p                 118 arch/s390/kernel/vtime.c 	account_system_index_time(p, cputime_to_nsecs(cputime), index);
p                  25 arch/s390/kvm/gaccess.c 		unsigned long p  : 1; /* Private Space Control */
p                  47 arch/s390/kvm/gaccess.c 		unsigned long p  : 1; /* DAT-Protection Bit */
p                  62 arch/s390/kvm/gaccess.c 		unsigned long p  : 1; /* DAT-Protection Bit */
p                  76 arch/s390/kvm/gaccess.c 	unsigned long p  : 1; /* DAT-Protection Bit */
p                  92 arch/s390/kvm/gaccess.c 	unsigned long p  : 1; /* DAT-Protection Bit */
p                 119 arch/s390/kvm/gaccess.c 	unsigned long p  : 1; /* DAT-Protection Bit */
p                 134 arch/s390/kvm/gaccess.c 	unsigned long p  : 1; /* DAT-Protection Bit */
p                 171 arch/s390/kvm/gaccess.c 		unsigned long p  : 1; /* DAT-Protection Bit */
p                 219 arch/s390/kvm/gaccess.c 		u32 p        : 1;
p                 238 arch/s390/kvm/gaccess.c 	unsigned long p      : 1; /* Private Bit */
p                 407 arch/s390/kvm/gaccess.c 	if (alet.p)
p                 442 arch/s390/kvm/gaccess.c 	if (ale.p == 1) {
p                 677 arch/s390/kvm/gaccess.c 			dat_protection |= rfte.p;
p                 695 arch/s390/kvm/gaccess.c 			dat_protection |= rste.p;
p                 710 arch/s390/kvm/gaccess.c 		if (rtte.cr && asce.p && edat2)
p                 713 arch/s390/kvm/gaccess.c 			dat_protection |= rtte.fc1.p;
p                 723 arch/s390/kvm/gaccess.c 			dat_protection |= rtte.fc0.p;
p                 738 arch/s390/kvm/gaccess.c 		if (ste.cs && asce.p)
p                 741 arch/s390/kvm/gaccess.c 			dat_protection |= ste.fc1.p;
p                 746 arch/s390/kvm/gaccess.c 		dat_protection |= ste.fc0.p;
p                 758 arch/s390/kvm/gaccess.c 	dat_protection |= pte.p;
p                 792 arch/s390/kvm/gaccess.c 	if (psw_bits(*psw).dat && asce.p)
p                1047 arch/s390/kvm/gaccess.c 			*dat_protection |= rfte.p;
p                1072 arch/s390/kvm/gaccess.c 			*dat_protection |= rste.p;
p                1075 arch/s390/kvm/gaccess.c 		rste.p |= *dat_protection;
p                1095 arch/s390/kvm/gaccess.c 		if (rtte.cr && asce.p && sg->edat_level >= 2)
p                1098 arch/s390/kvm/gaccess.c 			*dat_protection |= rtte.fc0.p;
p                1107 arch/s390/kvm/gaccess.c 			*dat_protection |= rtte.fc0.p;
p                1110 arch/s390/kvm/gaccess.c 		rtte.fc0.p |= *dat_protection;
p                1130 arch/s390/kvm/gaccess.c 		if (ste.cs && asce.p)
p                1132 arch/s390/kvm/gaccess.c 		*dat_protection |= ste.fc0.p;
p                1141 arch/s390/kvm/gaccess.c 		ste.fc0.p |= *dat_protection;
p                1198 arch/s390/kvm/gaccess.c 	pte.p |= dat_protection;
p                  19 arch/s390/lib/find.c 	const unsigned long *p = addr;
p                  24 arch/s390/lib/find.c 		if ((tmp = *(p++)))
p                  31 arch/s390/lib/find.c 	tmp = (*p) & (~0UL << (BITS_PER_LONG - size));
p                  42 arch/s390/lib/find.c 	const unsigned long *p = addr + (offset / BITS_PER_LONG);
p                  51 arch/s390/lib/find.c 		tmp = *(p++);
p                  61 arch/s390/lib/find.c 		if ((tmp = *(p++)))
p                  68 arch/s390/lib/find.c 	tmp = *p;
p                  32 arch/s390/lib/string.c 	const char *p = s + n;
p                  36 arch/s390/lib/string.c 		      : "+d" (p), "+a" (s) : "d" (r0) : "cc", "memory");
p                  37 arch/s390/lib/string.c 	return (char *) p;
p                 203 arch/s390/lib/string.c 	char *p = __strend(dest);
p                 205 arch/s390/lib/string.c 	p[len] = '\0';
p                 206 arch/s390/lib/string.c 	memcpy(p, src, len);
p                 291 arch/s390/mm/cmm.c 	char buf[64], *p;
p                 305 arch/s390/mm/cmm.c 		cmm_skip_blanks(buf, &p);
p                 306 arch/s390/mm/cmm.c 		nr = simple_strtoul(p, &p, 0);
p                 307 arch/s390/mm/cmm.c 		cmm_skip_blanks(p, &p);
p                 308 arch/s390/mm/cmm.c 		seconds = simple_strtoul(p, &p, 0);
p                 104 arch/s390/mm/fault.c static int bad_address(void *p)
p                 108 arch/s390/mm/fault.c 	return probe_kernel_address((unsigned long *)p, dummy);
p                 557 arch/s390/numa/mode_emu.c static int __init early_parse_emu_nodes(char *p)
p                 561 arch/s390/numa/mode_emu.c 	if (!p || kstrtoint(p, 0, &count) != 0 || count <= 0)
p                 571 arch/s390/numa/mode_emu.c static int __init early_parse_emu_size(char *p)
p                 573 arch/s390/numa/mode_emu.c 	if (p)
p                 574 arch/s390/numa/mode_emu.c 		emu_size = memparse(p, NULL);
p                 320 arch/sh/boards/board-sh7785lcr.c 	unsigned char *p;
p                 322 arch/sh/boards/board-sh7785lcr.c 	p = ioremap(PLD_POFCR, PLD_POFCR + 1);
p                 323 arch/sh/boards/board-sh7785lcr.c 	if (!p) {
p                 327 arch/sh/boards/board-sh7785lcr.c 	*p = 0x01;
p                 328 arch/sh/boards/board-sh7785lcr.c 	iounmap(p);
p                   4 arch/sh/boot/compressed/cache.c 	volatile unsigned int *p = (volatile unsigned int *) 0x80000000;
p                   8 arch/sh/boot/compressed/cache.c 		(void)*p;
p                   9 arch/sh/boot/compressed/cache.c 		p += (32 / sizeof(int));
p                 106 arch/sh/drivers/dma/dma-api.c 	const char **p;
p                 108 arch/sh/drivers/dma/dma-api.c 	for (p = haystack; *p; p++)
p                 109 arch/sh/drivers/dma/dma-api.c 		if (strcmp(*p, needle) == 0)
p                 132 arch/sh/drivers/dma/dma-api.c 	const char **p;
p                 152 arch/sh/drivers/dma/dma-api.c 		for (p = caps; *p; p++) {
p                 153 arch/sh/drivers/dma/dma-api.c 			if (!search_cap(channel->caps, *p))
p                  30 arch/sh/drivers/pci/fixups-dreamcast.c 	struct pci_channel *p = dev->sysdata;
p                  41 arch/sh/drivers/pci/fixups-dreamcast.c 		dev->resource[1].start	= p->resources[0].start  + 0x100;
p                   5 arch/sh/include/asm/bitops-cas.h static inline unsigned __bo_cas(volatile unsigned *p, unsigned old, unsigned new)
p                   9 arch/sh/include/asm/bitops-cas.h 		: "r"(old), "z"(p)
p                  32 arch/sh/include/asm/bitops-op32.h 		unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  34 arch/sh/include/asm/bitops-op32.h 		*p |= mask;
p                  50 arch/sh/include/asm/bitops-op32.h 		unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  52 arch/sh/include/asm/bitops-op32.h 		*p &= ~mask;
p                  77 arch/sh/include/asm/bitops-op32.h 		unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  79 arch/sh/include/asm/bitops-op32.h 		*p ^= mask;
p                  95 arch/sh/include/asm/bitops-op32.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  96 arch/sh/include/asm/bitops-op32.h 	unsigned long old = *p;
p                  98 arch/sh/include/asm/bitops-op32.h 	*p = old | mask;
p                 114 arch/sh/include/asm/bitops-op32.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                 115 arch/sh/include/asm/bitops-op32.h 	unsigned long old = *p;
p                 117 arch/sh/include/asm/bitops-op32.h 	*p = old & ~mask;
p                 126 arch/sh/include/asm/bitops-op32.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                 127 arch/sh/include/asm/bitops-op32.h 	unsigned long old = *p;
p                 129 arch/sh/include/asm/bitops-op32.h 	*p = old ^ mask;
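The bitops-op32.h lines all follow one pattern: index into the word array with BIT_WORD(), build a single-bit mask, then OR/AND/XOR it in with no atomicity. A generic sketch of that pattern (the macro definitions are the conventional ones, not copied from the indexed header):

	#include <limits.h>

	#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
	#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
	#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

	/* Non-atomic set and test-and-set in the style indexed above. */
	static inline void my_set_bit(unsigned int nr, unsigned long *addr)
	{
		unsigned long mask = BIT_MASK(nr);
		unsigned long *p = addr + BIT_WORD(nr);

		*p |= mask;
	}

	static inline int my_test_and_set_bit(unsigned int nr, unsigned long *addr)
	{
		unsigned long mask = BIT_MASK(nr);
		unsigned long *p = addr + BIT_WORD(nr);
		unsigned long old = *p;

		*p = old | mask;
		return (old & mask) != 0;
	}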
p                  23 arch/sh/include/asm/bugs.h 	char *p = &init_utsname()->machine[2]; /* "sh" */
p                  31 arch/sh/include/asm/bugs.h 		*p++ = '2';
p                  34 arch/sh/include/asm/bugs.h 		*p++ = '2';
p                  35 arch/sh/include/asm/bugs.h 		*p++ = 'a';
p                  38 arch/sh/include/asm/bugs.h 		*p++ = '3';
p                  41 arch/sh/include/asm/bugs.h 		*p++ = '4';
p                  44 arch/sh/include/asm/bugs.h 		*p++ = '4';
p                  45 arch/sh/include/asm/bugs.h 		*p++ = 'a';
p                  48 arch/sh/include/asm/bugs.h 		*p++ = '4';
p                  49 arch/sh/include/asm/bugs.h 		*p++ = 'a';
p                  50 arch/sh/include/asm/bugs.h 		*p++ = 'l';
p                  51 arch/sh/include/asm/bugs.h 		*p++ = '-';
p                  52 arch/sh/include/asm/bugs.h 		*p++ = 'd';
p                  53 arch/sh/include/asm/bugs.h 		*p++ = 's';
p                  54 arch/sh/include/asm/bugs.h 		*p++ = 'p';
p                  57 arch/sh/include/asm/bugs.h 		*p++ = '6';
p                  58 arch/sh/include/asm/bugs.h 		*p++ = '4';
p                  73 arch/sh/include/asm/bugs.h 	*p++ = 'e';
p                  74 arch/sh/include/asm/bugs.h 	*p++ = 'b';
p                  76 arch/sh/include/asm/bugs.h 	*p = '\0';
p                  21 arch/sh/include/asm/cmpxchg-xchg.h 	volatile u32 *p = ptr - off;
p                  32 arch/sh/include/asm/cmpxchg-xchg.h 		oldv = READ_ONCE(*p);
p                  35 arch/sh/include/asm/cmpxchg-xchg.h 	} while (__cmpxchg_u32(p, oldv, newv) != oldv);
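cmpxchg-xchg.h builds xchg() out of a compare-and-swap loop: read the current value, compute the replacement, retry until the cmpxchg observes the value that was read. A hedged C11 sketch of the same loop for a plain 32-bit exchange (the kernel version additionally shifts and masks so 8- and 16-bit exchanges work inside an aligned 32-bit word; that part is omitted here):

	#include <stdatomic.h>
	#include <stdint.h>

	/* Exchange *p with newv using only compare-and-swap, returning the
	 * previous value; mirrors the retry loop in the lines above.        */
	static uint32_t cas_xchg32(_Atomic uint32_t *p, uint32_t newv)
	{
		uint32_t oldv = atomic_load(p);

		while (!atomic_compare_exchange_weak(p, &oldv, newv))
			;	/* oldv was refreshed by the failed CAS */
		return oldv;
	}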
p                  60 arch/sh/include/asm/io.h #define readsb(p,d,l)		__raw_readsb(p,d,l)
p                  61 arch/sh/include/asm/io.h #define readsw(p,d,l)		__raw_readsw(p,d,l)
p                  62 arch/sh/include/asm/io.h #define readsl(p,d,l)		__raw_readsl(p,d,l)
p                  64 arch/sh/include/asm/io.h #define writesb(p,d,l)		__raw_writesb(p,d,l)
p                  65 arch/sh/include/asm/io.h #define writesw(p,d,l)		__raw_writesw(p,d,l)
p                  66 arch/sh/include/asm/io.h #define writesl(p,d,l)		__raw_writesl(p,d,l)
p                 159 arch/sh/include/asm/io.h #define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow)			\
p                 161 arch/sh/include/asm/io.h static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
p                 170 arch/sh/include/asm/io.h static inline type pfx##in##bwlq##p(unsigned long port)			\
p                 382 arch/sh/include/asm/io.h #define xlate_dev_mem_ptr(p)	__va(p)
p                 387 arch/sh/include/asm/io.h #define xlate_dev_kmem_ptr(p)	p
p                  23 arch/sh/include/asm/kprobes.h #define flush_insn_slot(p)		do { } while (0)
p                 184 arch/sh/include/asm/processor_32.h extern unsigned long get_wchan(struct task_struct *p);
p                 206 arch/sh/include/asm/processor_64.h extern unsigned long get_wchan(struct task_struct *p);
p                  13 arch/sh/include/asm/spinlock-cas.h static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new)
p                  17 arch/sh/include/asm/spinlock-cas.h 		: "r"(old), "z"(p)
p                  26 arch/sh/include/asm/unaligned-sh4a.h static inline u16 sh4a_get_unaligned_cpu16(const u8 *p)
p                  29 arch/sh/include/asm/unaligned-sh4a.h 	return p[0] | p[1] << 8;
p                  31 arch/sh/include/asm/unaligned-sh4a.h 	return p[0] << 8 | p[1];
p                  35 arch/sh/include/asm/unaligned-sh4a.h static __always_inline u32 sh4a_get_unaligned_cpu32(const u8 *p)
p                  42 arch/sh/include/asm/unaligned-sh4a.h 		 : "r" (p)
p                  53 arch/sh/include/asm/unaligned-sh4a.h static inline u64 sh4a_get_unaligned_cpu64(const u8 *p)
p                  56 arch/sh/include/asm/unaligned-sh4a.h 	return (u64)sh4a_get_unaligned_cpu32(p + 4) << 32 |
p                  57 arch/sh/include/asm/unaligned-sh4a.h 		    sh4a_get_unaligned_cpu32(p);
p                  59 arch/sh/include/asm/unaligned-sh4a.h 	return (u64)sh4a_get_unaligned_cpu32(p) << 32 |
p                  60 arch/sh/include/asm/unaligned-sh4a.h 		    sh4a_get_unaligned_cpu32(p + 4);
p                  64 arch/sh/include/asm/unaligned-sh4a.h static inline u16 get_unaligned_le16(const void *p)
p                  66 arch/sh/include/asm/unaligned-sh4a.h 	return le16_to_cpu(sh4a_get_unaligned_cpu16(p));
p                  69 arch/sh/include/asm/unaligned-sh4a.h static inline u32 get_unaligned_le32(const void *p)
p                  71 arch/sh/include/asm/unaligned-sh4a.h 	return le32_to_cpu(sh4a_get_unaligned_cpu32(p));
p                  74 arch/sh/include/asm/unaligned-sh4a.h static inline u64 get_unaligned_le64(const void *p)
p                  76 arch/sh/include/asm/unaligned-sh4a.h 	return le64_to_cpu(sh4a_get_unaligned_cpu64(p));
p                  79 arch/sh/include/asm/unaligned-sh4a.h static inline u16 get_unaligned_be16(const void *p)
p                  81 arch/sh/include/asm/unaligned-sh4a.h 	return be16_to_cpu(sh4a_get_unaligned_cpu16(p));
p                  84 arch/sh/include/asm/unaligned-sh4a.h static inline u32 get_unaligned_be32(const void *p)
p                  86 arch/sh/include/asm/unaligned-sh4a.h 	return be32_to_cpu(sh4a_get_unaligned_cpu32(p));
p                  89 arch/sh/include/asm/unaligned-sh4a.h static inline u64 get_unaligned_be64(const void *p)
p                  91 arch/sh/include/asm/unaligned-sh4a.h 	return be64_to_cpu(sh4a_get_unaligned_cpu64(p));
p                  94 arch/sh/include/asm/unaligned-sh4a.h static inline void nonnative_put_le16(u16 val, u8 *p)
p                  96 arch/sh/include/asm/unaligned-sh4a.h 	*p++ = val;
p                  97 arch/sh/include/asm/unaligned-sh4a.h 	*p++ = val >> 8;
p                 100 arch/sh/include/asm/unaligned-sh4a.h static inline void nonnative_put_le32(u32 val, u8 *p)
p                 102 arch/sh/include/asm/unaligned-sh4a.h 	nonnative_put_le16(val, p);
p                 103 arch/sh/include/asm/unaligned-sh4a.h 	nonnative_put_le16(val >> 16, p + 2);
p                 106 arch/sh/include/asm/unaligned-sh4a.h static inline void nonnative_put_le64(u64 val, u8 *p)
p                 108 arch/sh/include/asm/unaligned-sh4a.h 	nonnative_put_le32(val, p);
p                 109 arch/sh/include/asm/unaligned-sh4a.h 	nonnative_put_le32(val >> 32, p + 4);
p                 112 arch/sh/include/asm/unaligned-sh4a.h static inline void nonnative_put_be16(u16 val, u8 *p)
p                 114 arch/sh/include/asm/unaligned-sh4a.h 	*p++ = val >> 8;
p                 115 arch/sh/include/asm/unaligned-sh4a.h 	*p++ = val;
p                 118 arch/sh/include/asm/unaligned-sh4a.h static inline void nonnative_put_be32(u32 val, u8 *p)
p                 120 arch/sh/include/asm/unaligned-sh4a.h 	nonnative_put_be16(val >> 16, p);
p                 121 arch/sh/include/asm/unaligned-sh4a.h 	nonnative_put_be16(val, p + 2);
p                 124 arch/sh/include/asm/unaligned-sh4a.h static inline void nonnative_put_be64(u64 val, u8 *p)
p                 126 arch/sh/include/asm/unaligned-sh4a.h 	nonnative_put_be32(val >> 32, p);
p                 127 arch/sh/include/asm/unaligned-sh4a.h 	nonnative_put_be32(val, p + 4);
p                 130 arch/sh/include/asm/unaligned-sh4a.h static inline void put_unaligned_le16(u16 val, void *p)
p                 133 arch/sh/include/asm/unaligned-sh4a.h 	__put_unaligned_cpu16(val, p);
p                 135 arch/sh/include/asm/unaligned-sh4a.h 	nonnative_put_le16(val, p);
p                 139 arch/sh/include/asm/unaligned-sh4a.h static inline void put_unaligned_le32(u32 val, void *p)
p                 142 arch/sh/include/asm/unaligned-sh4a.h 	__put_unaligned_cpu32(val, p);
p                 144 arch/sh/include/asm/unaligned-sh4a.h 	nonnative_put_le32(val, p);
p                 148 arch/sh/include/asm/unaligned-sh4a.h static inline void put_unaligned_le64(u64 val, void *p)
p                 151 arch/sh/include/asm/unaligned-sh4a.h 	__put_unaligned_cpu64(val, p);
p                 153 arch/sh/include/asm/unaligned-sh4a.h 	nonnative_put_le64(val, p);
p                 157 arch/sh/include/asm/unaligned-sh4a.h static inline void put_unaligned_be16(u16 val, void *p)
p                 160 arch/sh/include/asm/unaligned-sh4a.h 	__put_unaligned_cpu16(val, p);
p                 162 arch/sh/include/asm/unaligned-sh4a.h 	nonnative_put_be16(val, p);
p                 166 arch/sh/include/asm/unaligned-sh4a.h static inline void put_unaligned_be32(u32 val, void *p)
p                 169 arch/sh/include/asm/unaligned-sh4a.h 	__put_unaligned_cpu32(val, p);
p                 171 arch/sh/include/asm/unaligned-sh4a.h 	nonnative_put_be32(val, p);
p                 175 arch/sh/include/asm/unaligned-sh4a.h static inline void put_unaligned_be64(u64 val, void *p)
p                 178 arch/sh/include/asm/unaligned-sh4a.h 	__put_unaligned_cpu64(val, p);
p                 180 arch/sh/include/asm/unaligned-sh4a.h 	nonnative_put_be64(val, p);
p                  32 arch/sh/kernel/cpu/irq/ipr.c 	struct ipr_data *p = irq_data_get_irq_chip_data(data);
p                  33 arch/sh/kernel/cpu/irq/ipr.c 	unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx];
p                  35 arch/sh/kernel/cpu/irq/ipr.c 	__raw_writew(__raw_readw(addr) & (0xffff ^ (0xf << p->shift)), addr);
p                  41 arch/sh/kernel/cpu/irq/ipr.c 	struct ipr_data *p = irq_data_get_irq_chip_data(data);
p                  42 arch/sh/kernel/cpu/irq/ipr.c 	unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx];
p                  44 arch/sh/kernel/cpu/irq/ipr.c 	__raw_writew(__raw_readw(addr) | (p->priority << p->shift), addr);
p                  60 arch/sh/kernel/cpu/irq/ipr.c 		struct ipr_data *p = desc->ipr_data + i;
p                  63 arch/sh/kernel/cpu/irq/ipr.c 		BUG_ON(p->ipr_idx >= desc->nr_offsets);
p                  64 arch/sh/kernel/cpu/irq/ipr.c 		BUG_ON(!desc->ipr_offsets[p->ipr_idx]);
p                  66 arch/sh/kernel/cpu/irq/ipr.c 		res = irq_alloc_desc_at(p->irq, numa_node_id());
p                  67 arch/sh/kernel/cpu/irq/ipr.c 		if (unlikely(res != p->irq && res != -EEXIST)) {
p                  69 arch/sh/kernel/cpu/irq/ipr.c 			       p->irq);
p                  73 arch/sh/kernel/cpu/irq/ipr.c 		disable_irq_nosync(p->irq);
p                  74 arch/sh/kernel/cpu/irq/ipr.c 		irq_set_chip_and_handler_name(p->irq, &desc->chip,
p                  76 arch/sh/kernel/cpu/irq/ipr.c 		irq_set_chip_data(p->irq, p);
p                  77 arch/sh/kernel/cpu/irq/ipr.c 		disable_ipr_irq(irq_get_irq_data(p->irq));
p                  72 arch/sh/kernel/cpu/sh4/sq.c 	struct sq_mapping **p, *tmp;
p                  76 arch/sh/kernel/cpu/sh4/sq.c 	p = &sq_mapping_list;
p                  77 arch/sh/kernel/cpu/sh4/sq.c 	while ((tmp = *p) != NULL)
p                  78 arch/sh/kernel/cpu/sh4/sq.c 		p = &tmp->next;
p                  81 arch/sh/kernel/cpu/sh4/sq.c 	*p = map;
p                  88 arch/sh/kernel/cpu/sh4/sq.c 	struct sq_mapping **p, *tmp;
p                  92 arch/sh/kernel/cpu/sh4/sq.c 	for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next)
p                  94 arch/sh/kernel/cpu/sh4/sq.c 			*p = tmp->next;
p                 208 arch/sh/kernel/cpu/sh4/sq.c 	struct sq_mapping **p, *map;
p                 211 arch/sh/kernel/cpu/sh4/sq.c 	for (p = &sq_mapping_list; (map = *p); p = &map->next)
p                 291 arch/sh/kernel/cpu/sh4/sq.c 	char *p = buf;
p                 294 arch/sh/kernel/cpu/sh4/sq.c 		p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n",
p                 298 arch/sh/kernel/cpu/sh4/sq.c 	return p - buf;
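The sq.c lines use the pointer-to-pointer idiom for a singly linked list: p always holds the address of the link that would need to change, so append and unlink require no special case for the head. A generic sketch of that idiom (struct and function names are illustrative, not from the indexed file):

	struct node {
		struct node *next;
		int val;
	};

	/* Append: walk to the terminating NULL link, then point it at n. */
	static void list_append(struct node **head, struct node *n)
	{
		struct node **p = head, *tmp;

		while ((tmp = *p) != NULL)
			p = &tmp->next;
		n->next = NULL;
		*p = n;
	}

	/* Unlink: find the link that points at n and bypass it. */
	static void list_unlink(struct node **head, struct node *n)
	{
		struct node **p, *tmp;

		for (p = head; (tmp = *p); p = &tmp->next)
			if (tmp == n) {
				*p = tmp->next;
				break;
			}
	}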
p                  21 arch/sh/kernel/dumpstack.c 	unsigned long p;
p                  26 arch/sh/kernel/dumpstack.c 	for (p = bottom & ~31; p < top; ) {
p                  27 arch/sh/kernel/dumpstack.c 		printk("%04lx: ", p & 0xffff);
p                  29 arch/sh/kernel/dumpstack.c 		for (i = 0; i < 8; i++, p += 4) {
p                  32 arch/sh/kernel/dumpstack.c 			if (p < bottom || p >= top)
p                  35 arch/sh/kernel/dumpstack.c 				if (__get_user(val, (unsigned int __user *)p)) {
p                 735 arch/sh/kernel/dwarf.c static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
p                 758 arch/sh/kernel/dwarf.c 	cie->version = *(char *)p++;
p                 761 arch/sh/kernel/dwarf.c 	cie->augmentation = p;
p                 762 arch/sh/kernel/dwarf.c 	p += strlen(cie->augmentation) + 1;
p                 764 arch/sh/kernel/dwarf.c 	count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
p                 765 arch/sh/kernel/dwarf.c 	p += count;
p                 767 arch/sh/kernel/dwarf.c 	count = dwarf_read_leb128(p, &cie->data_alignment_factor);
p                 768 arch/sh/kernel/dwarf.c 	p += count;
p                 775 arch/sh/kernel/dwarf.c 		cie->return_address_reg = __raw_readb(p);
p                 776 arch/sh/kernel/dwarf.c 		p++;
p                 778 arch/sh/kernel/dwarf.c 		count = dwarf_read_uleb128(p, &cie->return_address_reg);
p                 779 arch/sh/kernel/dwarf.c 		p += count;
p                 786 arch/sh/kernel/dwarf.c 		count = dwarf_read_uleb128(p, &length);
p                 787 arch/sh/kernel/dwarf.c 		p += count;
p                 789 arch/sh/kernel/dwarf.c 		UNWINDER_BUG_ON((unsigned char *)p > end);
p                 791 arch/sh/kernel/dwarf.c 		cie->initial_instructions = p + length;
p                 801 arch/sh/kernel/dwarf.c 			p++;
p                 809 arch/sh/kernel/dwarf.c 			cie->encoding = *(char *)p++;
p                 825 arch/sh/kernel/dwarf.c 			p = cie->initial_instructions;
p                 826 arch/sh/kernel/dwarf.c 			UNWINDER_BUG_ON(!p);
p                 831 arch/sh/kernel/dwarf.c 	cie->initial_instructions = p;
p                 875 arch/sh/kernel/dwarf.c 	void *p = start;
p                 887 arch/sh/kernel/dwarf.c 	fde->cie_pointer = (unsigned long)(p - entry_type - 4);
p                 893 arch/sh/kernel/dwarf.c 		count = dwarf_read_encoded_value(p, &fde->initial_location,
p                 896 arch/sh/kernel/dwarf.c 		count = dwarf_read_addr(p, &fde->initial_location);
p                 898 arch/sh/kernel/dwarf.c 	p += count;
p                 901 arch/sh/kernel/dwarf.c 		count = dwarf_read_encoded_value(p, &fde->address_range,
p                 904 arch/sh/kernel/dwarf.c 		count = dwarf_read_addr(p, &fde->address_range);
p                 906 arch/sh/kernel/dwarf.c 	p += count;
p                 910 arch/sh/kernel/dwarf.c 		count = dwarf_read_uleb128(p, &length);
p                 911 arch/sh/kernel/dwarf.c 		p += count + length;
p                 915 arch/sh/kernel/dwarf.c 	fde->instructions = p;
p                1028 arch/sh/kernel/dwarf.c 	void *p, *entry;
p                1039 arch/sh/kernel/dwarf.c 		p = entry;
p                1041 arch/sh/kernel/dwarf.c 		count = dwarf_entry_len(p, &len);
p                1053 arch/sh/kernel/dwarf.c 			p += count;
p                1056 arch/sh/kernel/dwarf.c 		end = p + len;
p                1058 arch/sh/kernel/dwarf.c 		entry_type = get_unaligned((u32 *)p);
p                1059 arch/sh/kernel/dwarf.c 		p += 4;
p                1062 arch/sh/kernel/dwarf.c 			err = dwarf_parse_cie(entry, p, len, end, mod);
p                1068 arch/sh/kernel/dwarf.c 			err = dwarf_parse_fde(entry, entry_type, p, len,
p                  41 arch/sh/kernel/irq.c int arch_show_interrupts(struct seq_file *p, int prec)
p                  45 arch/sh/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "NMI");
p                  47 arch/sh/kernel/irq.c 		seq_printf(p, "%10u ", nmi_count(j));
p                  48 arch/sh/kernel/irq.c 	seq_printf(p, "  Non-maskable interrupts\n");
p                  50 arch/sh/kernel/irq.c 	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
p                 223 arch/sh/kernel/kgdb.c void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
p                 225 arch/sh/kernel/kgdb.c 	struct pt_regs *thread_regs = task_pt_regs(p);
p                 242 arch/sh/kernel/kgdb.c 	gdb_regs[GDB_R15] = p->thread.sp;
p                 243 arch/sh/kernel/kgdb.c 	gdb_regs[GDB_PC] = p->thread.pc;
p                  40 arch/sh/kernel/kprobes.c int __kprobes arch_prepare_kprobe(struct kprobe *p)
p                  42 arch/sh/kernel/kprobes.c 	kprobe_opcode_t opcode = *(kprobe_opcode_t *) (p->addr);
p                  47 arch/sh/kernel/kprobes.c 	p->opcode = opcode;
p                  52 arch/sh/kernel/kprobes.c void __kprobes arch_copy_kprobe(struct kprobe *p)
p                  54 arch/sh/kernel/kprobes.c 	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p                  55 arch/sh/kernel/kprobes.c 	p->opcode = *p->addr;
p                  58 arch/sh/kernel/kprobes.c void __kprobes arch_arm_kprobe(struct kprobe *p)
p                  60 arch/sh/kernel/kprobes.c 	*p->addr = BREAKPOINT_INSTRUCTION;
p                  61 arch/sh/kernel/kprobes.c 	flush_icache_range((unsigned long)p->addr,
p                  62 arch/sh/kernel/kprobes.c 			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
p                  65 arch/sh/kernel/kprobes.c void __kprobes arch_disarm_kprobe(struct kprobe *p)
p                  67 arch/sh/kernel/kprobes.c 	*p->addr = p->opcode;
p                  68 arch/sh/kernel/kprobes.c 	flush_icache_range((unsigned long)p->addr,
p                  69 arch/sh/kernel/kprobes.c 			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
p                  72 arch/sh/kernel/kprobes.c int __kprobes arch_trampoline_kprobe(struct kprobe *p)
p                  74 arch/sh/kernel/kprobes.c 	if (*p->addr == BREAKPOINT_INSTRUCTION)
p                  88 arch/sh/kernel/kprobes.c 	struct kprobe *p = get_kprobe((kprobe_opcode_t *) pc + 1);
p                  90 arch/sh/kernel/kprobes.c 	if (p != NULL) {
p                  93 arch/sh/kernel/kprobes.c 		unregister_kprobe(p);
p                 100 arch/sh/kernel/kprobes.c void __kprobes arch_remove_kprobe(struct kprobe *p)
p                 105 arch/sh/kernel/kprobes.c 		arch_disarm_kprobe(p);
p                 133 arch/sh/kernel/kprobes.c static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
p                 136 arch/sh/kernel/kprobes.c 	__this_cpu_write(current_kprobe, p);
p                 144 arch/sh/kernel/kprobes.c static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
p                 148 arch/sh/kernel/kprobes.c 	if (p != NULL) {
p                 151 arch/sh/kernel/kprobes.c 		arch_disarm_kprobe(p);
p                 156 arch/sh/kernel/kprobes.c 		if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) {
p                 157 arch/sh/kernel/kprobes.c 			unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
p                 159 arch/sh/kernel/kprobes.c 		} else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) {
p                 160 arch/sh/kernel/kprobes.c 			unsigned long disp = (p->opcode & 0x0FFF);
p                 164 arch/sh/kernel/kprobes.c 		} else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) {
p                 165 arch/sh/kernel/kprobes.c 			unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
p                 170 arch/sh/kernel/kprobes.c 		} else if (OPCODE_RTS(p->opcode)) {
p                 173 arch/sh/kernel/kprobes.c 		} else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) {
p                 174 arch/sh/kernel/kprobes.c 			unsigned long disp = (p->opcode & 0x00FF);
p                 176 arch/sh/kernel/kprobes.c 			op1->addr = p->addr + 1;
p                 183 arch/sh/kernel/kprobes.c 		} else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) {
p                 184 arch/sh/kernel/kprobes.c 			unsigned long disp = (p->opcode & 0x00FF);
p                 186 arch/sh/kernel/kprobes.c 			op1->addr = p->addr + 2;
p                 194 arch/sh/kernel/kprobes.c 			op1->addr = p->addr + 1;
p                 214 arch/sh/kernel/kprobes.c 	struct kprobe *p;
p                 230 arch/sh/kernel/kprobes.c 		p = get_kprobe(addr);
p                 231 arch/sh/kernel/kprobes.c 		if (p) {
p                 233 arch/sh/kernel/kprobes.c 			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
p                 243 arch/sh/kernel/kprobes.c 			set_current_kprobe(p, regs, kcb);
p                 244 arch/sh/kernel/kprobes.c 			kprobes_inc_nmissed_count(p);
p                 245 arch/sh/kernel/kprobes.c 			prepare_singlestep(p, regs);
p                 252 arch/sh/kernel/kprobes.c 	p = get_kprobe(addr);
p                 253 arch/sh/kernel/kprobes.c 	if (!p) {
p                 269 arch/sh/kernel/kprobes.c 	set_current_kprobe(p, regs, kcb);
p                 272 arch/sh/kernel/kprobes.c 	if (p->pre_handler && p->pre_handler(p, regs)) {
p                 279 arch/sh/kernel/kprobes.c 	prepare_singlestep(p, regs);
p                 303 arch/sh/kernel/kprobes.c int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
p                 368 arch/sh/kernel/kprobes.c 	struct kprobe *p = NULL;
p                 378 arch/sh/kernel/kprobes.c 	p = this_cpu_ptr(&saved_next_opcode);
p                 379 arch/sh/kernel/kprobes.c 	if (p->addr) {
p                 380 arch/sh/kernel/kprobes.c 		arch_disarm_kprobe(p);
p                 381 arch/sh/kernel/kprobes.c 		p->addr = NULL;
p                 382 arch/sh/kernel/kprobes.c 		p->opcode = 0;
p                 387 arch/sh/kernel/kprobes.c 		p = get_kprobe(addr);
p                 388 arch/sh/kernel/kprobes.c 		arch_arm_kprobe(p);
p                 390 arch/sh/kernel/kprobes.c 		p = this_cpu_ptr(&saved_next_opcode2);
p                 391 arch/sh/kernel/kprobes.c 		if (p->addr) {
p                 392 arch/sh/kernel/kprobes.c 			arch_disarm_kprobe(p);
p                 393 arch/sh/kernel/kprobes.c 			p->addr = NULL;
p                 394 arch/sh/kernel/kprobes.c 			p->opcode = 0;
p                 481 arch/sh/kernel/kprobes.c 	struct kprobe *p = NULL;
p                 498 arch/sh/kernel/kprobes.c 			p = get_kprobe(addr);
p                  47 arch/sh/kernel/nmi_debug.c 	char *p, *sep;
p                  54 arch/sh/kernel/nmi_debug.c 	for (p = str + 1; *p; p = sep + 1) {
p                  55 arch/sh/kernel/nmi_debug.c 		sep = strchr(p, ',');
p                  58 arch/sh/kernel/nmi_debug.c 		if (strcmp(p, "state") == 0)
p                  60 arch/sh/kernel/nmi_debug.c 		else if (strcmp(p, "regs") == 0)
p                  62 arch/sh/kernel/nmi_debug.c 		else if (strcmp(p, "debounce") == 0)
p                  64 arch/sh/kernel/nmi_debug.c 		else if (strcmp(p, "die") == 0)
p                  68 arch/sh/kernel/nmi_debug.c 				p);
p                 119 arch/sh/kernel/process_32.c 		unsigned long arg, struct task_struct *p)
p                 121 arch/sh/kernel/process_32.c 	struct thread_info *ti = task_thread_info(p);
p                 132 arch/sh/kernel/process_32.c 		p->thread.dsp_status = tsk->thread.dsp_status;
p                 136 arch/sh/kernel/process_32.c 	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
p                 138 arch/sh/kernel/process_32.c 	childregs = task_pt_regs(p);
p                 139 arch/sh/kernel/process_32.c 	p->thread.sp = (unsigned long) childregs;
p                 140 arch/sh/kernel/process_32.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 142 arch/sh/kernel/process_32.c 		p->thread.pc = (unsigned long) ret_from_kernel_thread;
p                 151 arch/sh/kernel/process_32.c 		p->thread.fpu_counter = 0;
p                 164 arch/sh/kernel/process_32.c 	p->thread.pc = (unsigned long) ret_from_fork;
p                 208 arch/sh/kernel/process_32.c unsigned long get_wchan(struct task_struct *p)
p                 212 arch/sh/kernel/process_32.c 	if (!p || p == current || p->state == TASK_RUNNING)
p                 218 arch/sh/kernel/process_32.c 	pc = thread_saved_pc(p);
p                 222 arch/sh/kernel/process_32.c 		unsigned long schedule_frame = (unsigned long)p->thread.sp;
p                 374 arch/sh/kernel/process_64.c 		unsigned long arg, struct task_struct *p)
p                 389 arch/sh/kernel/process_64.c 	childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;
p                 390 arch/sh/kernel/process_64.c 	p->thread.sp = (unsigned long) childregs;
p                 392 arch/sh/kernel/process_64.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 398 arch/sh/kernel/process_64.c 		p->thread.pc = (unsigned long) ret_from_kernel_thread;
p                 411 arch/sh/kernel/process_64.c 	p->thread.uregs = childregs;
p                 416 arch/sh/kernel/process_64.c 	p->thread.pc = (unsigned long) ret_from_fork;
p                 432 arch/sh/kernel/process_64.c unsigned long get_wchan(struct task_struct *p)
p                 436 arch/sh/kernel/process_64.c 	if (!p || p == current || p->state == TASK_RUNNING)
p                 442 arch/sh/kernel/process_64.c 	pc = thread_saved_pc(p);
p                 450 arch/sh/kernel/process_64.c 		sh64_switch_to_fp = (long) p->thread.sp;
p                 107 arch/sh/kernel/setup.c static int __init early_parse_mem(char *p)
p                 109 arch/sh/kernel/setup.c 	if (!p)
p                 112 arch/sh/kernel/setup.c 	memory_limit = PAGE_ALIGN(memparse(p, &p));
p                 138 arch/sh/kernel/traps_64.c 	unsigned char *p, *q;
p                 139 arch/sh/kernel/traps_64.c 	p = (unsigned char *) (int) address;
p                 141 arch/sh/kernel/traps_64.c 	q[0] = p[0];
p                 142 arch/sh/kernel/traps_64.c 	q[1] = p[1];
p                 154 arch/sh/kernel/traps_64.c 	unsigned char *p, *q;
p                 155 arch/sh/kernel/traps_64.c 	p = (unsigned char *) (int) address;
p                 159 arch/sh/kernel/traps_64.c 	p[0] = q[0];
p                 160 arch/sh/kernel/traps_64.c 	p[1] = q[1];
p                  31 arch/sh/mm/asids-debugfs.c 	struct task_struct *p;
p                  35 arch/sh/mm/asids-debugfs.c 	for_each_process(p) {
p                  36 arch/sh/mm/asids-debugfs.c 		int pid = p->pid;
p                  41 arch/sh/mm/asids-debugfs.c 		if (p->mm)
p                  43 arch/sh/mm/asids-debugfs.c 				   cpu_asid(smp_processor_id(), p->mm));
p                  47 arch/sh/mm/cache-sh3.c 			unsigned long data, addr, p;
p                  49 arch/sh/mm/cache-sh3.c 			p = __pa(v);
p                  55 arch/sh/mm/cache-sh3.c 			    (p & CACHE_PHYSADDR_MASK)) {
p                 323 arch/sh/mm/cache-sh4.c 	unsigned long a, ea, p;
p                 355 arch/sh/mm/cache-sh4.c 		p = phys;
p                 358 arch/sh/mm/cache-sh4.c 			*(volatile unsigned long *)a = p;
p                 364 arch/sh/mm/cache-sh4.c 			*(volatile unsigned long *)(a+32) = p;
p                 366 arch/sh/mm/cache-sh4.c 			p += 64;
p                  22 arch/sh/mm/consistent.c 	char *p = boot_command_line;
p                  25 arch/sh/mm/consistent.c 	while ((p = strstr(p, "memchunk."))) {
p                  26 arch/sh/mm/consistent.c 		p += 9; /* strlen("memchunk.") */
p                  27 arch/sh/mm/consistent.c 		if (!strncmp(name, p, k) && p[k] == '=') {
p                  28 arch/sh/mm/consistent.c 			p += k + 1;
p                  29 arch/sh/mm/consistent.c 			*sizep = memparse(p, NULL);
p                 109 arch/sh/mm/ioremap.c 	struct vm_struct *p;
p                 129 arch/sh/mm/ioremap.c 	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
p                 130 arch/sh/mm/ioremap.c 	if (!p) {
p                 135 arch/sh/mm/ioremap.c 	kfree(p);
p                 775 arch/sh/mm/pmb.c static int __init early_pmb(char *p)
p                 777 arch/sh/mm/pmb.c 	if (!p)
p                 780 arch/sh/mm/pmb.c 	if (strstr(p, "iomap"))
p                  43 arch/sparc/boot/piggyback.c static unsigned short ld2(char *p)
p                  45 arch/sparc/boot/piggyback.c 	return (p[0] << 8) | p[1];
p                  49 arch/sparc/boot/piggyback.c static void st4(char *p, unsigned int x)
p                  51 arch/sparc/boot/piggyback.c 	p[0] = x >> 24;
p                  52 arch/sparc/boot/piggyback.c 	p[1] = x >> 16;
p                  53 arch/sparc/boot/piggyback.c 	p[2] = x >> 8;
p                  54 arch/sparc/boot/piggyback.c 	p[3] = x;
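piggyback.c accesses multi-byte image-header fields bytewise so the host tool behaves the same regardless of its own endianness: ld2() assembles a big-endian 16-bit value, st4() stores a 32-bit value most-significant byte first. A matching sketch with a 32-bit load added for symmetry (ld4 is my addition and is not in the indexed file; st4 is re-typed here only to keep the pair self-contained):

	/* Big-endian byte (de)serialisation in the style of ld2()/st4(). */
	static unsigned int ld4(const unsigned char *p)
	{
		return ((unsigned int)p[0] << 24) | (p[1] << 16) |
		       (p[2] << 8) | p[3];
	}

	static void st4(unsigned char *p, unsigned int x)
	{
		p[0] = x >> 24;
		p[1] = x >> 16;
		p[2] = x >> 8;
		p[3] = x;
	}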
p                  41 arch/sparc/include/asm/barrier_64.h #define __smp_store_release(p, v)						\
p                  43 arch/sparc/include/asm/barrier_64.h 	compiletime_assert_atomic_type(*p);				\
p                  45 arch/sparc/include/asm/barrier_64.h 	WRITE_ONCE(*p, v);						\
p                  48 arch/sparc/include/asm/barrier_64.h #define __smp_load_acquire(p)						\
p                  50 arch/sparc/include/asm/barrier_64.h 	typeof(*p) ___p1 = READ_ONCE(*p);				\
p                  51 arch/sparc/include/asm/barrier_64.h 	compiletime_assert_atomic_type(*p);				\
p                  16 arch/sparc/include/asm/ebus_dma.h 	void (*callback)(struct ebus_dma_info *p, int event, void *cookie);
p                  26 arch/sparc/include/asm/ebus_dma.h int ebus_dma_register(struct ebus_dma_info *p);
p                  27 arch/sparc/include/asm/ebus_dma.h int ebus_dma_irq_enable(struct ebus_dma_info *p, int on);
p                  28 arch/sparc/include/asm/ebus_dma.h void ebus_dma_unregister(struct ebus_dma_info *p);
p                  29 arch/sparc/include/asm/ebus_dma.h int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr,
p                  31 arch/sparc/include/asm/ebus_dma.h void ebus_dma_prepare(struct ebus_dma_info *p, int write);
p                  32 arch/sparc/include/asm/ebus_dma.h unsigned int ebus_dma_residue(struct ebus_dma_info *p);
p                  33 arch/sparc/include/asm/ebus_dma.h unsigned int ebus_dma_addr(struct ebus_dma_info *p);
p                  34 arch/sparc/include/asm/ebus_dma.h void ebus_dma_enable(struct ebus_dma_info *p, int on);
p                 432 arch/sparc/include/asm/floppy_64.h void sun_pci_fd_dma_callback(struct ebus_dma_info *p, int event, void *cookie)
p                 275 arch/sparc/include/asm/io_64.h #define ioread8_rep(p,d,l)	readsb(p,d,l)
p                 276 arch/sparc/include/asm/io_64.h #define ioread16_rep(p,d,l)	readsw(p,d,l)
p                 277 arch/sparc/include/asm/io_64.h #define ioread32_rep(p,d,l)	readsl(p,d,l)
p                 278 arch/sparc/include/asm/io_64.h #define iowrite8_rep(p,d,l)	writesb(p,d,l)
p                 279 arch/sparc/include/asm/io_64.h #define iowrite16_rep(p,d,l)	writesw(p,d,l)
p                 280 arch/sparc/include/asm/io_64.h #define iowrite32_rep(p,d,l)	writesl(p,d,l)
p                 452 arch/sparc/include/asm/io_64.h #define xlate_dev_mem_ptr(p)	__va(p)
p                 457 arch/sparc/include/asm/io_64.h #define xlate_dev_kmem_ptr(p)	p
p                  20 arch/sparc/include/asm/kprobes.h #define arch_remove_kprobe(p)	do {} while (0)
p                  22 arch/sparc/include/asm/kprobes.h #define flush_insn_slot(p)		\
p                  23 arch/sparc/include/asm/kprobes.h do { 	flushi(&(p)->ainsn.insn[0]);	\
p                  24 arch/sparc/include/asm/kprobes.h 	flushi(&(p)->ainsn.insn[1]);	\
p                 116 arch/sparc/include/asm/parport.h 	struct parport *p;
p                 121 arch/sparc/include/asm/parport.h 		p = parport_pc_probe_port(base, base + 0x400,
p                 124 arch/sparc/include/asm/parport.h 		if (!p)
p                 126 arch/sparc/include/asm/parport.h 		dev_set_drvdata(&op->dev, p);
p                 171 arch/sparc/include/asm/parport.h 	p = parport_pc_probe_port(base, base + 0x400,
p                 177 arch/sparc/include/asm/parport.h 	if (!p)
p                 180 arch/sparc/include/asm/parport.h 	dev_set_drvdata(&op->dev, p);
p                 200 arch/sparc/include/asm/parport.h 	struct parport *p = dev_get_drvdata(&op->dev);
p                 201 arch/sparc/include/asm/parport.h 	int slot = p->dma;
p                 203 arch/sparc/include/asm/parport.h 	parport_pc_unregister_port(p);
p                  37 arch/sparc/include/asm/vga.h static inline void scr_memsetw(u16 *p, u16 v, unsigned int n)
p                  39 arch/sparc/include/asm/vga.h 	BUG_ON((long) p >= 0);
p                  41 arch/sparc/include/asm/vga.h 	memset16(p, cpu_to_le16(v), n / 2);
p                  37 arch/sparc/kernel/central.c static int clock_board_calc_nslots(struct clock_board *p)
p                  39 arch/sparc/kernel/central.c 	u8 reg = upa_readb(p->clock_regs + CLOCK_STAT1) & 0xc0;
p                  50 arch/sparc/kernel/central.c 		if (p->clock_ver_reg)
p                  51 arch/sparc/kernel/central.c 			reg = upa_readb(p->clock_ver_reg);
p                  66 arch/sparc/kernel/central.c 	struct clock_board *p = kzalloc(sizeof(*p), GFP_KERNEL);
p                  69 arch/sparc/kernel/central.c 	if (!p) {
p                  74 arch/sparc/kernel/central.c 	p->clock_freq_regs = of_ioremap(&op->resource[0], 0,
p                  77 arch/sparc/kernel/central.c 	if (!p->clock_freq_regs) {
p                  82 arch/sparc/kernel/central.c 	p->clock_regs = of_ioremap(&op->resource[1], 0,
p                  85 arch/sparc/kernel/central.c 	if (!p->clock_regs) {
p                  91 arch/sparc/kernel/central.c 		p->clock_ver_reg = of_ioremap(&op->resource[2], 0,
p                  94 arch/sparc/kernel/central.c 		if (!p->clock_ver_reg) {
p                 100 arch/sparc/kernel/central.c 	p->num_slots = clock_board_calc_nslots(p);
p                 102 arch/sparc/kernel/central.c 	p->leds_resource.start = (unsigned long)
p                 103 arch/sparc/kernel/central.c 		(p->clock_regs + CLOCK_CTRL);
p                 104 arch/sparc/kernel/central.c 	p->leds_resource.end = p->leds_resource.start;
p                 105 arch/sparc/kernel/central.c 	p->leds_resource.name = "leds";
p                 107 arch/sparc/kernel/central.c 	p->leds_pdev.name = "sunfire-clockboard-leds";
p                 108 arch/sparc/kernel/central.c 	p->leds_pdev.id = -1;
p                 109 arch/sparc/kernel/central.c 	p->leds_pdev.resource = &p->leds_resource;
p                 110 arch/sparc/kernel/central.c 	p->leds_pdev.num_resources = 1;
p                 111 arch/sparc/kernel/central.c 	p->leds_pdev.dev.parent = &op->dev;
p                 113 arch/sparc/kernel/central.c 	err = platform_device_register(&p->leds_pdev);
p                 121 arch/sparc/kernel/central.c 	       p->num_slots);
p                 128 arch/sparc/kernel/central.c 	if (p->clock_ver_reg)
p                 129 arch/sparc/kernel/central.c 		of_iounmap(&op->resource[2], p->clock_ver_reg,
p                 133 arch/sparc/kernel/central.c 	of_iounmap(&op->resource[1], p->clock_regs,
p                 137 arch/sparc/kernel/central.c 	of_iounmap(&op->resource[0], p->clock_freq_regs,
p                 141 arch/sparc/kernel/central.c 	kfree(p);
p                 162 arch/sparc/kernel/central.c 	struct fhc *p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 166 arch/sparc/kernel/central.c 	if (!p) {
p                 172 arch/sparc/kernel/central.c 		p->central = true;
p                 174 arch/sparc/kernel/central.c 	p->pregs = of_ioremap(&op->resource[0], 0,
p                 177 arch/sparc/kernel/central.c 	if (!p->pregs) {
p                 182 arch/sparc/kernel/central.c 	if (p->central) {
p                 183 arch/sparc/kernel/central.c 		reg = upa_readl(p->pregs + FHC_PREGS_BSR);
p                 184 arch/sparc/kernel/central.c 		p->board_num = ((reg >> 16) & 1) | ((reg >> 12) & 0x0e);
p                 186 arch/sparc/kernel/central.c 		p->board_num = of_getintprop_default(op->dev.of_node, "board#", -1);
p                 187 arch/sparc/kernel/central.c 		if (p->board_num == -1) {
p                 191 arch/sparc/kernel/central.c 		if (upa_readl(p->pregs + FHC_PREGS_JCTRL) & FHC_JTAG_CTRL_MENAB)
p                 192 arch/sparc/kernel/central.c 			p->jtag_master = true;
p                 195 arch/sparc/kernel/central.c 	if (!p->central) {
p                 196 arch/sparc/kernel/central.c 		p->leds_resource.start = (unsigned long)
p                 197 arch/sparc/kernel/central.c 			(p->pregs + FHC_PREGS_CTRL);
p                 198 arch/sparc/kernel/central.c 		p->leds_resource.end = p->leds_resource.start;
p                 199 arch/sparc/kernel/central.c 		p->leds_resource.name = "leds";
p                 201 arch/sparc/kernel/central.c 		p->leds_pdev.name = "sunfire-fhc-leds";
p                 202 arch/sparc/kernel/central.c 		p->leds_pdev.id = p->board_num;
p                 203 arch/sparc/kernel/central.c 		p->leds_pdev.resource = &p->leds_resource;
p                 204 arch/sparc/kernel/central.c 		p->leds_pdev.num_resources = 1;
p                 205 arch/sparc/kernel/central.c 		p->leds_pdev.dev.parent = &op->dev;
p                 207 arch/sparc/kernel/central.c 		err = platform_device_register(&p->leds_pdev);
p                 214 arch/sparc/kernel/central.c 	reg = upa_readl(p->pregs + FHC_PREGS_CTRL);
p                 216 arch/sparc/kernel/central.c 	if (!p->central)
p                 223 arch/sparc/kernel/central.c 	upa_writel(reg, p->pregs + FHC_PREGS_CTRL);
p                 224 arch/sparc/kernel/central.c 	upa_readl(p->pregs + FHC_PREGS_CTRL);
p                 226 arch/sparc/kernel/central.c 	reg = upa_readl(p->pregs + FHC_PREGS_ID);
p                 228 arch/sparc/kernel/central.c 	       p->board_num,
p                 232 arch/sparc/kernel/central.c 	       (p->jtag_master ?
p                 234 arch/sparc/kernel/central.c 		(p->central ? "(Central)" : "")));
p                 242 arch/sparc/kernel/central.c 	of_iounmap(&op->resource[0], p->pregs, resource_size(&op->resource[0]));
p                 245 arch/sparc/kernel/central.c 	kfree(p);
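
The central.c lines above follow the usual SPARC OF driver shape: kzalloc a per-device struct, of_ioremap() each resource, hook up any child devices, and unwind with of_iounmap()/kfree() on failure. A stripped-down, hedged sketch of that shape (the struct name, single resource, and the "my_board" mapping label are illustrative, not taken from the listing):

	#include <linux/platform_device.h>
	#include <linux/ioport.h>
	#include <linux/slab.h>
	#include <asm/io.h>

	struct my_board {
		void __iomem *regs;
	};

	static int my_probe(struct platform_device *op)
	{
		struct my_board *p = kzalloc(sizeof(*p), GFP_KERNEL);

		if (!p)
			return -ENOMEM;

		/* map the first OF resource, named for /proc/iomem */
		p->regs = of_ioremap(&op->resource[0], 0,
				     resource_size(&op->resource[0]), "my_board");
		if (!p->regs) {
			kfree(p);
			return -ENOMEM;
		}

		dev_set_drvdata(&op->dev, p);
		return 0;
	}

	static int my_remove(struct platform_device *op)
	{
		struct my_board *p = dev_get_drvdata(&op->dev);

		of_iounmap(&op->resource[0], p->regs,
			   resource_size(&op->resource[0]));
		kfree(p);
		return 0;
	}
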
p                  74 arch/sparc/kernel/chmc.c 	struct chmc		*p;
p                 241 arch/sparc/kernel/chmc.c 		struct jbusmc_obp_mem_layout *p = _prop;
p                 247 arch/sparc/kernel/chmc.c 		map_val = p->map.dimm_map[dimm_map_index];
p                 249 arch/sparc/kernel/chmc.c 		*dimm_str_p = p->dimm_labels[base_dimm_offset + map_val];
p                 250 arch/sparc/kernel/chmc.c 		*pin_p = p->map.pin_map[cache_line_offset];
p                 252 arch/sparc/kernel/chmc.c 		struct chmc_obp_mem_layout *p = _prop;
p                 257 arch/sparc/kernel/chmc.c 		if (p->symmetric)
p                 258 arch/sparc/kernel/chmc.c 			mp = &p->map[0];
p                 260 arch/sparc/kernel/chmc.c 			mp = &p->map[1];
p                 268 arch/sparc/kernel/chmc.c 		*dimm_str_p = p->dimm_labels[base_dimm_offset + map_val];
p                 275 arch/sparc/kernel/chmc.c 	struct jbusmc *p;
p                 277 arch/sparc/kernel/chmc.c 	list_for_each_entry(p, &mctrl_list, list) {
p                 280 arch/sparc/kernel/chmc.c 		for (i = 0; i < p->num_dimm_groups; i++) {
p                 281 arch/sparc/kernel/chmc.c 			struct jbusmc_dimm_group *dp = &p->dimm_groups[i];
p                 299 arch/sparc/kernel/chmc.c 	struct jbusmc *p;
p                 312 arch/sparc/kernel/chmc.c 	p = dp->controller;
p                 313 arch/sparc/kernel/chmc.c 	prop = &p->layout;
p                 367 arch/sparc/kernel/chmc.c static void jbusmc_construct_one_dimm_group(struct jbusmc *p,
p                 372 arch/sparc/kernel/chmc.c 	struct jbusmc_dimm_group *dp = &p->dimm_groups[index];
p                 374 arch/sparc/kernel/chmc.c 	dp->controller = p;
p                 377 arch/sparc/kernel/chmc.c 	dp->base_addr  = (p->portid * (64UL * 1024 * 1024 * 1024));
p                 382 arch/sparc/kernel/chmc.c static void jbusmc_construct_dimm_groups(struct jbusmc *p,
p                 386 arch/sparc/kernel/chmc.c 	if (p->mc_reg_1 & JB_MC_REG1_DIMM1_BANK0) {
p                 387 arch/sparc/kernel/chmc.c 		jbusmc_construct_one_dimm_group(p, 0, mem_regs, num_mem_regs);
p                 388 arch/sparc/kernel/chmc.c 		p->num_dimm_groups++;
p                 390 arch/sparc/kernel/chmc.c 	if (p->mc_reg_1 & JB_MC_REG1_DIMM2_BANK2) {
p                 391 arch/sparc/kernel/chmc.c 		jbusmc_construct_one_dimm_group(p, 1, mem_regs, num_mem_regs);
p                 392 arch/sparc/kernel/chmc.c 		p->num_dimm_groups++;
p                 401 arch/sparc/kernel/chmc.c 	struct jbusmc *p;
p                 419 arch/sparc/kernel/chmc.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 420 arch/sparc/kernel/chmc.c 	if (!p) {
p                 425 arch/sparc/kernel/chmc.c 	INIT_LIST_HEAD(&p->list);
p                 434 arch/sparc/kernel/chmc.c 	p->portid = *prop;
p                 442 arch/sparc/kernel/chmc.c 	p->mc_reg_1 = ((u64)prop[0] << 32) | (u64) prop[1];
p                 445 arch/sparc/kernel/chmc.c 	p->regs = of_ioremap(&op->resource[0], 0, JBUSMC_REGS_SIZE, "jbusmc");
p                 446 arch/sparc/kernel/chmc.c 	if (!p->regs) {
p                 452 arch/sparc/kernel/chmc.c 	ml = of_get_property(op->dev.of_node, "memory-layout", &p->layout_len);
p                 457 arch/sparc/kernel/chmc.c 	if (p->layout_len > sizeof(p->layout)) {
p                 459 arch/sparc/kernel/chmc.c 		       p->layout_len);
p                 462 arch/sparc/kernel/chmc.c 	memcpy(&p->layout, ml, p->layout_len);
p                 464 arch/sparc/kernel/chmc.c 	jbusmc_construct_dimm_groups(p, mem_regs, num_mem_regs);
p                 466 arch/sparc/kernel/chmc.c 	mc_list_add(&p->list);
p                 471 arch/sparc/kernel/chmc.c 	dev_set_drvdata(&op->dev, p);
p                 479 arch/sparc/kernel/chmc.c 	of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE);
p                 482 arch/sparc/kernel/chmc.c 	kfree(p);
p                 521 arch/sparc/kernel/chmc.c 	struct chmc *p;
p                 523 arch/sparc/kernel/chmc.c 	list_for_each_entry(p, &mctrl_list, list) {
p                 529 arch/sparc/kernel/chmc.c 			bp = &p->logical_banks[bank_no];
p                 558 arch/sparc/kernel/chmc.c 	prop = &bp->p->layout_prop;
p                 590 arch/sparc/kernel/chmc.c static u64 chmc_read_mcreg(struct chmc *p, unsigned long offset)
p                 598 arch/sparc/kernel/chmc.c 	if (p->portid == this_cpu) {
p                 605 arch/sparc/kernel/chmc.c 				     : "r" (p->regs + offset),
p                 615 arch/sparc/kernel/chmc.c static void chmc_write_mcreg(struct chmc *p, unsigned long offset, u64 val)
p                 617 arch/sparc/kernel/chmc.c 	if (p->portid == smp_processor_id()) {
p                 624 arch/sparc/kernel/chmc.c 				         "r" (p->regs + offset),
p                 630 arch/sparc/kernel/chmc.c static void chmc_interpret_one_decode_reg(struct chmc *p, int which_bank, u64 val)
p                 632 arch/sparc/kernel/chmc.c 	struct chmc_bank_info *bp = &p->logical_banks[which_bank];
p                 634 arch/sparc/kernel/chmc.c 	bp->p = p;
p                 635 arch/sparc/kernel/chmc.c 	bp->bank_id = (CHMCTRL_NBANKS * p->portid) + which_bank;
p                 678 arch/sparc/kernel/chmc.c static void chmc_fetch_decode_regs(struct chmc *p)
p                 680 arch/sparc/kernel/chmc.c 	if (p->layout_size == 0)
p                 683 arch/sparc/kernel/chmc.c 	chmc_interpret_one_decode_reg(p, 0,
p                 684 arch/sparc/kernel/chmc.c 				      chmc_read_mcreg(p, CHMCTRL_DECODE1));
p                 685 arch/sparc/kernel/chmc.c 	chmc_interpret_one_decode_reg(p, 1,
p                 686 arch/sparc/kernel/chmc.c 				      chmc_read_mcreg(p, CHMCTRL_DECODE2));
p                 687 arch/sparc/kernel/chmc.c 	chmc_interpret_one_decode_reg(p, 2,
p                 688 arch/sparc/kernel/chmc.c 				      chmc_read_mcreg(p, CHMCTRL_DECODE3));
p                 689 arch/sparc/kernel/chmc.c 	chmc_interpret_one_decode_reg(p, 3,
p                 690 arch/sparc/kernel/chmc.c 				      chmc_read_mcreg(p, CHMCTRL_DECODE4));
p                 699 arch/sparc/kernel/chmc.c 	struct chmc *p;
p                 713 arch/sparc/kernel/chmc.c 	if (pval && len > sizeof(p->layout_prop)) {
p                 720 arch/sparc/kernel/chmc.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 721 arch/sparc/kernel/chmc.c 	if (!p) {
p                 726 arch/sparc/kernel/chmc.c 	p->portid = portid;
p                 727 arch/sparc/kernel/chmc.c 	p->layout_size = len;
p                 729 arch/sparc/kernel/chmc.c 		p->layout_size = 0;
p                 731 arch/sparc/kernel/chmc.c 		memcpy(&p->layout_prop, pval, len);
p                 733 arch/sparc/kernel/chmc.c 	p->regs = of_ioremap(&op->resource[0], 0, 0x48, "chmc");
p                 734 arch/sparc/kernel/chmc.c 	if (!p->regs) {
p                 739 arch/sparc/kernel/chmc.c 	if (p->layout_size != 0UL) {
p                 740 arch/sparc/kernel/chmc.c 		p->timing_control1 = chmc_read_mcreg(p, CHMCTRL_TCTRL1);
p                 741 arch/sparc/kernel/chmc.c 		p->timing_control2 = chmc_read_mcreg(p, CHMCTRL_TCTRL2);
p                 742 arch/sparc/kernel/chmc.c 		p->timing_control3 = chmc_read_mcreg(p, CHMCTRL_TCTRL3);
p                 743 arch/sparc/kernel/chmc.c 		p->timing_control4 = chmc_read_mcreg(p, CHMCTRL_TCTRL4);
p                 744 arch/sparc/kernel/chmc.c 		p->memaddr_control = chmc_read_mcreg(p, CHMCTRL_MACTRL);
p                 747 arch/sparc/kernel/chmc.c 	chmc_fetch_decode_regs(p);
p                 749 arch/sparc/kernel/chmc.c 	mc_list_add(&p->list);
p                 753 arch/sparc/kernel/chmc.c 	       (p->layout_size ? "ACTIVE" : "INACTIVE"));
p                 755 arch/sparc/kernel/chmc.c 	dev_set_drvdata(&op->dev, p);
p                 763 arch/sparc/kernel/chmc.c 	kfree(p);
p                 776 arch/sparc/kernel/chmc.c static void chmc_destroy(struct platform_device *op, struct chmc *p)
p                 778 arch/sparc/kernel/chmc.c 	list_del(&p->list);
p                 779 arch/sparc/kernel/chmc.c 	of_iounmap(&op->resource[0], p->regs, 0x48);
p                 780 arch/sparc/kernel/chmc.c 	kfree(p);
p                 783 arch/sparc/kernel/chmc.c static void jbusmc_destroy(struct platform_device *op, struct jbusmc *p)
p                 785 arch/sparc/kernel/chmc.c 	mc_list_del(&p->list);
p                 786 arch/sparc/kernel/chmc.c 	of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE);
p                 787 arch/sparc/kernel/chmc.c 	kfree(p);
p                 792 arch/sparc/kernel/chmc.c 	void *p = dev_get_drvdata(&op->dev);
p                 794 arch/sparc/kernel/chmc.c 	if (p) {
p                 796 arch/sparc/kernel/chmc.c 			chmc_destroy(op, p);
p                 798 arch/sparc/kernel/chmc.c 			jbusmc_destroy(op, p);
p                 782 arch/sparc/kernel/ds.c 		char  *base, *p;
p                 799 arch/sparc/kernel/ds.c 		base = p = &pkt.header.msg.name_and_value[0];
p                 800 arch/sparc/kernel/ds.c 		strcpy(p, var);
p                 801 arch/sparc/kernel/ds.c 		p += strlen(var) + 1;
p                 802 arch/sparc/kernel/ds.c 		strcpy(p, value);
p                 803 arch/sparc/kernel/ds.c 		p += strlen(value) + 1;
p                 807 arch/sparc/kernel/ds.c 			   (p - base));
p                  50 arch/sparc/kernel/ebus.c static void __ebus_dma_reset(struct ebus_dma_info *p, int no_drain)
p                  55 arch/sparc/kernel/ebus.c 	writel(EBDMA_CSR_RESET, p->regs + EBDMA_CSR);
p                  62 arch/sparc/kernel/ebus.c 		val = readl(p->regs + EBDMA_CSR);
p                  72 arch/sparc/kernel/ebus.c 	struct ebus_dma_info *p = dev_id;
p                  76 arch/sparc/kernel/ebus.c 	spin_lock_irqsave(&p->lock, flags);
p                  77 arch/sparc/kernel/ebus.c 	csr = readl(p->regs + EBDMA_CSR);
p                  78 arch/sparc/kernel/ebus.c 	writel(csr, p->regs + EBDMA_CSR);
p                  79 arch/sparc/kernel/ebus.c 	spin_unlock_irqrestore(&p->lock, flags);
p                  82 arch/sparc/kernel/ebus.c 		printk(KERN_CRIT "ebus_dma(%s): DMA error!\n", p->name);
p                  83 arch/sparc/kernel/ebus.c 		p->callback(p, EBUS_DMA_EVENT_ERROR, p->client_cookie);
p                  86 arch/sparc/kernel/ebus.c 		p->callback(p,
p                  89 arch/sparc/kernel/ebus.c 			    p->client_cookie);
p                  97 arch/sparc/kernel/ebus.c int ebus_dma_register(struct ebus_dma_info *p)
p                 101 arch/sparc/kernel/ebus.c 	if (!p->regs)
p                 103 arch/sparc/kernel/ebus.c 	if (p->flags & ~(EBUS_DMA_FLAG_USE_EBDMA_HANDLER |
p                 106 arch/sparc/kernel/ebus.c 	if ((p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) && !p->callback)
p                 108 arch/sparc/kernel/ebus.c 	if (!strlen(p->name))
p                 111 arch/sparc/kernel/ebus.c 	__ebus_dma_reset(p, 1);
p                 115 arch/sparc/kernel/ebus.c 	if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
p                 118 arch/sparc/kernel/ebus.c 	writel(csr, p->regs + EBDMA_CSR);
p                 124 arch/sparc/kernel/ebus.c int ebus_dma_irq_enable(struct ebus_dma_info *p, int on)
p                 130 arch/sparc/kernel/ebus.c 		if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
p                 131 arch/sparc/kernel/ebus.c 			if (request_irq(p->irq, ebus_dma_irq, IRQF_SHARED, p->name, p))
p                 135 arch/sparc/kernel/ebus.c 		spin_lock_irqsave(&p->lock, flags);
p                 136 arch/sparc/kernel/ebus.c 		csr = readl(p->regs + EBDMA_CSR);
p                 138 arch/sparc/kernel/ebus.c 		writel(csr, p->regs + EBDMA_CSR);
p                 139 arch/sparc/kernel/ebus.c 		spin_unlock_irqrestore(&p->lock, flags);
p                 141 arch/sparc/kernel/ebus.c 		spin_lock_irqsave(&p->lock, flags);
p                 142 arch/sparc/kernel/ebus.c 		csr = readl(p->regs + EBDMA_CSR);
p                 144 arch/sparc/kernel/ebus.c 		writel(csr, p->regs + EBDMA_CSR);
p                 145 arch/sparc/kernel/ebus.c 		spin_unlock_irqrestore(&p->lock, flags);
p                 147 arch/sparc/kernel/ebus.c 		if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
p                 148 arch/sparc/kernel/ebus.c 			free_irq(p->irq, p);
p                 156 arch/sparc/kernel/ebus.c void ebus_dma_unregister(struct ebus_dma_info *p)
p                 162 arch/sparc/kernel/ebus.c 	spin_lock_irqsave(&p->lock, flags);
p                 163 arch/sparc/kernel/ebus.c 	csr = readl(p->regs + EBDMA_CSR);
p                 166 arch/sparc/kernel/ebus.c 		writel(csr, p->regs + EBDMA_CSR);
p                 169 arch/sparc/kernel/ebus.c 	spin_unlock_irqrestore(&p->lock, flags);
p                 172 arch/sparc/kernel/ebus.c 		free_irq(p->irq, p);
p                 176 arch/sparc/kernel/ebus.c int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr, size_t len)
p                 185 arch/sparc/kernel/ebus.c 	spin_lock_irqsave(&p->lock, flags);
p                 186 arch/sparc/kernel/ebus.c 	csr = readl(p->regs + EBDMA_CSR);
p                 194 arch/sparc/kernel/ebus.c 	writel(len,      p->regs + EBDMA_COUNT);
p                 195 arch/sparc/kernel/ebus.c 	writel(bus_addr, p->regs + EBDMA_ADDR);
p                 199 arch/sparc/kernel/ebus.c 	spin_unlock_irqrestore(&p->lock, flags);
p                 205 arch/sparc/kernel/ebus.c void ebus_dma_prepare(struct ebus_dma_info *p, int write)
p                 210 arch/sparc/kernel/ebus.c 	spin_lock_irqsave(&p->lock, flags);
p                 211 arch/sparc/kernel/ebus.c 	__ebus_dma_reset(p, 0);
p                 220 arch/sparc/kernel/ebus.c 	if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
p                 223 arch/sparc/kernel/ebus.c 	writel(csr, p->regs + EBDMA_CSR);
p                 225 arch/sparc/kernel/ebus.c 	spin_unlock_irqrestore(&p->lock, flags);
p                 229 arch/sparc/kernel/ebus.c unsigned int ebus_dma_residue(struct ebus_dma_info *p)
p                 231 arch/sparc/kernel/ebus.c 	return readl(p->regs + EBDMA_COUNT);
p                 235 arch/sparc/kernel/ebus.c unsigned int ebus_dma_addr(struct ebus_dma_info *p)
p                 237 arch/sparc/kernel/ebus.c 	return readl(p->regs + EBDMA_ADDR);
p                 241 arch/sparc/kernel/ebus.c void ebus_dma_enable(struct ebus_dma_info *p, int on)
p                 246 arch/sparc/kernel/ebus.c 	spin_lock_irqsave(&p->lock, flags);
p                 247 arch/sparc/kernel/ebus.c 	orig_csr = csr = readl(p->regs + EBDMA_CSR);
p                 254 arch/sparc/kernel/ebus.c 		writel(csr, p->regs + EBDMA_CSR);
p                 255 arch/sparc/kernel/ebus.c 	spin_unlock_irqrestore(&p->lock, flags);
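
The ebus_dma entries above (prototypes in asm/ebus_dma.h, bodies in kernel/ebus.c) make up the SPARC EBus DMA helper layer. Below is a minimal, hedged sketch of how a client driver might wire it up for a single transfer; the device name, register mapping, IRQ, buffer address, and the prepare/enable/request ordering are assumptions for illustration, not copied from any in-tree user.

	#include <linux/spinlock.h>
	#include <linux/string.h>
	#include <linux/printk.h>
	#include <asm/ebus_dma.h>

	static void my_dma_event(struct ebus_dma_info *p, int event, void *cookie)
	{
		if (event == EBUS_DMA_EVENT_ERROR)
			pr_err("%s: DMA error\n", p->name);
		/* EBUS_DMA_EVENT_DMA: the byte count ran out, transfer done. */
	}

	static struct ebus_dma_info my_dma;

	/* regs/irq/buf/len are placeholders a real driver would get from its
	 * platform resources and a prior dma_map_single().
	 */
	static int my_kick_dma(void __iomem *regs, unsigned int irq,
			       dma_addr_t buf, size_t len, int to_memory)
	{
		int err;

		spin_lock_init(&my_dma.lock);
		my_dma.regs = regs;
		my_dma.flags = EBUS_DMA_FLAG_USE_EBDMA_HANDLER;
		my_dma.callback = my_dma_event;
		my_dma.client_cookie = NULL;
		my_dma.irq = irq;
		strcpy(my_dma.name, "my_ebus_dev");	/* register() rejects an empty name */

		err = ebus_dma_register(&my_dma);
		if (err)
			return err;
		err = ebus_dma_irq_enable(&my_dma, 1);	/* hooks ebus_dma_irq() above */
		if (err) {
			ebus_dma_unregister(&my_dma);
			return err;
		}

		ebus_dma_prepare(&my_dma, to_memory);	/* reset engine, set direction */
		ebus_dma_enable(&my_dma, 1);
		return ebus_dma_request(&my_dma, buf, len);	/* program ADDR/COUNT */
	}

ebus_dma_residue() and ebus_dma_addr() can then be polled to see how far the engine has progressed.
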
p                  70 arch/sparc/kernel/hvapi.c static void __get_ref(struct api_info *p)
p                  72 arch/sparc/kernel/hvapi.c 	p->refcnt++;
p                  75 arch/sparc/kernel/hvapi.c static void __put_ref(struct api_info *p)
p                  77 arch/sparc/kernel/hvapi.c 	if (--p->refcnt == 0) {
p                  80 arch/sparc/kernel/hvapi.c 		sun4v_set_version(p->group, 0, 0, &ignore);
p                  81 arch/sparc/kernel/hvapi.c 		p->major = p->minor = 0;
p                  99 arch/sparc/kernel/hvapi.c 	struct api_info *p;
p                 104 arch/sparc/kernel/hvapi.c 	p = __get_info(group);
p                 106 arch/sparc/kernel/hvapi.c 	if (p) {
p                 107 arch/sparc/kernel/hvapi.c 		if (p->refcnt) {
p                 109 arch/sparc/kernel/hvapi.c 			if (p->major == major) {
p                 110 arch/sparc/kernel/hvapi.c 				*minor = p->minor;
p                 122 arch/sparc/kernel/hvapi.c 				p->major = major;
p                 123 arch/sparc/kernel/hvapi.c 				p->minor = actual_minor;
p                 127 arch/sparc/kernel/hvapi.c 				if (p->flags & FLAG_PRE_API) {
p                 129 arch/sparc/kernel/hvapi.c 						p->major = 1;
p                 130 arch/sparc/kernel/hvapi.c 						p->minor = 0;
p                 139 arch/sparc/kernel/hvapi.c 			__get_ref(p);
p                 149 arch/sparc/kernel/hvapi.c 	struct api_info *p;
p                 153 arch/sparc/kernel/hvapi.c 	p = __get_info(group);
p                 154 arch/sparc/kernel/hvapi.c 	if (p)
p                 155 arch/sparc/kernel/hvapi.c 		__put_ref(p);
p                 164 arch/sparc/kernel/hvapi.c 	struct api_info *p;
p                 170 arch/sparc/kernel/hvapi.c 	p = __get_info(group);
p                 171 arch/sparc/kernel/hvapi.c 	if (p && p->refcnt) {
p                 172 arch/sparc/kernel/hvapi.c 		*major = p->major;
p                 173 arch/sparc/kernel/hvapi.c 		*minor = p->minor;
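
The hvapi.c lines above are the internals of sun4v hypervisor API group negotiation: each group is reference counted, and the last __put_ref() drops the negotiated version back to 0.0. A hedged sketch of a caller, assuming the usual sun4v_hvapi_register()/sun4v_hvapi_unregister() entry points from asm/hypervisor.h; HV_GRP_PCI and the requested major number are only examples:

	#include <linux/kernel.h>
	#include <asm/hypervisor.h>

	static int my_negotiate_hvapi(void)
	{
		unsigned long major = 1, minor = 0;
		int err;

		err = sun4v_hvapi_register(HV_GRP_PCI, major, &minor);
		if (err)
			return err;	/* group missing or major not offered */

		pr_info("negotiated PCI hvapi v%lu.%lu\n", major, minor);

		/* When the driver is done with the group, drop its reference;
		 * the final put renegotiates version 0.0 (see __put_ref above).
		 */
		sun4v_hvapi_unregister(HV_GRP_PCI);
		return 0;
	}
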
p                  60 arch/sparc/kernel/iommu-common.c 	struct iommu_pool *p = &(iommu->large_pool);
p                  91 arch/sparc/kernel/iommu-common.c 	spin_lock_init(&(p->lock));
p                  92 arch/sparc/kernel/iommu-common.c 	p->start = start;
p                  93 arch/sparc/kernel/iommu-common.c 	p->hint = p->start;
p                  94 arch/sparc/kernel/iommu-common.c 	p->end = num_entries;
p                 230 arch/sparc/kernel/iommu-common.c 	struct iommu_pool *p;
p                 236 arch/sparc/kernel/iommu-common.c 		p = &tbl->large_pool;
p                 241 arch/sparc/kernel/iommu-common.c 		p = &tbl->pools[pool_nr];
p                 243 arch/sparc/kernel/iommu-common.c 	return p;
p                 159 arch/sparc/kernel/irq_32.c 	struct irq_bucket *p;
p                 167 arch/sparc/kernel/irq_32.c 	p = &irq_table[irq];
p                 168 arch/sparc/kernel/irq_32.c 	pil = p->pil;
p                 170 arch/sparc/kernel/irq_32.c 	p->next = irq_map[pil];
p                 171 arch/sparc/kernel/irq_32.c 	irq_map[pil] = p;
p                 178 arch/sparc/kernel/irq_32.c 	struct irq_bucket *p, **pnext;
p                 185 arch/sparc/kernel/irq_32.c 	p = &irq_table[irq];
p                 186 arch/sparc/kernel/irq_32.c 	BUG_ON(p->pil >= SUN4D_MAX_IRQ);
p                 187 arch/sparc/kernel/irq_32.c 	pnext = &irq_map[p->pil];
p                 188 arch/sparc/kernel/irq_32.c 	while (*pnext != p)
p                 190 arch/sparc/kernel/irq_32.c 	*pnext = p->next;
p                 197 arch/sparc/kernel/irq_32.c int arch_show_interrupts(struct seq_file *p, int prec)
p                 202 arch/sparc/kernel/irq_32.c 	seq_printf(p, "RES: ");
p                 204 arch/sparc/kernel/irq_32.c 		seq_printf(p, "%10u ", cpu_data(j).irq_resched_count);
p                 205 arch/sparc/kernel/irq_32.c 	seq_printf(p, "     IPI rescheduling interrupts\n");
p                 206 arch/sparc/kernel/irq_32.c 	seq_printf(p, "CAL: ");
p                 208 arch/sparc/kernel/irq_32.c 		seq_printf(p, "%10u ", cpu_data(j).irq_call_count);
p                 209 arch/sparc/kernel/irq_32.c 	seq_printf(p, "     IPI function call interrupts\n");
p                 211 arch/sparc/kernel/irq_32.c 	seq_printf(p, "NMI: ");
p                 213 arch/sparc/kernel/irq_32.c 		seq_printf(p, "%10u ", cpu_data(j).counter);
p                 214 arch/sparc/kernel/irq_32.c 	seq_printf(p, "     Non-maskable interrupts\n");
p                 221 arch/sparc/kernel/irq_32.c 	struct irq_bucket *p;
p                 227 arch/sparc/kernel/irq_32.c 	p = irq_map[pil];
p                 228 arch/sparc/kernel/irq_32.c 	while (p) {
p                 229 arch/sparc/kernel/irq_32.c 		struct irq_bucket *next = p->next;
p                 231 arch/sparc/kernel/irq_32.c 		generic_handle_irq(p->irq);
p                 232 arch/sparc/kernel/irq_32.c 		p = next;
p                 109 arch/sparc/kernel/irq_64.c static int __init early_hvirq_major(char *p)
p                 111 arch/sparc/kernel/irq_64.c 	int rc = kstrtoul(p, 10, &hvirq_major);
p                 302 arch/sparc/kernel/irq_64.c int arch_show_interrupts(struct seq_file *p, int prec)
p                 306 arch/sparc/kernel/irq_64.c 	seq_printf(p, "NMI: ");
p                 308 arch/sparc/kernel/irq_64.c 		seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
p                 309 arch/sparc/kernel/irq_64.c 	seq_printf(p, "     Non-maskable interrupts\n");
p                1022 arch/sparc/kernel/irq_64.c 	unsigned long p;
p                1024 arch/sparc/kernel/irq_64.c 	p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
p                1025 arch/sparc/kernel/irq_64.c 	if (!p) {
p                1030 arch/sparc/kernel/irq_64.c 	*pa_ptr = __pa(p);
p                1037 arch/sparc/kernel/irq_64.c 	void *mondo, *p;
p                1042 arch/sparc/kernel/irq_64.c 	p = kzalloc(127, GFP_KERNEL);
p                1043 arch/sparc/kernel/irq_64.c 	if (!p) {
p                1047 arch/sparc/kernel/irq_64.c 	mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
p                  22 arch/sparc/kernel/kernel.h static inline unsigned long kimage_addr_to_ra(const void *p)
p                  24 arch/sparc/kernel/kernel.h 	unsigned long val = (unsigned long) p;
p                  47 arch/sparc/kernel/kgdb_32.c void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
p                  49 arch/sparc/kernel/kgdb_32.c 	struct thread_info *t = task_thread_info(p);
p                  45 arch/sparc/kernel/kgdb_64.c void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
p                  47 arch/sparc/kernel/kgdb_64.c 	struct thread_info *t = task_thread_info(p);
p                  57 arch/sparc/kernel/kgdb_64.c 	gdb_regs[GDB_G7] = (unsigned long) p;
p                  50 arch/sparc/kernel/kprobes.c int __kprobes arch_prepare_kprobe(struct kprobe *p)
p                  52 arch/sparc/kernel/kprobes.c 	if ((unsigned long) p->addr & 0x3UL)
p                  55 arch/sparc/kernel/kprobes.c 	p->ainsn.insn[0] = *p->addr;
p                  56 arch/sparc/kernel/kprobes.c 	flushi(&p->ainsn.insn[0]);
p                  58 arch/sparc/kernel/kprobes.c 	p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
p                  59 arch/sparc/kernel/kprobes.c 	flushi(&p->ainsn.insn[1]);
p                  61 arch/sparc/kernel/kprobes.c 	p->opcode = *p->addr;
p                  65 arch/sparc/kernel/kprobes.c void __kprobes arch_arm_kprobe(struct kprobe *p)
p                  67 arch/sparc/kernel/kprobes.c 	*p->addr = BREAKPOINT_INSTRUCTION;
p                  68 arch/sparc/kernel/kprobes.c 	flushi(p->addr);
p                  71 arch/sparc/kernel/kprobes.c void __kprobes arch_disarm_kprobe(struct kprobe *p)
p                  73 arch/sparc/kernel/kprobes.c 	*p->addr = p->opcode;
p                  74 arch/sparc/kernel/kprobes.c 	flushi(p->addr);
p                  93 arch/sparc/kernel/kprobes.c static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
p                  96 arch/sparc/kernel/kprobes.c 	__this_cpu_write(current_kprobe, p);
p                 101 arch/sparc/kernel/kprobes.c static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
p                 107 arch/sparc/kernel/kprobes.c 	if (p->opcode == BREAKPOINT_INSTRUCTION) {
p                 108 arch/sparc/kernel/kprobes.c 		regs->tpc = (unsigned long) p->addr;
p                 111 arch/sparc/kernel/kprobes.c 		regs->tpc = (unsigned long) &p->ainsn.insn[0];
p                 112 arch/sparc/kernel/kprobes.c 		regs->tnpc = (unsigned long) &p->ainsn.insn[1];
p                 118 arch/sparc/kernel/kprobes.c 	struct kprobe *p;
p                 131 arch/sparc/kernel/kprobes.c 		p = get_kprobe(addr);
p                 132 arch/sparc/kernel/kprobes.c 		if (p) {
p                 145 arch/sparc/kernel/kprobes.c 			set_current_kprobe(p, regs, kcb);
p                 146 arch/sparc/kernel/kprobes.c 			kprobes_inc_nmissed_count(p);
p                 148 arch/sparc/kernel/kprobes.c 			prepare_singlestep(p, regs, kcb);
p                 160 arch/sparc/kernel/kprobes.c 	p = get_kprobe(addr);
p                 161 arch/sparc/kernel/kprobes.c 	if (!p) {
p                 176 arch/sparc/kernel/kprobes.c 	set_current_kprobe(p, regs, kcb);
p                 178 arch/sparc/kernel/kprobes.c 	if (p->pre_handler && p->pre_handler(p, regs)) {
p                 184 arch/sparc/kernel/kprobes.c 	prepare_singlestep(p, regs, kcb);
p                 201 arch/sparc/kernel/kprobes.c static unsigned long __kprobes relbranch_fixup(u32 insn, struct kprobe *p,
p                 204 arch/sparc/kernel/kprobes.c 	unsigned long real_pc = (unsigned long) p->addr;
p                 218 arch/sparc/kernel/kprobes.c 		ainsn_addr = (unsigned long) &p->ainsn.insn[0];
p                 277 arch/sparc/kernel/kprobes.c static void __kprobes resume_execution(struct kprobe *p,
p                 280 arch/sparc/kernel/kprobes.c 	u32 insn = p->ainsn.insn[0];
p                 282 arch/sparc/kernel/kprobes.c 	regs->tnpc = relbranch_fixup(insn, p, regs);
p                 287 arch/sparc/kernel/kprobes.c 	retpc_fixup(regs, insn, (unsigned long) p->addr);
p                 465 arch/sparc/kernel/kprobes.c static int __kprobes trampoline_probe_handler(struct kprobe *p,
p                 545 arch/sparc/kernel/kprobes.c int __kprobes arch_trampoline_kprobe(struct kprobe *p)
p                 547 arch/sparc/kernel/kprobes.c 	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
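
The kprobes.c lines above are the SPARC arch backend: arch_prepare_kprobe() copies the probed instruction plus a trailing breakpoint into ainsn.insn[], and arch_arm_kprobe()/arch_disarm_kprobe() swap the breakpoint in and out of the probed text. Clients only see the generic API; a hedged sketch of one follows (the probed symbol name is an arbitrary example, assumed to exist):

	#include <linux/module.h>
	#include <linux/kprobes.h>

	static int my_pre(struct kprobe *p, struct pt_regs *regs)
	{
		pr_info("kprobe hit at %px\n", p->addr);
		return 0;	/* 0: continue and single-step the copied insn */
	}

	static struct kprobe my_kp = {
		.symbol_name	= "kernel_clone",	/* example target */
		.pre_handler	= my_pre,
	};

	static int __init my_kp_init(void)
	{
		return register_kprobe(&my_kp);
	}

	static void __exit my_kp_exit(void)
	{
		unregister_kprobe(&my_kp);
	}

	module_init(my_kp_init);
	module_exit(my_kp_exit);
	MODULE_LICENSE("GPL");
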
p                 223 arch/sparc/kernel/ldc.c 	struct ldc_packet *p;
p                 232 arch/sparc/kernel/ldc.c 	p = lp->tx_base;
p                 233 arch/sparc/kernel/ldc.c 	return p + (lp->tx_tail / LDC_PACKET_SIZE);
p                 278 arch/sparc/kernel/ldc.c 	struct ldc_packet *p;
p                 288 arch/sparc/kernel/ldc.c 	p = lp->tx_base;
p                 289 arch/sparc/kernel/ldc.c 	return p + (lp->tx_tail / LDC_PACKET_SIZE);
p                 341 arch/sparc/kernel/ldc.c 			  struct ldc_packet *p,
p                 344 arch/sparc/kernel/ldc.c 	BUG_ON(p != (lp->tx_base + (lp->tx_tail / LDC_PACKET_SIZE)));
p                 354 arch/sparc/kernel/ldc.c 	struct ldc_packet *p = handshake_get_tx_packet(lp, new_tail);
p                 356 arch/sparc/kernel/ldc.c 	if (p) {
p                 357 arch/sparc/kernel/ldc.c 		memset(p, 0, sizeof(*p));
p                 358 arch/sparc/kernel/ldc.c 		p->type = LDC_CTRL;
p                 359 arch/sparc/kernel/ldc.c 		p->stype = stype;
p                 360 arch/sparc/kernel/ldc.c 		p->ctrl = ctrl;
p                 362 arch/sparc/kernel/ldc.c 			memcpy(p->u.u_data, data, dlen);
p                 364 arch/sparc/kernel/ldc.c 	return p;
p                 369 arch/sparc/kernel/ldc.c 	struct ldc_packet *p;
p                 378 arch/sparc/kernel/ldc.c 	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
p                 380 arch/sparc/kernel/ldc.c 	if (p) {
p                 381 arch/sparc/kernel/ldc.c 		int err = send_tx_packet(lp, p, new_tail);
p                 392 arch/sparc/kernel/ldc.c 	struct ldc_packet *p;
p                 399 arch/sparc/kernel/ldc.c 	p = handshake_compose_ctrl(lp, LDC_NACK, LDC_VERS,
p                 401 arch/sparc/kernel/ldc.c 	if (p) {
p                 405 arch/sparc/kernel/ldc.c 		return send_tx_packet(lp, p, new_tail);
p                 413 arch/sparc/kernel/ldc.c 	struct ldc_packet *p;
p                 416 arch/sparc/kernel/ldc.c 	p = handshake_compose_ctrl(lp, LDC_ACK, LDC_VERS,
p                 418 arch/sparc/kernel/ldc.c 	if (p) {
p                 422 arch/sparc/kernel/ldc.c 		return send_tx_packet(lp, p, new_tail);
p                 429 arch/sparc/kernel/ldc.c 	struct ldc_packet *p;
p                 432 arch/sparc/kernel/ldc.c 	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTS, NULL, 0,
p                 434 arch/sparc/kernel/ldc.c 	if (p) {
p                 435 arch/sparc/kernel/ldc.c 		p->env = lp->cfg.mode;
p                 436 arch/sparc/kernel/ldc.c 		p->seqid = 0;
p                 440 arch/sparc/kernel/ldc.c 		       p->env, p->seqid);
p                 442 arch/sparc/kernel/ldc.c 		return send_tx_packet(lp, p, new_tail);
p                 449 arch/sparc/kernel/ldc.c 	struct ldc_packet *p;
p                 452 arch/sparc/kernel/ldc.c 	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTR, NULL, 0,
p                 454 arch/sparc/kernel/ldc.c 	if (p) {
p                 455 arch/sparc/kernel/ldc.c 		p->env = lp->cfg.mode;
p                 456 arch/sparc/kernel/ldc.c 		p->seqid = 0;
p                 459 arch/sparc/kernel/ldc.c 		       p->env, p->seqid);
p                 461 arch/sparc/kernel/ldc.c 		return send_tx_packet(lp, p, new_tail);
p                 468 arch/sparc/kernel/ldc.c 	struct ldc_packet *p;
p                 471 arch/sparc/kernel/ldc.c 	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RDX, NULL, 0,
p                 473 arch/sparc/kernel/ldc.c 	if (p) {
p                 474 arch/sparc/kernel/ldc.c 		p->env = 0;
p                 475 arch/sparc/kernel/ldc.c 		p->seqid = ++lp->snd_nxt;
p                 476 arch/sparc/kernel/ldc.c 		p->u.r.ackid = lp->rcv_nxt;
p                 479 arch/sparc/kernel/ldc.c 		       p->env, p->seqid, p->u.r.ackid);
p                 481 arch/sparc/kernel/ldc.c 		return send_tx_packet(lp, p, new_tail);
p                 488 arch/sparc/kernel/ldc.c 	struct ldc_packet *p;
p                 492 arch/sparc/kernel/ldc.c 	p = data_get_tx_packet(lp, &new_tail);
p                 493 arch/sparc/kernel/ldc.c 	if (!p)
p                 495 arch/sparc/kernel/ldc.c 	memset(p, 0, sizeof(*p));
p                 496 arch/sparc/kernel/ldc.c 	p->type = data_pkt->type;
p                 497 arch/sparc/kernel/ldc.c 	p->stype = LDC_NACK;
p                 498 arch/sparc/kernel/ldc.c 	p->ctrl = data_pkt->ctrl & LDC_CTRL_MSK;
p                 499 arch/sparc/kernel/ldc.c 	p->seqid = lp->snd_nxt + 1;
p                 500 arch/sparc/kernel/ldc.c 	p->u.r.ackid = lp->rcv_nxt;
p                 503 arch/sparc/kernel/ldc.c 	       p->type, p->ctrl, p->seqid, p->u.r.ackid);
p                 505 arch/sparc/kernel/ldc.c 	err = send_tx_packet(lp, p, new_tail);
p                 628 arch/sparc/kernel/ldc.c 	struct ldc_packet *p;
p                 638 arch/sparc/kernel/ldc.c 	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
p                 641 arch/sparc/kernel/ldc.c 	if (!p)
p                 644 arch/sparc/kernel/ldc.c 	return send_tx_packet(lp, p, new_tail);
p                 648 arch/sparc/kernel/ldc.c 			   struct ldc_packet *p)
p                 652 arch/sparc/kernel/ldc.c 	vp = (struct ldc_version *) p->u.u_data;
p                 654 arch/sparc/kernel/ldc.c 	switch (p->stype) {
p                 670 arch/sparc/kernel/ldc.c 		       struct ldc_packet *p)
p                 673 arch/sparc/kernel/ldc.c 	       p->stype, p->seqid, p->env);
p                 675 arch/sparc/kernel/ldc.c 	if (p->stype     != LDC_INFO	   ||
p                 677 arch/sparc/kernel/ldc.c 	    p->env       != lp->cfg.mode)
p                 680 arch/sparc/kernel/ldc.c 	lp->snd_nxt = p->seqid;
p                 681 arch/sparc/kernel/ldc.c 	lp->rcv_nxt = p->seqid;
p                 690 arch/sparc/kernel/ldc.c 		       struct ldc_packet *p)
p                 693 arch/sparc/kernel/ldc.c 	       p->stype, p->seqid, p->env);
p                 695 arch/sparc/kernel/ldc.c 	if (p->stype     != LDC_INFO ||
p                 696 arch/sparc/kernel/ldc.c 	    p->env       != lp->cfg.mode)
p                 699 arch/sparc/kernel/ldc.c 	lp->snd_nxt = p->seqid;
p                 713 arch/sparc/kernel/ldc.c 		       struct ldc_packet *p)
p                 716 arch/sparc/kernel/ldc.c 	       p->stype, p->seqid, p->env, p->u.r.ackid);
p                 718 arch/sparc/kernel/ldc.c 	if (p->stype != LDC_INFO ||
p                 719 arch/sparc/kernel/ldc.c 	    !(rx_seq_ok(lp, p->seqid)))
p                 722 arch/sparc/kernel/ldc.c 	lp->rcv_nxt = p->seqid;
p                 731 arch/sparc/kernel/ldc.c 				 struct ldc_packet *p)
p                 733 arch/sparc/kernel/ldc.c 	switch (p->ctrl) {
p                 735 arch/sparc/kernel/ldc.c 		return process_version(lp, p);
p                 738 arch/sparc/kernel/ldc.c 		return process_rts(lp, p);
p                 741 arch/sparc/kernel/ldc.c 		return process_rtr(lp, p);
p                 744 arch/sparc/kernel/ldc.c 		return process_rdx(lp, p);
p                 752 arch/sparc/kernel/ldc.c 			       struct ldc_packet *p)
p                 764 arch/sparc/kernel/ldc.c 		struct ldc_packet *p = lp->tx_base + (head / LDC_PACKET_SIZE);
p                 768 arch/sparc/kernel/ldc.c 		if (p->seqid == ackid) {
p                 859 arch/sparc/kernel/ldc.c 		struct ldc_packet *p;
p                 863 arch/sparc/kernel/ldc.c 		p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
p                 865 arch/sparc/kernel/ldc.c 		switch (p->type) {
p                 867 arch/sparc/kernel/ldc.c 			err = process_control_frame(lp, p);
p                 878 arch/sparc/kernel/ldc.c 			err = process_error_frame(lp, p);
p                1502 arch/sparc/kernel/ldc.c 	struct ldc_packet *p;
p                1517 arch/sparc/kernel/ldc.c 	p = data_get_tx_packet(lp, &new_tail);
p                1518 arch/sparc/kernel/ldc.c 	if (!p)
p                1521 arch/sparc/kernel/ldc.c 	memcpy(p, buf, size);
p                1523 arch/sparc/kernel/ldc.c 	err = send_tx_packet(lp, p, new_tail);
p                1532 arch/sparc/kernel/ldc.c 	struct ldc_packet *p;
p                1553 arch/sparc/kernel/ldc.c 	p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
p                1554 arch/sparc/kernel/ldc.c 	memcpy(buf, p, LDC_PACKET_SIZE);
p                1596 arch/sparc/kernel/ldc.c 		struct ldc_packet *p = lp->tx_base + (tail / LDC_PACKET_SIZE);
p                1598 arch/sparc/kernel/ldc.c 			    p->u.u_data :
p                1599 arch/sparc/kernel/ldc.c 			    p->u.r.r_data);
p                1602 arch/sparc/kernel/ldc.c 		p->type = LDC_DATA;
p                1603 arch/sparc/kernel/ldc.c 		p->stype = LDC_INFO;
p                1604 arch/sparc/kernel/ldc.c 		p->ctrl = 0;
p                1612 arch/sparc/kernel/ldc.c 		p->env = (data_len |
p                1616 arch/sparc/kernel/ldc.c 		p->seqid = ++seq;
p                1619 arch/sparc/kernel/ldc.c 		       p->type,
p                1620 arch/sparc/kernel/ldc.c 		       p->stype,
p                1621 arch/sparc/kernel/ldc.c 		       p->ctrl,
p                1622 arch/sparc/kernel/ldc.c 		       p->env,
p                1623 arch/sparc/kernel/ldc.c 		       p->seqid);
p                1641 arch/sparc/kernel/ldc.c static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p,
p                1649 arch/sparc/kernel/ldc.c 	err = send_data_nack(lp, p);
p                1660 arch/sparc/kernel/ldc.c static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p)
p                1662 arch/sparc/kernel/ldc.c 	if (p->stype & LDC_ACK) {
p                1663 arch/sparc/kernel/ldc.c 		int err = process_data_ack(lp, p);
p                1667 arch/sparc/kernel/ldc.c 	if (p->stype & LDC_NACK)
p                1720 arch/sparc/kernel/ldc.c 	struct ldc_packet *p;
p                1722 arch/sparc/kernel/ldc.c 	p = data_get_tx_packet(lp, &new_tail);
p                1723 arch/sparc/kernel/ldc.c 	if (likely(p)) {
p                1726 arch/sparc/kernel/ldc.c 		memset(p, 0, sizeof(*p));
p                1727 arch/sparc/kernel/ldc.c 		p->type = LDC_DATA;
p                1728 arch/sparc/kernel/ldc.c 		p->stype = LDC_ACK;
p                1729 arch/sparc/kernel/ldc.c 		p->ctrl = 0;
p                1730 arch/sparc/kernel/ldc.c 		p->seqid = lp->snd_nxt + 1;
p                1731 arch/sparc/kernel/ldc.c 		p->u.r.ackid = lp->rcv_nxt;
p                1733 arch/sparc/kernel/ldc.c 		err = send_tx_packet(lp, p, new_tail);
p                1763 arch/sparc/kernel/ldc.c 		struct ldc_packet *p;
p                1767 arch/sparc/kernel/ldc.c 		p = lp->rx_base + (new / LDC_PACKET_SIZE);
p                1771 arch/sparc/kernel/ldc.c 		       p->type,
p                1772 arch/sparc/kernel/ldc.c 		       p->stype,
p                1773 arch/sparc/kernel/ldc.c 		       p->ctrl,
p                1774 arch/sparc/kernel/ldc.c 		       p->env,
p                1775 arch/sparc/kernel/ldc.c 		       p->seqid,
p                1776 arch/sparc/kernel/ldc.c 		       p->u.r.ackid,
p                1779 arch/sparc/kernel/ldc.c 		if (unlikely(!rx_seq_ok(lp, p->seqid))) {
p                1780 arch/sparc/kernel/ldc.c 			err = rx_bad_seq(lp, p, first_frag);
p                1785 arch/sparc/kernel/ldc.c 		if (p->type & LDC_CTRL) {
p                1786 arch/sparc/kernel/ldc.c 			err = process_control_frame(lp, p);
p                1792 arch/sparc/kernel/ldc.c 		lp->rcv_nxt = p->seqid;
p                1799 arch/sparc/kernel/ldc.c 		if (!(p->type & LDC_DATA)) {
p                1803 arch/sparc/kernel/ldc.c 		if (p->stype & (LDC_ACK | LDC_NACK)) {
p                1804 arch/sparc/kernel/ldc.c 			err = data_ack_nack(lp, p);
p                1808 arch/sparc/kernel/ldc.c 		if (!(p->stype & LDC_INFO)) {
p                1816 arch/sparc/kernel/ldc.c 		pkt_len = p->env & LDC_LEN;
p                1831 arch/sparc/kernel/ldc.c 		if ((first_frag == NULL && !(p->env & LDC_START)) ||
p                1832 arch/sparc/kernel/ldc.c 		    (first_frag != NULL &&  (p->env & LDC_START))) {
p                1844 arch/sparc/kernel/ldc.c 			first_frag = p;
p                1868 arch/sparc/kernel/ldc.c 			p->u.u_data : p->u.r.r_data), pkt_len);
p                1872 arch/sparc/kernel/ldc.c 		if (p->env & LDC_STOP)
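
Nearly every ldc.c line above indexes a packet ring the same way: tx_head/tx_tail and rx_head/rx_tail are byte offsets that advance in LDC_PACKET_SIZE steps, and a slot pointer is base + (offset / LDC_PACKET_SIZE). A small, self-contained illustration of that arithmetic (PKT_SIZE, NUM_PKTS and the wrap test are assumptions, not the kernel's exact code):

	#define PKT_SIZE	64		/* stands in for LDC_PACKET_SIZE */
	#define NUM_PKTS	512		/* ring length, illustrative */

	struct pkt { unsigned char data[PKT_SIZE]; };

	static struct pkt ring[NUM_PKTS];

	static struct pkt *slot_for(unsigned long off)
	{
		return ring + (off / PKT_SIZE);	/* byte offset -> slot pointer */
	}

	static unsigned long advance(unsigned long off)
	{
		off += PKT_SIZE;
		if (off == NUM_PKTS * PKT_SIZE)	/* wrap to the start of the ring */
			off = 0;
		return off;
	}
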
p                  60 arch/sparc/kernel/leon_kernel.c 	struct irq_bucket *p;
p                  64 arch/sparc/kernel/leon_kernel.c 	p = irq_map[eirq];
p                  65 arch/sparc/kernel/leon_kernel.c 	if ((eirq & 0x10) && p && p->irq) /* bit4 tells if IRQ happened */
p                  66 arch/sparc/kernel/leon_kernel.c 		generic_handle_irq(p->irq);
p                 481 arch/sparc/kernel/mdesc.c static void notify_one(struct mdesc_notifier_client *p,
p                 485 arch/sparc/kernel/mdesc.c 	invoke_on_missing(p->node_name, old_hp, new_hp, p->remove);
p                 486 arch/sparc/kernel/mdesc.c 	invoke_on_missing(p->node_name, new_hp, old_hp, p->add);
p                 492 arch/sparc/kernel/mdesc.c 	struct mdesc_notifier_client *p = client_list;
p                 494 arch/sparc/kernel/mdesc.c 	while (p) {
p                 495 arch/sparc/kernel/mdesc.c 		notify_one(p, old_hp, new_hp);
p                 496 arch/sparc/kernel/mdesc.c 		p = p->next;
p                1057 arch/sparc/kernel/mdesc.c static void get_one_mondo_bits(const u64 *p, unsigned int *mask,
p                1062 arch/sparc/kernel/mdesc.c 	if (!p)
p                1064 arch/sparc/kernel/mdesc.c 	val = *p;
p                 198 arch/sparc/kernel/module.c 		void *p = (void *) sun4v_1insn->sh_addr;
p                 199 arch/sparc/kernel/module.c 		sun4v_patch_1insn_range(p, p + sun4v_1insn->sh_size);
p                 202 arch/sparc/kernel/module.c 		void *p = (void *) sun4v_2insn->sh_addr;
p                 203 arch/sparc/kernel/module.c 		sun4v_patch_2insn_range(p, p + sun4v_2insn->sh_size);
p                 242 arch/sparc/kernel/nmi.c static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p)
p                 625 arch/sparc/kernel/pci.c 	struct resource *p, *root, *conflict;
p                 630 arch/sparc/kernel/pci.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 631 arch/sparc/kernel/pci.c 	if (!p)
p                 634 arch/sparc/kernel/pci.c 	p->name = "Video RAM area";
p                 635 arch/sparc/kernel/pci.c 	p->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
p                 639 arch/sparc/kernel/pci.c 	pcibios_bus_to_resource(dev->bus, p, &region);
p                 641 arch/sparc/kernel/pci.c 	root = pci_find_parent_resource(dev, p);
p                 643 arch/sparc/kernel/pci.c 		pci_info(dev, "can't claim VGA legacy %pR: no compatible bridge window\n", p);
p                 647 arch/sparc/kernel/pci.c 	conflict = request_resource_conflict(root, p);
p                 650 arch/sparc/kernel/pci.c 			 p, conflict->name, conflict);
p                 654 arch/sparc/kernel/pci.c 	pci_info(dev, "VGA legacy framebuffer %pR\n", p);
p                 658 arch/sparc/kernel/pci.c 	kfree(p);
p                 248 arch/sparc/kernel/pci_msi.c 		struct sparc64_msiq_cookie *p;
p                 250 arch/sparc/kernel/pci_msi.c 		p = &pbm->msiq_irq_cookies[i];
p                 251 arch/sparc/kernel/pci_msi.c 		p->pbm = pbm;
p                 252 arch/sparc/kernel/pci_msi.c 		p->msiqid = pbm->msiq_first + i;
p                  68 arch/sparc/kernel/pci_sun4v.c 	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
p                  70 arch/sparc/kernel/pci_sun4v.c 	p->dev		= dev;
p                  71 arch/sparc/kernel/pci_sun4v.c 	p->prot		= prot;
p                  72 arch/sparc/kernel/pci_sun4v.c 	p->entry	= entry;
p                  73 arch/sparc/kernel/pci_sun4v.c 	p->npages	= 0;
p                  82 arch/sparc/kernel/pci_sun4v.c static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
p                  84 arch/sparc/kernel/pci_sun4v.c 	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
p                  85 arch/sparc/kernel/pci_sun4v.c 	u64 *pglist = p->pglist;
p                  88 arch/sparc/kernel/pci_sun4v.c 	unsigned long prot = p->prot;
p                  89 arch/sparc/kernel/pci_sun4v.c 	unsigned long entry = p->entry;
p                  90 arch/sparc/kernel/pci_sun4v.c 	unsigned long npages = p->npages;
p                 138 arch/sparc/kernel/pci_sun4v.c 	p->entry = entry;
p                 139 arch/sparc/kernel/pci_sun4v.c 	p->npages = 0;
p                 146 arch/sparc/kernel/pci_sun4v.c 	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
p                 148 arch/sparc/kernel/pci_sun4v.c 	if (p->entry + p->npages == entry)
p                 150 arch/sparc/kernel/pci_sun4v.c 	if (p->entry != ~0UL)
p                 151 arch/sparc/kernel/pci_sun4v.c 		iommu_batch_flush(p, mask);
p                 152 arch/sparc/kernel/pci_sun4v.c 	p->entry = entry;
p                 158 arch/sparc/kernel/pci_sun4v.c 	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
p                 160 arch/sparc/kernel/pci_sun4v.c 	BUG_ON(p->npages >= PGLIST_NENTS);
p                 162 arch/sparc/kernel/pci_sun4v.c 	p->pglist[p->npages++] = phys_page;
p                 163 arch/sparc/kernel/pci_sun4v.c 	if (p->npages == PGLIST_NENTS)
p                 164 arch/sparc/kernel/pci_sun4v.c 		return iommu_batch_flush(p, mask);
p                 172 arch/sparc/kernel/pci_sun4v.c 	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
p                 174 arch/sparc/kernel/pci_sun4v.c 	BUG_ON(p->npages >= PGLIST_NENTS);
p                 176 arch/sparc/kernel/pci_sun4v.c 	return iommu_batch_flush(p, mask);
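
The pci_sun4v.c lines above implement a per-CPU mapping batch: iommu_batch_add() queues physical pages into pglist[], and iommu_batch_flush() hands the whole run off once the list fills, the target entry changes, or the caller finishes. A hedged, generic illustration of that idiom (NENTS and do_map() stand in for PGLIST_NENTS and the real hypervisor mapping call):

	#define NENTS	128			/* stands in for PGLIST_NENTS */

	struct batch {
		unsigned long entry;		/* first IOMMU entry of the current run */
		unsigned long npages;
		unsigned long pglist[NENTS];
	};

	/* assumed backend: maps npages starting at entry, returns 0 or -errno */
	long do_map(unsigned long entry, unsigned long npages, unsigned long *pglist);

	static long batch_flush(struct batch *b)
	{
		long err = 0;

		if (b->npages) {
			err = do_map(b->entry, b->npages, b->pglist);
			if (!err) {
				b->entry += b->npages;	/* next run follows this one */
				b->npages = 0;
			}
		}
		return err;
	}

	static long batch_add(struct batch *b, unsigned long phys_page)
	{
		b->pglist[b->npages++] = phys_page;
		return (b->npages == NENTS) ? batch_flush(b) : 0;
	}
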
p                 370 arch/sparc/kernel/pcic.c 		struct pcic_sn2list *p;
p                 372 arch/sparc/kernel/pcic.c 		for (p = pcic_known_sysnames; p->sysname != NULL; p++) {
p                 373 arch/sparc/kernel/pcic.c 			if (strcmp(namebuf, p->sysname) == 0)
p                 376 arch/sparc/kernel/pcic.c 		pcic->pcic_imap = p->intmap;
p                 377 arch/sparc/kernel/pcic.c 		pcic->pcic_imdim = p->mapdim;
p                 533 arch/sparc/kernel/pcic.c 	struct pcic_ca2irq *p;
p                 544 arch/sparc/kernel/pcic.c 	if ((p = pcic->pcic_imap) == NULL) {
p                 549 arch/sparc/kernel/pcic.c 		if (p->busno == dev->bus->number && p->devfn == dev->devfn)
p                 551 arch/sparc/kernel/pcic.c 		p++;
p                 560 arch/sparc/kernel/pcic.c 	i = p->pin;
p                 575 arch/sparc/kernel/pcic.c 	if (real_irq == 0 || p->force) {
p                 576 arch/sparc/kernel/pcic.c 		if (p->irq == 0 || p->irq >= 15) {	/* Corrupted map */
p                 577 arch/sparc/kernel/pcic.c 			pci_info(dev, "PCIC: BAD IRQ %d\n", p->irq); for (;;) {}
p                 579 arch/sparc/kernel/pcic.c 		pci_info(dev, "PCIC: setting irq %d at pin %d\n", p->irq,
p                 580 arch/sparc/kernel/pcic.c 			 p->pin);
p                 581 arch/sparc/kernel/pcic.c 		real_irq = p->irq;
p                 583 arch/sparc/kernel/pcic.c 		i = p->pin;
p                 587 arch/sparc/kernel/pcic.c 			ivec |= p->irq << ((i - 4) << 2);
p                 592 arch/sparc/kernel/pcic.c 			ivec |= p->irq << (i << 2);
p                  94 arch/sparc/kernel/process_32.c 	char *p;
p                 100 arch/sparc/kernel/process_32.c 	p = strchr (reboot_command, '\n');
p                 101 arch/sparc/kernel/process_32.c 	if (p) *p = 0;
p                 305 arch/sparc/kernel/process_32.c 		unsigned long arg, struct task_struct *p)
p                 307 arch/sparc/kernel/process_32.c 	struct thread_info *ti = task_thread_info(p);
p                 317 arch/sparc/kernel/process_32.c 		fpsave(&p->thread.float_regs[0], &p->thread.fsr,
p                 318 arch/sparc/kernel/process_32.c 		       &p->thread.fpqueue[0], &p->thread.fpqdepth);
p                 327 arch/sparc/kernel/process_32.c 	new_stack = task_stack_page(p) + THREAD_SIZE;
p                 339 arch/sparc/kernel/process_32.c 	p->thread.kregs = childregs;
p                 341 arch/sparc/kernel/process_32.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 345 arch/sparc/kernel/process_32.c 		p->thread.flags |= SPARC_FLAG_KTHREAD;
p                 346 arch/sparc/kernel/process_32.c 		p->thread.current_ds = KERNEL_DS;
p                 357 arch/sparc/kernel/process_32.c 	p->thread.flags &= ~SPARC_FLAG_KTHREAD;
p                 358 arch/sparc/kernel/process_32.c 	p->thread.current_ds = USER_DS;
p                 396 arch/sparc/kernel/process_32.c 	clear_tsk_thread_flag(p, TIF_USEDFPU);
p                 616 arch/sparc/kernel/process_64.c 		unsigned long arg, struct task_struct *p)
p                 618 arch/sparc/kernel/process_64.c 	struct thread_info *t = task_thread_info(p);
p                 626 arch/sparc/kernel/process_64.c 	child_trap_frame = (task_stack_page(p) +
p                 635 arch/sparc/kernel/process_64.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                  43 arch/sparc/kernel/reboot.c 	char *p;
p                  45 arch/sparc/kernel/reboot.c 	p = strchr(reboot_command, '\n');
p                  46 arch/sparc/kernel/reboot.c 	if (p)
p                  47 arch/sparc/kernel/reboot.c 		*p = 0;
p                 205 arch/sparc/kernel/setup_32.c 	struct cpuid_patch_entry *p;
p                 214 arch/sparc/kernel/setup_32.c 	p = &__cpuid_patch;
p                 215 arch/sparc/kernel/setup_32.c 	while (p < &__cpuid_patch_end) {
p                 216 arch/sparc/kernel/setup_32.c 		unsigned long addr = p->addr;
p                 221 arch/sparc/kernel/setup_32.c 			insns = &p->sun4d[0];
p                 225 arch/sparc/kernel/setup_32.c 			insns = &p->leon[0];
p                 238 arch/sparc/kernel/setup_32.c 		p++;
p                 414 arch/sparc/kernel/setup_32.c 		struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 415 arch/sparc/kernel/setup_32.c 		if (!p)
p                 418 arch/sparc/kernel/setup_32.c 			register_cpu(p, i);
p                 173 arch/sparc/kernel/setup_64.c 	struct cpuid_patch_entry *p;
p                 187 arch/sparc/kernel/setup_64.c 	p = &__cpuid_patch;
p                 188 arch/sparc/kernel/setup_64.c 	while (p < &__cpuid_patch_end) {
p                 189 arch/sparc/kernel/setup_64.c 		unsigned long addr = p->addr;
p                 194 arch/sparc/kernel/setup_64.c 			insns = &p->starfire[0];
p                 199 arch/sparc/kernel/setup_64.c 				insns = &p->cheetah_jbus[0];
p                 201 arch/sparc/kernel/setup_64.c 				insns = &p->cheetah_safari[0];
p                 204 arch/sparc/kernel/setup_64.c 			insns = &p->sun4v[0];
p                 227 arch/sparc/kernel/setup_64.c 		p++;
p                 351 arch/sparc/kernel/setup_64.c 	struct pause_patch_entry *p;
p                 353 arch/sparc/kernel/setup_64.c 	p = &__pause_3insn_patch;
p                 354 arch/sparc/kernel/setup_64.c 	while (p < &__pause_3insn_patch_end) {
p                 355 arch/sparc/kernel/setup_64.c 		unsigned long i, addr = p->addr;
p                 358 arch/sparc/kernel/setup_64.c 			*(unsigned int *) (addr +  (i * 4)) = p->insns[i];
p                 364 arch/sparc/kernel/setup_64.c 		p++;
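
setup_32.c and setup_64.c above walk linker-emitted tables of patch entries (__cpuid_patch, __pause_3insn_patch), overwriting the instruction(s) at each recorded address with a CPU-appropriate variant. A hedged sketch of that write-then-flush step; the entry layout is illustrative, and the "flush" instruction is what keeps the I-cache coherent, as the flushi() uses earlier in this listing also show:

	struct patch_entry {
		unsigned int	addr;		/* kernel text address to rewrite */
		unsigned int	insns[3];	/* replacement instructions */
	};

	static void apply_patch(struct patch_entry *p, int ninsns)
	{
		unsigned long addr = p->addr;
		int i;

		for (i = 0; i < ninsns; i++) {
			*(unsigned int *)(addr + i * 4) = p->insns[i];

			/* flush the patched word from the instruction cache */
			__asm__ __volatile__("flush	%0"
					     : /* no outputs */
					     : "r" (addr + i * 4));
		}
	}
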
p                  50 arch/sparc/kernel/starfire.c 	struct starfire_irqinfo *p;
p                  53 arch/sparc/kernel/starfire.c 	p = kmalloc(sizeof(*p), GFP_KERNEL);
p                  54 arch/sparc/kernel/starfire.c 	if (!p) {
p                  62 arch/sparc/kernel/starfire.c 	p->hwmid = hwmid;
p                  66 arch/sparc/kernel/starfire.c 		p->imap_slots[i] = 0UL;
p                  67 arch/sparc/kernel/starfire.c 		p->tregs[i] = treg_base + (i * 0x10UL);
p                  69 arch/sparc/kernel/starfire.c 		if (upa_readl(p->tregs[i]) != 0)
p                  70 arch/sparc/kernel/starfire.c 			p->imap_slots[i] = 0xdeadbeaf;
p                  72 arch/sparc/kernel/starfire.c 	p->upaid = upaid;
p                  73 arch/sparc/kernel/starfire.c 	p->next = sflist;
p                  74 arch/sparc/kernel/starfire.c 	sflist = p;
p                  80 arch/sparc/kernel/starfire.c 	struct starfire_irqinfo *p;
p                  85 arch/sparc/kernel/starfire.c 	for (p = sflist; p != NULL; p = p->next)
p                  86 arch/sparc/kernel/starfire.c 		if (p->hwmid == bus_hwmid)
p                  88 arch/sparc/kernel/starfire.c 	if (p == NULL) {
p                  94 arch/sparc/kernel/starfire.c 		if (p->imap_slots[i] == imap ||
p                  95 arch/sparc/kernel/starfire.c 		    p->imap_slots[i] == 0UL)
p                 102 arch/sparc/kernel/starfire.c 	p->imap_slots[i] = imap;
p                 109 arch/sparc/kernel/starfire.c 	upa_writel(upaid, p->tregs[i]);
p                 126 arch/sparc/kernel/sun4d_irq.c 			struct irq_bucket *p;
p                 134 arch/sparc/kernel/sun4d_irq.c 			p = irq_map[pil];
p                 135 arch/sparc/kernel/sun4d_irq.c 			while (p) {
p                 138 arch/sparc/kernel/sun4d_irq.c 				next = p->next;
p                 139 arch/sparc/kernel/sun4d_irq.c 				generic_handle_irq(p->irq);
p                 140 arch/sparc/kernel/sun4d_irq.c 				p = next;
p                 171 arch/sparc/kernel/sun4d_irq.c 		struct irq_bucket *p;
p                 173 arch/sparc/kernel/sun4d_irq.c 		p = irq_map[pil];
p                 174 arch/sparc/kernel/sun4d_irq.c 		while (p) {
p                 177 arch/sparc/kernel/sun4d_irq.c 			next = p->next;
p                 178 arch/sparc/kernel/sun4d_irq.c 			generic_handle_irq(p->irq);
p                 179 arch/sparc/kernel/sun4d_irq.c 			p = next;
p                 640 arch/sparc/kernel/sys_sparc_64.c 			unsigned long *p = current_thread_info()->utraps;
p                 647 arch/sparc/kernel/sys_sparc_64.c 				current_thread_info()->utraps = p;
p                 650 arch/sparc/kernel/sys_sparc_64.c 			p[0]--;
p                 652 arch/sparc/kernel/sys_sparc_64.c 			memcpy(current_thread_info()->utraps+1, p+1,
p                  23 arch/sparc/kernel/sysfs.c 	struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \
p                  24 arch/sparc/kernel/sysfs.c 	return sprintf(buf, "%lu\n", p->NAME); \
p                 802 arch/sparc/kernel/time_64.c 	struct get_tick_patch *p;
p                 807 arch/sparc/kernel/time_64.c 	for (p = &__get_tick_patch; p < &__get_tick_patch_end; p++) {
p                 808 arch/sparc/kernel/time_64.c 		instr = (tlb_type == spitfire) ? p->tick : p->stick;
p                 809 arch/sparc/kernel/time_64.c 		addr = (unsigned int *)(unsigned long)p->addr;
p                  69 arch/sparc/kernel/traps_64.c static void dump_tl1_traplog(struct tl1_traplog *p)
p                  74 arch/sparc/kernel/traps_64.c 	       "dumping track stack.\n", p->tl);
p                  82 arch/sparc/kernel/traps_64.c 		       p->trapstack[i].tstate, p->trapstack[i].tpc,
p                  83 arch/sparc/kernel/traps_64.c 		       p->trapstack[i].tnpc, p->trapstack[i].tt);
p                  84 arch/sparc/kernel/traps_64.c 		printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
p                 489 arch/sparc/kernel/traps_64.c 	char memmod_str[64], *p;
p                 494 arch/sparc/kernel/traps_64.c 			p = syndrome_unknown;
p                 496 arch/sparc/kernel/traps_64.c 			p = memmod_str;
p                 499 arch/sparc/kernel/traps_64.c 		       smp_processor_id(), scode, p);
p                 505 arch/sparc/kernel/traps_64.c 			p = syndrome_unknown;
p                 507 arch/sparc/kernel/traps_64.c 			p = memmod_str;
p                 510 arch/sparc/kernel/traps_64.c 		       smp_processor_id(), scode, p);
p                 829 arch/sparc/kernel/traps_64.c 	struct cheetah_err_info *p;
p                 835 arch/sparc/kernel/traps_64.c 	p = cheetah_error_log + (cpu * 2);
p                 837 arch/sparc/kernel/traps_64.c 		p++;
p                 839 arch/sparc/kernel/traps_64.c 	return p;
p                1315 arch/sparc/kernel/traps_64.c 	struct cheetah_err_info local_snapshot, *p;
p                1321 arch/sparc/kernel/traps_64.c 	p = cheetah_get_error_log(afsr);
p                1322 arch/sparc/kernel/traps_64.c 	if (!p) {
p                1331 arch/sparc/kernel/traps_64.c 	memcpy(&local_snapshot, p, sizeof(local_snapshot));
p                1340 arch/sparc/kernel/traps_64.c 	if (p->afsr != afsr || p->afar != afar)
p                1343 arch/sparc/kernel/traps_64.c 		p->afsr = CHAFSR_INVALID;
p                1476 arch/sparc/kernel/traps_64.c 	struct cheetah_err_info local_snapshot, *p;
p                1479 arch/sparc/kernel/traps_64.c 	p = cheetah_get_error_log(afsr);
p                1480 arch/sparc/kernel/traps_64.c 	if (!p) {
p                1489 arch/sparc/kernel/traps_64.c 	memcpy(&local_snapshot, p, sizeof(local_snapshot));
p                1498 arch/sparc/kernel/traps_64.c 	if (p->afsr != afsr || p->afar != afar)
p                1501 arch/sparc/kernel/traps_64.c 		p->afsr = CHAFSR_INVALID;
p                1576 arch/sparc/kernel/traps_64.c 	struct cheetah_err_info local_snapshot, *p;
p                1614 arch/sparc/kernel/traps_64.c 	p = cheetah_get_error_log(afsr);
p                1615 arch/sparc/kernel/traps_64.c 	if (!p) {
p                1624 arch/sparc/kernel/traps_64.c 	memcpy(&local_snapshot, p, sizeof(local_snapshot));
p                1633 arch/sparc/kernel/traps_64.c 	if (p->afsr != afsr || p->afar != afar)
p                1636 arch/sparc/kernel/traps_64.c 		p->afsr = CHAFSR_INVALID;
p                2837 arch/sparc/kernel/traps_64.c 	struct trap_per_cpu *p = &trap_block[cpu];
p                2839 arch/sparc/kernel/traps_64.c 	p->thread = t;
p                2840 arch/sparc/kernel/traps_64.c 	p->pgd_paddr = 0;
p                  16 arch/sparc/lib/PeeCeeI.c 	const u8 *p = src;
p                  19 arch/sparc/lib/PeeCeeI.c 		__raw_writeb(*p++, addr);
p                 353 arch/sparc/mm/init_64.c 	struct pud_huge_patch_entry *p;
p                 356 arch/sparc/mm/init_64.c 	p = &__pud_huge_patch;
p                 357 arch/sparc/mm/init_64.c 	addr = p->addr;
p                 358 arch/sparc/mm/init_64.c 	*(unsigned int *)addr = p->insn;
p                 869 arch/sparc/mm/init_64.c static int __init early_numa(char *p)
p                 871 arch/sparc/mm/init_64.c 	if (!p)
p                 874 arch/sparc/mm/init_64.c 	if (strstr(p, "off"))
p                 877 arch/sparc/mm/init_64.c 	if (strstr(p, "debug"))
p                 982 arch/sparc/mm/init_64.c 			struct node_mem_mask *p = &node_masks[new_nid];
p                 984 arch/sparc/mm/init_64.c 			if ((start & p->mask) == p->match) {
p                1089 arch/sparc/mm/init_64.c 	struct pglist_data *p;
p                1103 arch/sparc/mm/init_64.c 	p = NODE_DATA(nid);
p                1106 arch/sparc/mm/init_64.c 	p->node_start_pfn = start_pfn;
p                1107 arch/sparc/mm/init_64.c 	p->node_spanned_pages = end_pfn - start_pfn;
p                2040 arch/sparc/mm/init_64.c 	struct tsb_phys_patch_entry *p;
p                2058 arch/sparc/mm/init_64.c 	p = &__tsb_phys_patch;
p                2059 arch/sparc/mm/init_64.c 	while (p < &__tsb_phys_patch_end) {
p                2060 arch/sparc/mm/init_64.c 		unsigned long addr = p->addr;
p                2062 arch/sparc/mm/init_64.c 		*(unsigned int *) addr = p->insn;
p                2068 arch/sparc/mm/init_64.c 		p++;
p                 200 arch/sparc/mm/iommu.c 		unsigned long vaddr, p;
p                 203 arch/sparc/mm/iommu.c 		for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
p                 204 arch/sparc/mm/iommu.c 			flush_page_for_dma(p);
p                 390 arch/sparc/mm/srmmu.c 	unsigned long p;
p                 393 arch/sparc/mm/srmmu.c 	p = (unsigned long)page_address(pte);	/* Cached address (for test) */
p                 394 arch/sparc/mm/srmmu.c 	if (p == 0)
p                 396 arch/sparc/mm/srmmu.c 	p = page_to_pfn(pte) << PAGE_SHIFT;	/* Physical address */
p                 399 arch/sparc/mm/srmmu.c 	srmmu_free_nocache(__nocache_va(p), PTE_SIZE);
p                  19 arch/sparc/prom/memory.c 	struct linux_mlist_v0 *p;
p                  23 arch/sparc/prom/memory.c 	for (p = *(romvec->pv_v0mem.v0_available); p; p = p->theres_more) {
p                  24 arch/sparc/prom/memory.c 		sp_banks[index].base_addr = (unsigned long) p->start_adr;
p                  25 arch/sparc/prom/memory.c 		sp_banks[index].num_bytes = p->num_bytes;
p                  14 arch/um/drivers/pcap_user.c #define PCAP_FD(p) (*(int *)(p))
p                  19 arch/um/drivers/pcap_user.c 	pcap_t *p;
p                  22 arch/um/drivers/pcap_user.c 	p = pcap_open_live(pri->host_if, ETH_MAX_PACKET + ETH_HEADER_OTHER,
p                  24 arch/um/drivers/pcap_user.c 	if (p == NULL) {
p                  31 arch/um/drivers/pcap_user.c 	pri->pcap = p;
p                  40 arch/um/include/asm/page.h #define pte_val(p) ((p).pte)
p                  42 arch/um/include/asm/page.h #define pte_get_bits(p, bits) ((p).pte & (bits))
p                  43 arch/um/include/asm/page.h #define pte_set_bits(p, bits) ((p).pte |= (bits))
p                  44 arch/um/include/asm/page.h #define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
p                  46 arch/um/include/asm/page.h #define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
p                  47 arch/um/include/asm/page.h #define pte_set_val(p, phys, prot) \
p                  48 arch/um/include/asm/page.h 	({ (p).pte = (phys) | pgprot_val(prot); })
p                  69 arch/um/include/asm/page.h #define pte_get_bits(p, bits) ((p).pte & (bits))
p                  70 arch/um/include/asm/page.h #define pte_set_bits(p, bits) ((p).pte |= (bits))
p                  71 arch/um/include/asm/page.h #define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
p                  73 arch/um/include/asm/page.h #define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
p                  74 arch/um/include/asm/page.h #define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
p                 108 arch/um/include/asm/page.h #define phys_to_pfn(p) ((p) >> PAGE_SHIFT)
p                 101 arch/um/include/asm/processor-generic.h extern unsigned long get_wchan(struct task_struct *p);
p                  48 arch/um/include/asm/thread_info.h 	void *p;
p                  50 arch/um/include/asm/thread_info.h 	asm volatile ("" : "=r" (p) : "0" (&ti));
p                  51 arch/um/include/asm/thread_info.h 	ti = (struct thread_info *) (((unsigned long)p) & ~mask);
p                 130 arch/um/kernel/mem.c 	phys_t p;
p                 143 arch/um/kernel/mem.c 	p = __pa(v);
p                 145 arch/um/kernel/mem.c 		      p += PAGE_SIZE) {
p                 150 arch/um/kernel/mem.c 		pte_set_val(*pte, p, PAGE_READONLY);
p                 157 arch/um/kernel/process.c 		unsigned long arg, struct task_struct * p, unsigned long tls)
p                 163 arch/um/kernel/process.c 	p->thread = (struct thread_struct) INIT_THREAD;
p                 166 arch/um/kernel/process.c 	  	memcpy(&p->thread.regs.regs, current_pt_regs(),
p                 167 arch/um/kernel/process.c 		       sizeof(p->thread.regs.regs));
p                 168 arch/um/kernel/process.c 		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
p                 170 arch/um/kernel/process.c 			REGS_SP(p->thread.regs.regs.gp) = sp;
p                 174 arch/um/kernel/process.c 		arch_copy_thread(&current->thread.arch, &p->thread.arch);
p                 176 arch/um/kernel/process.c 		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
p                 177 arch/um/kernel/process.c 		p->thread.request.u.thread.proc = (int (*)(void *))sp;
p                 178 arch/um/kernel/process.c 		p->thread.request.u.thread.arg = (void *)arg;
p                 182 arch/um/kernel/process.c 	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);
p                 185 arch/um/kernel/process.c 		clear_flushed_tls(p);
p                 191 arch/um/kernel/process.c 			ret = arch_set_tls(p, tls);
p                 408 arch/um/kernel/process.c unsigned long get_wchan(struct task_struct *p)
p                 413 arch/um/kernel/process.c 	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
p                 416 arch/um/kernel/process.c 	stack_page = (unsigned long) task_stack_page(p);
p                 421 arch/um/kernel/process.c 	sp = p->thread.switch_buf->JB_SP;
p                  48 arch/um/kernel/ptrace.c 	unsigned long __user *p = (void __user *)data;
p                  49 arch/um/kernel/ptrace.c 	void __user *vp = p;
p                  69 arch/um/kernel/ptrace.c 		if (!access_ok(p, MAX_REG_OFFSET)) {
p                  74 arch/um/kernel/ptrace.c 			__put_user(getreg(child, i), p);
p                  75 arch/um/kernel/ptrace.c 			p++;
p                  84 arch/um/kernel/ptrace.c 		if (!access_ok(p, MAX_REG_OFFSET)) {
p                  89 arch/um/kernel/ptrace.c 			__get_user(tmp, p);
p                  91 arch/um/kernel/ptrace.c 			p++;
p                  21 arch/um/kernel/reboot.c 	struct task_struct *p;
p                  25 arch/um/kernel/reboot.c 	for_each_process(p) {
p                  28 arch/um/kernel/reboot.c 		t = find_lock_task_mm(p);
p                 166 arch/um/kernel/um_arch.c 	const char **p;
p                 169 arch/um/kernel/um_arch.c 	p = &__uml_help_start;
p                 171 arch/um/kernel/um_arch.c 	while (p < &__uml_help_end) {
p                 172 arch/um/kernel/um_arch.c 		printf("%s", *p);
p                 173 arch/um/kernel/um_arch.c 		p++;
p                 186 arch/um/kernel/um_arch.c 	struct uml_param *p;
p                 188 arch/um/kernel/um_arch.c 	p = &__uml_setup_start;
p                 189 arch/um/kernel/um_arch.c 	while (p < &__uml_setup_end) {
p                 192 arch/um/kernel/um_arch.c 		n = strlen(p->str);
p                 193 arch/um/kernel/um_arch.c 		if (!strncmp(line, p->str, n) && p->setup_func(line + n, add))
p                 195 arch/um/kernel/um_arch.c 		p++;
p                 201 arch/um/kernel/um_arch.c 	initcall_t *p;
p                 203 arch/um/kernel/um_arch.c 	p = &__uml_postsetup_start;
p                 204 arch/um/kernel/um_arch.c 	while (p < &__uml_postsetup_end) {
p                 205 arch/um/kernel/um_arch.c 		(*p)();
p                 206 arch/um/kernel/um_arch.c 		p++;
p                  51 arch/um/os-Linux/execvp.c 		char *name, *p;
p                  64 arch/um/os-Linux/execvp.c 		p = path;
p                  68 arch/um/os-Linux/execvp.c 			path = p;
p                  71 arch/um/os-Linux/execvp.c 			p = strchr(path, ':');
p                  72 arch/um/os-Linux/execvp.c 			if (!p)
p                  73 arch/um/os-Linux/execvp.c 				p = strchr(path, '\0');
p                  75 arch/um/os-Linux/execvp.c 			if (p == path)
p                  80 arch/um/os-Linux/execvp.c 				startp = memcpy(name - (p - path), path, p - path);
p                 120 arch/um/os-Linux/execvp.c 		} while (*p++ != '\0');
p                  54 arch/um/os-Linux/sigio.c 	struct pollfd *p;
p                  69 arch/um/os-Linux/sigio.c 			p = &fds->poll[i];
p                  70 arch/um/os-Linux/sigio.c 			if (p->revents == 0)
p                  72 arch/um/os-Linux/sigio.c 			if (p->fd == sigio_private[1]) {
p                 169 arch/um/os-Linux/sigio.c 	struct pollfd *p;
p                 180 arch/um/os-Linux/sigio.c 	p = &all_sigio_fds.poll[i];
p                 194 arch/um/os-Linux/sigio.c 	next_poll.poll[n] = *p;
p                 204 arch/um/os-Linux/sigio.c 	struct pollfd *p;
p                 228 arch/um/os-Linux/sigio.c 		p = &current_poll.poll[i];
p                 229 arch/um/os-Linux/sigio.c 		if (p->fd != fd)
p                 230 arch/um/os-Linux/sigio.c 			next_poll.poll[n++] = *p;
p                 242 arch/um/os-Linux/sigio.c 	struct pollfd *p;
p                 244 arch/um/os-Linux/sigio.c 	p = uml_kmalloc(sizeof(struct pollfd), UM_GFP_KERNEL);
p                 245 arch/um/os-Linux/sigio.c 	if (p == NULL) {
p                 250 arch/um/os-Linux/sigio.c 	*p = ((struct pollfd) { .fd		= fd,
p                 253 arch/um/os-Linux/sigio.c 	return p;
p                 258 arch/um/os-Linux/sigio.c 	struct pollfd *p;
p                 285 arch/um/os-Linux/sigio.c 	p = setup_initial_poll(l_sigio_private[1]);
p                 286 arch/um/os-Linux/sigio.c 	if (!p)
p                 298 arch/um/os-Linux/sigio.c 	current_poll = ((struct pollfds) { .poll 	= p,
p                 330 arch/um/os-Linux/sigio.c 	kfree(p);
p                 151 arch/um/os-Linux/signal.c static void hard_handler(int sig, siginfo_t *si, void *p)
p                 153 arch/um/os-Linux/signal.c 	ucontext_t *uc = p;
p                 139 arch/um/os-Linux/umid.c 	int dead, fd, p, n, err;
p                 178 arch/um/os-Linux/umid.c 	p = strtoul(pid, &end, 0);
p                 185 arch/um/os-Linux/umid.c 	if ((kill(p, 0) == 0) || (errno != ESRCH)) {
p                 187 arch/um/os-Linux/umid.c 		       umid, p);
p                  37 arch/unicore32/boot/compressed/misc.c #define arch_decomp_puts(p)
p                  62 arch/unicore32/include/asm/processor.h unsigned long get_wchan(struct task_struct *p);
p                  66 arch/unicore32/include/asm/processor.h #define task_pt_regs(p) \
p                  67 arch/unicore32/include/asm/processor.h 	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
p                  59 arch/unicore32/kernel/clock.c 	struct clk *p, *clk = ERR_PTR(-ENOENT);
p                  62 arch/unicore32/kernel/clock.c 	list_for_each_entry(p, &clocks, node) {
p                  63 arch/unicore32/kernel/clock.c 		if (strcmp(id, p->name) == 0) {
p                  64 arch/unicore32/kernel/clock.c 			clk = p;
p                 224 arch/unicore32/kernel/process.c 	    unsigned long stk_sz, struct task_struct *p)
p                 226 arch/unicore32/kernel/process.c 	struct thread_info *thread = task_thread_info(p);
p                 227 arch/unicore32/kernel/process.c 	struct pt_regs *childregs = task_pt_regs(p);
p                 231 arch/unicore32/kernel/process.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 274 arch/unicore32/kernel/process.c unsigned long get_wchan(struct task_struct *p)
p                 278 arch/unicore32/kernel/process.c 	if (!p || p == current || p->state == TASK_RUNNING)
p                 281 arch/unicore32/kernel/process.c 	frame.fp = thread_saved_fp(p);
p                 282 arch/unicore32/kernel/process.c 	frame.sp = thread_saved_sp(p);
p                 284 arch/unicore32/kernel/process.c 	frame.pc = thread_saved_pc(p);
p                 164 arch/unicore32/kernel/setup.c static int __init early_mem(char *p)
p                 181 arch/unicore32/kernel/setup.c 	size  = memparse(p, &endp);
p                  83 arch/unicore32/kernel/traps.c 		unsigned long p;
p                  89 arch/unicore32/kernel/traps.c 		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
p                  90 arch/unicore32/kernel/traps.c 			if (p >= bottom && p < top) {
p                  92 arch/unicore32/kernel/traps.c 				if (__get_user(val, (unsigned long *)p) == 0)
p                 109 arch/unicore32/kernel/traps.c 	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
p                 126 arch/unicore32/kernel/traps.c 			p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
p                 129 arch/unicore32/kernel/traps.c 			p += sprintf(p, "bad PC value");
p                 180 arch/unicore32/mm/mmu.c 		pmd_t *p = pmd;
p                 187 arch/unicore32/mm/mmu.c 		flush_pmd_entry(p);
p                  22 arch/x86/boot/bitops.h 	const u32 *p = (const u32 *)addr;
p                  23 arch/x86/boot/bitops.h 	return ((1UL << (nr & 31)) & (p[nr >> 5])) != 0;
p                  28 arch/x86/boot/bitops.h 	const u32 *p = (const u32 *)addr;
p                  30 arch/x86/boot/bitops.h 	asm("btl %2,%1" CC_SET(c) : CC_OUT(c) (v) : "m" (*p), "Ir" (nr));
p                 201 arch/x86/boot/compressed/eboot.c 	void *p;
p                 203 arch/x86/boot/compressed/eboot.c 	status = efi_call_early(locate_protocol, &guid, NULL, &p);
p                 207 arch/x86/boot/compressed/eboot.c 	if (efi_table_attr(apple_properties_protocol, version, p) != 0x10000) {
p                 212 arch/x86/boot/compressed/eboot.c 	efi_call_proto(apple_properties_protocol, get_all, p, NULL, &size);
p                 224 arch/x86/boot/compressed/eboot.c 		status = efi_call_proto(apple_properties_protocol, get_all, p,
p                 676 arch/x86/boot/compressed/eboot.c 	struct exit_boot_struct *p = priv;
p                 680 arch/x86/boot/compressed/eboot.c 	memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
p                 682 arch/x86/boot/compressed/eboot.c 	p->efi->efi_systab		= (unsigned long)sys_table_arg;
p                 683 arch/x86/boot/compressed/eboot.c 	p->efi->efi_memdesc_size	= *map->desc_size;
p                 684 arch/x86/boot/compressed/eboot.c 	p->efi->efi_memdesc_version	= *map->desc_ver;
p                 685 arch/x86/boot/compressed/eboot.c 	p->efi->efi_memmap		= (unsigned long)*map->map;
p                 686 arch/x86/boot/compressed/eboot.c 	p->efi->efi_memmap_size		= *map->map_size;
p                 689 arch/x86/boot/compressed/eboot.c 	p->efi->efi_systab_hi		= (unsigned long)sys_table_arg >> 32;
p                 690 arch/x86/boot/compressed/eboot.c 	p->efi->efi_memmap_hi		= (unsigned long)*map->map >> 32;
p                 848 arch/x86/boot/compressed/eboot.c 		desc->p		= 1;
p                 869 arch/x86/boot/compressed/eboot.c 	desc->p		= 1;
p                 891 arch/x86/boot/compressed/eboot.c 	desc->p		= 1;
p                 908 arch/x86/boot/compressed/eboot.c 		desc->p		= 1;
p                 136 arch/x86/boot/compressed/kaslr.c parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
p                 140 arch/x86/boot/compressed/kaslr.c 	if (!p)
p                 144 arch/x86/boot/compressed/kaslr.c 	if (!strncmp(p, "exactmap", 8))
p                 147 arch/x86/boot/compressed/kaslr.c 	oldp = p;
p                 148 arch/x86/boot/compressed/kaslr.c 	*size = memparse(p, &p);
p                 149 arch/x86/boot/compressed/kaslr.c 	if (p == oldp)
p                 152 arch/x86/boot/compressed/kaslr.c 	switch (*p) {
p                 156 arch/x86/boot/compressed/kaslr.c 		*start = memparse(p + 1, &p);
p                 219 arch/x86/boot/compressed/kaslr.c 	char *p;
p                 222 arch/x86/boot/compressed/kaslr.c 		p = val;
p                 223 arch/x86/boot/compressed/kaslr.c 		if (memparse(p, &p) != PUD_SIZE) {
p                 235 arch/x86/boot/compressed/kaslr.c 		p = val;
p                 236 arch/x86/boot/compressed/kaslr.c 		max_gb_huge_pages = simple_strtoull(p, &p, 0);
p                 278 arch/x86/boot/compressed/kaslr.c 			char *p = val;
p                 280 arch/x86/boot/compressed/kaslr.c 			if (!strcmp(p, "nopentium"))
p                 282 arch/x86/boot/compressed/kaslr.c 			mem_size = memparse(p, &p);
p                 259 arch/x86/boot/string.c 				   unsigned long long *p)
p                 292 arch/x86/boot/string.c 	*p = res;
p                 298 arch/x86/boot/tools/build.c #define PARSE_ZOFS(p, sym) do { \
p                 299 arch/x86/boot/tools/build.c 	if (!strncmp(p, "#define ZO_" #sym " ", 11+sizeof(#sym)))	\
p                 300 arch/x86/boot/tools/build.c 		sym = strtoul(p + 11 + sizeof(#sym), NULL, 16);		\
p                 306 arch/x86/boot/tools/build.c 	char *p;
p                 318 arch/x86/boot/tools/build.c 	p = (char *)buf;
p                 320 arch/x86/boot/tools/build.c 	while (p && *p) {
p                 321 arch/x86/boot/tools/build.c 		PARSE_ZOFS(p, efi32_stub_entry);
p                 322 arch/x86/boot/tools/build.c 		PARSE_ZOFS(p, efi64_stub_entry);
p                 323 arch/x86/boot/tools/build.c 		PARSE_ZOFS(p, efi_pe_entry);
p                 324 arch/x86/boot/tools/build.c 		PARSE_ZOFS(p, startup_64);
p                 326 arch/x86/boot/tools/build.c 		p = strchr(p, '\n');
p                 327 arch/x86/boot/tools/build.c 		while (p && (*p == '\r' || *p == '\n'))
p                 328 arch/x86/boot/tools/build.c 			p++;
p                  52 arch/x86/crypto/crc32-pclmul_glue.c 	crc32_pclmul_le(u32 crc, unsigned char const *p, size_t len)
p                  59 arch/x86/crypto/crc32-pclmul_glue.c 		return crc32_le(crc, p, len);
p                  61 arch/x86/crypto/crc32-pclmul_glue.c 	if ((long)p & SCALE_F_MASK) {
p                  63 arch/x86/crypto/crc32-pclmul_glue.c 		prealign = SCALE_F - ((long)p & SCALE_F_MASK);
p                  65 arch/x86/crypto/crc32-pclmul_glue.c 		crc = crc32_le(crc, p, prealign);
p                  67 arch/x86/crypto/crc32-pclmul_glue.c 		p = (unsigned char *)(((unsigned long)p + SCALE_F_MASK) &
p                  74 arch/x86/crypto/crc32-pclmul_glue.c 	crc = crc32_pclmul_le_16(p, iquotient, crc);
p                  78 arch/x86/crypto/crc32-pclmul_glue.c 		crc = crc32_le(crc, p + iquotient, iremainder);
p                  62 arch/x86/crypto/crc32c-intel_glue.c static u32 __pure crc32c_intel_le_hw(u32 crc, unsigned char const *p, size_t len)
p                  66 arch/x86/crypto/crc32c-intel_glue.c 	unsigned long *ptmp = (unsigned long *)p;
p                1861 arch/x86/events/intel/ds.c 		struct pebs_record_nhm *p = at;
p                1864 arch/x86/events/intel/ds.c 		pebs_status = p->status & cpuc->pebs_enabled;
p                1907 arch/x86/events/intel/ds.c 		if (p->status != (1ULL << bit)) {
p                 616 arch/x86/events/intel/pt.c 	struct page *p;
p                 618 arch/x86/events/intel/pt.c 	p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
p                 619 arch/x86/events/intel/pt.c 	if (!p)
p                 622 arch/x86/events/intel/pt.c 	tp = page_address(p);
p                 630 arch/x86/events/intel/pt.c 		TOPA_ENTRY(&tp->topa, 1)->base = page_to_phys(p) >> TOPA_SHIFT;
p                 705 arch/x86/events/intel/pt.c 	struct page *p;
p                 707 arch/x86/events/intel/pt.c 	p = virt_to_page(buf->data_pages[buf->nr_pages]);
p                 708 arch/x86/events/intel/pt.c 	if (PagePrivate(p))
p                 709 arch/x86/events/intel/pt.c 		order = page_private(p);
p                 724 arch/x86/events/intel/pt.c 	TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
p                 872 arch/x86/events/intel/uncore_snb.c 	const struct imc_uncore_pci_dev *p;
p                 875 arch/x86/events/intel/uncore_snb.c 	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
p                 876 arch/x86/events/intel/uncore_snb.c 		ret = snb_pci2phy_map_init(p->pci_id);
p                 878 arch/x86/events/intel/uncore_snb.c 			return p->driver;
p                  64 arch/x86/ia32/ia32_aout.c static u32 __user *create_aout_tables(char __user *p, struct linux_binprm *bprm)
p                  69 arch/x86/ia32/ia32_aout.c 	sp = (u32 __user *) ((-(unsigned long)sizeof(u32)) & (unsigned long) p);
p                  77 arch/x86/ia32/ia32_aout.c 	current->mm->arg_start = (unsigned long) p;
p                  81 arch/x86/ia32/ia32_aout.c 		put_user((u32)(unsigned long)p, argv++);
p                  83 arch/x86/ia32/ia32_aout.c 			get_user(c, p++);
p                  87 arch/x86/ia32/ia32_aout.c 	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
p                  91 arch/x86/ia32/ia32_aout.c 		put_user((u32)(unsigned long)p, envp++);
p                  93 arch/x86/ia32/ia32_aout.c 			get_user(c, p++);
p                  97 arch/x86/ia32/ia32_aout.c 	current->mm->env_end = (unsigned long) p;
p                 232 arch/x86/ia32/ia32_aout.c 		(unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
p                  66 arch/x86/include/asm/barrier.h #define __smp_store_release(p, v)					\
p                  68 arch/x86/include/asm/barrier.h 	compiletime_assert_atomic_type(*p);				\
p                  70 arch/x86/include/asm/barrier.h 	WRITE_ONCE(*p, v);						\
p                  73 arch/x86/include/asm/barrier.h #define __smp_load_acquire(p)						\
p                  75 arch/x86/include/asm/barrier.h 	typeof(*p) ___p1 = READ_ONCE(*p);				\
p                  76 arch/x86/include/asm/barrier.h 	compiletime_assert_atomic_type(*p);				\
p                  29 arch/x86/include/asm/desc.h 	desc->p			= info->seg_not_present ^ 1;
p                  88 arch/x86/include/asm/desc.h 	gate->bits.p		= 1;
p                 176 arch/x86/include/asm/desc.h 	desc->p			= 1;
p                  19 arch/x86/include/asm/desc_defs.h 	u16	base1: 8, type: 4, s: 1, dpl: 2, p: 1;
p                  33 arch/x86/include/asm/desc_defs.h 		.p		= (flags >> 7) & 0x01,		\
p                  58 arch/x86/include/asm/desc_defs.h 	u16	base1 : 8, type : 5, dpl : 2, p : 1;
p                  74 arch/x86/include/asm/desc_defs.h 			p	: 1;
p                  57 arch/x86/include/asm/efi.h #define arch_efi_call_virt(p, f, args...)				\
p                  59 arch/x86/include/asm/efi.h 	((efi_##f##_t __attribute__((regparm(0)))*) p->f)(args);	\
p                  92 arch/x86/include/asm/efi.h #define arch_efi_call_virt(p, f, args...)				\
p                  93 arch/x86/include/asm/efi.h 	efi_call((void *)p->f, args)					\
p                  74 arch/x86/include/asm/intel_scu_ipc.h static inline int intel_scu_notifier_post(unsigned long v, void *p)
p                  76 arch/x86/include/asm/intel_scu_ipc.h 	return blocking_notifier_call_chain(&intel_scu_notifier, v, p);
p                  39 arch/x86/include/asm/kprobes.h #define flush_insn_slot(p)	do { } while (0)
p                  54 arch/x86/include/asm/kprobes.h void arch_remove_kprobe(struct kprobe *p);
p                 302 arch/x86/include/asm/msr.h static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
p                 306 arch/x86/include/asm/msr.h 	*p = native_read_msr_safe(msr, &err);
p                  21 arch/x86/include/asm/numachip/numachip_csr.h #define CSR_NODE_BITS(p)	(((unsigned long)(p)) << CSR_NODE_SHIFT)
p                 215 arch/x86/include/asm/paravirt.h static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
p                 219 arch/x86/include/asm/paravirt.h 	*p = paravirt_read_msr_safe(msr, &err);
p                 645 arch/x86/include/asm/pgtable.h #define canon_pgprot(p) __pgprot(massage_pgprot(p))
p                  43 arch/x86/include/asm/preempt.h #define init_task_preempt_count(p) do { } while (0)
p                  45 arch/x86/include/asm/preempt.h #define init_idle_preempt_count(p, cpu) do { \
p                 582 arch/x86/include/asm/processor.h unsigned long get_wchan(struct task_struct *p);
p                 359 arch/x86/include/asm/ptrace.h extern int do_get_thread_area(struct task_struct *p, int idx,
p                 361 arch/x86/include/asm/ptrace.h extern int do_set_thread_area(struct task_struct *p, int idx,
p                 256 arch/x86/include/asm/segment.h 	unsigned int p;
p                 269 arch/x86/include/asm/segment.h 			[p] "=a" (p), [seg] "r" (__CPUNODE_SEG));
p                 272 arch/x86/include/asm/segment.h 		*cpu = (p & VDSO_CPUNODE_MASK);
p                 274 arch/x86/include/asm/segment.h 		*node = (p >> VDSO_CPUNODE_BITS);
p                 213 arch/x86/include/asm/special_insns.h 	volatile struct { char x[64]; } *p = __p;
p                 221 arch/x86/include/asm/special_insns.h 		: [p] "+m" (*p)
p                 222 arch/x86/include/asm/special_insns.h 		: [pax] "a" (p));
p                 346 arch/x86/include/asm/uv/uv_hub.h #define UV_PNODE_TO_GNODE(p)		((p) |uv_hub_info->gnode_extra)
p                 347 arch/x86/include/asm/uv/uv_hub.h #define UV_PNODE_TO_NASID(p)		(UV_PNODE_TO_GNODE(p) << 1)
p                 401 arch/x86/include/asm/uv/uv_hub.h #define UV_GLOBAL_MMR32_PNODE_BITS(p)	((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))
p                 403 arch/x86/include/asm/uv/uv_hub.h #define UV_GLOBAL_MMR64_PNODE_BITS(p)					\
p                 404 arch/x86/include/asm/uv/uv_hub.h 	(((unsigned long)(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)
p                 179 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	p:1;				/* RO */
p                 780 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	p:1;				/* RO */
p                 819 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	p:1;				/* RO */
p                1291 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	p:1;				/* RO */
p                1338 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	p:1;				/* RO */
p                4207 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	p:1;				/* RO */
p                 485 arch/x86/include/asm/xen/hypercall.h 		u32 *p = (u32 *)&desc;
p                 489 arch/x86/include/asm/xen/hypercall.h 		mcl->args[2] = *p++;
p                 490 arch/x86/include/asm/xen/hypercall.h 		mcl->args[3] = *p;
p                  40 arch/x86/include/asm/xen/interface.h     typedef struct { type *p; } __guest_handle_ ## name
p                  57 arch/x86/include/asm/xen/interface.h 		(hnd).p = val;				\
p                  60 arch/x86/include/asm/xen/interface.h #define set_xen_guest_handle(hnd, val)	do { (hnd).p = val; } while (0)
p                 596 arch/x86/kernel/alternative.c 	struct paravirt_patch_site *p;
p                 599 arch/x86/kernel/alternative.c 	for (p = start; p < end; p++) {
p                 602 arch/x86/kernel/alternative.c 		BUG_ON(p->len > MAX_PATCH_LEN);
p                 604 arch/x86/kernel/alternative.c 		memcpy(insn_buff, p->instr, p->len);
p                 605 arch/x86/kernel/alternative.c 		used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
p                 607 arch/x86/kernel/alternative.c 		BUG_ON(used > p->len);
p                 610 arch/x86/kernel/alternative.c 		add_nops(insn_buff + used, p->len - used);
p                 611 arch/x86/kernel/alternative.c 		text_poke_early(p->instr, insn_buff, p->len);
p                 816 arch/x86/kernel/amd_gart_64.c void __init gart_parse_options(char *p)
p                 820 arch/x86/kernel/amd_gart_64.c 	if (isdigit(*p) && get_option(&p, &arg))
p                 822 arch/x86/kernel/amd_gart_64.c 	if (!strncmp(p, "fullflush", 9))
p                 824 arch/x86/kernel/amd_gart_64.c 	if (!strncmp(p, "nofullflush", 11))
p                 826 arch/x86/kernel/amd_gart_64.c 	if (!strncmp(p, "noagp", 5))
p                 828 arch/x86/kernel/amd_gart_64.c 	if (!strncmp(p, "noaperture", 10))
p                 831 arch/x86/kernel/amd_gart_64.c 	if (!strncmp(p, "force", 5))
p                 833 arch/x86/kernel/amd_gart_64.c 	if (!strncmp(p, "allowed", 7))
p                 835 arch/x86/kernel/amd_gart_64.c 	if (!strncmp(p, "memaper", 7)) {
p                 837 arch/x86/kernel/amd_gart_64.c 		p += 7;
p                 838 arch/x86/kernel/amd_gart_64.c 		if (*p == '=') {
p                 839 arch/x86/kernel/amd_gart_64.c 			++p;
p                 840 arch/x86/kernel/amd_gart_64.c 			if (get_option(&p, &arg))
p                 267 arch/x86/kernel/aperture_64.c static int __init parse_gart_mem(char *p)
p                 269 arch/x86/kernel/aperture_64.c 	return kstrtobool(p, &gart_fix_e820);
p                1920 arch/x86/kernel/apic/io_apic.c 	struct irq_pin_list *p;
p                1927 arch/x86/kernel/apic/io_apic.c 	for_each_irq_pin(p, mcd->irq_2_pin) {
p                1928 arch/x86/kernel/apic/io_apic.c 		rentry = __ioapic_read_entry(p->apic, p->pin);
p                  41 arch/x86/kernel/cpu/amd.c static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
p                  54 arch/x86/kernel/cpu/amd.c 	*p = gprs[0] | ((u64)gprs[2] << 32);
p                 626 arch/x86/kernel/cpu/common.c 	char *p, *q, *s;
p                 638 arch/x86/kernel/cpu/common.c 	p = q = s = &c->x86_model_id[0];
p                 640 arch/x86/kernel/cpu/common.c 	while (*p == ' ')
p                 641 arch/x86/kernel/cpu/common.c 		p++;
p                 643 arch/x86/kernel/cpu/common.c 	while (*p) {
p                 645 arch/x86/kernel/cpu/common.c 		if (!isspace(*p))
p                 648 arch/x86/kernel/cpu/common.c 		*q++ = *p++;
p                1537 arch/x86/kernel/cpu/common.c 		const char *p;
p                1538 arch/x86/kernel/cpu/common.c 		p = table_lookup_model(c);
p                1539 arch/x86/kernel/cpu/common.c 		if (p)
p                1540 arch/x86/kernel/cpu/common.c 			strcpy(c->x86_model_id, p);
p                1839 arch/x86/kernel/cpu/common.c 	d.p = 1;		/* Present */
p                 195 arch/x86/kernel/cpu/cyrix.c 	const char *p = NULL;
p                 230 arch/x86/kernel/cpu/cyrix.c 		p = Cx486_name[dir0_lsn & 7];
p                 234 arch/x86/kernel/cpu/cyrix.c 		p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
p                 240 arch/x86/kernel/cpu/cyrix.c 		p = Cx86_cb+2;
p                 248 arch/x86/kernel/cpu/cyrix.c 			p = Cx86_cb;
p                 251 arch/x86/kernel/cpu/cyrix.c 			p = Cx86_cb+1;
p                 313 arch/x86/kernel/cpu/cyrix.c 			p = Cx86_cb+2;
p                 329 arch/x86/kernel/cpu/cyrix.c 		p = Cx86_cb+tmp;
p                 340 arch/x86/kernel/cpu/cyrix.c 			p = Cx486_name[!!boot_cpu_has(X86_FEATURE_FPU)];
p                 345 arch/x86/kernel/cpu/cyrix.c 			p = Cx486S_name[0];
p                 355 arch/x86/kernel/cpu/cyrix.c 	if (p)
p                 356 arch/x86/kernel/cpu/cyrix.c 		strcat(buf, p);
p                  64 arch/x86/kernel/cpu/hypervisor.c 	const struct hypervisor_x86 *h = NULL, * const *p;
p                  67 arch/x86/kernel/cpu/hypervisor.c 	for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) {
p                  68 arch/x86/kernel/cpu/hypervisor.c 		if (unlikely(nopv) && !(*p)->ignore_nopv)
p                  71 arch/x86/kernel/cpu/hypervisor.c 		pri = (*p)->detect();
p                  74 arch/x86/kernel/cpu/hypervisor.c 			h = *p;
p                 722 arch/x86/kernel/cpu/intel.c 		char *p = NULL;
p                 727 arch/x86/kernel/cpu/intel.c 				p = "Celeron (Covington)";
p                 729 arch/x86/kernel/cpu/intel.c 				p = "Mobile Pentium II (Dixon)";
p                 734 arch/x86/kernel/cpu/intel.c 				p = "Celeron (Mendocino)";
p                 736 arch/x86/kernel/cpu/intel.c 				p = "Celeron-A";
p                 741 arch/x86/kernel/cpu/intel.c 				p = "Celeron (Coppermine)";
p                 745 arch/x86/kernel/cpu/intel.c 		if (p)
p                 746 arch/x86/kernel/cpu/intel.c 			strcpy(c->x86_model_id, p);
p                 103 arch/x86/kernel/cpu/mce/dev-mcelog.c 	char *p;
p                 107 arch/x86/kernel/cpu/mce/dev-mcelog.c 	p = strchr(mce_helper, '\n');
p                 109 arch/x86/kernel/cpu/mce/dev-mcelog.c 	if (p)
p                 110 arch/x86/kernel/cpu/mce/dev-mcelog.c 		*p = 0;
p                 112 arch/x86/kernel/cpu/mce/dev-mcelog.c 	return strlen(mce_helper) + !!p;
p                 255 arch/x86/kernel/cpu/mce/dev-mcelog.c 	int __user *p = (int __user *)arg;
p                 262 arch/x86/kernel/cpu/mce/dev-mcelog.c 		return put_user(sizeof(struct mce), p);
p                 264 arch/x86/kernel/cpu/mce/dev-mcelog.c 		return put_user(MCE_LOG_LEN, p);
p                 272 arch/x86/kernel/cpu/mce/dev-mcelog.c 		return put_user(flags, p);
p                 594 arch/x86/kernel/cpu/microcode/amd.c 	struct ucode_patch *p;
p                 596 arch/x86/kernel/cpu/microcode/amd.c 	list_for_each_entry(p, &microcode_cache, plist)
p                 597 arch/x86/kernel/cpu/microcode/amd.c 		if (p->equiv_cpu == equiv_cpu)
p                 598 arch/x86/kernel/cpu/microcode/amd.c 			return p;
p                 604 arch/x86/kernel/cpu/microcode/amd.c 	struct ucode_patch *p;
p                 606 arch/x86/kernel/cpu/microcode/amd.c 	list_for_each_entry(p, &microcode_cache, plist) {
p                 607 arch/x86/kernel/cpu/microcode/amd.c 		if (p->equiv_cpu == new_patch->equiv_cpu) {
p                 608 arch/x86/kernel/cpu/microcode/amd.c 			if (p->patch_id >= new_patch->patch_id) {
p                 615 arch/x86/kernel/cpu/microcode/amd.c 			list_replace(&p->plist, &new_patch->plist);
p                 616 arch/x86/kernel/cpu/microcode/amd.c 			kfree(p->data);
p                 617 arch/x86/kernel/cpu/microcode/amd.c 			kfree(p);
p                 627 arch/x86/kernel/cpu/microcode/amd.c 	struct ucode_patch *p, *tmp;
p                 629 arch/x86/kernel/cpu/microcode/amd.c 	list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
p                 630 arch/x86/kernel/cpu/microcode/amd.c 		__list_del(p->plist.prev, p->plist.next);
p                 631 arch/x86/kernel/cpu/microcode/amd.c 		kfree(p->data);
p                 632 arch/x86/kernel/cpu/microcode/amd.c 		kfree(p);
p                 651 arch/x86/kernel/cpu/microcode/amd.c 	struct ucode_patch *p;
p                 660 arch/x86/kernel/cpu/microcode/amd.c 	p = find_patch(cpu);
p                 661 arch/x86/kernel/cpu/microcode/amd.c 	if (p && (p->patch_id == csig->rev))
p                 662 arch/x86/kernel/cpu/microcode/amd.c 		uci->mc = p->data;
p                 674 arch/x86/kernel/cpu/microcode/amd.c 	struct ucode_patch *p;
p                 682 arch/x86/kernel/cpu/microcode/amd.c 	p = find_patch(cpu);
p                 683 arch/x86/kernel/cpu/microcode/amd.c 	if (!p)
p                 686 arch/x86/kernel/cpu/microcode/amd.c 	mc_amd  = p->data;
p                 687 arch/x86/kernel/cpu/microcode/amd.c 	uci->mc = p->data;
p                 840 arch/x86/kernel/cpu/microcode/amd.c 	struct ucode_patch *p;
p                 852 arch/x86/kernel/cpu/microcode/amd.c 	p = find_patch(0);
p                 853 arch/x86/kernel/cpu/microcode/amd.c 	if (!p) {
p                 856 arch/x86/kernel/cpu/microcode/amd.c 		if (boot_cpu_data.microcode >= p->patch_id)
p                 867 arch/x86/kernel/cpu/microcode/amd.c 	memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
p                 152 arch/x86/kernel/cpu/microcode/intel.c 	struct ucode_patch *p;
p                 154 arch/x86/kernel/cpu/microcode/intel.c 	p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
p                 155 arch/x86/kernel/cpu/microcode/intel.c 	if (!p)
p                 158 arch/x86/kernel/cpu/microcode/intel.c 	p->data = kmemdup(data, size, GFP_KERNEL);
p                 159 arch/x86/kernel/cpu/microcode/intel.c 	if (!p->data) {
p                 160 arch/x86/kernel/cpu/microcode/intel.c 		kfree(p);
p                 164 arch/x86/kernel/cpu/microcode/intel.c 	return p;
p                 170 arch/x86/kernel/cpu/microcode/intel.c 	struct ucode_patch *iter, *tmp, *p = NULL;
p                 187 arch/x86/kernel/cpu/microcode/intel.c 			p = memdup_patch(data, size);
p                 188 arch/x86/kernel/cpu/microcode/intel.c 			if (!p)
p                 191 arch/x86/kernel/cpu/microcode/intel.c 				list_replace(&iter->plist, &p->plist);
p                 203 arch/x86/kernel/cpu/microcode/intel.c 		p = memdup_patch(data, size);
p                 204 arch/x86/kernel/cpu/microcode/intel.c 		if (!p)
p                 207 arch/x86/kernel/cpu/microcode/intel.c 			list_add_tail(&p->plist, &microcode_cache);
p                 210 arch/x86/kernel/cpu/microcode/intel.c 	if (!p)
p                 219 arch/x86/kernel/cpu/microcode/intel.c 		intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
p                 221 arch/x86/kernel/cpu/microcode/intel.c 		intel_ucode_patch = p->data;
p                 425 arch/x86/kernel/cpu/microcode/intel.c 	struct ucode_patch *p;
p                 439 arch/x86/kernel/cpu/microcode/intel.c 	list_for_each_entry(p, &microcode_cache, plist) {
p                 445 arch/x86/kernel/cpu/microcode/intel.c 		mc_saved_header = (struct microcode_header_intel *)p->data;
p                 748 arch/x86/kernel/cpu/microcode/intel.c 	struct microcode_intel *p;
p                 753 arch/x86/kernel/cpu/microcode/intel.c 	p = find_patch(&uci);
p                 754 arch/x86/kernel/cpu/microcode/intel.c 	if (!p)
p                 757 arch/x86/kernel/cpu/microcode/intel.c 	uci.mc = p;
p                 440 arch/x86/kernel/cpu/mtrr/cleanup.c static int __init parse_mtrr_chunk_size_opt(char *p)
p                 442 arch/x86/kernel/cpu/mtrr/cleanup.c 	if (!p)
p                 444 arch/x86/kernel/cpu/mtrr/cleanup.c 	mtrr_chunk_size = memparse(p, &p);
p                 452 arch/x86/kernel/cpu/mtrr/cleanup.c static int __init parse_mtrr_gran_size_opt(char *p)
p                 454 arch/x86/kernel/cpu/mtrr/cleanup.c 	if (!p)
p                 456 arch/x86/kernel/cpu/mtrr/cleanup.c 	mtrr_gran_size = memparse(p, &p);
p                 338 arch/x86/kernel/cpu/mtrr/generic.c 	unsigned int *p = (unsigned int *)frs;
p                 343 arch/x86/kernel/cpu/mtrr/generic.c 	rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]);
p                 346 arch/x86/kernel/cpu/mtrr/generic.c 		rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
p                 348 arch/x86/kernel/cpu/mtrr/generic.c 		rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
p                 599 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	struct task_struct *p, *t;
p                 605 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	for_each_process_thread(p, t) {
p                 701 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	struct task_struct *p, *t;
p                 704 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	for_each_process_thread(p, t) {
p                2185 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	struct task_struct *p, *t;
p                2188 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	for_each_process_thread(p, t) {
p                 869 arch/x86/kernel/e820.c static int __init parse_memopt(char *p)
p                 873 arch/x86/kernel/e820.c 	if (!p)
p                 876 arch/x86/kernel/e820.c 	if (!strcmp(p, "nopentium")) {
p                 887 arch/x86/kernel/e820.c 	mem_size = memparse(p, &p);
p                 903 arch/x86/kernel/e820.c static int __init parse_memmap_one(char *p)
p                 908 arch/x86/kernel/e820.c 	if (!p)
p                 911 arch/x86/kernel/e820.c 	if (!strncmp(p, "exactmap", 8)) {
p                 925 arch/x86/kernel/e820.c 	oldp = p;
p                 926 arch/x86/kernel/e820.c 	mem_size = memparse(p, &p);
p                 927 arch/x86/kernel/e820.c 	if (p == oldp)
p                 931 arch/x86/kernel/e820.c 	if (*p == '@') {
p                 932 arch/x86/kernel/e820.c 		start_at = memparse(p+1, &p);
p                 934 arch/x86/kernel/e820.c 	} else if (*p == '#') {
p                 935 arch/x86/kernel/e820.c 		start_at = memparse(p+1, &p);
p                 937 arch/x86/kernel/e820.c 	} else if (*p == '$') {
p                 938 arch/x86/kernel/e820.c 		start_at = memparse(p+1, &p);
p                 940 arch/x86/kernel/e820.c 	} else if (*p == '!') {
p                 941 arch/x86/kernel/e820.c 		start_at = memparse(p+1, &p);
p                 943 arch/x86/kernel/e820.c 	} else if (*p == '%') {
p                 946 arch/x86/kernel/e820.c 		start_at = memparse(p + 1, &p);
p                 947 arch/x86/kernel/e820.c 		if (*p == '-')
p                 948 arch/x86/kernel/e820.c 			from = simple_strtoull(p + 1, &p, 0);
p                 949 arch/x86/kernel/e820.c 		if (*p == '+')
p                 950 arch/x86/kernel/e820.c 			to = simple_strtoull(p + 1, &p, 0);
p                 951 arch/x86/kernel/e820.c 		if (*p != '\0')
p                 965 arch/x86/kernel/e820.c 	return *p == '\0' ? 0 : -EINVAL;
p                  13 arch/x86/kernel/eisa.c 	void __iomem *p;
p                  18 arch/x86/kernel/eisa.c 	p = ioremap(0x0FFFD9, 4);
p                  19 arch/x86/kernel/eisa.c 	if (p && readl(p) == 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24))
p                  21 arch/x86/kernel/eisa.c 	iounmap(p);
p                 117 arch/x86/kernel/head64.c 	unsigned long load_delta, *p;
p                 153 arch/x86/kernel/head64.c 	p = pgd + pgd_index(__START_KERNEL_map);
p                 155 arch/x86/kernel/head64.c 		*p = (unsigned long)level4_kernel_pgt;
p                 157 arch/x86/kernel/head64.c 		*p = (unsigned long)level3_kernel_pgt;
p                 158 arch/x86/kernel/head64.c 	*p += _PAGE_TABLE_NOENC - __START_KERNEL_map + load_delta;
p                  30 arch/x86/kernel/idt.c 		.bits.p		= 1,			\
p                 241 arch/x86/kernel/idt.c 	data.bits.p	= 1;
p                  59 arch/x86/kernel/irq.c int arch_show_interrupts(struct seq_file *p, int prec)
p                  63 arch/x86/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "NMI");
p                  65 arch/x86/kernel/irq.c 		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
p                  66 arch/x86/kernel/irq.c 	seq_puts(p, "  Non-maskable interrupts\n");
p                  68 arch/x86/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "LOC");
p                  70 arch/x86/kernel/irq.c 		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
p                  71 arch/x86/kernel/irq.c 	seq_puts(p, "  Local timer interrupts\n");
p                  73 arch/x86/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "SPU");
p                  75 arch/x86/kernel/irq.c 		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
p                  76 arch/x86/kernel/irq.c 	seq_puts(p, "  Spurious interrupts\n");
p                  77 arch/x86/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "PMI");
p                  79 arch/x86/kernel/irq.c 		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
p                  80 arch/x86/kernel/irq.c 	seq_puts(p, "  Performance monitoring interrupts\n");
p                  81 arch/x86/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "IWI");
p                  83 arch/x86/kernel/irq.c 		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
p                  84 arch/x86/kernel/irq.c 	seq_puts(p, "  IRQ work interrupts\n");
p                  85 arch/x86/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "RTR");
p                  87 arch/x86/kernel/irq.c 		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
p                  88 arch/x86/kernel/irq.c 	seq_puts(p, "  APIC ICR read retries\n");
p                  90 arch/x86/kernel/irq.c 		seq_printf(p, "%*s: ", prec, "PLT");
p                  92 arch/x86/kernel/irq.c 			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
p                  93 arch/x86/kernel/irq.c 		seq_puts(p, "  Platform interrupts\n");
p                  97 arch/x86/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "RES");
p                  99 arch/x86/kernel/irq.c 		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
p                 100 arch/x86/kernel/irq.c 	seq_puts(p, "  Rescheduling interrupts\n");
p                 101 arch/x86/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "CAL");
p                 103 arch/x86/kernel/irq.c 		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
p                 104 arch/x86/kernel/irq.c 	seq_puts(p, "  Function call interrupts\n");
p                 105 arch/x86/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "TLB");
p                 107 arch/x86/kernel/irq.c 		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
p                 108 arch/x86/kernel/irq.c 	seq_puts(p, "  TLB shootdowns\n");
p                 111 arch/x86/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "TRM");
p                 113 arch/x86/kernel/irq.c 		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
p                 114 arch/x86/kernel/irq.c 	seq_puts(p, "  Thermal event interrupts\n");
p                 117 arch/x86/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "THR");
p                 119 arch/x86/kernel/irq.c 		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
p                 120 arch/x86/kernel/irq.c 	seq_puts(p, "  Threshold APIC interrupts\n");
p                 123 arch/x86/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "DFR");
p                 125 arch/x86/kernel/irq.c 		seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
p                 126 arch/x86/kernel/irq.c 	seq_puts(p, "  Deferred Error APIC interrupts\n");
p                 129 arch/x86/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "MCE");
p                 131 arch/x86/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
p                 132 arch/x86/kernel/irq.c 	seq_puts(p, "  Machine check exceptions\n");
p                 133 arch/x86/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "MCP");
p                 135 arch/x86/kernel/irq.c 		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
p                 136 arch/x86/kernel/irq.c 	seq_puts(p, "  Machine check polls\n");
p                 140 arch/x86/kernel/irq.c 		seq_printf(p, "%*s: ", prec, "HYP");
p                 142 arch/x86/kernel/irq.c 			seq_printf(p, "%10u ",
p                 144 arch/x86/kernel/irq.c 		seq_puts(p, "  Hypervisor callback interrupts\n");
p                 149 arch/x86/kernel/irq.c 		seq_printf(p, "%*s: ", prec, "HRE");
p                 151 arch/x86/kernel/irq.c 			seq_printf(p, "%10u ",
p                 153 arch/x86/kernel/irq.c 		seq_puts(p, "  Hyper-V reenlightenment interrupts\n");
p                 156 arch/x86/kernel/irq.c 		seq_printf(p, "%*s: ", prec, "HVS");
p                 158 arch/x86/kernel/irq.c 			seq_printf(p, "%10u ",
p                 160 arch/x86/kernel/irq.c 		seq_puts(p, "  Hyper-V stimer0 interrupts\n");
p                 163 arch/x86/kernel/irq.c 	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
p                 165 arch/x86/kernel/irq.c 	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
p                 168 arch/x86/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "PIN");
p                 170 arch/x86/kernel/irq.c 		seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
p                 171 arch/x86/kernel/irq.c 	seq_puts(p, "  Posted-interrupt notification event\n");
p                 173 arch/x86/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "NPI");
p                 175 arch/x86/kernel/irq.c 		seq_printf(p, "%10u ",
p                 177 arch/x86/kernel/irq.c 	seq_puts(p, "  Nested posted-interrupt event\n");
p                 179 arch/x86/kernel/irq.c 	seq_printf(p, "%*s: ", prec, "PIW");
p                 181 arch/x86/kernel/irq.c 		seq_printf(p, "%10u ",
p                 183 arch/x86/kernel/irq.c 	seq_puts(p, "  Posted-interrupt wakeup event\n");
p                  35 arch/x86/kernel/kdebugfs.c 	void *p;
p                  48 arch/x86/kernel/kdebugfs.c 	p = memremap(pa, count, MEMREMAP_WB);
p                  49 arch/x86/kernel/kdebugfs.c 	if (!p)
p                  52 arch/x86/kernel/kdebugfs.c 	remain = copy_to_user(user_buf, p, count);
p                  54 arch/x86/kernel/kdebugfs.c 	memunmap(p);
p                 142 arch/x86/kernel/kgdb.c void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
p                 153 arch/x86/kernel/kgdb.c 	gdb_regs[GDB_BP]	= ((struct inactive_task_frame *)p->thread.sp)->bp;
p                 176 arch/x86/kernel/kgdb.c 	gdb_regs[GDB_SP]	= p->thread.sp;
p                  95 arch/x86/kernel/kprobes/common.h extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter);
p                  98 arch/x86/kernel/kprobes/common.h static inline int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
p                 393 arch/x86/kernel/kprobes/core.c static int prepare_boost(kprobe_opcode_t *buf, struct kprobe *p,
p                 398 arch/x86/kernel/kprobes/core.c 	if (can_boost(insn, p->addr) &&
p                 404 arch/x86/kernel/kprobes/core.c 		synthesize_reljump(buf + len, p->ainsn.insn + len,
p                 405 arch/x86/kernel/kprobes/core.c 				   p->addr + insn->length);
p                 407 arch/x86/kernel/kprobes/core.c 		p->ainsn.boostable = true;
p                 409 arch/x86/kernel/kprobes/core.c 		p->ainsn.boostable = false;
p                 446 arch/x86/kernel/kprobes/core.c static int arch_copy_kprobe(struct kprobe *p)
p                 453 arch/x86/kernel/kprobes/core.c 	len = __copy_instruction(buf, p->addr, p->ainsn.insn, &insn);
p                 461 arch/x86/kernel/kprobes/core.c 	len = prepare_boost(buf, p, &insn);
p                 464 arch/x86/kernel/kprobes/core.c 	p->ainsn.if_modifier = is_IF_modifier(buf);
p                 467 arch/x86/kernel/kprobes/core.c 	p->opcode = buf[0];
p                 470 arch/x86/kernel/kprobes/core.c 	text_poke(p->ainsn.insn, buf, len);
p                 475 arch/x86/kernel/kprobes/core.c int arch_prepare_kprobe(struct kprobe *p)
p                 479 arch/x86/kernel/kprobes/core.c 	if (alternatives_text_reserved(p->addr, p->addr))
p                 482 arch/x86/kernel/kprobes/core.c 	if (!can_probe((unsigned long)p->addr))
p                 485 arch/x86/kernel/kprobes/core.c 	p->ainsn.insn = get_insn_slot();
p                 486 arch/x86/kernel/kprobes/core.c 	if (!p->ainsn.insn)
p                 489 arch/x86/kernel/kprobes/core.c 	ret = arch_copy_kprobe(p);
p                 491 arch/x86/kernel/kprobes/core.c 		free_insn_slot(p->ainsn.insn, 0);
p                 492 arch/x86/kernel/kprobes/core.c 		p->ainsn.insn = NULL;
p                 498 arch/x86/kernel/kprobes/core.c void arch_arm_kprobe(struct kprobe *p)
p                 500 arch/x86/kernel/kprobes/core.c 	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
p                 503 arch/x86/kernel/kprobes/core.c void arch_disarm_kprobe(struct kprobe *p)
p                 505 arch/x86/kernel/kprobes/core.c 	text_poke(p->addr, &p->opcode, 1);
p                 508 arch/x86/kernel/kprobes/core.c void arch_remove_kprobe(struct kprobe *p)
p                 510 arch/x86/kernel/kprobes/core.c 	if (p->ainsn.insn) {
p                 511 arch/x86/kernel/kprobes/core.c 		free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
p                 512 arch/x86/kernel/kprobes/core.c 		p->ainsn.insn = NULL;
p                 535 arch/x86/kernel/kprobes/core.c set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
p                 538 arch/x86/kernel/kprobes/core.c 	__this_cpu_write(current_kprobe, p);
p                 541 arch/x86/kernel/kprobes/core.c 	if (p->ainsn.if_modifier)
p                 577 arch/x86/kernel/kprobes/core.c static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
p                 580 arch/x86/kernel/kprobes/core.c 	if (setup_detour_execution(p, regs, reenter))
p                 584 arch/x86/kernel/kprobes/core.c 	if (p->ainsn.boostable && !p->post_handler) {
p                 593 arch/x86/kernel/kprobes/core.c 		regs->ip = (unsigned long)p->ainsn.insn;
p                 599 arch/x86/kernel/kprobes/core.c 		set_current_kprobe(p, regs, kcb);
p                 608 arch/x86/kernel/kprobes/core.c 	if (p->opcode == BREAKPOINT_INSTRUCTION)
p                 609 arch/x86/kernel/kprobes/core.c 		regs->ip = (unsigned long)p->addr;
p                 611 arch/x86/kernel/kprobes/core.c 		regs->ip = (unsigned long)p->ainsn.insn;
p                 620 arch/x86/kernel/kprobes/core.c static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
p                 627 arch/x86/kernel/kprobes/core.c 		kprobes_inc_nmissed_count(p);
p                 628 arch/x86/kernel/kprobes/core.c 		setup_singlestep(p, regs, kcb, 1);
p                 638 arch/x86/kernel/kprobes/core.c 		dump_kprobe(p);
p                 657 arch/x86/kernel/kprobes/core.c 	struct kprobe *p;
p                 671 arch/x86/kernel/kprobes/core.c 	p = get_kprobe(addr);
p                 673 arch/x86/kernel/kprobes/core.c 	if (p) {
p                 675 arch/x86/kernel/kprobes/core.c 			if (reenter_kprobe(p, regs, kcb))
p                 678 arch/x86/kernel/kprobes/core.c 			set_current_kprobe(p, regs, kcb);
p                 688 arch/x86/kernel/kprobes/core.c 			if (!p->pre_handler || !p->pre_handler(p, regs))
p                 689 arch/x86/kernel/kprobes/core.c 				setup_singlestep(p, regs, kcb, 0);
p                 907 arch/x86/kernel/kprobes/core.c static void resume_execution(struct kprobe *p, struct pt_regs *regs,
p                 911 arch/x86/kernel/kprobes/core.c 	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
p                 912 arch/x86/kernel/kprobes/core.c 	unsigned long orig_ip = (unsigned long)p->addr;
p                 913 arch/x86/kernel/kprobes/core.c 	kprobe_opcode_t *insn = p->ainsn.insn;
p                 931 arch/x86/kernel/kprobes/core.c 		p->ainsn.boostable = true;
p                 956 arch/x86/kernel/kprobes/core.c 			p->ainsn.boostable = true;
p                1085 arch/x86/kernel/kprobes/core.c int arch_trampoline_kprobe(struct kprobe *p)
p                  19 arch/x86/kernel/kprobes/ftrace.c 	struct kprobe *p;
p                  23 arch/x86/kernel/kprobes/ftrace.c 	p = get_kprobe((kprobe_opcode_t *)ip);
p                  24 arch/x86/kernel/kprobes/ftrace.c 	if (unlikely(!p) || kprobe_disabled(p))
p                  29 arch/x86/kernel/kprobes/ftrace.c 		kprobes_inc_nmissed_count(p);
p                  35 arch/x86/kernel/kprobes/ftrace.c 		__this_cpu_write(current_kprobe, p);
p                  37 arch/x86/kernel/kprobes/ftrace.c 		if (!p->pre_handler || !p->pre_handler(p, regs)) {
p                  42 arch/x86/kernel/kprobes/ftrace.c 			regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
p                  43 arch/x86/kernel/kprobes/ftrace.c 			if (unlikely(p->post_handler)) {
p                  45 arch/x86/kernel/kprobes/ftrace.c 				p->post_handler(p, regs, 0);
p                  58 arch/x86/kernel/kprobes/ftrace.c int arch_prepare_kprobe_ftrace(struct kprobe *p)
p                  60 arch/x86/kernel/kprobes/ftrace.c 	p->ainsn.insn = NULL;
p                  61 arch/x86/kernel/kprobes/ftrace.c 	p->ainsn.boostable = false;
p                 313 arch/x86/kernel/kprobes/opt.c 	struct kprobe *p;
p                 316 arch/x86/kernel/kprobes/opt.c 		p = get_kprobe(op->kp.addr + i);
p                 317 arch/x86/kernel/kprobes/opt.c 		if (p && !kprobe_disabled(p))
p                 474 arch/x86/kernel/kprobes/opt.c int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
p                 478 arch/x86/kernel/kprobes/opt.c 	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
p                 480 arch/x86/kernel/kprobes/opt.c 		op = container_of(p, struct optimized_kprobe, kp);
p                 147 arch/x86/kernel/ksysfs.c 	void *p;
p                 172 arch/x86/kernel/ksysfs.c 	p = memremap(paddr + sizeof(*data), data->len, MEMREMAP_WB);
p                 173 arch/x86/kernel/ksysfs.c 	if (!p) {
p                 177 arch/x86/kernel/ksysfs.c 	memcpy(buf, p + off, count);
p                 178 arch/x86/kernel/ksysfs.c 	memunmap(p);
p                  86 arch/x86/kernel/kvm.c 	struct hlist_node *p;
p                  88 arch/x86/kernel/kvm.c 	hlist_for_each(p, &b->list) {
p                  90 arch/x86/kernel/kvm.c 			hlist_entry(p, typeof(*n), link);
p                 177 arch/x86/kernel/kvm.c 		struct hlist_node *p, *next;
p                 180 arch/x86/kernel/kvm.c 		hlist_for_each_safe(p, next, &b->list) {
p                 182 arch/x86/kernel/kvm.c 				hlist_entry(p, typeof(*n), link);
p                 228 arch/x86/kernel/kvmclock.c 	struct page *p;
p                 237 arch/x86/kernel/kvmclock.c 	p = alloc_pages(GFP_KERNEL, order);
p                 238 arch/x86/kernel/kvmclock.c 	if (!p) {
p                 243 arch/x86/kernel/kvmclock.c 	hvclock_mem = page_address(p);
p                 253 arch/x86/kernel/kvmclock.c 			__free_pages(p, order);
p                 286 arch/x86/kernel/kvmclock.c 	struct pvclock_vsyscall_time_info *p = per_cpu(hv_clock_per_cpu, cpu);
p                 293 arch/x86/kernel/kvmclock.c 	if (!cpu || (p && p != per_cpu(hv_clock_per_cpu, 0)))
p                 298 arch/x86/kernel/kvmclock.c 		p = &hv_clock_boot[cpu];
p                 300 arch/x86/kernel/kvmclock.c 		p = hvclock_mem + cpu - HVC_BOOT_ARRAY_SIZE;
p                 304 arch/x86/kernel/kvmclock.c 	per_cpu(hv_clock_per_cpu, cpu) = p;
p                 305 arch/x86/kernel/kvmclock.c 	return p ? 0 : -ENOMEM;
p                 183 arch/x86/kernel/machine_kexec_64.c 	void *p = NULL;
p                 187 arch/x86/kernel/machine_kexec_64.c 		p = page_address(page);
p                 188 arch/x86/kernel/machine_kexec_64.c 		clear_page(p);
p                 191 arch/x86/kernel/machine_kexec_64.c 	return p;
p                  70 arch/x86/kernel/module.c 	void *p;
p                  75 arch/x86/kernel/module.c 	p = __vmalloc_node_range(size, MODULE_ALIGN,
p                  80 arch/x86/kernel/module.c 	if (p && (kasan_module_alloc(p, size) < 0)) {
p                  81 arch/x86/kernel/module.c 		vfree(p);
p                  85 arch/x86/kernel/module.c 	return p;
p                 834 arch/x86/kernel/mpparse.c static int __init parse_alloc_mptable_opt(char *p)
p                 841 arch/x86/kernel/mpparse.c 	if (!p)
p                 843 arch/x86/kernel/mpparse.c 	mpc_new_length = memparse(p, &p);
p                1464 arch/x86/kernel/pci-calgary_64.c static int __init calgary_parse_options(char *p)
p                1471 arch/x86/kernel/pci-calgary_64.c 	while (*p) {
p                1472 arch/x86/kernel/pci-calgary_64.c 		if (!strncmp(p, "64k", 3))
p                1474 arch/x86/kernel/pci-calgary_64.c 		else if (!strncmp(p, "128k", 4))
p                1476 arch/x86/kernel/pci-calgary_64.c 		else if (!strncmp(p, "256k", 4))
p                1478 arch/x86/kernel/pci-calgary_64.c 		else if (!strncmp(p, "512k", 4))
p                1480 arch/x86/kernel/pci-calgary_64.c 		else if (!strncmp(p, "1M", 2))
p                1482 arch/x86/kernel/pci-calgary_64.c 		else if (!strncmp(p, "2M", 2))
p                1484 arch/x86/kernel/pci-calgary_64.c 		else if (!strncmp(p, "4M", 2))
p                1486 arch/x86/kernel/pci-calgary_64.c 		else if (!strncmp(p, "8M", 2))
p                1490 arch/x86/kernel/pci-calgary_64.c 		if (!strncmp(p, "translate_empty_slots", len))
p                1494 arch/x86/kernel/pci-calgary_64.c 		if (!strncmp(p, "disable", len)) {
p                1495 arch/x86/kernel/pci-calgary_64.c 			p += len;
p                1496 arch/x86/kernel/pci-calgary_64.c 			if (*p == '=')
p                1497 arch/x86/kernel/pci-calgary_64.c 				++p;
p                1498 arch/x86/kernel/pci-calgary_64.c 			if (*p == '\0')
p                1500 arch/x86/kernel/pci-calgary_64.c 			ret = kstrtoul(p, 0, &val);
p                1512 arch/x86/kernel/pci-calgary_64.c 		p = strpbrk(p, ",");
p                1513 arch/x86/kernel/pci-calgary_64.c 		if (!p)
p                1516 arch/x86/kernel/pci-calgary_64.c 		p++; /* skip ',' */
p                  42 arch/x86/kernel/pci-dma.c 	struct iommu_table_entry *p;
p                  47 arch/x86/kernel/pci-dma.c 	for (p = __iommu_table; p < __iommu_table_end; p++) {
p                  48 arch/x86/kernel/pci-dma.c 		if (p && p->detect && p->detect() > 0) {
p                  49 arch/x86/kernel/pci-dma.c 			p->flags |= IOMMU_DETECTED;
p                  50 arch/x86/kernel/pci-dma.c 			if (p->early_init)
p                  51 arch/x86/kernel/pci-dma.c 				p->early_init();
p                  52 arch/x86/kernel/pci-dma.c 			if (p->flags & IOMMU_FINISH_IF_DETECTED)
p                  62 arch/x86/kernel/pci-dma.c static __init int iommu_setup(char *p)
p                  66 arch/x86/kernel/pci-dma.c 	if (!p)
p                  69 arch/x86/kernel/pci-dma.c 	while (*p) {
p                  70 arch/x86/kernel/pci-dma.c 		if (!strncmp(p, "off", 3))
p                  73 arch/x86/kernel/pci-dma.c 		if (!strncmp(p, "force", 5))
p                  75 arch/x86/kernel/pci-dma.c 		if (!strncmp(p, "noforce", 7)) {
p                  80 arch/x86/kernel/pci-dma.c 		if (!strncmp(p, "biomerge", 8)) {
p                  84 arch/x86/kernel/pci-dma.c 		if (!strncmp(p, "panic", 5))
p                  86 arch/x86/kernel/pci-dma.c 		if (!strncmp(p, "nopanic", 7))
p                  88 arch/x86/kernel/pci-dma.c 		if (!strncmp(p, "merge", 5)) {
p                  92 arch/x86/kernel/pci-dma.c 		if (!strncmp(p, "nomerge", 7))
p                  94 arch/x86/kernel/pci-dma.c 		if (!strncmp(p, "forcesac", 8))
p                  96 arch/x86/kernel/pci-dma.c 		if (!strncmp(p, "allowdac", 8))
p                  98 arch/x86/kernel/pci-dma.c 		if (!strncmp(p, "nodac", 5))
p                 100 arch/x86/kernel/pci-dma.c 		if (!strncmp(p, "usedac", 6)) {
p                 105 arch/x86/kernel/pci-dma.c 		if (!strncmp(p, "soft", 4))
p                 108 arch/x86/kernel/pci-dma.c 		if (!strncmp(p, "pt", 2))
p                 110 arch/x86/kernel/pci-dma.c 		if (!strncmp(p, "nopt", 4))
p                 113 arch/x86/kernel/pci-dma.c 		gart_parse_options(p);
p                 116 arch/x86/kernel/pci-dma.c 		if (!strncmp(p, "calgary", 7))
p                 120 arch/x86/kernel/pci-dma.c 		p += strcspn(p, ",");
p                 121 arch/x86/kernel/pci-dma.c 		if (*p == ',')
p                 122 arch/x86/kernel/pci-dma.c 			++p;
p                 130 arch/x86/kernel/pci-dma.c 	struct iommu_table_entry *p;
p                 134 arch/x86/kernel/pci-dma.c 	for (p = __iommu_table; p < __iommu_table_end; p++) {
p                 135 arch/x86/kernel/pci-dma.c 		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
p                 136 arch/x86/kernel/pci-dma.c 			p->late_init();
p                  15 arch/x86/kernel/pci-iommu_table.c 	struct iommu_table_entry *p;
p                  20 arch/x86/kernel/pci-iommu_table.c 	for (p = start; p < finish; p++)
p                  21 arch/x86/kernel/pci-iommu_table.c 		if (p->detect == q->depend)
p                  22 arch/x86/kernel/pci-iommu_table.c 			return p;
p                  31 arch/x86/kernel/pci-iommu_table.c 	struct iommu_table_entry *p, *q, tmp;
p                  33 arch/x86/kernel/pci-iommu_table.c 	for (p = start; p < finish; p++) {
p                  35 arch/x86/kernel/pci-iommu_table.c 		q = find_dependents_of(start, finish, p);
p                  39 arch/x86/kernel/pci-iommu_table.c 		if (q > p) {
p                  40 arch/x86/kernel/pci-iommu_table.c 			tmp = *p;
p                  41 arch/x86/kernel/pci-iommu_table.c 			memmove(p, q, sizeof(*p));
p                  53 arch/x86/kernel/pci-iommu_table.c 	struct iommu_table_entry *p, *q, *x;
p                  56 arch/x86/kernel/pci-iommu_table.c 	for (p = start; p < finish; p++) {
p                  57 arch/x86/kernel/pci-iommu_table.c 		q = find_dependents_of(start, finish, p);
p                  59 arch/x86/kernel/pci-iommu_table.c 		if (p == x) {
p                  61 arch/x86/kernel/pci-iommu_table.c 			       p->detect, q->detect);
p                  67 arch/x86/kernel/pci-iommu_table.c 	for (p = start; p < finish; p++) {
p                  68 arch/x86/kernel/pci-iommu_table.c 		q = find_dependents_of(p, finish, p);
p                  69 arch/x86/kernel/pci-iommu_table.c 		if (q && q > p) {
p                  71 arch/x86/kernel/pci-iommu_table.c 			       p->detect, q->detect);
p                 802 arch/x86/kernel/process.c unsigned long get_wchan(struct task_struct *p)
p                 807 arch/x86/kernel/process.c 	if (p == current || p->state == TASK_RUNNING)
p                 810 arch/x86/kernel/process.c 	if (!try_get_task_stack(p))
p                 813 arch/x86/kernel/process.c 	start = (unsigned long)task_stack_page(p);
p                 837 arch/x86/kernel/process.c 	sp = READ_ONCE(p->thread.sp);
p                 851 arch/x86/kernel/process.c 	} while (count++ < 16 && p->state != TASK_RUNNING);
p                 854 arch/x86/kernel/process.c 	put_task_stack(p);
p                 116 arch/x86/kernel/process_32.c 	unsigned long arg, struct task_struct *p, unsigned long tls)
p                 118 arch/x86/kernel/process_32.c 	struct pt_regs *childregs = task_pt_regs(p);
p                 133 arch/x86/kernel/process_32.c 	p->thread.sp = (unsigned long) fork_frame;
p                 134 arch/x86/kernel/process_32.c 	p->thread.sp0 = (unsigned long) (childregs+1);
p                 135 arch/x86/kernel/process_32.c 	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
p                 137 arch/x86/kernel/process_32.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 142 arch/x86/kernel/process_32.c 		p->thread.io_bitmap_ptr = NULL;
p                 151 arch/x86/kernel/process_32.c 	task_user_gs(p) = get_user_gs(current_pt_regs());
p                 153 arch/x86/kernel/process_32.c 	p->thread.io_bitmap_ptr = NULL;
p                 158 arch/x86/kernel/process_32.c 		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
p                 160 arch/x86/kernel/process_32.c 		if (!p->thread.io_bitmap_ptr) {
p                 161 arch/x86/kernel/process_32.c 			p->thread.io_bitmap_max = 0;
p                 164 arch/x86/kernel/process_32.c 		set_tsk_thread_flag(p, TIF_IO_BITMAP);
p                 173 arch/x86/kernel/process_32.c 		err = do_set_thread_area(p, -1,
p                 176 arch/x86/kernel/process_32.c 	if (err && p->thread.io_bitmap_ptr) {
p                 177 arch/x86/kernel/process_32.c 		kfree(p->thread.io_bitmap_ptr);
p                 178 arch/x86/kernel/process_32.c 		p->thread.io_bitmap_max = 0;
p                 375 arch/x86/kernel/process_64.c 		unsigned long arg, struct task_struct *p, unsigned long tls)
p                 383 arch/x86/kernel/process_64.c 	childregs = task_pt_regs(p);
p                 389 arch/x86/kernel/process_64.c 	p->thread.sp = (unsigned long) fork_frame;
p                 390 arch/x86/kernel/process_64.c 	p->thread.io_bitmap_ptr = NULL;
p                 392 arch/x86/kernel/process_64.c 	savesegment(gs, p->thread.gsindex);
p                 393 arch/x86/kernel/process_64.c 	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
p                 394 arch/x86/kernel/process_64.c 	savesegment(fs, p->thread.fsindex);
p                 395 arch/x86/kernel/process_64.c 	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
p                 396 arch/x86/kernel/process_64.c 	savesegment(es, p->thread.es);
p                 397 arch/x86/kernel/process_64.c 	savesegment(ds, p->thread.ds);
p                 398 arch/x86/kernel/process_64.c 	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
p                 400 arch/x86/kernel/process_64.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                 416 arch/x86/kernel/process_64.c 		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
p                 418 arch/x86/kernel/process_64.c 		if (!p->thread.io_bitmap_ptr) {
p                 419 arch/x86/kernel/process_64.c 			p->thread.io_bitmap_max = 0;
p                 422 arch/x86/kernel/process_64.c 		set_tsk_thread_flag(p, TIF_IO_BITMAP);
p                 431 arch/x86/kernel/process_64.c 			err = do_set_thread_area(p, -1,
p                 435 arch/x86/kernel/process_64.c 			err = do_arch_prctl_64(p, ARCH_SET_FS, tls);
p                 441 arch/x86/kernel/process_64.c 	if (err && p->thread.io_bitmap_ptr) {
p                 442 arch/x86/kernel/process_64.c 		kfree(p->thread.io_bitmap_ptr);
p                 443 arch/x86/kernel/process_64.c 		p->thread.io_bitmap_max = 0;
p                 778 arch/x86/kernel/setup.c static int __init parse_reservelow(char *p)
p                 782 arch/x86/kernel/setup.c 	if (!p)
p                 785 arch/x86/kernel/setup.c 	size = memparse(p, &p);
p                 809 arch/x86/kernel/setup.c dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
p                  84 arch/x86/kernel/tls.c static void set_tls_desc(struct task_struct *p, int idx,
p                  87 arch/x86/kernel/tls.c 	struct thread_struct *t = &p->thread;
p                 114 arch/x86/kernel/tls.c int do_set_thread_area(struct task_struct *p, int idx,
p                 145 arch/x86/kernel/tls.c 	set_tls_desc(p, idx, &info, 1);
p                 154 arch/x86/kernel/tls.c 	if (p == current) {
p                 180 arch/x86/kernel/tls.c 		if (p->thread.fsindex == modified_sel)
p                 181 arch/x86/kernel/tls.c 			p->thread.fsbase = info.base_addr;
p                 183 arch/x86/kernel/tls.c 		if (p->thread.gsindex == modified_sel)
p                 184 arch/x86/kernel/tls.c 			p->thread.gsbase = info.base_addr;
p                 213 arch/x86/kernel/tls.c 	info->seg_not_present = !desc->p;
p                 220 arch/x86/kernel/tls.c int do_get_thread_area(struct task_struct *p, int idx,
p                 236 arch/x86/kernel/tls.c 	fill_user_desc(&info, idx, &p->thread.tls_array[index]);
p                 309 arch/x86/kernel/tsc.c static u64 tsc_read_refs(u64 *p, int hpet)
p                 318 arch/x86/kernel/tsc.c 			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
p                 320 arch/x86/kernel/tsc.c 			*p = acpi_pm_read_early();
p                 963 arch/x86/kvm/emulate.c 	void *p;
p                 967 arch/x86/kvm/emulate.c 		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
p                 969 arch/x86/kvm/emulate.c 		p = reg_rmw(ctxt, modrm_reg);
p                 970 arch/x86/kvm/emulate.c 	return p;
p                1665 arch/x86/kvm/emulate.c 		seg_desc.p = 1;
p                1691 arch/x86/kvm/emulate.c 			seg_desc.p = 1;
p                1717 arch/x86/kvm/emulate.c 	if (!seg_desc.p) {
p                2397 arch/x86/kvm/emulate.c 	desc->p    = (flags >> 15) & 1;
p                2649 arch/x86/kvm/emulate.c 		cs_desc.s = cs_desc.g = cs_desc.p = 1;
p                2705 arch/x86/kvm/emulate.c 	cs->p = 1;
p                2716 arch/x86/kvm/emulate.c 	ss->p = 1;
p                2990 arch/x86/kvm/emulate.c 	if (!tr_seg.p)
p                3356 arch/x86/kvm/emulate.c 	if (!next_tss_desc.p ||
p                1138 arch/x86/kvm/mmu.c 	void *p;
p                1141 arch/x86/kvm/mmu.c 	p = mc->objects[--mc->nobjs];
p                1142 arch/x86/kvm/mmu.c 	return p;
p                  26 arch/x86/kvm/mmutrace.h 	const char *saved_ptr = trace_seq_buffer_ptr(p);		\
p                  34 arch/x86/kvm/mmutrace.h 	trace_seq_printf(p, "sp gen %u gfn %llx l%u %u-byte q%u%s %s%s"	\
p                 657 arch/x86/kvm/svm.c 	unsigned base1:8, type:5, dpl:2, p:1;
p                3506 arch/x86/kvm/svm.c 		u32 value, p;
p                3512 arch/x86/kvm/svm.c 		p      = msrpm_offsets[i];
p                3513 arch/x86/kvm/svm.c 		offset = svm->nested.vmcb_msrpm + (p * 4);
p                3518 arch/x86/kvm/svm.c 		svm->nested.msrpm[p] = svm->msrpm[p] | value;
p                6598 arch/x86/kvm/svm.c 	void __user *p = NULL;
p                6616 arch/x86/kvm/svm.c 	p = (void __user *)(uintptr_t)params.uaddr;
p                6617 arch/x86/kvm/svm.c 	if (p) {
p                6646 arch/x86/kvm/svm.c 		if (copy_to_user(p, blob, params.len))
p                 401 arch/x86/kvm/vmx/vmcs12.h 	char *p = (char *)vmcs12 + offset;
p                 405 arch/x86/kvm/vmx/vmcs12.h 		return *((natural_width *)p);
p                 407 arch/x86/kvm/vmx/vmcs12.h 		return *((u16 *)p);
p                 409 arch/x86/kvm/vmx/vmcs12.h 		return *((u32 *)p);
p                 411 arch/x86/kvm/vmx/vmcs12.h 		return *((u64 *)p);
p                 421 arch/x86/kvm/vmx/vmcs12.h 	char *p = (char *)vmcs12 + offset;
p                 425 arch/x86/kvm/vmx/vmcs12.h 		*(u16 *)p = field_value;
p                 428 arch/x86/kvm/vmx/vmcs12.h 		*(u32 *)p = field_value;
p                 431 arch/x86/kvm/vmx/vmcs12.h 		*(u64 *)p = field_value;
p                 434 arch/x86/kvm/vmx/vmcs12.h 		*(natural_width *)p = field_value;
p                 717 arch/x86/kvm/vmx/vmx.c 	u16 *p = &vmx->segment_cache.seg[seg].selector;
p                 720 arch/x86/kvm/vmx/vmx.c 		*p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
p                 721 arch/x86/kvm/vmx/vmx.c 	return *p;
p                 726 arch/x86/kvm/vmx/vmx.c 	ulong *p = &vmx->segment_cache.seg[seg].base;
p                 729 arch/x86/kvm/vmx/vmx.c 		*p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
p                 730 arch/x86/kvm/vmx/vmx.c 	return *p;
p                 735 arch/x86/kvm/vmx/vmx.c 	u32 *p = &vmx->segment_cache.seg[seg].limit;
p                 738 arch/x86/kvm/vmx/vmx.c 		*p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
p                 739 arch/x86/kvm/vmx/vmx.c 	return *p;
p                 744 arch/x86/kvm/vmx/vmx.c 	u32 *p = &vmx->segment_cache.seg[seg].ar;
p                 747 arch/x86/kvm/vmx/vmx.c 		*p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
p                 748 arch/x86/kvm/vmx/vmx.c 	return *p;
p                6146 arch/x86/kvm/x86.c 	desc->p = var.present;
p                6177 arch/x86/kvm/x86.c 	var.present = desc->p;
p                 217 arch/x86/lib/insn.c 		insn_byte_t m, p;
p                 219 arch/x86/lib/insn.c 		p = insn_vex_p_bits(insn);
p                 220 arch/x86/lib/insn.c 		insn->attr = inat_get_avx_attribute(op, m, p);
p                  31 arch/x86/lib/mmx_32.c 	void *p;
p                  37 arch/x86/lib/mmx_32.c 	p = to;
p                 115 arch/x86/lib/mmx_32.c 	return p;
p                  94 arch/x86/lib/usercopy_64.c 	void *p;
p                  96 arch/x86/lib/usercopy_64.c 	for (p = (void *)((unsigned long)addr & ~clflush_mask);
p                  97 arch/x86/lib/usercopy_64.c 	     p < vend; p += x86_clflush_size)
p                  98 arch/x86/lib/usercopy_64.c 		clwb(p);
p                 431 arch/x86/mm/dump_pagetables.c #define walk_pmd_level(m,s,a,e,p) walk_pte_level(m,s,__pmd(pud_val(a)),e,p)
p                 466 arch/x86/mm/dump_pagetables.c #define walk_pud_level(m,s,a,e,p) walk_pmd_level(m,s,__pud(p4d_val(a)),e,p)
p                 439 arch/x86/mm/fault.c static int bad_address(void *p)
p                 443 arch/x86/mm/fault.c 	return probe_kernel_address((unsigned long *)p, dummy);
p                1476 arch/x86/mm/init_64.c 			void *p;
p                1479 arch/x86/mm/init_64.c 				p = altmap_alloc_block_buf(PMD_SIZE, altmap);
p                1481 arch/x86/mm/init_64.c 				p = vmemmap_alloc_block_buf(PMD_SIZE, node);
p                1482 arch/x86/mm/init_64.c 			if (p) {
p                1485 arch/x86/mm/init_64.c 				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
p                1490 arch/x86/mm/init_64.c 				if (p_end != p || node_start != node) {
p                1496 arch/x86/mm/init_64.c 					p_start = p;
p                1500 arch/x86/mm/init_64.c 				p_end = p + PMD_SIZE;
p                 439 arch/x86/mm/ioremap.c 	struct vm_struct *p, *o;
p                 467 arch/x86/mm/ioremap.c 	p = find_vm_area((void __force *)addr);
p                 469 arch/x86/mm/ioremap.c 	if (!p) {
p                 475 arch/x86/mm/ioremap.c 	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));
p                 479 arch/x86/mm/ioremap.c 	BUG_ON(p != o || o == NULL);
p                 480 arch/x86/mm/ioremap.c 	kfree(p);
p                  45 arch/x86/mm/kasan_init_64.c 		void *p;
p                  50 arch/x86/mm/kasan_init_64.c 			p = early_alloc(PMD_SIZE, nid, false);
p                  51 arch/x86/mm/kasan_init_64.c 			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
p                  53 arch/x86/mm/kasan_init_64.c 			else if (p)
p                  54 arch/x86/mm/kasan_init_64.c 				memblock_free(__pa(p), PMD_SIZE);
p                  57 arch/x86/mm/kasan_init_64.c 		p = early_alloc(PAGE_SIZE, nid, true);
p                  58 arch/x86/mm/kasan_init_64.c 		pmd_populate_kernel(&init_mm, pmd, p);
p                  64 arch/x86/mm/kasan_init_64.c 		void *p;
p                  69 arch/x86/mm/kasan_init_64.c 		p = early_alloc(PAGE_SIZE, nid, true);
p                  70 arch/x86/mm/kasan_init_64.c 		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
p                  82 arch/x86/mm/kasan_init_64.c 		void *p;
p                  87 arch/x86/mm/kasan_init_64.c 			p = early_alloc(PUD_SIZE, nid, false);
p                  88 arch/x86/mm/kasan_init_64.c 			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
p                  90 arch/x86/mm/kasan_init_64.c 			else if (p)
p                  91 arch/x86/mm/kasan_init_64.c 				memblock_free(__pa(p), PUD_SIZE);
p                  94 arch/x86/mm/kasan_init_64.c 		p = early_alloc(PAGE_SIZE, nid, true);
p                  95 arch/x86/mm/kasan_init_64.c 		pud_populate(&init_mm, pud, p);
p                 113 arch/x86/mm/kasan_init_64.c 		void *p = early_alloc(PAGE_SIZE, nid, true);
p                 115 arch/x86/mm/kasan_init_64.c 		p4d_populate(&init_mm, p4d, p);
p                 129 arch/x86/mm/kasan_init_64.c 	void *p;
p                 134 arch/x86/mm/kasan_init_64.c 		p = early_alloc(PAGE_SIZE, nid, true);
p                 135 arch/x86/mm/kasan_init_64.c 		pgd_populate(&init_mm, pgd, p);
p                 100 arch/x86/mm/kmmio.c 	struct kmmio_probe *p;
p                 101 arch/x86/mm/kmmio.c 	list_for_each_entry_rcu(p, &kmmio_probes, list) {
p                 102 arch/x86/mm/kmmio.c 		if (addr >= p->addr && addr < (p->addr + p->len))
p                 103 arch/x86/mm/kmmio.c 			return p;
p                 438 arch/x86/mm/kmmio.c int register_kmmio_probe(struct kmmio_probe *p)
p                 443 arch/x86/mm/kmmio.c 	unsigned long addr = p->addr & PAGE_MASK;
p                 444 arch/x86/mm/kmmio.c 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
p                 461 arch/x86/mm/kmmio.c 	list_add_rcu(&p->list, &kmmio_probes);
p                 533 arch/x86/mm/kmmio.c void unregister_kmmio_probe(struct kmmio_probe *p)
p                 537 arch/x86/mm/kmmio.c 	unsigned long addr = p->addr & PAGE_MASK;
p                 538 arch/x86/mm/kmmio.c 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
p                 553 arch/x86/mm/kmmio.c 	list_del_rcu(&p->list);
p                 130 arch/x86/mm/mmio-mod.c static void pre(struct kmmio_probe *p, struct pt_regs *regs,
p                 137 arch/x86/mm/mmio-mod.c 	struct remap_trace *trace = p->private;
p                 195 arch/x86/mm/mmio-mod.c static void post(struct kmmio_probe *p, unsigned long condition,
p                 268 arch/x86/mm/pageattr-test.c 	struct task_struct *p;
p                 270 arch/x86/mm/pageattr-test.c 	p = kthread_create(do_pageattr_test, NULL, "pageattr-test");
p                 271 arch/x86/mm/pageattr-test.c 	if (!IS_ERR(p))
p                 272 arch/x86/mm/pageattr-test.c 		wake_up_process(p);
p                 151 arch/x86/mm/pageattr.c static int cpastats_show(struct seq_file *m, void *p)
p                 282 arch/x86/mm/pageattr.c 	void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
p                 285 arch/x86/mm/pageattr.c 	if (p >= vend)
p                 288 arch/x86/mm/pageattr.c 	for (; p < vend; p += clflush_size)
p                 289 arch/x86/mm/pageattr.c 		clflushopt(p);
p                  76 arch/x86/mm/pf_in.c 	unsigned char *p = addr;
p                  84 arch/x86/mm/pf_in.c 		if (*p == prefix_codes[i]) {
p                  85 arch/x86/mm/pf_in.c 			if (*p == 0x66)
p                  88 arch/x86/mm/pf_in.c 			if ((*p & 0xf8) == 0x48)
p                  90 arch/x86/mm/pf_in.c 			if ((*p & 0xf4) == 0x44)
p                  92 arch/x86/mm/pf_in.c 			if ((*p & 0xf0) == 0x40)
p                  95 arch/x86/mm/pf_in.c 			p++;
p                 100 arch/x86/mm/pf_in.c 	return (p - addr);
p                 130 arch/x86/mm/pf_in.c 	unsigned char *p;
p                 135 arch/x86/mm/pf_in.c 	p = (unsigned char *)ins_addr;
p                 136 arch/x86/mm/pf_in.c 	p += skip_prefix(p, &prf);
p                 137 arch/x86/mm/pf_in.c 	p += get_opcode(p, &opcode);
p                 151 arch/x86/mm/pf_in.c 	unsigned char *p;
p                 155 arch/x86/mm/pf_in.c 	p = (unsigned char *)ins_addr;
p                 156 arch/x86/mm/pf_in.c 	p += skip_prefix(p, &prf);
p                 157 arch/x86/mm/pf_in.c 	p += get_opcode(p, &opcode);
p                 174 arch/x86/mm/pf_in.c 	unsigned char *p;
p                 178 arch/x86/mm/pf_in.c 	p = (unsigned char *)ins_addr;
p                 179 arch/x86/mm/pf_in.c 	p += skip_prefix(p, &prf);
p                 180 arch/x86/mm/pf_in.c 	p += get_opcode(p, &opcode);
p                 398 arch/x86/mm/pf_in.c 	unsigned char *p;
p                 402 arch/x86/mm/pf_in.c 	p = (unsigned char *)ins_addr;
p                 403 arch/x86/mm/pf_in.c 	p += skip_prefix(p, &prf);
p                 404 arch/x86/mm/pf_in.c 	p += get_opcode(p, &opcode);
p                 422 arch/x86/mm/pf_in.c 		unsigned char mod_rm = *p;
p                 453 arch/x86/mm/pf_in.c 	unsigned char *p;
p                 457 arch/x86/mm/pf_in.c 	p = (unsigned char *)ins_addr;
p                 458 arch/x86/mm/pf_in.c 	p += skip_prefix(p, &prf);
p                 459 arch/x86/mm/pf_in.c 	p += get_opcode(p, &opcode);
p                 469 arch/x86/mm/pf_in.c 	mod_rm = *p;
p                 471 arch/x86/mm/pf_in.c 	p++;
p                 477 arch/x86/mm/pf_in.c 			p += 4;
p                 481 arch/x86/mm/pf_in.c 		p += 1;
p                 485 arch/x86/mm/pf_in.c 		p += 4;
p                 497 arch/x86/mm/pf_in.c 		return *(unsigned char *)p;
p                 500 arch/x86/mm/pf_in.c 		return *(unsigned short *)p;
p                 503 arch/x86/mm/pf_in.c 		return *(unsigned int *)p;
p                 507 arch/x86/mm/pf_in.c 		return *(unsigned long *)p;
p                  33 arch/x86/mm/testmmiotrace.c static void do_write_test(void __iomem *p)
p                  40 arch/x86/mm/testmmiotrace.c 		iowrite8(i, p + i);
p                  43 arch/x86/mm/testmmiotrace.c 		iowrite16(v16(i), p + i);
p                  46 arch/x86/mm/testmmiotrace.c 		iowrite32(v32(i), p + i);
p                  49 arch/x86/mm/testmmiotrace.c static void do_read_test(void __iomem *p)
p                  57 arch/x86/mm/testmmiotrace.c 		if (ioread8(p + i) != i)
p                  61 arch/x86/mm/testmmiotrace.c 		if (ioread16(p + i) != v16(i))
p                  65 arch/x86/mm/testmmiotrace.c 		if (ioread32(p + i) != v32(i))
p                  72 arch/x86/mm/testmmiotrace.c static void do_read_far_test(void __iomem *p)
p                  77 arch/x86/mm/testmmiotrace.c 	ioread32(p + read_far);
p                  82 arch/x86/mm/testmmiotrace.c 	void __iomem *p = ioremap_nocache(mmio_address, size);
p                  83 arch/x86/mm/testmmiotrace.c 	if (!p) {
p                  87 arch/x86/mm/testmmiotrace.c 	mmiotrace_printk("ioremap returned %p.\n", p);
p                  88 arch/x86/mm/testmmiotrace.c 	do_write_test(p);
p                  89 arch/x86/mm/testmmiotrace.c 	do_read_test(p);
p                  91 arch/x86/mm/testmmiotrace.c 		do_read_far_test(p);
p                  92 arch/x86/mm/testmmiotrace.c 	iounmap(p);
p                 103 arch/x86/mm/testmmiotrace.c 	void __iomem *p;
p                 107 arch/x86/mm/testmmiotrace.c 		p = ioremap_nocache(mmio_address, PAGE_SIZE);
p                 108 arch/x86/mm/testmmiotrace.c 		if (p)
p                 109 arch/x86/mm/testmmiotrace.c 			iounmap(p);
p                 121 arch/x86/pci/sta2x11-fixup.c static dma_addr_t p2a(dma_addr_t p, struct pci_dev *pdev)
p                 127 arch/x86/pci/sta2x11-fixup.c 	a = p + map->amba_base;
p                 140 arch/x86/pci/sta2x11-fixup.c 	dma_addr_t p;
p                 143 arch/x86/pci/sta2x11-fixup.c 	p = a - map->amba_base;
p                 144 arch/x86/pci/sta2x11-fixup.c 	return p;
p                  36 arch/x86/platform/ce4100/ce4100.c static unsigned int mem_serial_in(struct uart_port *p, int offset)
p                  38 arch/x86/platform/ce4100/ce4100.c 	offset = offset << p->regshift;
p                  39 arch/x86/platform/ce4100/ce4100.c 	return readl(p->membase + offset);
p                  52 arch/x86/platform/ce4100/ce4100.c static unsigned int ce4100_mem_serial_in(struct uart_port *p, int offset)
p                  57 arch/x86/platform/ce4100/ce4100.c 		offset = offset << p->regshift;
p                  58 arch/x86/platform/ce4100/ce4100.c 		ret = readl(p->membase + offset);
p                  61 arch/x86/platform/ce4100/ce4100.c 			ier = mem_serial_in(p, UART_IER);
p                  64 arch/x86/platform/ce4100/ce4100.c 				lsr = mem_serial_in(p, UART_LSR);
p                  72 arch/x86/platform/ce4100/ce4100.c 		ret =  mem_serial_in(p, offset);
p                  76 arch/x86/platform/ce4100/ce4100.c static void ce4100_mem_serial_out(struct uart_port *p, int offset, int value)
p                  78 arch/x86/platform/ce4100/ce4100.c 	offset = offset << p->regshift;
p                  79 arch/x86/platform/ce4100/ce4100.c 	writel(value, p->membase + offset);
p                 814 arch/x86/platform/efi/efi.c 	void *p, *new_memmap = NULL;
p                 821 arch/x86/platform/efi/efi.c 	p = NULL;
p                 822 arch/x86/platform/efi/efi.c 	while ((p = efi_map_next_entry(p))) {
p                 823 arch/x86/platform/efi/efi.c 		md = p;
p                 511 arch/x86/platform/efi/quirks.c 	void *p, *tablep;
p                 534 arch/x86/platform/efi/quirks.c 	p = tablep = early_memremap(tables, nr_tables * sz);
p                 535 arch/x86/platform/efi/quirks.c 	if (!p) {
p                 544 arch/x86/platform/efi/quirks.c 		guid = ((efi_config_table_64_t *)p)->guid;
p                 547 arch/x86/platform/efi/quirks.c 			((efi_config_table_64_t *)p)->table = data->smbios;
p                 548 arch/x86/platform/efi/quirks.c 		p += sz;
p                 119 arch/x86/platform/geode/alix.c 	const char *p;
p                 131 arch/x86/platform/geode/alix.c 	for (p = bios_virt; p < scan_end; p++) {
p                 135 arch/x86/platform/geode/alix.c 		if (memcmp(p, alix_sig, alix_sig_len) != 0)
p                 138 arch/x86/platform/geode/alix.c 		memcpy(name, p, sizeof(name));
p                 150 arch/x86/platform/geode/alix.c 		tail = p + alix_sig_len;
p                  62 arch/x86/platform/olpc/olpc.c #define wait_on_ibf(p, d) __wait_on_ibf(__LINE__, (p), (d))
p                  82 arch/x86/platform/olpc/olpc.c #define wait_on_obf(p, d) __wait_on_obf(__LINE__, (p), (d))
p                 221 arch/x86/platform/olpc/olpc_dt.c 	char buf[64], *p;
p                 229 arch/x86/platform/olpc/olpc_dt.c 	for (p = buf; p < buf + plen; p += strlen(p) + 1) {
p                 230 arch/x86/platform/olpc/olpc_dt.c 		if (strcmp(p, compat) == 0)
p                  52 arch/x86/platform/olpc/olpc_ofw.c 	int ret, i, *p;
p                  63 arch/x86/platform/olpc/olpc_ofw.c 	p = &ofw_args[3];
p                  64 arch/x86/platform/olpc/olpc_ofw.c 	for (i = 0; i < nr_args; i++, p++)
p                  65 arch/x86/platform/olpc/olpc_ofw.c 		*p = (int)args[i];
p                  73 arch/x86/platform/olpc/olpc_ofw.c 		for (i = 0; i < nr_res; i++, p++)
p                  74 arch/x86/platform/olpc/olpc_ofw.c 			*((int *)res[i]) = *p;
p                1561 arch/x86/platform/uv/tlb_uv.c 	char *p;
p                1567 arch/x86/platform/uv/tlb_uv.c 	p = instr + strspn(instr, WHITESPACE);
p                1568 arch/x86/platform/uv/tlb_uv.c 	q = p;
p                1569 arch/x86/platform/uv/tlb_uv.c 	for (; *p; p = q + strspn(q, WHITESPACE)) {
p                1570 arch/x86/platform/uv/tlb_uv.c 		q = p + strcspn(p, WHITESPACE);
p                1572 arch/x86/platform/uv/tlb_uv.c 		if (q == p)
p                1580 arch/x86/platform/uv/tlb_uv.c 	p = instr + strspn(instr, WHITESPACE);
p                1581 arch/x86/platform/uv/tlb_uv.c 	q = p;
p                1582 arch/x86/platform/uv/tlb_uv.c 	for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
p                1583 arch/x86/platform/uv/tlb_uv.c 		q = p + strcspn(p, WHITESPACE);
p                1584 arch/x86/platform/uv/tlb_uv.c 		val = local_atoi(p);
p                 112 arch/x86/platform/uv/uv_nmi.c #define param_check_local64(name, p) __param_check(name, p, local64_t)
p                 188 arch/x86/platform/uv/uv_nmi.c 	char arg[ACTION_LEN], *p;
p                 193 arch/x86/platform/uv/uv_nmi.c 	p = strchr(arg, '\n');
p                 194 arch/x86/platform/uv/uv_nmi.c 	if (p)
p                 195 arch/x86/platform/uv/uv_nmi.c 		*p = '\0';
p                 218 arch/x86/platform/uv/uv_nmi.c #define param_check_action(name, p) __param_check(name, p, action_t)
p                  12 arch/x86/um/stub_segv.c stub_segv_handler(int sig, siginfo_t *info, void *p)
p                  14 arch/x86/um/stub_segv.c 	ucontext_t *uc = p;
p                 703 arch/x86/xen/enlighten_pv.c 	unsigned long p = (unsigned long)&dt[entrynum];
p                 717 arch/x86/xen/enlighten_pv.c 	if (p >= start && (p + 8) <= end) {
p                 197 arch/x86/xen/p2m.c static void __ref free_p2m_page(void *p)
p                 200 arch/x86/xen/p2m.c 		memblock_free((unsigned long)p, PAGE_SIZE);
p                 204 arch/x86/xen/p2m.c 	free_page((unsigned long)p);
p                 179 arch/x86/xen/platform-pci-unplug.c 	char *p, *q;
p                 182 arch/x86/xen/platform-pci-unplug.c 	for (p = arg; p; p = q) {
p                 183 arch/x86/xen/platform-pci-unplug.c 		q = strchr(p, ',');
p                 185 arch/x86/xen/platform-pci-unplug.c 			l = q - p;
p                 188 arch/x86/xen/platform-pci-unplug.c 			l = strlen(p);
p                 190 arch/x86/xen/platform-pci-unplug.c 		if (!strncmp(p, "all", l))
p                 192 arch/x86/xen/platform-pci-unplug.c 		else if (!strncmp(p, "ide-disks", l))
p                 194 arch/x86/xen/platform-pci-unplug.c 		else if (!strncmp(p, "aux-ide-disks", l))
p                 196 arch/x86/xen/platform-pci-unplug.c 		else if (!strncmp(p, "nics", l))
p                 198 arch/x86/xen/platform-pci-unplug.c 		else if (!strncmp(p, "unnecessary", l))
p                 200 arch/x86/xen/platform-pci-unplug.c 		else if (!strncmp(p, "never", l))
p                 204 arch/x86/xen/platform-pci-unplug.c 				 "in parameter 'xen_emul_unplug'\n", p);
p                  15 arch/xtensa/boot/lib/zmem.c         void *p = avail_ram;
p                  24 arch/xtensa/boot/lib/zmem.c         return p;
p                  44 arch/xtensa/include/asm/asm-uaccess.h 	GET_CURRENT(\ad,\sp)
p                  64 arch/xtensa/include/asm/asm-uaccess.h 	GET_CURRENT(\at,\sp)
p                  92 arch/xtensa/include/asm/asm-uaccess.h 	get_fs	\at, \sp
p                 150 arch/xtensa/include/asm/asm-uaccess.h 	kernel_ok  \at, \sp, .Laccess_ok_\@
p                 101 arch/xtensa/include/asm/bitops.h static inline void set_bit(unsigned int bit, volatile unsigned long *p)
p                 106 arch/xtensa/include/asm/bitops.h 	p += bit >> 5;
p                 115 arch/xtensa/include/asm/bitops.h 			: "a" (mask), "a" (p)
p                 119 arch/xtensa/include/asm/bitops.h static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
p                 124 arch/xtensa/include/asm/bitops.h 	p += bit >> 5;
p                 133 arch/xtensa/include/asm/bitops.h 			: "a" (~mask), "a" (p)
p                 137 arch/xtensa/include/asm/bitops.h static inline void change_bit(unsigned int bit, volatile unsigned long *p)
p                 142 arch/xtensa/include/asm/bitops.h 	p += bit >> 5;
p                 151 arch/xtensa/include/asm/bitops.h 			: "a" (mask), "a" (p)
p                 156 arch/xtensa/include/asm/bitops.h test_and_set_bit(unsigned int bit, volatile unsigned long *p)
p                 161 arch/xtensa/include/asm/bitops.h 	p += bit >> 5;
p                 170 arch/xtensa/include/asm/bitops.h 			: "a" (mask), "a" (p)
p                 177 arch/xtensa/include/asm/bitops.h test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
p                 182 arch/xtensa/include/asm/bitops.h 	p += bit >> 5;
p                 191 arch/xtensa/include/asm/bitops.h 			: "a" (~mask), "a" (p)
p                 198 arch/xtensa/include/asm/bitops.h test_and_change_bit(unsigned int bit, volatile unsigned long *p)
p                 203 arch/xtensa/include/asm/bitops.h 	p += bit >> 5;
p                 212 arch/xtensa/include/asm/bitops.h 			: "a" (mask), "a" (p)
p                 220 arch/xtensa/include/asm/bitops.h static inline void set_bit(unsigned int bit, volatile unsigned long *p)
p                 225 arch/xtensa/include/asm/bitops.h 	p += bit >> 5;
p                 234 arch/xtensa/include/asm/bitops.h 			: "a" (mask), "a" (p)
p                 238 arch/xtensa/include/asm/bitops.h static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
p                 243 arch/xtensa/include/asm/bitops.h 	p += bit >> 5;
p                 252 arch/xtensa/include/asm/bitops.h 			: "a" (~mask), "a" (p)
p                 256 arch/xtensa/include/asm/bitops.h static inline void change_bit(unsigned int bit, volatile unsigned long *p)
p                 261 arch/xtensa/include/asm/bitops.h 	p += bit >> 5;
p                 270 arch/xtensa/include/asm/bitops.h 			: "a" (mask), "a" (p)
p                 275 arch/xtensa/include/asm/bitops.h test_and_set_bit(unsigned int bit, volatile unsigned long *p)
p                 280 arch/xtensa/include/asm/bitops.h 	p += bit >> 5;
p                 289 arch/xtensa/include/asm/bitops.h 			: "a" (mask), "a" (p)
p                 296 arch/xtensa/include/asm/bitops.h test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
p                 301 arch/xtensa/include/asm/bitops.h 	p += bit >> 5;
p                 310 arch/xtensa/include/asm/bitops.h 			: "a" (~mask), "a" (p)
p                 317 arch/xtensa/include/asm/bitops.h test_and_change_bit(unsigned int bit, volatile unsigned long *p)
p                 322 arch/xtensa/include/asm/bitops.h 	p += bit >> 5;
p                 331 arch/xtensa/include/asm/bitops.h 			: "a" (mask), "a" (p)
p                  65 arch/xtensa/include/asm/cacheflush.h # define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
p                  66 arch/xtensa/include/asm/cacheflush.h # define __flush_invalidate_dcache_range(p,s)	__invalidate_dcache_range(p,s)
p                  24 arch/xtensa/include/asm/cmpxchg.h __cmpxchg_u32(volatile int *p, int old, int new)
p                  38 arch/xtensa/include/asm/cmpxchg.h 			: "a" (new), "a" (p), "a" (old)
p                  48 arch/xtensa/include/asm/cmpxchg.h 			: "a" (p), "a" (old)
p                  63 arch/xtensa/include/asm/cmpxchg.h 			: "a" (p), "a" (old), "r" (new)
p                 177 arch/xtensa/include/asm/cmpxchg.h 	volatile u32 *p = ptr - off;
p                 188 arch/xtensa/include/asm/cmpxchg.h 		oldv = READ_ONCE(*p);
p                 191 arch/xtensa/include/asm/cmpxchg.h 	} while (__cmpxchg_u32(p, oldv, newv) != oldv);
p                  78 arch/xtensa/include/asm/platform.h bool platform_vaddr_cached(const void *p);
p                  83 arch/xtensa/include/asm/platform.h bool platform_vaddr_uncached(const void *p);
p                  88 arch/xtensa/include/asm/platform.h void *platform_vaddr_to_uncached(void *p);
p                  93 arch/xtensa/include/asm/platform.h void *platform_vaddr_to_cached(void *p);
p                 217 arch/xtensa/include/asm/processor.h extern unsigned long get_wchan(struct task_struct *p);
p                  30 arch/xtensa/include/asm/smp.h void show_ipi_list(struct seq_file *p, int prec);
p                  54 arch/xtensa/kernel/irq.c int arch_show_interrupts(struct seq_file *p, int prec)
p                  58 arch/xtensa/kernel/irq.c 	show_ipi_list(p, prec);
p                  61 arch/xtensa/kernel/irq.c 	seq_printf(p, "%*s:", prec, "NMI");
p                  63 arch/xtensa/kernel/irq.c 		seq_printf(p, " %10lu", per_cpu(nmi_count, cpu));
p                  64 arch/xtensa/kernel/irq.c 	seq_puts(p, "   Non-maskable interrupts\n");
p                  85 arch/xtensa/kernel/pci-dma.c bool platform_vaddr_cached(const void *p)
p                  87 arch/xtensa/kernel/pci-dma.c 	unsigned long addr = (unsigned long)p;
p                  93 arch/xtensa/kernel/pci-dma.c bool platform_vaddr_uncached(const void *p)
p                  95 arch/xtensa/kernel/pci-dma.c 	unsigned long addr = (unsigned long)p;
p                 101 arch/xtensa/kernel/pci-dma.c void *platform_vaddr_to_uncached(void *p)
p                 103 arch/xtensa/kernel/pci-dma.c 	return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
p                 106 arch/xtensa/kernel/pci-dma.c void *platform_vaddr_to_cached(void *p)
p                 108 arch/xtensa/kernel/pci-dma.c 	return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
p                 111 arch/xtensa/kernel/pci-dma.c bool __attribute__((weak)) platform_vaddr_cached(const void *p)
p                 117 arch/xtensa/kernel/pci-dma.c bool __attribute__((weak)) platform_vaddr_uncached(const void *p)
p                 123 arch/xtensa/kernel/pci-dma.c void __attribute__((weak)) *platform_vaddr_to_uncached(void *p)
p                 126 arch/xtensa/kernel/pci-dma.c 	return p;
p                 129 arch/xtensa/kernel/pci-dma.c void __attribute__((weak)) *platform_vaddr_to_cached(void *p)
p                 132 arch/xtensa/kernel/pci-dma.c 	return p;
p                 168 arch/xtensa/kernel/pci-dma.c 		void *p;
p                 170 arch/xtensa/kernel/pci-dma.c 		p = dma_common_contiguous_remap(page, size,
p                 173 arch/xtensa/kernel/pci-dma.c 		if (!p) {
p                 177 arch/xtensa/kernel/pci-dma.c 		return p;
p                 206 arch/xtensa/kernel/process.c 		unsigned long thread_fn_arg, struct task_struct *p,
p                 209 arch/xtensa/kernel/process.c 	struct pt_regs *childregs = task_pt_regs(p);
p                 219 arch/xtensa/kernel/process.c 	p->thread.sp = (unsigned long)childregs;
p                 221 arch/xtensa/kernel/process.c 	if (!(p->flags & PF_KTHREAD)) {
p                 226 arch/xtensa/kernel/process.c 		p->thread.ra = MAKE_RA_FOR_CALL(
p                 271 arch/xtensa/kernel/process.c 		p->thread.ra = MAKE_RA_FOR_CALL(
p                 286 arch/xtensa/kernel/process.c 	ti = task_thread_info(p);
p                 290 arch/xtensa/kernel/process.c 	clear_ptrace_hw_breakpoint(p);
p                 300 arch/xtensa/kernel/process.c unsigned long get_wchan(struct task_struct *p)
p                 303 arch/xtensa/kernel/process.c 	unsigned long stack_page = (unsigned long) task_stack_page(p);
p                 306 arch/xtensa/kernel/process.c 	if (!p || p == current || p->state == TASK_RUNNING)
p                 309 arch/xtensa/kernel/process.c 	sp = p->thread.sp;
p                 310 arch/xtensa/kernel/process.c 	pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);
p                 171 arch/xtensa/kernel/smp.c static void mx_cpu_start(void *p)
p                 173 arch/xtensa/kernel/smp.c 	unsigned cpu = (unsigned)p;
p                 181 arch/xtensa/kernel/smp.c static void mx_cpu_stop(void *p)
p                 183 arch/xtensa/kernel/smp.c 	unsigned cpu = (unsigned)p;
p                 443 arch/xtensa/kernel/smp.c void show_ipi_list(struct seq_file *p, int prec)
p                 449 arch/xtensa/kernel/smp.c 		seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
p                 451 arch/xtensa/kernel/smp.c 			seq_printf(p, " %10lu",
p                 453 arch/xtensa/kernel/smp.c 		seq_printf(p, "   %s\n", ipi_text[i].long_text);
p                  90 arch/xtensa/kernel/xtensa_ksyms.c unsigned long __sync_fetch_and_and_4(unsigned long *p, unsigned long v)
p                  96 arch/xtensa/kernel/xtensa_ksyms.c unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v)
p                 206 arch/xtensa/mm/init.c static void __init parse_memmap_one(char *p)
p                 211 arch/xtensa/mm/init.c 	if (!p)
p                 214 arch/xtensa/mm/init.c 	oldp = p;
p                 215 arch/xtensa/mm/init.c 	mem_size = memparse(p, &p);
p                 216 arch/xtensa/mm/init.c 	if (p == oldp)
p                 219 arch/xtensa/mm/init.c 	switch (*p) {
p                 221 arch/xtensa/mm/init.c 		start_at = memparse(p + 1, &p);
p                 226 arch/xtensa/mm/init.c 		start_at = memparse(p + 1, &p);
p                 235 arch/xtensa/mm/init.c 		pr_warn("Unrecognized memmap syntax: %s\n", p);
p                 240 arch/xtensa/mm/tlb.c 				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
p                 242 arch/xtensa/mm/tlb.c 						page_count(p),
p                 243 arch/xtensa/mm/tlb.c 						page_mapcount(p));
p                 244 arch/xtensa/mm/tlb.c 				if (!page_count(p))
p                 246 arch/xtensa/mm/tlb.c 				else if (page_mapcount(p))
p                 292 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h 	.macro xchal_cp0_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 293 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h 	.macro xchal_cp0_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 294 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h 	.macro xchal_cp2_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 295 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h 	.macro xchal_cp2_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 296 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h 	.macro xchal_cp3_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 297 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h 	.macro xchal_cp3_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 298 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h 	.macro xchal_cp4_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 299 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h 	.macro xchal_cp4_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 300 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h 	.macro xchal_cp5_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 301 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h 	.macro xchal_cp5_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 302 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h 	.macro xchal_cp6_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 303 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h 	.macro xchal_cp6_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 304 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h 	.macro xchal_cp7_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 305 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h 	.macro xchal_cp7_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 313 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h 	.macro xchal_cp0_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 314 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h 	.macro xchal_cp0_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 315 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h 	.macro xchal_cp2_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 316 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h 	.macro xchal_cp2_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 317 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h 	.macro xchal_cp3_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 318 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h 	.macro xchal_cp3_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 319 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h 	.macro xchal_cp4_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 320 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h 	.macro xchal_cp4_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 321 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h 	.macro xchal_cp5_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 322 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h 	.macro xchal_cp5_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 323 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h 	.macro xchal_cp6_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 324 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h 	.macro xchal_cp6_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 325 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h 	.macro xchal_cp7_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 326 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h 	.macro xchal_cp7_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 167 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h 	.macro xchal_cp0_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 168 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h 	.macro xchal_cp0_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 169 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h 	.macro xchal_cp2_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 170 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h 	.macro xchal_cp2_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 171 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h 	.macro xchal_cp3_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 172 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h 	.macro xchal_cp3_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 173 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h 	.macro xchal_cp4_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 174 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h 	.macro xchal_cp4_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 175 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h 	.macro xchal_cp5_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 176 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h 	.macro xchal_cp5_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 177 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h 	.macro xchal_cp6_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 178 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h 	.macro xchal_cp6_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 179 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h 	.macro xchal_cp7_store	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                 180 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h 	.macro xchal_cp7_load	p a b c d continue=0 ofs=-1 select=-1 ; .endm
p                  58 block/badblocks.c 	u64 *p = bb->page;
p                  88 block/badblocks.c 		sector_t a = BB_OFFSET(p[mid]);
p                 105 block/badblocks.c 		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
p                 106 block/badblocks.c 			if (BB_OFFSET(p[lo]) < target) {
p                 110 block/badblocks.c 				if (rv != -1 && BB_ACK(p[lo]))
p                 114 block/badblocks.c 				*first_bad = BB_OFFSET(p[lo]);
p                 115 block/badblocks.c 				*bad_sectors = BB_LEN(p[lo]);
p                 130 block/badblocks.c 	u64 *p = bb->page;
p                 138 block/badblocks.c 		if (!BB_ACK(p[i])) {
p                 166 block/badblocks.c 	u64 *p;
p                 187 block/badblocks.c 	p = bb->page;
p                 193 block/badblocks.c 		sector_t a = BB_OFFSET(p[mid]);
p                 200 block/badblocks.c 	if (hi > lo && BB_OFFSET(p[lo]) > s)
p                 207 block/badblocks.c 		sector_t a = BB_OFFSET(p[lo]);
p                 208 block/badblocks.c 		sector_t e = a + BB_LEN(p[lo]);
p                 209 block/badblocks.c 		int ack = BB_ACK(p[lo]);
p                 222 block/badblocks.c 				p[lo] = BB_MAKE(a, e-a, ack);
p                 228 block/badblocks.c 				if (BB_LEN(p[lo]) != BB_MAX_LEN)
p                 229 block/badblocks.c 					p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
p                 239 block/badblocks.c 		sector_t a = BB_OFFSET(p[hi]);
p                 240 block/badblocks.c 		sector_t e = a + BB_LEN(p[hi]);
p                 241 block/badblocks.c 		int ack = BB_ACK(p[hi]);
p                 254 block/badblocks.c 				p[hi] = BB_MAKE(a, e-a, ack);
p                 257 block/badblocks.c 				p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
p                 268 block/badblocks.c 		sector_t a = BB_OFFSET(p[hi]);
p                 269 block/badblocks.c 		int lolen = BB_LEN(p[lo]);
p                 270 block/badblocks.c 		int hilen = BB_LEN(p[hi]);
p                 275 block/badblocks.c 			int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);
p                 277 block/badblocks.c 			p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
p                 278 block/badblocks.c 			memmove(p + hi, p + hi + 1,
p                 294 block/badblocks.c 			memmove(p + hi + 1, p + hi,
p                 300 block/badblocks.c 			p[hi] = BB_MAKE(s, this_sectors, acknowledged);
p                 333 block/badblocks.c 	u64 *p;
p                 353 block/badblocks.c 	p = bb->page;
p                 359 block/badblocks.c 		sector_t a = BB_OFFSET(p[mid]);
p                 371 block/badblocks.c 		if ((BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) &&
p                 372 block/badblocks.c 		    (BB_OFFSET(p[lo]) < target)) {
p                 374 block/badblocks.c 			int ack = BB_ACK(p[lo]);
p                 375 block/badblocks.c 			sector_t a = BB_OFFSET(p[lo]);
p                 376 block/badblocks.c 			sector_t end = a + BB_LEN(p[lo]);
p                 384 block/badblocks.c 				memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
p                 386 block/badblocks.c 				p[lo] = BB_MAKE(a, s-a, ack);
p                 389 block/badblocks.c 			p[lo] = BB_MAKE(target, end - target, ack);
p                 395 block/badblocks.c 		       (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) &&
p                 396 block/badblocks.c 		       (BB_OFFSET(p[lo]) < target)) {
p                 398 block/badblocks.c 			if (BB_OFFSET(p[lo]) < s) {
p                 400 block/badblocks.c 				int ack = BB_ACK(p[lo]);
p                 401 block/badblocks.c 				sector_t start = BB_OFFSET(p[lo]);
p                 403 block/badblocks.c 				p[lo] = BB_MAKE(start, s - start, ack);
p                 413 block/badblocks.c 			memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
p                 441 block/badblocks.c 		u64 *p = bb->page;
p                 445 block/badblocks.c 			if (!BB_ACK(p[i])) {
p                 446 block/badblocks.c 				sector_t start = BB_OFFSET(p[i]);
p                 447 block/badblocks.c 				int len = BB_LEN(p[i]);
p                 449 block/badblocks.c 				p[i] = BB_MAKE(start, len, 1);
p                 471 block/badblocks.c 	u64 *p = bb->page;
p                 484 block/badblocks.c 		sector_t s = BB_OFFSET(p[i]);
p                 485 block/badblocks.c 		unsigned int length = BB_LEN(p[i]);
p                 486 block/badblocks.c 		int ack = BB_ACK(p[i]);
p                 555 block/bfq-iosched.c 	struct rb_node **p, *parent;
p                 559 block/bfq-iosched.c 	p = &root->rb_node;
p                 560 block/bfq-iosched.c 	while (*p) {
p                 563 block/bfq-iosched.c 		parent = *p;
p                 571 block/bfq-iosched.c 			n = &(*p)->rb_right;
p                 573 block/bfq-iosched.c 			n = &(*p)->rb_left;
p                 576 block/bfq-iosched.c 		p = n;
p                 582 block/bfq-iosched.c 		*rb_link = p;
p                 609 block/bfq-iosched.c 	struct rb_node **p, *parent;
p                 636 block/bfq-iosched.c 			blk_rq_pos(bfqq->next_rq), &parent, &p);
p                 638 block/bfq-iosched.c 		rb_link_node(&bfqq->pos_node, parent, p);
p                 245 block/bio.c    	void *p;
p                 255 block/bio.c    		p = bio;
p                 256 block/bio.c    		p -= bs->front_pad;
p                 258 block/bio.c    		mempool_free(p, &bs->bio_pool);
p                 437 block/bio.c    	void *p;
p                 443 block/bio.c    		p = kmalloc(sizeof(struct bio) +
p                 480 block/bio.c    		p = mempool_alloc(&bs->bio_pool, gfp_mask);
p                 481 block/bio.c    		if (!p && gfp_mask != saved_gfp) {
p                 484 block/bio.c    			p = mempool_alloc(&bs->bio_pool, gfp_mask);
p                 491 block/bio.c    	if (unlikely(!p))
p                 494 block/bio.c    	bio = p + front_pad;
p                 521 block/bio.c    	mempool_free(p, &bs->bio_pool);
p                1576 block/bio.c    	char *p = bio->bi_private;
p                1581 block/bio.c    		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
p                1582 block/bio.c    		p += bvec->bv_len;
p                1606 block/bio.c    	void *p = data;
p                1632 block/bio.c    			memcpy(page_address(page), p, bytes);
p                1638 block/bio.c    		p += bytes;
p                 839 block/blk-core.c 	struct hd_struct *p;
p                 843 block/blk-core.c 	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
p                 844 block/blk-core.c 	if (unlikely(!p))
p                 846 block/blk-core.c 	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
p                 848 block/blk-core.c 	if (unlikely(bio_check_ro(bio, p)))
p                 856 block/blk-core.c 		if (bio_check_eod(bio, part_nr_sects_read(p)))
p                 858 block/blk-core.c 		bio->bi_iter.bi_sector += p->start_sect;
p                 859 block/blk-core.c 		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
p                 860 block/blk-core.c 				      bio->bi_iter.bi_sector - p->start_sect);
p                 272 block/blk-integrity.c 	char *p = (char *) page;
p                 273 block/blk-integrity.c 	unsigned long val = simple_strtoul(p, &p, 10);
p                 291 block/blk-integrity.c 	char *p = (char *) page;
p                 292 block/blk-integrity.c 	unsigned long val = simple_strtoul(p, &p, 10);
p                 722 block/blk-iocost.c 	const struct ioc_params *p = &autop[idx];
p                 747 block/blk-iocost.c 	if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
p                 756 block/blk-iocost.c 	if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
p                 818 block/blk-iocost.c 	const struct ioc_params *p;
p                 824 block/blk-iocost.c 	p = &autop[idx];
p                 837 block/blk-iocost.c 		memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
p                 839 block/blk-iocost.c 		memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
p                2214 block/blk-iocost.c 	char *p;
p                2235 block/blk-iocost.c 	while ((p = strsep(&input, " \t\n"))) {
p                2241 block/blk-iocost.c 		if (!*p)
p                2244 block/blk-iocost.c 		switch (match_token(p, qos_ctrl_tokens, args)) {
p                2260 block/blk-iocost.c 		tok = match_token(p, qos_tokens, args);
p                2380 block/blk-iocost.c 	char *p;
p                2400 block/blk-iocost.c 	while ((p = strsep(&input, " \t\n"))) {
p                2406 block/blk-iocost.c 		if (!*p)
p                2409 block/blk-iocost.c 		switch (match_token(p, cost_ctrl_tokens, args)) {
p                2426 block/blk-iocost.c 		tok = match_token(p, i_lcoef_tokens, args);
p                 794 block/blk-iolatency.c 	char *p, *tok;
p                 805 block/blk-iolatency.c 	p = ctx.body;
p                 808 block/blk-iolatency.c 	while ((tok = strsep(&p, " "))) {
p                2182 block/blk-mq.c 		void *p;
p                2205 block/blk-mq.c 		p = page_address(page);
p                2210 block/blk-mq.c 		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
p                2215 block/blk-mq.c 			struct request *rq = p;
p                2223 block/blk-mq.c 			p += rq_size;
p                1612 block/blk-throttle.c 		char *p;
p                1623 block/blk-throttle.c 		p = tok;
p                1624 block/blk-throttle.c 		strsep(&p, "=");
p                1625 block/blk-throttle.c 		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
p                  58 block/blk-timeout.c 		char *p = (char *) buf;
p                  60 block/blk-timeout.c 		val = simple_strtoul(p, &p, 10);
p                  84 block/compat_ioctl.c 	unsigned long __user *p;
p                  87 block/compat_ioctl.c 	p = compat_alloc_user_space(sizeof(unsigned long));
p                  89 block/compat_ioctl.c 				cmd, (unsigned long)p);
p                  93 block/compat_ioctl.c 		if (get_user(v, p) || put_user(v, uvp))
p                 257 block/elevator.c 	struct rb_node **p = &root->rb_node;
p                 261 block/elevator.c 	while (*p) {
p                 262 block/elevator.c 		parent = *p;
p                 266 block/elevator.c 			p = &(*p)->rb_left;
p                 268 block/elevator.c 			p = &(*p)->rb_right;
p                 271 block/elevator.c 	rb_link_node(&rq->rb_node, parent, p);
p                 356 block/genhd.c  	struct blk_major_name **n, *p;
p                 386 block/genhd.c  	p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
p                 387 block/genhd.c  	if (p == NULL) {
p                 392 block/genhd.c  	p->major = major;
p                 393 block/genhd.c  	strlcpy(p->name, name, sizeof(p->name));
p                 394 block/genhd.c  	p->next = NULL;
p                 402 block/genhd.c  		*n = p;
p                 409 block/genhd.c  		kfree(p);
p                 421 block/genhd.c  	struct blk_major_name *p = NULL;
p                 431 block/genhd.c  		p = *n;
p                 432 block/genhd.c  		*n = p->next;
p                 435 block/genhd.c  	kfree(p);
p                 582 block/genhd.c  	struct gendisk *p = data;
p                 584 block/genhd.c  	return &disk_to_dev(p)->kobj;
p                 589 block/genhd.c  	struct gendisk *p = data;
p                 591 block/genhd.c  	if (!get_disk_and_module(p))
p                1043 block/genhd.c  	void *p;
p                1045 block/genhd.c  	p = disk_seqf_start(seqf, pos);
p                1046 block/genhd.c  	if (!IS_ERR_OR_NULL(p) && !*pos)
p                1048 block/genhd.c  	return p;
p                  20 block/ioctl.c  	struct blkpg_partition p;
p                  29 block/ioctl.c  	if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
p                  34 block/ioctl.c  	partno = p.pno;
p                  39 block/ioctl.c  			start = p.start >> 9;
p                  40 block/ioctl.c  			length = p.length >> 9;
p                  50 block/ioctl.c  			if (p.start & (bdev_logical_block_size(bdev) - 1))
p                 101 block/ioctl.c  			start = p.start >> 9;
p                 103 block/ioctl.c  			length = p.length >> 9;
p                 147 block/ioctl.c  			i_size_write(bdevp->bd_inode, p.length);
p                 373 block/ioctl.c  	struct pr_preempt p;
p                 379 block/ioctl.c  	if (copy_from_user(&p, arg, sizeof(p)))
p                 382 block/ioctl.c  	if (p.flags)
p                 384 block/ioctl.c  	return ops->pr_preempt(bdev, p.old_key, p.new_key, p.type, abort);
p                  96 block/ioprio.c 	struct task_struct *p, *g;
p                 111 block/ioprio.c 				p = current;
p                 113 block/ioprio.c 				p = find_task_by_vpid(who);
p                 114 block/ioprio.c 			if (p)
p                 115 block/ioprio.c 				ret = set_task_ioprio(p, ioprio);
p                 122 block/ioprio.c 			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
p                 123 block/ioprio.c 				ret = set_task_ioprio(p, ioprio);
p                 126 block/ioprio.c 			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
p                 140 block/ioprio.c 			for_each_process_thread(g, p) {
p                 141 block/ioprio.c 				if (!uid_eq(task_uid(p), uid) ||
p                 142 block/ioprio.c 				    !task_pid_vnr(p))
p                 144 block/ioprio.c 				ret = set_task_ioprio(p, ioprio);
p                 160 block/ioprio.c static int get_task_ioprio(struct task_struct *p)
p                 164 block/ioprio.c 	ret = security_task_getioprio(p);
p                 168 block/ioprio.c 	task_lock(p);
p                 169 block/ioprio.c 	if (p->io_context)
p                 170 block/ioprio.c 		ret = p->io_context->ioprio;
p                 171 block/ioprio.c 	task_unlock(p);
p                 188 block/ioprio.c 	struct task_struct *g, *p;
p                 199 block/ioprio.c 				p = current;
p                 201 block/ioprio.c 				p = find_task_by_vpid(who);
p                 202 block/ioprio.c 			if (p)
p                 203 block/ioprio.c 				ret = get_task_ioprio(p);
p                 210 block/ioprio.c 			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
p                 211 block/ioprio.c 				tmpio = get_task_ioprio(p);
p                 218 block/ioprio.c 			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
p                 230 block/ioprio.c 			for_each_process_thread(g, p) {
p                 231 block/ioprio.c 				if (!uid_eq(task_uid(p), user->uid) ||
p                 232 block/ioprio.c 				    !task_pid_vnr(p))
p                 234 block/ioprio.c 				tmpio = get_task_ioprio(p);
p                 599 block/mq-deadline.c 	char *p = (char *) page;
p                 601 block/mq-deadline.c 	*var = simple_strtol(p, &p, 10);
p                  77 block/partition-generic.c 	struct hd_struct *p = dev_to_part(dev);
p                  79 block/partition-generic.c 	return sprintf(buf, "%d\n", p->partno);
p                  85 block/partition-generic.c 	struct hd_struct *p = dev_to_part(dev);
p                  87 block/partition-generic.c 	return sprintf(buf, "%llu\n",(unsigned long long)p->start_sect);
p                  93 block/partition-generic.c 	struct hd_struct *p = dev_to_part(dev);
p                  94 block/partition-generic.c 	return sprintf(buf, "%llu\n",(unsigned long long)part_nr_sects_read(p));
p                 100 block/partition-generic.c 	struct hd_struct *p = dev_to_part(dev);
p                 101 block/partition-generic.c 	return sprintf(buf, "%d\n", p->policy ? 1 : 0);
p                 107 block/partition-generic.c 	struct hd_struct *p = dev_to_part(dev);
p                 108 block/partition-generic.c 	return sprintf(buf, "%llu\n", (unsigned long long)p->alignment_offset);
p                 114 block/partition-generic.c 	struct hd_struct *p = dev_to_part(dev);
p                 115 block/partition-generic.c 	return sprintf(buf, "%u\n", p->discard_alignment);
p                 121 block/partition-generic.c 	struct hd_struct *p = dev_to_part(dev);
p                 122 block/partition-generic.c 	struct request_queue *q = part_to_disk(p)->queue;
p                 125 block/partition-generic.c 	inflight = part_in_flight(q, p);
p                 132 block/partition-generic.c 		part_stat_read(p, ios[STAT_READ]),
p                 133 block/partition-generic.c 		part_stat_read(p, merges[STAT_READ]),
p                 134 block/partition-generic.c 		(unsigned long long)part_stat_read(p, sectors[STAT_READ]),
p                 135 block/partition-generic.c 		(unsigned int)part_stat_read_msecs(p, STAT_READ),
p                 136 block/partition-generic.c 		part_stat_read(p, ios[STAT_WRITE]),
p                 137 block/partition-generic.c 		part_stat_read(p, merges[STAT_WRITE]),
p                 138 block/partition-generic.c 		(unsigned long long)part_stat_read(p, sectors[STAT_WRITE]),
p                 139 block/partition-generic.c 		(unsigned int)part_stat_read_msecs(p, STAT_WRITE),
p                 141 block/partition-generic.c 		jiffies_to_msecs(part_stat_read(p, io_ticks)),
p                 142 block/partition-generic.c 		jiffies_to_msecs(part_stat_read(p, time_in_queue)),
p                 143 block/partition-generic.c 		part_stat_read(p, ios[STAT_DISCARD]),
p                 144 block/partition-generic.c 		part_stat_read(p, merges[STAT_DISCARD]),
p                 145 block/partition-generic.c 		(unsigned long long)part_stat_read(p, sectors[STAT_DISCARD]),
p                 146 block/partition-generic.c 		(unsigned int)part_stat_read_msecs(p, STAT_DISCARD));
p                 152 block/partition-generic.c 	struct hd_struct *p = dev_to_part(dev);
p                 153 block/partition-generic.c 	struct request_queue *q = part_to_disk(p)->queue;
p                 156 block/partition-generic.c 	part_in_flight_rw(q, p, inflight);
p                 164 block/partition-generic.c 	struct hd_struct *p = dev_to_part(dev);
p                 166 block/partition-generic.c 	return sprintf(buf, "%d\n", p->make_it_fail);
p                 173 block/partition-generic.c 	struct hd_struct *p = dev_to_part(dev);
p                 177 block/partition-generic.c 		p->make_it_fail = (i == 0) ? 0 : 1;
p                 225 block/partition-generic.c 	struct hd_struct *p = dev_to_part(dev);
p                 227 block/partition-generic.c 	hd_free_part(p);
p                 228 block/partition-generic.c 	kfree(p);
p                 313 block/partition-generic.c 	struct hd_struct *p;
p                 329 block/partition-generic.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 330 block/partition-generic.c 	if (!p)
p                 333 block/partition-generic.c 	if (!init_part_stats(p)) {
p                 338 block/partition-generic.c 	seqcount_init(&p->nr_sects_seq);
p                 339 block/partition-generic.c 	pdev = part_to_dev(p);
p                 341 block/partition-generic.c 	p->start_sect = start;
p                 342 block/partition-generic.c 	p->alignment_offset =
p                 344 block/partition-generic.c 	p->discard_alignment =
p                 346 block/partition-generic.c 	p->nr_sects = len;
p                 347 block/partition-generic.c 	p->partno = partno;
p                 348 block/partition-generic.c 	p->policy = get_disk_ro(disk);
p                 357 block/partition-generic.c 		p->info = pinfo;
p                 371 block/partition-generic.c 	err = blk_alloc_devt(p, &devt);
p                 383 block/partition-generic.c 	p->holder_dir = kobject_create_and_add("holders", &pdev->kobj);
p                 384 block/partition-generic.c 	if (!p->holder_dir)
p                 394 block/partition-generic.c 	err = hd_ref_init(p);
p                 402 block/partition-generic.c 	rcu_assign_pointer(ptbl->part[partno], p);
p                 407 block/partition-generic.c 	return p;
p                 410 block/partition-generic.c 	free_part_info(p);
p                 412 block/partition-generic.c 	free_part_stats(p);
p                 414 block/partition-generic.c 	kfree(p);
p                 419 block/partition-generic.c 	kobject_put(p->holder_dir);
p                 516 block/partition-generic.c 	int p, highest, res;
p                 567 block/partition-generic.c 	for (p = 1, highest = 0; p < state->limit; p++)
p                 568 block/partition-generic.c 		if (state->parts[p].size)
p                 569 block/partition-generic.c 			highest = p;
p                 574 block/partition-generic.c 	for (p = 1; p < state->limit; p++) {
p                 577 block/partition-generic.c 		size = state->parts[p].size;
p                 581 block/partition-generic.c 		from = state->parts[p].from;
p                 585 block/partition-generic.c 			       disk->disk_name, p, (unsigned long long) from);
p                 594 block/partition-generic.c 			       disk->disk_name, p, (unsigned long long) size);
p                 620 block/partition-generic.c 			       disk->disk_name, p, (unsigned long long) from,
p                 625 block/partition-generic.c 		part = add_partition(disk, p, from, size,
p                 626 block/partition-generic.c 				     state->parts[p].flags,
p                 627 block/partition-generic.c 				     &state->parts[p].info);
p                 630 block/partition-generic.c 			       disk->disk_name, p, -PTR_ERR(part));
p                 634 block/partition-generic.c 		if (state->parts[p].flags & ADDPART_FLAG_RAID)
p                 662 block/partition-generic.c unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
p                 671 block/partition-generic.c 		p->v = page;
p                 676 block/partition-generic.c 	p->v = NULL;
p                 354 block/partitions/acorn.c 	const struct ics_part *p;
p                 372 block/partitions/acorn.c 	for (slot = 1, p = (const struct ics_part *)data; p->size; p++) {
p                 373 block/partitions/acorn.c 		u32 start = le32_to_cpu(p->start);
p                 374 block/partitions/acorn.c 		s32 size = le32_to_cpu(p->size); /* yes, it's signed. */
p                 451 block/partitions/acorn.c 	const struct ptec_part *p;
p                 466 block/partitions/acorn.c 	for (i = 0, p = (const struct ptec_part *)data; i < 12; i++, p++) {
p                 467 block/partitions/acorn.c 		u32 start = le32_to_cpu(p->start);
p                 468 block/partitions/acorn.c 		u32 size = le32_to_cpu(p->size);
p                 513 block/partitions/acorn.c 	struct eesox_part *p;
p                 529 block/partitions/acorn.c 	for (i = 0, p = (struct eesox_part *)buffer; i < 8; i++, p++) {
p                 532 block/partitions/acorn.c 		if (memcmp(p->magic, "Eesox", 6))
p                 535 block/partitions/acorn.c 		next = le32_to_cpu(p->start);
p                 133 block/partitions/aix.c 	struct pvd *p;
p                 135 block/partitions/aix.c 	p = kmalloc(count, GFP_KERNEL);
p                 136 block/partitions/aix.c 	if (!p)
p                 139 block/partitions/aix.c 	if (read_lba(state, lba, (u8 *) p, count) < count) {
p                 140 block/partitions/aix.c 		kfree(p);
p                 143 block/partitions/aix.c 	return p;
p                 158 block/partitions/aix.c 	struct lvname *p;
p                 160 block/partitions/aix.c 	p = kmalloc(count, GFP_KERNEL);
p                 161 block/partitions/aix.c 	if (!p)
p                 164 block/partitions/aix.c 	if (read_lba(state, lba, (u8 *) p, count) < count) {
p                 165 block/partitions/aix.c 		kfree(p);
p                 168 block/partitions/aix.c 	return p;
p                 191 block/partitions/aix.c 		struct lvm_rec *p = (struct lvm_rec *)d;
p                 192 block/partitions/aix.c 		u16 lvm_version = be16_to_cpu(p->version);
p                 196 block/partitions/aix.c 			int pp_size_log2 = be16_to_cpu(p->pp_size);
p                 203 block/partitions/aix.c 			vgda_len = be32_to_cpu(p->vgda_len);
p                 204 block/partitions/aix.c 			vgda_sector = be32_to_cpu(p->vgda_psn[0]);
p                 214 block/partitions/aix.c 		struct vgda *p = (struct vgda *)d;
p                 216 block/partitions/aix.c 		numlvs = be16_to_cpu(p->numlvs);
p                 223 block/partitions/aix.c 		struct lvd *p = (struct lvd *)d;
p                 231 block/partitions/aix.c 				lvip[i].pps_per_lv = be16_to_cpu(p[i].num_lps);
p                 249 block/partitions/aix.c 			struct ppe *p = pvd->ppe + i;
p                 252 block/partitions/aix.c 			lp_ix = be16_to_cpu(p->lp_ix);
p                 257 block/partitions/aix.c 			lv_ix = be16_to_cpu(p->lv_ix) - 1;
p                  32 block/partitions/check.h 				     sector_t n, Sector *p)
p                  38 block/partitions/check.h 	return read_dev_sector(state->bdev, n, p);
p                  42 block/partitions/check.h put_partition(struct parsed_partitions *p, int n, sector_t from, sector_t size)
p                  44 block/partitions/check.h 	if (n < p->limit) {
p                  47 block/partitions/check.h 		p->parts[n].from = from;
p                  48 block/partitions/check.h 		p->parts[n].size = size;
p                  49 block/partitions/check.h 		snprintf(tmp, sizeof(tmp), " %s%d", p->name, n);
p                  50 block/partitions/check.h 		strlcat(p->pp_buf, tmp, PAGE_SIZE);
p                  32 block/partitions/karma.c 	struct d_partition *p;
p                  44 block/partitions/karma.c 	p = label->d_partitions;
p                  45 block/partitions/karma.c 	for (i = 0 ; i < 2; i++, p++) {
p                  49 block/partitions/karma.c 		if (p->p_fstype == 0x4d && le32_to_cpu(p->p_size)) {
p                  50 block/partitions/karma.c 			put_partition(state, slot, le32_to_cpu(p->p_offset),
p                  51 block/partitions/karma.c 				le32_to_cpu(p->p_size));
p                 496 block/partitions/ldm.c 	struct partition *p;
p                 511 block/partitions/ldm.c 	p = (struct partition*)(data + 0x01BE);
p                 512 block/partitions/ldm.c 	for (i = 0; i < 4; i++, p++)
p                 513 block/partitions/ldm.c 		if (SYS_IND (p) == LDM_PARTITION) {
p                  88 block/partitions/ldm.h #define SYS_IND(p)		(get_unaligned(&(p)->sys_ind))
p                  36 block/partitions/msdos.c #define SYS_IND(p)	get_unaligned(&p->sys_ind)
p                  38 block/partitions/msdos.c static inline sector_t nr_sects(struct partition *p)
p                  40 block/partitions/msdos.c 	return (sector_t)get_unaligned_le32(&p->nr_sects);
p                  43 block/partitions/msdos.c static inline sector_t start_sect(struct partition *p)
p                  45 block/partitions/msdos.c 	return (sector_t)get_unaligned_le32(&p->start_sect);
p                  48 block/partitions/msdos.c static inline int is_extended_partition(struct partition *p)
p                  50 block/partitions/msdos.c 	return (SYS_IND(p) == DOS_EXTENDED_PARTITION ||
p                  51 block/partitions/msdos.c 		SYS_IND(p) == WIN98_EXTENDED_PARTITION ||
p                  52 block/partitions/msdos.c 		SYS_IND(p) == LINUX_EXTENDED_PARTITION);
p                  59 block/partitions/msdos.c msdos_magic_present(unsigned char *p)
p                  61 block/partitions/msdos.c 	return (p[0] == MSDOS_LABEL_MAGIC1 && p[1] == MSDOS_LABEL_MAGIC2);
p                  69 block/partitions/msdos.c static int aix_magic_present(struct parsed_partitions *state, unsigned char *p)
p                  71 block/partitions/msdos.c 	struct partition *pt = (struct partition *) (p + 0x1be);
p                  76 block/partitions/msdos.c 	if (!(p[0] == AIX_LABEL_MAGIC1 &&
p                  77 block/partitions/msdos.c 		p[1] == AIX_LABEL_MAGIC2 &&
p                  78 block/partitions/msdos.c 		p[2] == AIX_LABEL_MAGIC3 &&
p                  79 block/partitions/msdos.c 		p[3] == AIX_LABEL_MAGIC4))
p                 125 block/partitions/msdos.c 	struct partition *p;
p                 149 block/partitions/msdos.c 		p = (struct partition *) (data + 0x1be);
p                 163 block/partitions/msdos.c 		for (i = 0; i < 4; i++, p++) {
p                 166 block/partitions/msdos.c 			if (!nr_sects(p) || is_extended_partition(p))
p                 171 block/partitions/msdos.c 			offs = start_sect(p)*sector_size;
p                 172 block/partitions/msdos.c 			size = nr_sects(p)*sector_size;
p                 185 block/partitions/msdos.c 			if (SYS_IND(p) == LINUX_RAID_PARTITION)
p                 198 block/partitions/msdos.c 		p -= 4;
p                 199 block/partitions/msdos.c 		for (i = 0; i < 4; i++, p++)
p                 200 block/partitions/msdos.c 			if (nr_sects(p) && is_extended_partition(p))
p                 205 block/partitions/msdos.c 		this_sector = first_sector + start_sect(p) * sector_size;
p                 206 block/partitions/msdos.c 		this_size = nr_sects(p) * sector_size;
p                 279 block/partitions/msdos.c 	struct bsd_partition *p;
p                 295 block/partitions/msdos.c 	for (p = l->d_partitions; p - l->d_partitions < max_partitions; p++) {
p                 300 block/partitions/msdos.c 		if (p->p_fstype == BSD_FS_UNUSED)
p                 302 block/partitions/msdos.c 		bsd_start = le32_to_cpu(p->p_offset);
p                 303 block/partitions/msdos.c 		bsd_size = le32_to_cpu(p->p_size);
p                 362 block/partitions/msdos.c 	struct unixware_slice *p;
p                 378 block/partitions/msdos.c 	p = &l->vtoc.v_slice[1];
p                 380 block/partitions/msdos.c 	while (p - &l->vtoc.v_slice[0] < UNIXWARE_NUMSLICE) {
p                 384 block/partitions/msdos.c 		if (p->s_label != UNIXWARE_FS_UNUSED)
p                 386 block/partitions/msdos.c 				      le32_to_cpu(p->start_sect),
p                 387 block/partitions/msdos.c 				      le32_to_cpu(p->nr_sects));
p                 388 block/partitions/msdos.c 		p++;
p                 406 block/partitions/msdos.c 	struct partition *p;
p                 413 block/partitions/msdos.c 	p = (struct partition *)(data + 0x1be);
p                 419 block/partitions/msdos.c 	    SYS_IND(p) == MINIX_PARTITION) { /* subpartition table present */
p                 424 block/partitions/msdos.c 		for (i = 0; i < MINIX_NR_SUBPARTITIONS; i++, p++) {
p                 428 block/partitions/msdos.c 			if (SYS_IND(p) == MINIX_PARTITION)
p                 430 block/partitions/msdos.c 					      start_sect(p), nr_sects(p));
p                 457 block/partitions/msdos.c 	struct partition *p;
p                 491 block/partitions/msdos.c 	p = (struct partition *) (data + 0x1be);
p                 492 block/partitions/msdos.c 	for (slot = 1; slot <= 4; slot++, p++) {
p                 493 block/partitions/msdos.c 		if (p->boot_ind != 0 && p->boot_ind != 0x80) {
p                 513 block/partitions/msdos.c 	p = (struct partition *) (data + 0x1be);
p                 514 block/partitions/msdos.c 	for (slot = 1 ; slot <= 4 ; slot++, p++) {
p                 516 block/partitions/msdos.c 		if (SYS_IND(p) == EFI_PMBR_OSTYPE_EFI_GPT) {
p                 522 block/partitions/msdos.c 	p = (struct partition *) (data + 0x1be);
p                 533 block/partitions/msdos.c 	for (slot = 1 ; slot <= 4 ; slot++, p++) {
p                 534 block/partitions/msdos.c 		sector_t start = start_sect(p)*sector_size;
p                 535 block/partitions/msdos.c 		sector_t size = nr_sects(p)*sector_size;
p                 539 block/partitions/msdos.c 		if (is_extended_partition(p)) {
p                 558 block/partitions/msdos.c 		if (SYS_IND(p) == LINUX_RAID_PARTITION)
p                 560 block/partitions/msdos.c 		if (SYS_IND(p) == DM6_PARTITION)
p                 562 block/partitions/msdos.c 		if (SYS_IND(p) == EZD_PARTITION)
p                 569 block/partitions/msdos.c 	p = (struct partition *) (0x1be + data);
p                 570 block/partitions/msdos.c 	for (slot = 1 ; slot <= 4 ; slot++, p++) {
p                 571 block/partitions/msdos.c 		unsigned char id = SYS_IND(p);
p                 574 block/partitions/msdos.c 		if (!nr_sects(p))
p                 582 block/partitions/msdos.c 		subtypes[n].parse(state, start_sect(p) * sector_size,
p                 583 block/partitions/msdos.c 				  nr_sects(p) * sector_size, slot);
p                  40 block/partitions/sgi.c 	struct sgi_partition *p;
p                  46 block/partitions/sgi.c 	p = &label->partitions[0];
p                  70 block/partitions/sgi.c 	for(i = 0; i < 16; i++, p++) {
p                  71 block/partitions/sgi.c 		blocks = be32_to_cpu(p->num_blocks);
p                  72 block/partitions/sgi.c 		start  = be32_to_cpu(p->first_block);
p                  75 block/partitions/sgi.c 			if (be32_to_cpu(p->type) == LINUX_RAID_PARTITION)
p                  59 block/partitions/sun.c 	struct sun_partition *p;
p                  69 block/partitions/sun.c 	p = label->partitions;
p                 102 block/partitions/sun.c 	for (i = 0; i < nparts; i++, p++) {
p                 106 block/partitions/sun.c 		st_sector = be32_to_cpu(p->start_cylinder) * spc;
p                 107 block/partitions/sun.c 		num_sectors = be32_to_cpu(p->num_sectors);
p                  40 block/scsi_ioctl.c static int sg_get_version(int __user *p)
p                  43 block/scsi_ioctl.c 	return put_user(sg_version_num, p);
p                  46 block/scsi_ioctl.c static int scsi_get_idlun(struct request_queue *q, int __user *p)
p                  48 block/scsi_ioctl.c 	return put_user(0, p);
p                  51 block/scsi_ioctl.c static int scsi_get_bus(struct request_queue *q, int __user *p)
p                  53 block/scsi_ioctl.c 	return put_user(0, p);
p                  61 block/scsi_ioctl.c static int sg_set_timeout(struct request_queue *q, int __user *p)
p                  63 block/scsi_ioctl.c 	int timeout, err = get_user(timeout, p);
p                  80 block/scsi_ioctl.c static int sg_get_reserved_size(struct request_queue *q, int __user *p)
p                  84 block/scsi_ioctl.c 	return put_user(val, p);
p                  87 block/scsi_ioctl.c static int sg_set_reserved_size(struct request_queue *q, int __user *p)
p                  89 block/scsi_ioctl.c 	int size, err = get_user(size, p);
p                 105 block/scsi_ioctl.c static int sg_emulated_host(struct request_queue *q, int __user *p)
p                 107 block/scsi_ioctl.c 	return put_user(1, p);
p                2448 block/sed-opal.c 	void *p;
p                2458 block/sed-opal.c 	p = memdup_user(arg, _IOC_SIZE(cmd));
p                2459 block/sed-opal.c 	if (IS_ERR(p))
p                2460 block/sed-opal.c 		return PTR_ERR(p);
p                2464 block/sed-opal.c 		ret = opal_save(dev, p);
p                2467 block/sed-opal.c 		ret = opal_lock_unlock(dev, p);
p                2470 block/sed-opal.c 		ret = opal_take_ownership(dev, p);
p                2473 block/sed-opal.c 		ret = opal_activate_lsp(dev, p);
p                2476 block/sed-opal.c 		ret = opal_set_new_pw(dev, p);
p                2479 block/sed-opal.c 		ret = opal_activate_user(dev, p);
p                2482 block/sed-opal.c 		ret = opal_reverttper(dev, p, false);
p                2485 block/sed-opal.c 		ret = opal_setup_locking_range(dev, p);
p                2488 block/sed-opal.c 		ret = opal_add_user_to_lr(dev, p);
p                2491 block/sed-opal.c 		ret = opal_enable_disable_shadow_mbr(dev, p);
p                2494 block/sed-opal.c 		ret = opal_set_mbr_done(dev, p);
p                2497 block/sed-opal.c 		ret = opal_write_shadow_mbr(dev, p);
p                2500 block/sed-opal.c 		ret = opal_erase_locking_range(dev, p);
p                2503 block/sed-opal.c 		ret = opal_secure_erase_locking_range(dev, p);
p                2506 block/sed-opal.c 		ret = opal_reverttper(dev, p, true);
p                2512 block/sed-opal.c 	kfree(p);
p                 149 block/t10-pi.c 			void *p, *pmap;
p                 153 block/t10-pi.c 			p = pmap + iv.bv_offset;
p                 155 block/t10-pi.c 				struct t10_pi_tuple *pi = p;
p                 161 block/t10-pi.c 				p += tuple_sz;
p                 197 block/t10-pi.c 			void *p, *pmap;
p                 201 block/t10-pi.c 			p = pmap + iv.bv_offset;
p                 203 block/t10-pi.c 				struct t10_pi_tuple *pi = p;
p                 210 block/t10-pi.c 				p += tuple_sz;
p                 113 certs/blacklist.c 	char *buffer, *p;
p                 119 certs/blacklist.c 	p = memcpy(buffer, type, type_len);
p                 120 certs/blacklist.c 	p += type_len;
p                 121 certs/blacklist.c 	*p++ = ':';
p                 122 certs/blacklist.c 	bin2hex(p, hash, hash_len);
p                 123 certs/blacklist.c 	p += hash_len * 2;
p                 124 certs/blacklist.c 	*p = 0;
p                 140 certs/system_keyring.c 	const u8 *p, *end;
p                 145 certs/system_keyring.c 	p = system_certificate_list;
p                 146 certs/system_keyring.c 	end = p + system_certificate_list_size;
p                 147 certs/system_keyring.c 	while (p < end) {
p                 151 certs/system_keyring.c 		if (end - p < 4)
p                 153 certs/system_keyring.c 		if (p[0] != 0x30 &&
p                 154 certs/system_keyring.c 		    p[1] != 0x82)
p                 156 certs/system_keyring.c 		plen = (p[2] << 8) | p[3];
p                 158 certs/system_keyring.c 		if (plen > end - p)
p                 164 certs/system_keyring.c 					   p,
p                 179 certs/system_keyring.c 		p += plen;
p                  35 crypto/ablkcipher.c static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
p                  37 crypto/ablkcipher.c 	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
p                  42 crypto/ablkcipher.c 	struct ablkcipher_buffer *p, *tmp;
p                  44 crypto/ablkcipher.c 	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
p                  45 crypto/ablkcipher.c 		ablkcipher_buffer_write(p);
p                  46 crypto/ablkcipher.c 		list_del(&p->entry);
p                  47 crypto/ablkcipher.c 		kfree(p);
p                  53 crypto/ablkcipher.c 					  struct ablkcipher_buffer *p)
p                  55 crypto/ablkcipher.c 	p->dst = walk->out;
p                  56 crypto/ablkcipher.c 	list_add_tail(&p->entry, &walk->buffers);
p                 144 crypto/ablkcipher.c 	struct ablkcipher_buffer *p;
p                 152 crypto/ablkcipher.c 	p = kmalloc(n, GFP_ATOMIC);
p                 153 crypto/ablkcipher.c 	if (!p)
p                 156 crypto/ablkcipher.c 	base = p + 1;
p                 161 crypto/ablkcipher.c 	p->len = bsize;
p                 162 crypto/ablkcipher.c 	p->data = dst;
p                 166 crypto/ablkcipher.c 	ablkcipher_queue_write(walk, p);
p                  25 crypto/aegis.h #define AEGIS_ALIGNED(p) IS_ALIGNED((uintptr_t)p, AEGIS_BLOCK_ALIGN)
p                 873 crypto/algapi.c 	char *p;
p                 876 crypto/algapi.c 	p = kzalloc(head + sizeof(*inst) + sizeof(struct crypto_spawn),
p                 878 crypto/algapi.c 	if (!p)
p                 881 crypto/algapi.c 	inst = (void *)(p + head);
p                 887 crypto/algapi.c 	return p;
p                 890 crypto/algapi.c 	kfree(p);
p                  95 crypto/algboss.c 	const char *p;
p                 106 crypto/algboss.c 	for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++)
p                 109 crypto/algboss.c 	len = p - name;
p                 110 crypto/algboss.c 	if (!len || *p != '(')
p                 119 crypto/algboss.c 		name = ++p;
p                 121 crypto/algboss.c 		for (; isalnum(*p) || *p == '-' || *p == '_'; p++)
p                 122 crypto/algboss.c 			notnum |= !isdigit(*p);
p                 124 crypto/algboss.c 		if (*p == '(') {
p                 128 crypto/algboss.c 				if (!*++p)
p                 130 crypto/algboss.c 				if (*p == '(')
p                 132 crypto/algboss.c 				else if (*p == ')' && !recursion--)
p                 137 crypto/algboss.c 			p++;
p                 140 crypto/algboss.c 		len = p - name;
p                 163 crypto/algboss.c 		if (*p == ')')
p                 166 crypto/algboss.c 		if (*p != ',')
p                 632 crypto/asymmetric_keys/asym_tpm.c 	const struct asn1_template *p;
p                 634 crypto/asymmetric_keys/asym_tpm.c 	for (p = asn1_templates; p->name; p++)
p                 635 crypto/asymmetric_keys/asym_tpm.c 		if (strcmp(name, p->name) == 0)
p                 636 crypto/asymmetric_keys/asym_tpm.c 			return p;
p                  54 crypto/asymmetric_keys/asymmetric_type.c 	char *req, *p;
p                  68 crypto/asymmetric_keys/asymmetric_type.c 	p = req = kmalloc(2 + 1 + len * 2 + 1, GFP_KERNEL);
p                  73 crypto/asymmetric_keys/asymmetric_type.c 		*p++ = 'i';
p                  74 crypto/asymmetric_keys/asymmetric_type.c 		*p++ = 'd';
p                  76 crypto/asymmetric_keys/asymmetric_type.c 		*p++ = 'e';
p                  77 crypto/asymmetric_keys/asymmetric_type.c 		*p++ = 'x';
p                  79 crypto/asymmetric_keys/asymmetric_type.c 	*p++ = ':';
p                  80 crypto/asymmetric_keys/asymmetric_type.c 	p = bin2hex(p, lookup, len);
p                  81 crypto/asymmetric_keys/asymmetric_type.c 	*p = 0;
p                 335 crypto/asymmetric_keys/asymmetric_type.c 	const unsigned char *p;
p                 348 crypto/asymmetric_keys/asymmetric_type.c 			p = kid->data;
p                 350 crypto/asymmetric_keys/asymmetric_type.c 				p += n - 4;
p                 353 crypto/asymmetric_keys/asymmetric_type.c 			seq_printf(m, "%*phN", n, p);
p                  27 crypto/asymmetric_keys/pkcs7_trust.c 	struct x509_certificate *x509, *last = NULL, *p;
p                 127 crypto/asymmetric_keys/pkcs7_trust.c 		for (p = sinfo->signer; p != x509; p = p->signer)
p                 128 crypto/asymmetric_keys/pkcs7_trust.c 			p->verified = true;
p                 162 crypto/asymmetric_keys/pkcs7_trust.c 	struct x509_certificate *p;
p                 166 crypto/asymmetric_keys/pkcs7_trust.c 	for (p = pkcs7->certs; p; p = p->next)
p                 167 crypto/asymmetric_keys/pkcs7_trust.c 		p->seen = false;
p                 204 crypto/asymmetric_keys/pkcs7_verify.c 	struct x509_certificate *x509 = sinfo->signer, *p;
p                 210 crypto/asymmetric_keys/pkcs7_verify.c 	for (p = pkcs7->certs; p; p = p->next)
p                 211 crypto/asymmetric_keys/pkcs7_verify.c 		p->seen = false;
p                 224 crypto/asymmetric_keys/pkcs7_verify.c 			for (p = sinfo->signer; p != x509; p = p->signer)
p                 225 crypto/asymmetric_keys/pkcs7_verify.c 				p->blacklisted = true;
p                 261 crypto/asymmetric_keys/pkcs7_verify.c 			for (p = pkcs7->certs; p; p = p->next) {
p                 263 crypto/asymmetric_keys/pkcs7_verify.c 					 p->index, p->id->len, p->id->data);
p                 264 crypto/asymmetric_keys/pkcs7_verify.c 				if (asymmetric_key_id_same(p->id, auth))
p                 270 crypto/asymmetric_keys/pkcs7_verify.c 			for (p = pkcs7->certs; p; p = p->next) {
p                 271 crypto/asymmetric_keys/pkcs7_verify.c 				if (!p->skid)
p                 274 crypto/asymmetric_keys/pkcs7_verify.c 					 p->index, p->skid->len, p->skid->data);
p                 275 crypto/asymmetric_keys/pkcs7_verify.c 				if (asymmetric_key_id_same(p->skid, auth))
p                 289 crypto/asymmetric_keys/pkcs7_verify.c 		    !asymmetric_key_id_same(p->skid, sig->auth_ids[1])) {
p                 291 crypto/asymmetric_keys/pkcs7_verify.c 				sinfo->index, x509->index, p->index);
p                 295 crypto/asymmetric_keys/pkcs7_verify.c 		pr_debug("- subject %s\n", p->subject);
p                 296 crypto/asymmetric_keys/pkcs7_verify.c 		if (p->seen) {
p                 301 crypto/asymmetric_keys/pkcs7_verify.c 		ret = public_key_verify_signature(p->pub, x509->sig);
p                 304 crypto/asymmetric_keys/pkcs7_verify.c 		x509->signer = p;
p                 305 crypto/asymmetric_keys/pkcs7_verify.c 		if (x509 == p) {
p                 309 crypto/asymmetric_keys/pkcs7_verify.c 		x509 = p;
p                  30 crypto/asymmetric_keys/restrict.c 		struct asymmetric_key_id *p = &cakey.id;
p                  39 crypto/asymmetric_keys/restrict.c 		ret = __asymmetric_key_hex_to_key_id(str + 3, p, hexlen);
p                  43 crypto/asymmetric_keys/restrict.c 			ca_keyid = p;	/* owner key 'id:xxxxxx' */
p                 539 crypto/asymmetric_keys/x509_cert_parser.c 	const unsigned char *p = value;
p                 549 crypto/asymmetric_keys/x509_cert_parser.c 		year = DD2bin(p);
p                 558 crypto/asymmetric_keys/x509_cert_parser.c 		year = DD2bin(p) * 100 + DD2bin(p);
p                 565 crypto/asymmetric_keys/x509_cert_parser.c 	mon  = DD2bin(p);
p                 566 crypto/asymmetric_keys/x509_cert_parser.c 	day = DD2bin(p);
p                 567 crypto/asymmetric_keys/x509_cert_parser.c 	hour = DD2bin(p);
p                 568 crypto/asymmetric_keys/x509_cert_parser.c 	min  = DD2bin(p);
p                 569 crypto/asymmetric_keys/x509_cert_parser.c 	sec  = DD2bin(p);
p                 571 crypto/asymmetric_keys/x509_cert_parser.c 	if (*p != 'Z')
p                 162 crypto/asymmetric_keys/x509_public_key.c 	char *desc = NULL, *p;
p                 209 crypto/asymmetric_keys/x509_public_key.c 	p = memcpy(desc, cert->subject, sulen);
p                 210 crypto/asymmetric_keys/x509_public_key.c 	p += sulen;
p                 211 crypto/asymmetric_keys/x509_public_key.c 	*p++ = ':';
p                 212 crypto/asymmetric_keys/x509_public_key.c 	*p++ = ' ';
p                 213 crypto/asymmetric_keys/x509_public_key.c 	p = bin2hex(p, q, srlen);
p                 214 crypto/asymmetric_keys/x509_public_key.c 	*p = 0;
p                 363 crypto/async_tx/async_pq.c 		void *p, *q, *s;
p                 386 crypto/async_tx/async_pq.c 			p = page_address(p_src) + offset;
p                 388 crypto/async_tx/async_pq.c 			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
p                 150 crypto/async_tx/async_raid6_recov.c 	struct page *p, *q, *a, *b;
p                 158 crypto/async_tx/async_raid6_recov.c 	p = blocks[disks-2];
p                 166 crypto/async_tx/async_raid6_recov.c 	srcs[0] = p;
p                 174 crypto/async_tx/async_raid6_recov.c 	srcs[0] = p;
p                 189 crypto/async_tx/async_raid6_recov.c 	struct page *p, *q, *g, *dp, *dq;
p                 210 crypto/async_tx/async_raid6_recov.c 	p = blocks[disks-2];
p                 228 crypto/async_tx/async_raid6_recov.c 	srcs[1] = p;
p                 263 crypto/async_tx/async_raid6_recov.c 	struct page *p, *q, *dp, *dq;
p                 271 crypto/async_tx/async_raid6_recov.c 	p = blocks[disks-2];
p                 291 crypto/async_tx/async_raid6_recov.c 	blocks[disks-2] = p;
p                 296 crypto/async_tx/async_raid6_recov.c 	srcs[1] = p;
p                 413 crypto/async_tx/async_raid6_recov.c 	struct page *p, *q, *dq;
p                 460 crypto/async_tx/async_raid6_recov.c 	p = blocks[disks-2];
p                 478 crypto/async_tx/async_raid6_recov.c 		tx = async_memcpy(p, g, 0, 0, bytes, submit);
p                 505 crypto/async_tx/async_raid6_recov.c 	srcs[0] = p;
p                 509 crypto/async_tx/async_raid6_recov.c 	tx = async_xor(p, srcs, 0, 2, bytes, submit);
p                 223 crypto/async_tx/async_xor.c static int page_is_zero(struct page *p, unsigned int offset, size_t len)
p                 225 crypto/async_tx/async_xor.c 	return !memchr_inv(page_address(p) + offset, 0, len);
p                 312 crypto/blowfish_common.c 	const u32 *P = bctx->p;
p                 347 crypto/blowfish_common.c 	u32 *P = ctx->p;
p                  41 crypto/blowfish_generic.c 	const u32 *P = ctx->p;
p                  75 crypto/blowfish_generic.c 	const u32 *P = ctx->p;
p                 852 crypto/ccm.c   static int crypto_cbcmac_digest_update(struct shash_desc *pdesc, const u8 *p,
p                 865 crypto/ccm.c   		crypto_xor(dg + ctx->len, p, l);
p                 868 crypto/ccm.c   		p += l;
p                 117 crypto/cmac.c  static int crypto_cmac_digest_update(struct shash_desc *pdesc, const u8 *p,
p                 131 crypto/cmac.c  		memcpy(odds + ctx->len, p, len);
p                 137 crypto/cmac.c  	memcpy(odds + ctx->len, p, bs - ctx->len);
p                 139 crypto/cmac.c  	p += bs - ctx->len;
p                 149 crypto/cmac.c  		crypto_xor(prev, p, bs);
p                 151 crypto/cmac.c  		p += bs;
p                 157 crypto/cmac.c  		memcpy(odds, p, len);
p                 180 crypto/cmac.c  		u8 *p = odds + ctx->len;
p                 182 crypto/cmac.c  		*p = 0x80;
p                 183 crypto/cmac.c  		p++;
p                 187 crypto/cmac.c  			memset(p, 0, rlen);
p                 227 crypto/cryptd.c 	char *p;
p                 231 crypto/cryptd.c 	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
p                 232 crypto/cryptd.c 	if (!p)
p                 235 crypto/cryptd.c 	inst = (void *)(p + head);
p                 242 crypto/cryptd.c 	return p;
p                 245 crypto/cryptd.c 	kfree(p);
p                 246 crypto/cryptd.c 	p = ERR_PTR(err);
p                  36 crypto/crypto_user_base.c struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
p                  48 crypto/crypto_user_base.c 		if ((q->cra_flags ^ p->cru_type) & p->cru_mask)
p                  51 crypto/crypto_user_base.c 		if (strlen(p->cru_driver_name))
p                  53 crypto/crypto_user_base.c 					p->cru_driver_name);
p                  55 crypto/crypto_user_base.c 			match = !strcmp(q->cra_name, p->cru_name);
p                 188 crypto/crypto_user_base.c 	struct crypto_user_alg *p = nlmsg_data(in_nlh);
p                 194 crypto/crypto_user_base.c 	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
p                 197 crypto/crypto_user_base.c 	alg = crypto_alg_match(p, 0);
p                 264 crypto/crypto_user_base.c 	struct crypto_user_alg *p = nlmsg_data(nlh);
p                 271 crypto/crypto_user_base.c 	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
p                 274 crypto/crypto_user_base.c 	if (priority && !strlen(p->cru_driver_name))
p                 277 crypto/crypto_user_base.c 	alg = crypto_alg_match(p, 1);
p                 300 crypto/crypto_user_base.c 	struct crypto_user_alg *p = nlmsg_data(nlh);
p                 306 crypto/crypto_user_base.c 	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
p                 309 crypto/crypto_user_base.c 	alg = crypto_alg_match(p, 1);
p                 339 crypto/crypto_user_base.c 	struct crypto_user_alg *p = nlmsg_data(nlh);
p                 345 crypto/crypto_user_base.c 	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
p                 348 crypto/crypto_user_base.c 	if (strlen(p->cru_driver_name))
p                 354 crypto/crypto_user_base.c 	alg = crypto_alg_match(p, exact);
p                 360 crypto/crypto_user_base.c 	if (strlen(p->cru_driver_name))
p                 361 crypto/crypto_user_base.c 		name = p->cru_driver_name;
p                 363 crypto/crypto_user_base.c 		name = p->cru_name;
p                 365 crypto/crypto_user_base.c 	alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
p                 303 crypto/crypto_user_stat.c 	struct crypto_user_alg *p = nlmsg_data(in_nlh);
p                 309 crypto/crypto_user_stat.c 	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
p                 312 crypto/crypto_user_stat.c 	alg = crypto_alg_match(p, 0);
p                  15 crypto/dh.c    	MPI p;	/* Value is guaranteed to be set. */
p                  23 crypto/dh.c    	mpi_free(ctx->p);
p                  39 crypto/dh.c    	return mpi_powm(val, base, ctx->xa, ctx->p);
p                  57 crypto/dh.c    	ctx->p = mpi_read_raw_data(params->p, params->p_size);
p                  58 crypto/dh.c    	if (!ctx->p)
p                 111 crypto/dh.c    	if (unlikely(!ctx->p))
p                 120 crypto/dh.c    	if (mpi_cmp_ui(y, 1) < 1 || mpi_cmp(y, ctx->p) >= 0)
p                 131 crypto/dh.c    		ret = mpi_powm(val, y, ctx->q, ctx->p);
p                 200 crypto/dh.c    	return mpi_get_size(ctx->p);
p                  29 crypto/dh_helper.c static inline unsigned int dh_data_size(const struct dh *p)
p                  31 crypto/dh_helper.c 	return p->key_size + p->p_size + p->q_size + p->g_size;
p                  34 crypto/dh_helper.c unsigned int crypto_dh_key_len(const struct dh *p)
p                  36 crypto/dh_helper.c 	return DH_KPP_SECRET_MIN_SIZE + dh_data_size(p);
p                  59 crypto/dh_helper.c 	ptr = dh_pack_data(ptr, end, params->p, params->p_size);
p                  99 crypto/dh_helper.c 	params->p = (void *)(ptr + params->key_size);
p                 109 crypto/dh_helper.c 	if (memchr_inv(params->p, 0, params->p_size) == NULL)
p                  75 crypto/ecc.c   	struct ecc_point *p = kmalloc(sizeof(*p), GFP_KERNEL);
p                  77 crypto/ecc.c   	if (!p)
p                  80 crypto/ecc.c   	p->x = ecc_alloc_digits_space(ndigits);
p                  81 crypto/ecc.c   	if (!p->x)
p                  84 crypto/ecc.c   	p->y = ecc_alloc_digits_space(ndigits);
p                  85 crypto/ecc.c   	if (!p->y)
p                  88 crypto/ecc.c   	p->ndigits = ndigits;
p                  90 crypto/ecc.c   	return p;
p                  93 crypto/ecc.c   	ecc_free_digits_space(p->x);
p                  95 crypto/ecc.c   	kfree(p);
p                  99 crypto/ecc.c   static void ecc_free_point(struct ecc_point *p)
p                 101 crypto/ecc.c   	if (!p)
p                 104 crypto/ecc.c   	kzfree(p->x);
p                 105 crypto/ecc.c   	kzfree(p->y);
p                 106 crypto/ecc.c   	kzfree(p);
p                1152 crypto/ecc.c   	u64 *curve_prime = curve->p;
p                1207 crypto/ecc.c   		   const struct ecc_point *p, const struct ecc_point *q,
p                1217 crypto/ecc.c   	vli_mod_sub(z, result->x, p->x, curve->p, ndigits);
p                1218 crypto/ecc.c   	vli_set(px, p->x, ndigits);
p                1219 crypto/ecc.c   	vli_set(py, p->y, ndigits);
p                1220 crypto/ecc.c   	xycz_add(px, py, result->x, result->y, curve->p, ndigits);
p                1221 crypto/ecc.c   	vli_mod_inv(z, z, curve->p, ndigits);
p                1222 crypto/ecc.c   	apply_z(result->x, result->y, z, curve->p, ndigits);
p                1229 crypto/ecc.c   			   const u64 *u1, const struct ecc_point *p,
p                1245 crypto/ecc.c   	ecc_point_add(&sum, p, q, curve);
p                1247 crypto/ecc.c   	points[1] = p;
p                1263 crypto/ecc.c   		ecc_point_double_jacobian(rx, ry, z, curve->p, ndigits);
p                1273 crypto/ecc.c   			apply_z(tx, ty, z, curve->p, ndigits);
p                1274 crypto/ecc.c   			vli_mod_sub(tz, rx, tx, curve->p, ndigits);
p                1275 crypto/ecc.c   			xycz_add(tx, ty, rx, ry, curve->p, ndigits);
p                1276 crypto/ecc.c   			vli_mod_mult_fast(z, z, tz, curve->p, ndigits);
p                1279 crypto/ecc.c   	vli_mod_inv(z, z, curve->p, ndigits);
p                1280 crypto/ecc.c   	apply_z(rx, ry, z, curve->p, ndigits);
p                1436 crypto/ecc.c   	if (vli_cmp(curve->p, pk->x, pk->ndigits) != 1)
p                1438 crypto/ecc.c   	if (vli_cmp(curve->p, pk->y, pk->ndigits) != 1)
p                1442 crypto/ecc.c   	vli_mod_square_fast(yy, pk->y, curve->p, pk->ndigits); /* y^2 */
p                1443 crypto/ecc.c   	vli_mod_square_fast(xxx, pk->x, curve->p, pk->ndigits); /* x^2 */
p                1444 crypto/ecc.c   	vli_mod_mult_fast(xxx, xxx, pk->x, curve->p, pk->ndigits); /* x^3 */
p                1445 crypto/ecc.c   	vli_mod_mult_fast(w, curve->a, pk->x, curve->p, pk->ndigits); /* a·x */
p                1446 crypto/ecc.c   	vli_mod_add(w, w, curve->b, curve->p, pk->ndigits); /* a·x + b */
p                1447 crypto/ecc.c   	vli_mod_add(w, w, xxx, curve->p, pk->ndigits); /* x^3 + a·x + b */
p                  67 crypto/ecc.h   	u64 *p;
p                 242 crypto/ecc.h   			   const u64 *x, const struct ecc_point *p,
p                  25 crypto/ecc_curve_defs.h 	.p = nist_p192_p,
p                  51 crypto/ecc_curve_defs.h 	.p = nist_p256_p,
p                  55 crypto/ecrdsa_defs.h 	.p = cp256a_p,
p                  88 crypto/ecrdsa_defs.h 	.p = cp256b_p,
p                 125 crypto/ecrdsa_defs.h 	.p = cp256c_p,
p                 174 crypto/ecrdsa_defs.h 	.p = tc512a_p,
p                 219 crypto/ecrdsa_defs.h 	.p = tc512b_p,
p                 404 crypto/essiv.c 	const char *p, *q;
p                 408 crypto/essiv.c 	p = strrchr(cra_name, '(');
p                 409 crypto/essiv.c 	if (!p++)
p                 413 crypto/essiv.c 	q = strchr(p, ')');
p                 417 crypto/essiv.c 	len = q - p;
p                 421 crypto/essiv.c 	memcpy(essiv_cipher_name, p, len);
p                 172 crypto/gf128mul.c 	be128 p[8];
p                 175 crypto/gf128mul.c 	p[0] = *r;
p                 177 crypto/gf128mul.c 		gf128mul_x_lle(&p[i + 1], &p[i]);
p                 184 crypto/gf128mul.c 			be128_xor(r, r, &p[0]);
p                 186 crypto/gf128mul.c 			be128_xor(r, r, &p[1]);
p                 188 crypto/gf128mul.c 			be128_xor(r, r, &p[2]);
p                 190 crypto/gf128mul.c 			be128_xor(r, r, &p[3]);
p                 192 crypto/gf128mul.c 			be128_xor(r, r, &p[4]);
p                 194 crypto/gf128mul.c 			be128_xor(r, r, &p[5]);
p                 196 crypto/gf128mul.c 			be128_xor(r, r, &p[6]);
p                 198 crypto/gf128mul.c 			be128_xor(r, r, &p[7]);
p                 210 crypto/gf128mul.c 	be128 p[8];
p                 213 crypto/gf128mul.c 	p[0] = *r;
p                 215 crypto/gf128mul.c 		gf128mul_x_bbe(&p[i + 1], &p[i]);
p                 222 crypto/gf128mul.c 			be128_xor(r, r, &p[7]);
p                 224 crypto/gf128mul.c 			be128_xor(r, r, &p[6]);
p                 226 crypto/gf128mul.c 			be128_xor(r, r, &p[5]);
p                 228 crypto/gf128mul.c 			be128_xor(r, r, &p[4]);
p                 230 crypto/gf128mul.c 			be128_xor(r, r, &p[3]);
p                 232 crypto/gf128mul.c 			be128_xor(r, r, &p[2]);
p                 234 crypto/gf128mul.c 			be128_xor(r, r, &p[1]);
p                 236 crypto/gf128mul.c 			be128_xor(r, r, &p[0]);
p                  28 crypto/hmac.c  static inline void *align_ptr(void *p, unsigned int align)
p                  30 crypto/hmac.c  	return (void *)ALIGN((unsigned long)p, align);
p                 448 crypto/jitterentropy.c 	unsigned char *p = data;
p                 462 crypto/jitterentropy.c 		jent_memcpy(p, &ec->data, tocopy);
p                 465 crypto/jitterentropy.c 		p += tocopy;
p                 188 crypto/md4.c   	char *p = (char *)mctx->block + offset;
p                 191 crypto/md4.c   	*p++ = 0x80;
p                 193 crypto/md4.c   		memset(p, 0x00, padding + sizeof (u64));
p                 195 crypto/md4.c   		p = (char *)mctx->block;
p                 199 crypto/md4.c   	memset(p, 0, padding);
p                 181 crypto/md5.c   	char *p = (char *)mctx->block + offset;
p                 184 crypto/md5.c   	*p++ = 0x80;
p                 186 crypto/md5.c   		memset(p, 0x00, padding + sizeof (u64));
p                 188 crypto/md5.c   		p = (char *)mctx->block;
p                 192 crypto/md5.c   	memset(p, 0, padding);
p                  26 crypto/proc.c  static void *c_next(struct seq_file *m, void *p, loff_t *pos)
p                  28 crypto/proc.c  	return seq_list_next(p, &crypto_alg_list, pos);
p                  31 crypto/proc.c  static void c_stop(struct seq_file *m, void *p)
p                  36 crypto/proc.c  static int c_show(struct seq_file *m, void *p)
p                  38 crypto/proc.c  	struct crypto_alg *alg = list_entry(p, struct crypto_alg, cra_list);
p                  82 crypto/rsa-pkcs1pad.c 	const struct rsa_asn1_template *p;
p                  84 crypto/rsa-pkcs1pad.c 	for (p = rsa_asn1_templates; p->name; p++)
p                  85 crypto/rsa-pkcs1pad.c 		if (strcmp(name, p->name) == 0)
p                  86 crypto/rsa-pkcs1pad.c 			return p;
p                  85 crypto/rsa_helper.c 	key->p = value;
p                 183 crypto/skcipher.c 	struct skcipher_walk_buffer *p, *tmp;
p                 185 crypto/skcipher.c 	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
p                 191 crypto/skcipher.c 		data = p->data;
p                 193 crypto/skcipher.c 			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
p                 197 crypto/skcipher.c 		scatterwalk_copychunks(data, &p->dst, p->len, 1);
p                 199 crypto/skcipher.c 		if (offset_in_page(p->data) + p->len + walk->stride >
p                 201 crypto/skcipher.c 			free_page((unsigned long)p->data);
p                 204 crypto/skcipher.c 		list_del(&p->entry);
p                 205 crypto/skcipher.c 		kfree(p);
p                 218 crypto/skcipher.c 				 struct skcipher_walk_buffer *p)
p                 220 crypto/skcipher.c 	p->dst = walk->out;
p                 221 crypto/skcipher.c 	list_add_tail(&p->entry, &walk->buffers);
p                 228 crypto/skcipher.c 	struct skcipher_walk_buffer *p;
p                 248 crypto/skcipher.c 		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
p                 249 crypto/skcipher.c 		n += sizeof(*p);
p                 263 crypto/skcipher.c 		p = v;
p                 264 crypto/skcipher.c 		p->len = bsize;
p                 265 crypto/skcipher.c 		skcipher_queue_write(walk, p);
p                 266 crypto/skcipher.c 		buffer = p->buffer;
p                 287 crypto/skcipher.c 	struct skcipher_walk_buffer *p;
p                 300 crypto/skcipher.c 	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
p                 301 crypto/skcipher.c 	if (!p)
p                 304 crypto/skcipher.c 	p->data = walk->page;
p                 305 crypto/skcipher.c 	p->len = walk->nbytes;
p                 306 crypto/skcipher.c 	skcipher_queue_write(walk, p);
p                1357 crypto/tcrypt.c 				unsigned int p = 0;
p                1362 crypto/tcrypt.c 					sg_set_buf(cur->sg + p, cur->xbuf[p],
p                1364 crypto/tcrypt.c 					memset(cur->xbuf[p], 0xff, PAGE_SIZE);
p                1365 crypto/tcrypt.c 					p++;
p                1369 crypto/tcrypt.c 				sg_set_buf(cur->sg + p, cur->xbuf[p], k);
p                1370 crypto/tcrypt.c 				memset(cur->xbuf[p], 0xff, k);
p                 830 crypto/testmgr.c 					   size_t max_divs, char *p, char *end,
p                 891 crypto/testmgr.c 		p += scnprintf(p, end - p, "%s%u.%u%%@%s+%u%s", flushtype_str,
p                 900 crypto/testmgr.c 	return p;
p                 907 crypto/testmgr.c 	char *p = name;
p                 914 crypto/testmgr.c 	p += scnprintf(p, end - p, "random:");
p                 918 crypto/testmgr.c 		p += scnprintf(p, end - p, " inplace");
p                 923 crypto/testmgr.c 		p += scnprintf(p, end - p, " may_sleep");
p                 929 crypto/testmgr.c 		p += scnprintf(p, end - p, " use_final");
p                 933 crypto/testmgr.c 		p += scnprintf(p, end - p, " use_finup");
p                 937 crypto/testmgr.c 		p += scnprintf(p, end - p, " use_digest");
p                 944 crypto/testmgr.c 		p += scnprintf(p, end - p, " nosimd");
p                 947 crypto/testmgr.c 	p += scnprintf(p, end - p, " src_divs=[");
p                 948 crypto/testmgr.c 	p = generate_random_sgl_divisions(cfg->src_divs,
p                 949 crypto/testmgr.c 					  ARRAY_SIZE(cfg->src_divs), p, end,
p                 953 crypto/testmgr.c 	p += scnprintf(p, end - p, "]");
p                 956 crypto/testmgr.c 		p += scnprintf(p, end - p, " dst_divs=[");
p                 957 crypto/testmgr.c 		p = generate_random_sgl_divisions(cfg->dst_divs,
p                 959 crypto/testmgr.c 						  p, end, false,
p                 961 crypto/testmgr.c 		p += scnprintf(p, end - p, "]");
p                 966 crypto/testmgr.c 		p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
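The crypto/testmgr.c lines above build a description string with repeated p += scnprintf(p, end - p, ...), which can never run past 'end' because scnprintf() returns the number of characters actually stored, capped at size-1. A userspace stand-in (my_scnprintf below is an invented name, assumed to match that contract) shows why the accumulation idiom is safe.

        #include <stdarg.h>
        #include <stdio.h>

        static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
        {
                va_list ap;
                int n;

                if (!size)
                        return 0;
                va_start(ap, fmt);
                n = vsnprintf(buf, size, fmt, ap);
                va_end(ap);
                if (n < 0)
                        return 0;
                /* never report more than what fits, so p never passes end */
                return (size_t)n < size ? n : (int)(size - 1);
        }

        int main(void)
        {
                char name[32];
                char *p = name, *end = name + sizeof(name);

                p += my_scnprintf(p, end - p, "random:");
                p += my_scnprintf(p, end - p, " inplace");
                p += my_scnprintf(p, end - p, " src_divs=[%u.%u%%]", 66, 7);
                puts(name);
                return 0;
        }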
p                 286 crypto/vmac.c  	u64 p, q, t;
p                 289 crypto/vmac.c  	p = MUL32(a3, k3);
p                 290 crypto/vmac.c  	p += p;
p                 291 crypto/vmac.c  	p += *(u64 *)mh;
p                 292 crypto/vmac.c  	p += MUL32(a0, k2);
p                 293 crypto/vmac.c  	p += MUL32(a1, k1);
p                 294 crypto/vmac.c  	p += MUL32(a2, k0);
p                 295 crypto/vmac.c  	t = (u32)(p);
p                 296 crypto/vmac.c  	p >>= 32;
p                 297 crypto/vmac.c  	p += MUL32(a0, k3);
p                 298 crypto/vmac.c  	p += MUL32(a1, k2);
p                 299 crypto/vmac.c  	p += MUL32(a2, k1);
p                 300 crypto/vmac.c  	p += MUL32(a3, k0);
p                 301 crypto/vmac.c  	t |= ((u64)((u32)p & 0x7fffffff)) << 32;
p                 302 crypto/vmac.c  	p >>= 31;
p                 303 crypto/vmac.c  	p += (u64)(((u32 *)ml)[INDEX_LOW]);
p                 304 crypto/vmac.c  	p += MUL32(a0, k0);
p                 309 crypto/vmac.c  	p += q;
p                 310 crypto/vmac.c  	t2 = (u32)(p);
p                 311 crypto/vmac.c  	p >>= 32;
p                 312 crypto/vmac.c  	p += (u64)(((u32 *)ml)[INDEX_HIGH]);
p                 313 crypto/vmac.c  	p += MUL32(a0, k1);
p                 314 crypto/vmac.c  	p += MUL32(a1, k0);
p                 318 crypto/vmac.c  	p += q;
p                 319 crypto/vmac.c  	*(u64 *)(alo) = (p << 32) | t2;
p                 320 crypto/vmac.c  	p >>= 32;
p                 321 crypto/vmac.c  	*(u64 *)(ahi) = p + t;
p                 493 crypto/vmac.c  static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
p                 502 crypto/vmac.c  		memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n);
p                 504 crypto/vmac.c  		p += n;
p                 510 crypto/vmac.c  		memcpy(&dctx->partial[dctx->partial_size], p, n);
p                 512 crypto/vmac.c  		p += n;
p                 523 crypto/vmac.c  		vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
p                 524 crypto/vmac.c  		p += n;
p                 529 crypto/vmac.c  		memcpy(dctx->partial, p, len);
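The poly_step_func() excerpt from crypto/vmac.c (lines 286-321 above) assembles wide arithmetic out of MUL32(), a 32x32->64 multiply of the operands' low halves. The sketch below shows the same limb trick in isolation as a portable 64x64->128 multiply; it is an illustration of the technique, not the kernel's exact carry schedule, and mul64to128 is an invented name.

        #include <stdint.h>
        #include <stdio.h>
        #include <inttypes.h>

        #define MUL32(a, b) ((uint64_t)(uint32_t)(a) * (uint32_t)(b))

        /* Four partial products, then carries propagated column by column. */
        static void mul64to128(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
        {
                uint64_t a0 = (uint32_t)a, a1 = a >> 32;
                uint64_t b0 = (uint32_t)b, b1 = b >> 32;
                uint64_t p00 = MUL32(a0, b0);
                uint64_t p01 = MUL32(a0, b1);
                uint64_t p10 = MUL32(a1, b0);
                uint64_t p11 = MUL32(a1, b1);
                uint64_t mid = p01 + (p00 >> 32) + (uint32_t)p10; /* no overflow */

                *lo = (mid << 32) | (uint32_t)p00;
                *hi = p11 + (mid >> 32) + (p10 >> 32);
        }

        int main(void)
        {
                uint64_t hi, lo;

                mul64to128(~0ULL, ~0ULL, &hi, &lo);
                /* (2^64-1)^2 = fffffffffffffffe0000000000000001 */
                printf("%016" PRIx64 "%016" PRIx64 "\n", hi, lo);
                return 0;
        }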
p                  84 crypto/xcbc.c  static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p,
p                  98 crypto/xcbc.c  		memcpy(odds + ctx->len, p, len);
p                 104 crypto/xcbc.c  	memcpy(odds + ctx->len, p, bs - ctx->len);
p                 106 crypto/xcbc.c  	p += bs - ctx->len;
p                 116 crypto/xcbc.c  		crypto_xor(prev, p, bs);
p                 118 crypto/xcbc.c  		p += bs;
p                 124 crypto/xcbc.c  		memcpy(odds, p, len);
p                 146 crypto/xcbc.c  		u8 *p = odds + ctx->len;
p                 148 crypto/xcbc.c  		*p = 0x80;
p                 149 crypto/xcbc.c  		p++;
p                 153 crypto/xcbc.c  			memset(p, 0, rlen);
p                 141 drivers/acpi/acpi_adxl.c 	union acpi_object *p;
p                 169 drivers/acpi/acpi_adxl.c 	p = params->package.elements + 1;
p                 170 drivers/acpi/acpi_adxl.c 	adxl_count = p->package.count;
p                 176 drivers/acpi/acpi_adxl.c 	p = p->package.elements;
p                 188 drivers/acpi/acpi_adxl.c 		adxl_component_names[i] = p[i].string.pointer;
p                 262 drivers/acpi/acpi_dbg.c 	char *p;
p                 269 drivers/acpi/acpi_dbg.c 	p = &crc->buf[crc->head];
p                 271 drivers/acpi/acpi_dbg.c 	memcpy(p, buf, n);
p                 283 drivers/acpi/acpi_dbg.c 	char *p;
p                 290 drivers/acpi/acpi_dbg.c 	p = &crc->buf[crc->tail];
p                 291 drivers/acpi/acpi_dbg.c 	ret = (int)*p;
p                 584 drivers/acpi/acpi_dbg.c 	char *p;
p                 591 drivers/acpi/acpi_dbg.c 	p = &crc->buf[crc->tail];
p                 593 drivers/acpi/acpi_dbg.c 	if (copy_to_user(buf, p, n)) {
p                 654 drivers/acpi/acpi_dbg.c 	char *p;
p                 661 drivers/acpi/acpi_dbg.c 	p = &crc->buf[crc->head];
p                 663 drivers/acpi/acpi_dbg.c 	if (copy_from_user(p, buf, n)) {
p                 143 drivers/acpi/acpi_tad.c 	char *p;
p                 145 drivers/acpi/acpi_tad.c 	p = strchr(s, ':');
p                 146 drivers/acpi/acpi_tad.c 	if (!p)
p                 149 drivers/acpi/acpi_tad.c 	*p = '\0';
p                 153 drivers/acpi/acpi_tad.c 	return p + 1;
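The acpi_tad.c lines above split a "left:right" string in place by overwriting the separator with '\0' and returning a pointer just past it. A minimal standalone version of that idiom (split_at_colon is an invented name) looks like this; note that the caller's buffer is modified, exactly as in the kernel helper.

        #include <stdio.h>
        #include <string.h>

        static char *split_at_colon(char *s)
        {
                char *p = strchr(s, ':');

                if (!p)
                        return NULL;
                *p = '\0';      /* terminate the left half in place */
                return p + 1;   /* right half starts after the separator */
        }

        int main(void)
        {
                char buf[] = "2024:05";
                char *rest = split_at_colon(buf);

                printf("left=%s right=%s\n", buf, rest ? rest : "(none)");
                return 0;
        }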
p                 375 drivers/acpi/acpica/acmacros.h #define ACPI_SET_DESCRIPTOR_PTR(d, p)   (((union acpi_descriptor *)(void *)(d))->common.common_pointer = (p))
p                 424 drivers/acpi/acpica/acmacros.h #define ACPI_ERROR_NAMESPACE(s, p, e)       acpi_ut_prefixed_namespace_error (AE_INFO, s, p, e);
p                 425 drivers/acpi/acpica/acmacros.h #define ACPI_ERROR_METHOD(s, n, p, e)       acpi_ut_method_error (AE_INFO, s, n, p, e);
p                 435 drivers/acpi/acpica/acmacros.h #define ACPI_ERROR_NAMESPACE(s, p, e)
p                 436 drivers/acpi/acpica/acmacros.h #define ACPI_ERROR_METHOD(s, n, p, e)
p                 330 drivers/acpi/acpica/utprint.c 	const void *p;
p                 496 drivers/acpi/acpica/utprint.c 			p = va_arg(args, void *);
p                 498 drivers/acpi/acpica/utprint.c 			    acpi_ut_format_number(pos, end, ACPI_TO_INTEGER(p),
p                 128 drivers/acpi/apei/erst-dbg.c 		void *p;
p                 130 drivers/acpi/apei/erst-dbg.c 		p = kmalloc(len, GFP_KERNEL);
p                 131 drivers/acpi/apei/erst-dbg.c 		if (!p)
p                 134 drivers/acpi/apei/erst-dbg.c 		erst_dbg_buf = p;
p                 169 drivers/acpi/apei/erst-dbg.c 		void *p;
p                 171 drivers/acpi/apei/erst-dbg.c 		p = kmalloc(usize, GFP_KERNEL);
p                 172 drivers/acpi/apei/erst-dbg.c 		if (!p)
p                 175 drivers/acpi/apei/erst-dbg.c 		erst_dbg_buf = p;
p                  80 drivers/acpi/evged.c 	struct acpi_resource_irq *p = &ares->data.irq;
p                  93 drivers/acpi/evged.c 		gsi = p->interrupts[0];
p                  94 drivers/acpi/evged.c 		trigger = p->triggering;
p                  97 drivers/acpi/evged.c 		trigger = p->triggering;
p                 389 drivers/acpi/hmat/hmat.c 	struct acpi_hmat_proximity_domain *p = (void *)header;
p                 392 drivers/acpi/hmat/hmat.c 	if (p->header.length != sizeof(*p)) {
p                 394 drivers/acpi/hmat/hmat.c 			 p->header.length);
p                 400 drivers/acpi/hmat/hmat.c 			p->reserved3, p->reserved4, p->flags, p->processor_PD,
p                 401 drivers/acpi/hmat/hmat.c 			p->memory_PD);
p                 404 drivers/acpi/hmat/hmat.c 			p->flags, p->processor_PD, p->memory_PD);
p                 406 drivers/acpi/hmat/hmat.c 	if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) {
p                 407 drivers/acpi/hmat/hmat.c 		target = find_mem_target(p->memory_PD);
p                 413 drivers/acpi/hmat/hmat.c 	if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) {
p                 414 drivers/acpi/hmat/hmat.c 		int p_node = pxm_to_node(p->processor_PD);
p                 532 drivers/acpi/nfit/core.c 		struct nd_cmd_get_config_data_hdr *p = buf;
p                 534 drivers/acpi/nfit/core.c 		out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
p                 537 drivers/acpi/nfit/core.c 		struct nd_cmd_set_config_hdr *p = buf;
p                 539 drivers/acpi/nfit/core.c 		out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
p                 540 drivers/acpi/nfit/core.c 				p->in_buf);
p                 122 drivers/acpi/numa.c 			struct acpi_srat_cpu_affinity *p =
p                 125 drivers/acpi/numa.c 				 p->apic_id, p->local_sapic_eid,
p                 126 drivers/acpi/numa.c 				 p->proximity_domain_lo,
p                 127 drivers/acpi/numa.c 				 (p->flags & ACPI_SRAT_CPU_ENABLED) ?
p                 134 drivers/acpi/numa.c 			struct acpi_srat_mem_affinity *p =
p                 137 drivers/acpi/numa.c 				 (unsigned long long)p->base_address,
p                 138 drivers/acpi/numa.c 				 (unsigned long long)p->length,
p                 139 drivers/acpi/numa.c 				 p->proximity_domain,
p                 140 drivers/acpi/numa.c 				 (p->flags & ACPI_SRAT_MEM_ENABLED) ?
p                 142 drivers/acpi/numa.c 				 (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
p                 144 drivers/acpi/numa.c 				 (p->flags & ACPI_SRAT_MEM_NON_VOLATILE) ?
p                 151 drivers/acpi/numa.c 			struct acpi_srat_x2apic_cpu_affinity *p =
p                 154 drivers/acpi/numa.c 				 p->apic_id,
p                 155 drivers/acpi/numa.c 				 p->proximity_domain,
p                 156 drivers/acpi/numa.c 				 (p->flags & ACPI_SRAT_CPU_ENABLED) ?
p                 163 drivers/acpi/numa.c 			struct acpi_srat_gicc_affinity *p =
p                 166 drivers/acpi/numa.c 				 p->acpi_processor_uid,
p                 167 drivers/acpi/numa.c 				 p->proximity_domain,
p                 168 drivers/acpi/numa.c 				 (p->flags & ACPI_SRAT_GICC_ENABLED) ?
p                1375 drivers/acpi/osl.c 	char *p = acpi_os_name;
p                1383 drivers/acpi/osl.c 			*p++ = *str;
p                1389 drivers/acpi/osl.c 	*p = 0;
p                  98 drivers/acpi/pci_link.c 			struct acpi_resource_irq *p = &resource->data.irq;
p                  99 drivers/acpi/pci_link.c 			if (!p || !p->interrupt_count) {
p                 105 drivers/acpi/pci_link.c 			     (i < p->interrupt_count
p                 107 drivers/acpi/pci_link.c 				if (!p->interrupts[i]) {
p                 110 drivers/acpi/pci_link.c 					       p->interrupts[i]);
p                 113 drivers/acpi/pci_link.c 				link->irq.possible[i] = p->interrupts[i];
p                 116 drivers/acpi/pci_link.c 			link->irq.triggering = p->triggering;
p                 117 drivers/acpi/pci_link.c 			link->irq.polarity = p->polarity;
p                 123 drivers/acpi/pci_link.c 			struct acpi_resource_extended_irq *p =
p                 125 drivers/acpi/pci_link.c 			if (!p || !p->interrupt_count) {
p                 131 drivers/acpi/pci_link.c 			     (i < p->interrupt_count
p                 133 drivers/acpi/pci_link.c 				if (!p->interrupts[i]) {
p                 136 drivers/acpi/pci_link.c 					       p->interrupts[i]);
p                 139 drivers/acpi/pci_link.c 				link->irq.possible[i] = p->interrupts[i];
p                 142 drivers/acpi/pci_link.c 			link->irq.triggering = p->triggering;
p                 143 drivers/acpi/pci_link.c 			link->irq.polarity = p->polarity;
p                 185 drivers/acpi/pci_link.c 			struct acpi_resource_irq *p = &resource->data.irq;
p                 186 drivers/acpi/pci_link.c 			if (!p || !p->interrupt_count) {
p                 195 drivers/acpi/pci_link.c 			*irq = p->interrupts[0];
p                 200 drivers/acpi/pci_link.c 			struct acpi_resource_extended_irq *p =
p                 202 drivers/acpi/pci_link.c 			if (!p || !p->interrupt_count) {
p                 211 drivers/acpi/pci_link.c 			*irq = p->interrupts[0];
p                1124 drivers/acpi/processor_idle.c 	struct acpi_lpi_state *p, *t = curr_level->entries;
p                1150 drivers/acpi/processor_idle.c 			p = prev_level->composite_states[i];
p                1151 drivers/acpi/processor_idle.c 			if (t->index <= p->enable_parent_state &&
p                1152 drivers/acpi/processor_idle.c 			    combine_lpi_states(p, t, flpi)) {
p                 394 drivers/acpi/resource.c 	int irq, p, t;
p                 411 drivers/acpi/resource.c 	if (legacy && !acpi_get_override_irq(gsi, &t, &p)) {
p                 413 drivers/acpi/resource.c 		u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
p                 417 drivers/acpi/resource.c 				   t ? "level" : "edge", p ? "low" : "high");
p                  64 drivers/acpi/tables.c 			struct acpi_madt_local_apic *p =
p                  67 drivers/acpi/tables.c 				 p->processor_id, p->id,
p                  68 drivers/acpi/tables.c 				 (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
p                  74 drivers/acpi/tables.c 			struct acpi_madt_local_x2apic *p =
p                  77 drivers/acpi/tables.c 				 p->local_apic_id, p->uid,
p                  78 drivers/acpi/tables.c 				 (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
p                  84 drivers/acpi/tables.c 			struct acpi_madt_io_apic *p =
p                  87 drivers/acpi/tables.c 				 p->id, p->address, p->global_irq_base);
p                  93 drivers/acpi/tables.c 			struct acpi_madt_interrupt_override *p =
p                  96 drivers/acpi/tables.c 				p->bus, p->source_irq, p->global_irq,
p                  97 drivers/acpi/tables.c 				mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
p                  98 drivers/acpi/tables.c 				mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2]);
p                  99 drivers/acpi/tables.c 			if (p->inti_flags  &
p                 102 drivers/acpi/tables.c 					p->inti_flags  &
p                 109 drivers/acpi/tables.c 			struct acpi_madt_nmi_source *p =
p                 112 drivers/acpi/tables.c 				mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
p                 113 drivers/acpi/tables.c 				mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
p                 114 drivers/acpi/tables.c 				p->global_irq);
p                 120 drivers/acpi/tables.c 			struct acpi_madt_local_apic_nmi *p =
p                 123 drivers/acpi/tables.c 				p->processor_id,
p                 124 drivers/acpi/tables.c 				mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK	],
p                 125 drivers/acpi/tables.c 				mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
p                 126 drivers/acpi/tables.c 				p->lint);
p                 133 drivers/acpi/tables.c 			struct acpi_madt_local_x2apic_nmi *p =
p                 136 drivers/acpi/tables.c 			polarity = p->inti_flags & ACPI_MADT_POLARITY_MASK;
p                 137 drivers/acpi/tables.c 			trigger = (p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
p                 140 drivers/acpi/tables.c 				p->uid,
p                 143 drivers/acpi/tables.c 				p->lint);
p                 149 drivers/acpi/tables.c 			struct acpi_madt_local_apic_override *p =
p                 152 drivers/acpi/tables.c 				(void *)(unsigned long)p->address);
p                 158 drivers/acpi/tables.c 			struct acpi_madt_io_sapic *p =
p                 161 drivers/acpi/tables.c 				 p->id, (void *)(unsigned long)p->address,
p                 162 drivers/acpi/tables.c 				 p->global_irq_base);
p                 168 drivers/acpi/tables.c 			struct acpi_madt_local_sapic *p =
p                 171 drivers/acpi/tables.c 				 p->processor_id, p->id, p->eid,
p                 172 drivers/acpi/tables.c 				 (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
p                 178 drivers/acpi/tables.c 			struct acpi_madt_interrupt_source *p =
p                 181 drivers/acpi/tables.c 				mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
p                 182 drivers/acpi/tables.c 				mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
p                 183 drivers/acpi/tables.c 				p->type, p->id, p->eid, p->io_sapic_vector,
p                 184 drivers/acpi/tables.c 				p->global_irq);
p                 190 drivers/acpi/tables.c 			struct acpi_madt_generic_interrupt *p =
p                 193 drivers/acpi/tables.c 				 p->uid, p->base_address,
p                 194 drivers/acpi/tables.c 				 p->arm_mpidr,
p                 195 drivers/acpi/tables.c 				 (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
p                 202 drivers/acpi/tables.c 			struct acpi_madt_generic_distributor *p =
p                 205 drivers/acpi/tables.c 				 p->gic_id, p->base_address,
p                 206 drivers/acpi/tables.c 				 p->global_irq_base);
p                  28 drivers/acpi/utils.c acpi_util_eval_error(acpi_handle h, acpi_string p, acpi_status s)
p                  35 drivers/acpi/utils.c 		(char *) prefix, p, acpi_format_exception(s)));
p                1108 drivers/android/binder.c 	struct rb_node **p = &proc->nodes.rb_node;
p                1117 drivers/android/binder.c 	while (*p) {
p                1119 drivers/android/binder.c 		parent = *p;
p                1123 drivers/android/binder.c 			p = &(*p)->rb_left;
p                1125 drivers/android/binder.c 			p = &(*p)->rb_right;
p                1139 drivers/android/binder.c 	rb_link_node(&node->rb_node, parent, p);
p                1441 drivers/android/binder.c 	struct rb_node **p = &proc->refs_by_node.rb_node;
p                1446 drivers/android/binder.c 	while (*p) {
p                1447 drivers/android/binder.c 		parent = *p;
p                1451 drivers/android/binder.c 			p = &(*p)->rb_left;
p                1453 drivers/android/binder.c 			p = &(*p)->rb_right;
p                1464 drivers/android/binder.c 	rb_link_node(&new_ref->rb_node_node, parent, p);
p                1475 drivers/android/binder.c 	p = &proc->refs_by_desc.rb_node;
p                1476 drivers/android/binder.c 	while (*p) {
p                1477 drivers/android/binder.c 		parent = *p;
p                1481 drivers/android/binder.c 			p = &(*p)->rb_left;
p                1483 drivers/android/binder.c 			p = &(*p)->rb_right;
p                1487 drivers/android/binder.c 	rb_link_node(&new_ref->rb_node_desc, parent, p);
p                4635 drivers/android/binder.c 	struct rb_node **p = &proc->threads.rb_node;
p                4637 drivers/android/binder.c 	while (*p) {
p                4638 drivers/android/binder.c 		parent = *p;
p                4642 drivers/android/binder.c 			p = &(*p)->rb_left;
p                4644 drivers/android/binder.c 			p = &(*p)->rb_right;
p                4657 drivers/android/binder.c 	rb_link_node(&thread->rb_node, parent, p);
p                  71 drivers/android/binder_alloc.c 	struct rb_node **p = &alloc->free_buffers.rb_node;
p                  85 drivers/android/binder_alloc.c 	while (*p) {
p                  86 drivers/android/binder_alloc.c 		parent = *p;
p                  93 drivers/android/binder_alloc.c 			p = &parent->rb_left;
p                  95 drivers/android/binder_alloc.c 			p = &parent->rb_right;
p                  97 drivers/android/binder_alloc.c 	rb_link_node(&new_buffer->rb_node, parent, p);
p                 104 drivers/android/binder_alloc.c 	struct rb_node **p = &alloc->allocated_buffers.rb_node;
p                 110 drivers/android/binder_alloc.c 	while (*p) {
p                 111 drivers/android/binder_alloc.c 		parent = *p;
p                 116 drivers/android/binder_alloc.c 			p = &parent->rb_left;
p                 118 drivers/android/binder_alloc.c 			p = &parent->rb_right;
p                 122 drivers/android/binder_alloc.c 	rb_link_node(&new_buffer->rb_node, parent, p);
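The binder.c/binder_alloc.c lines above all use the same red-black-tree insertion walk: descend via a struct rb_node **p while remembering the parent, so the new node can be hooked in with rb_link_node() at the exact child slot that was found. The standalone sketch below reproduces the double-pointer walk on a plain unbalanced BST (types and names are made up; the kernel's rb_link_node()/rb_insert_color() pair and rebalancing are deliberately left out).

        #include <stdlib.h>
        #include <stdio.h>

        struct node {
                long key;
                struct node *left, *right;
        };

        static struct node *insert(struct node **root, long key)
        {
                struct node **p = root, *parent = NULL, *n;

                while (*p) {
                        parent = *p;
                        if (key < parent->key)
                                p = &parent->left;
                        else if (key > parent->key)
                                p = &parent->right;
                        else
                                return parent;  /* already present */
                }
                n = calloc(1, sizeof(*n));
                if (!n)
                        return NULL;
                n->key = key;
                *p = n;         /* rb_link_node() would also record the parent */
                return n;
        }

        int main(void)
        {
                struct node *root = NULL;
                long keys[] = { 42, 17, 99, 17 };
                size_t i;

                for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
                        insert(&root, keys[i]);
                printf("root key: %ld\n", root->key);
                return 0;
        }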
p                 274 drivers/android/binderfs.c 	char *p, *stats;
p                 278 drivers/android/binderfs.c 	while ((p = strsep(&data, ",")) != NULL) {
p                 283 drivers/android/binderfs.c 		if (!*p)
p                 286 drivers/android/binderfs.c 		token = match_token(p, tokens, args);
p                 144 drivers/ata/ahci_brcm.c 	void __iomem *p;
p                 151 drivers/ata/ahci_brcm.c 	p = phyctrl + SATA_TOP_CTRL_PHY_CTRL_1;
p                 152 drivers/ata/ahci_brcm.c 	reg = brcm_sata_readreg(p);
p                 154 drivers/ata/ahci_brcm.c 	brcm_sata_writereg(reg, p);
p                 157 drivers/ata/ahci_brcm.c 	p = phyctrl + SATA_TOP_CTRL_PHY_CTRL_2;
p                 158 drivers/ata/ahci_brcm.c 	reg = brcm_sata_readreg(p);
p                 162 drivers/ata/ahci_brcm.c 	brcm_sata_writereg(reg, p);
p                 163 drivers/ata/ahci_brcm.c 	reg = brcm_sata_readreg(p);
p                 165 drivers/ata/ahci_brcm.c 	brcm_sata_writereg(reg, p);
p                 166 drivers/ata/ahci_brcm.c 	reg = brcm_sata_readreg(p);
p                 168 drivers/ata/ahci_brcm.c 	brcm_sata_writereg(reg, p);
p                 169 drivers/ata/ahci_brcm.c 	(void)brcm_sata_readreg(p);
p                 176 drivers/ata/ahci_brcm.c 	void __iomem *p;
p                 183 drivers/ata/ahci_brcm.c 	p = phyctrl + SATA_TOP_CTRL_PHY_CTRL_2;
p                 184 drivers/ata/ahci_brcm.c 	reg = brcm_sata_readreg(p);
p                 188 drivers/ata/ahci_brcm.c 	brcm_sata_writereg(reg, p);
p                 191 drivers/ata/ahci_brcm.c 	p = phyctrl + SATA_TOP_CTRL_PHY_CTRL_1;
p                 192 drivers/ata/ahci_brcm.c 	reg = brcm_sata_readreg(p);
p                 194 drivers/ata/ahci_brcm.c 	brcm_sata_writereg(reg, p);
p                1362 drivers/ata/ata_piix.c 	char *p = buf, *end = buf + sizeof(buf);
p                1372 drivers/ata/ata_piix.c 			p += scnprintf(p, end - p, " XX");
p                1376 drivers/ata/ata_piix.c 			p += scnprintf(p, end - p, " --");
p                1383 drivers/ata/ata_piix.c 			p += scnprintf(p, end - p, " IDE IDE");
p                1387 drivers/ata/ata_piix.c 			p += scnprintf(p, end - p, " P%d", map[i]);
p                1151 drivers/ata/libata-core.c 	unsigned char *p;
p                1155 drivers/ata/libata-core.c 	p = s + strnlen(s, len - 1);
p                1156 drivers/ata/libata-core.c 	while (p > s && p[-1] == ' ')
p                1157 drivers/ata/libata-core.c 		p--;
p                1158 drivers/ata/libata-core.c 	*p = '\0';
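The libata-core.c excerpt just above (around line 1151) trims trailing spaces from an ATA IDENTIFY string by walking back from strnlen() and dropping a terminator. A standalone userspace version of the same loop (trim_trailing_spaces is an invented name) for illustration:

        #include <stdio.h>
        #include <string.h>

        static void trim_trailing_spaces(char *s, size_t len)
        {
                char *p = s + strnlen(s, len - 1);  /* stay inside the buffer */

                while (p > s && p[-1] == ' ')
                        p--;
                *p = '\0';
        }

        int main(void)
        {
                char model[] = "SAMSUNG SSD 850   ";

                trim_trailing_spaces(model, sizeof(model));
                printf("[%s]\n", model);
                return 0;
        }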
p                3273 drivers/ata/libata-core.c 	struct ata_timing p;
p                3290 drivers/ata/libata-core.c 		memset(&p, 0, sizeof(p));
p                3294 drivers/ata/libata-core.c 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
p                3297 drivers/ata/libata-core.c 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
p                3299 drivers/ata/libata-core.c 			p.cycle = id[ATA_ID_EIDE_DMA_MIN];
p                3301 drivers/ata/libata-core.c 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
p                3317 drivers/ata/libata-core.c 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
p                3318 drivers/ata/libata-core.c 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
p                6954 drivers/ata/libata-core.c 	char *start = *cur, *p = *cur;
p                6960 drivers/ata/libata-core.c 	while (*p != '\0' && *p != ',')
p                6961 drivers/ata/libata-core.c 		p++;
p                6963 drivers/ata/libata-core.c 	if (*p == '\0')
p                6964 drivers/ata/libata-core.c 		*cur = p;
p                6966 drivers/ata/libata-core.c 		*cur = p + 1;
p                6968 drivers/ata/libata-core.c 	*p = '\0';
p                6971 drivers/ata/libata-core.c 	p = strchr(start, ':');
p                6972 drivers/ata/libata-core.c 	if (!p) {
p                6976 drivers/ata/libata-core.c 	*p = '\0';
p                6979 drivers/ata/libata-core.c 	val = strstrip(p + 1);
p                6982 drivers/ata/libata-core.c 	p = strchr(id, '.');
p                6983 drivers/ata/libata-core.c 	if (p) {
p                6984 drivers/ata/libata-core.c 		*p++ = '\0';
p                6985 drivers/ata/libata-core.c 		force_ent->device = simple_strtoul(p, &endp, 10);
p                6986 drivers/ata/libata-core.c 		if (p == endp || *endp != '\0') {
p                7033 drivers/ata/libata-core.c 	char *p, *cur, *next;
p                7036 drivers/ata/libata-core.c 	for (p = ata_force_param_buf; *p; p++)
p                7037 drivers/ata/libata-core.c 		if (*p == ',')
p                2585 drivers/ata/libata-scsi.c 	u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
p                2619 drivers/ata/libata-scsi.c 		p += 4 + (ebd ? 8 : 0);
p                2621 drivers/ata/libata-scsi.c 		p += 8 + (ebd ? 8 : 0);
p                2636 drivers/ata/libata-scsi.c 		p += ata_msense_rw_recovery(p, page_control == 1);
p                2640 drivers/ata/libata-scsi.c 		p += ata_msense_caching(args->id, p, page_control == 1);
p                2644 drivers/ata/libata-scsi.c 		p += ata_msense_control(args->dev, p, page_control == 1);
p                2648 drivers/ata/libata-scsi.c 		p += ata_msense_rw_recovery(p, page_control == 1);
p                2649 drivers/ata/libata-scsi.c 		p += ata_msense_caching(args->id, p, page_control == 1);
p                2650 drivers/ata/libata-scsi.c 		p += ata_msense_control(args->dev, p, page_control == 1);
p                2664 drivers/ata/libata-scsi.c 		rbuf[0] = p - rbuf - 1;
p                2671 drivers/ata/libata-scsi.c 		unsigned int output_len = p - rbuf - 2;
p                3981 drivers/ata/libata-scsi.c 	const u8 *p;
p                4020 drivers/ata/libata-scsi.c 	p = page_address(sg_page(scsi_sglist(scmd)));
p                4027 drivers/ata/libata-scsi.c 		bd_len = p[3];
p                4029 drivers/ata/libata-scsi.c 		bd_len = (p[6] << 8) + p[7];
p                4032 drivers/ata/libata-scsi.c 	p += hdr_len;
p                4042 drivers/ata/libata-scsi.c 	p += bd_len;
p                4047 drivers/ata/libata-scsi.c 	pg = p[0] & 0x3f;
p                4048 drivers/ata/libata-scsi.c 	if (p[0] & 0x40) {
p                4052 drivers/ata/libata-scsi.c 		spg = p[1];
p                4053 drivers/ata/libata-scsi.c 		pg_len = (p[2] << 8) | p[3];
p                4054 drivers/ata/libata-scsi.c 		p += 4;
p                4061 drivers/ata/libata-scsi.c 		pg_len = p[1];
p                4062 drivers/ata/libata-scsi.c 		p += 2;
p                4071 drivers/ata/libata-scsi.c 		fp = (p[0] & 0x40) ? 1 : 0;
p                4080 drivers/ata/libata-scsi.c 		if (ata_mselect_caching(qc, p, pg_len, &fp) < 0) {
p                4086 drivers/ata/libata-scsi.c 		if (ata_mselect_control(qc, p, pg_len, &fp) < 0) {
p                  14 drivers/ata/libata-trace.c libata_trace_parse_status(struct trace_seq *p, unsigned char status)
p                  16 drivers/ata/libata-trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  18 drivers/ata/libata-trace.c 	trace_seq_printf(p, "{ ");
p                  20 drivers/ata/libata-trace.c 		trace_seq_printf(p, "BUSY ");
p                  22 drivers/ata/libata-trace.c 		trace_seq_printf(p, "DRDY ");
p                  24 drivers/ata/libata-trace.c 		trace_seq_printf(p, "DF ");
p                  26 drivers/ata/libata-trace.c 		trace_seq_printf(p, "DSC ");
p                  28 drivers/ata/libata-trace.c 		trace_seq_printf(p, "DRQ ");
p                  30 drivers/ata/libata-trace.c 		trace_seq_printf(p, "CORR ");
p                  32 drivers/ata/libata-trace.c 		trace_seq_printf(p, "SENSE ");
p                  34 drivers/ata/libata-trace.c 		trace_seq_printf(p, "ERR ");
p                  35 drivers/ata/libata-trace.c 	trace_seq_putc(p, '}');
p                  36 drivers/ata/libata-trace.c 	trace_seq_putc(p, 0);
p                  42 drivers/ata/libata-trace.c libata_trace_parse_eh_action(struct trace_seq *p, unsigned int eh_action)
p                  44 drivers/ata/libata-trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  46 drivers/ata/libata-trace.c 	trace_seq_printf(p, "%x", eh_action);
p                  48 drivers/ata/libata-trace.c 		trace_seq_printf(p, "{ ");
p                  50 drivers/ata/libata-trace.c 			trace_seq_printf(p, "REVALIDATE ");
p                  52 drivers/ata/libata-trace.c 			trace_seq_printf(p, "RESET ");
p                  54 drivers/ata/libata-trace.c 			trace_seq_printf(p, "SOFTRESET ");
p                  56 drivers/ata/libata-trace.c 			trace_seq_printf(p, "HARDRESET ");
p                  58 drivers/ata/libata-trace.c 			trace_seq_printf(p, "ENABLE_LINK ");
p                  60 drivers/ata/libata-trace.c 			trace_seq_printf(p, "PARK ");
p                  61 drivers/ata/libata-trace.c 		trace_seq_putc(p, '}');
p                  63 drivers/ata/libata-trace.c 	trace_seq_putc(p, 0);
p                  69 drivers/ata/libata-trace.c libata_trace_parse_eh_err_mask(struct trace_seq *p, unsigned int eh_err_mask)
p                  71 drivers/ata/libata-trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  73 drivers/ata/libata-trace.c 	trace_seq_printf(p, "%x", eh_err_mask);
p                  75 drivers/ata/libata-trace.c 		trace_seq_printf(p, "{ ");
p                  77 drivers/ata/libata-trace.c 			trace_seq_printf(p, "DEV ");
p                  79 drivers/ata/libata-trace.c 			trace_seq_printf(p, "HSM ");
p                  81 drivers/ata/libata-trace.c 			trace_seq_printf(p, "TIMEOUT ");
p                  83 drivers/ata/libata-trace.c 			trace_seq_printf(p, "MEDIA ");
p                  85 drivers/ata/libata-trace.c 			trace_seq_printf(p, "ATA_BUS ");
p                  87 drivers/ata/libata-trace.c 			trace_seq_printf(p, "HOST_BUS ");
p                  89 drivers/ata/libata-trace.c 			trace_seq_printf(p, "SYSTEM ");
p                  91 drivers/ata/libata-trace.c 			trace_seq_printf(p, "INVALID ");
p                  93 drivers/ata/libata-trace.c 			trace_seq_printf(p, "OTHER ");
p                  95 drivers/ata/libata-trace.c 			trace_seq_printf(p, "NODEV_HINT ");
p                  97 drivers/ata/libata-trace.c 			trace_seq_printf(p, "NCQ ");
p                  98 drivers/ata/libata-trace.c 		trace_seq_putc(p, '}');
p                 100 drivers/ata/libata-trace.c 	trace_seq_putc(p, 0);
p                 106 drivers/ata/libata-trace.c libata_trace_parse_qc_flags(struct trace_seq *p, unsigned int qc_flags)
p                 108 drivers/ata/libata-trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 110 drivers/ata/libata-trace.c 	trace_seq_printf(p, "%x", qc_flags);
p                 112 drivers/ata/libata-trace.c 		trace_seq_printf(p, "{ ");
p                 114 drivers/ata/libata-trace.c 			trace_seq_printf(p, "ACTIVE ");
p                 116 drivers/ata/libata-trace.c 			trace_seq_printf(p, "DMAMAP ");
p                 118 drivers/ata/libata-trace.c 			trace_seq_printf(p, "IO ");
p                 120 drivers/ata/libata-trace.c 			trace_seq_printf(p, "RESULT_TF ");
p                 122 drivers/ata/libata-trace.c 			trace_seq_printf(p, "CLEAR_EXCL ");
p                 124 drivers/ata/libata-trace.c 			trace_seq_printf(p, "QUIET ");
p                 126 drivers/ata/libata-trace.c 			trace_seq_printf(p, "RETRY ");
p                 128 drivers/ata/libata-trace.c 			trace_seq_printf(p, "FAILED ");
p                 130 drivers/ata/libata-trace.c 			trace_seq_printf(p, "SENSE_VALID ");
p                 132 drivers/ata/libata-trace.c 			trace_seq_printf(p, "EH_SCHEDULED ");
p                 133 drivers/ata/libata-trace.c 		trace_seq_putc(p, '}');
p                 135 drivers/ata/libata-trace.c 	trace_seq_putc(p, 0);
p                 141 drivers/ata/libata-trace.c libata_trace_parse_subcmd(struct trace_seq *p, unsigned char cmd,
p                 144 drivers/ata/libata-trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 150 drivers/ata/libata-trace.c 			trace_seq_printf(p, " READ_LOG_DMA_EXT");
p                 153 drivers/ata/libata-trace.c 			trace_seq_printf(p, " ZAC_MGMT_IN");
p                 160 drivers/ata/libata-trace.c 			trace_seq_printf(p, " WRITE_LOG_DMA_EXT");
p                 163 drivers/ata/libata-trace.c 			trace_seq_printf(p, " DATASET_MANAGEMENT");
p                 170 drivers/ata/libata-trace.c 			trace_seq_printf(p, " ABORT_QUEUE");
p                 173 drivers/ata/libata-trace.c 			trace_seq_printf(p, " SET_FEATURES");
p                 176 drivers/ata/libata-trace.c 			trace_seq_printf(p, " ZERO_EXT");
p                 179 drivers/ata/libata-trace.c 			trace_seq_printf(p, " ZAC_MGMT_OUT");
p                 186 drivers/ata/libata-trace.c 			trace_seq_printf(p, " REPORT_ZONES");
p                 193 drivers/ata/libata-trace.c 			trace_seq_printf(p, " CLOSE_ZONE");
p                 196 drivers/ata/libata-trace.c 			trace_seq_printf(p, " FINISH_ZONE");
p                 199 drivers/ata/libata-trace.c 			trace_seq_printf(p, " OPEN_ZONE");
p                 202 drivers/ata/libata-trace.c 			trace_seq_printf(p, " RESET_WRITE_POINTER");
p                 207 drivers/ata/libata-trace.c 	trace_seq_putc(p, 0);
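The libata-trace.c helpers above decode a status byte or flag word into a "{ FLAG FLAG }" string inside a trace_seq. The sketch below shows the same decode pattern with snprintf() into a fixed buffer standing in for trace_seq_printf()/trace_seq_putc(); the bit values are the standard ATA status bits, and parse_status is an invented name for the example.

        #include <stdio.h>

        #define ATA_BUSY 0x80
        #define ATA_DRDY 0x40
        #define ATA_DRQ  0x08
        #define ATA_ERR  0x01

        static const char *parse_status(unsigned char status, char *buf, size_t size)
        {
                size_t off = 0;

                off += snprintf(buf + off, size - off, "{ ");
                if (status & ATA_BUSY)
                        off += snprintf(buf + off, size - off, "BUSY ");
                if (status & ATA_DRDY)
                        off += snprintf(buf + off, size - off, "DRDY ");
                if (status & ATA_DRQ)
                        off += snprintf(buf + off, size - off, "DRQ ");
                if (status & ATA_ERR)
                        off += snprintf(buf + off, size - off, "ERR ");
                snprintf(buf + off, size - off, "}");
                return buf;
        }

        int main(void)
        {
                char buf[64];

                puts(parse_status(0x48, buf, sizeof(buf))); /* { DRDY DRQ } */
                return 0;
        }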
p                 215 drivers/ata/pata_ali.c 		struct ata_timing p;
p                 216 drivers/ata/pata_ali.c 		ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
p                 217 drivers/ata/pata_ali.c 		ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
p                 219 drivers/ata/pata_ali.c 			ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
p                 220 drivers/ata/pata_ali.c 			ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
p                 264 drivers/ata/pata_ali.c 			struct ata_timing p;
p                 265 drivers/ata/pata_ali.c 			ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
p                 266 drivers/ata/pata_ali.c 			ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
p                 268 drivers/ata/pata_ali.c 				ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
p                 269 drivers/ata/pata_ali.c 				ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
p                 207 drivers/ata/pata_atp867x.c 	struct ata_timing t, p;
p                 216 drivers/ata/pata_atp867x.c 		ata_timing_compute(peer, peer->pio_mode, &p, T, UT);
p                 217 drivers/ata/pata_atp867x.c 		ata_timing_merge(&p, &t, &t, ATA_TIMING_8BIT);
p                  71 drivers/ata/pata_cmd640.c 		struct ata_timing p;
p                  72 drivers/ata/pata_cmd640.c 		ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
p                  73 drivers/ata/pata_cmd640.c 		ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP);
p                1022 drivers/ata/pata_legacy.c static void __init legacy_check_special_cases(struct pci_dev *p, int *primary,
p                1026 drivers/ata/pata_legacy.c 	if (p->vendor == 0x1078 && p->device == 0x0000) {
p                1031 drivers/ata/pata_legacy.c 	if (p->vendor == 0x1078 && p->device == 0x0002) {
p                1036 drivers/ata/pata_legacy.c 	if (p->vendor == 0x8086 && p->device == 0x1234) {
p                1038 drivers/ata/pata_legacy.c 		pci_read_config_word(p, 0x6C, &r);
p                1179 drivers/ata/pata_legacy.c 	struct pci_dev *p = NULL;
p                1181 drivers/ata/pata_legacy.c 	for_each_pci_dev(p) {
p                1187 drivers/ata/pata_legacy.c 			if (pci_resource_start(p, r) == 0x1f0)
p                1189 drivers/ata/pata_legacy.c 			if (pci_resource_start(p, r) == 0x170)
p                1193 drivers/ata/pata_legacy.c 		legacy_check_special_cases(p, &primary, &secondary);
p                 190 drivers/ata/pata_pcmcia.c 	int is_kme = 0, ret = -ENOMEM, p;
p                 252 drivers/ata/pata_pcmcia.c 	for (p = 0; p < n_ports; p++) {
p                 253 drivers/ata/pata_pcmcia.c 		ap = host->ports[p];
p                 258 drivers/ata/pata_pcmcia.c 		ap->ioaddr.cmd_addr = io_addr + 0x10 * p;
p                 259 drivers/ata/pata_pcmcia.c 		ap->ioaddr.altstatus_addr = ctl_addr + 0x10 * p;
p                 260 drivers/ata/pata_pcmcia.c 		ap->ioaddr.ctl_addr = ctl_addr + 0x10 * p;
p                 171 drivers/ata/pata_serverworks.c 	const char *p;
p                 182 drivers/ata/pata_serverworks.c 	for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) {
p                 183 drivers/ata/pata_serverworks.c 		if (!strcmp(p, model_num))
p                 250 drivers/ata/pata_via.c 	struct ata_timing t, p;
p                 272 drivers/ata/pata_via.c 			ata_timing_compute(peer, peer->pio_mode, &p, T, UT);
p                 273 drivers/ata/pata_via.c 			ata_timing_merge(&p, &t, &t, ATA_TIMING_8BIT);
p                 168 drivers/ata/sata_dwc_460ex.c #define HSDEV_FROM_HSDEVP(p)	((struct sata_dwc_device *)(p)->hsdev)
p                 294 drivers/ata/sata_gemini.c 	struct pinctrl *p;
p                 298 drivers/ata/sata_gemini.c 	p = devm_pinctrl_get(dev);
p                 299 drivers/ata/sata_gemini.c 	if (IS_ERR(p))
p                 300 drivers/ata/sata_gemini.c 		return PTR_ERR(p);
p                 302 drivers/ata/sata_gemini.c 	ide_state = pinctrl_lookup_state(p, "ide");
p                 306 drivers/ata/sata_gemini.c 	ret = pinctrl_select_state(p, ide_state);
p                1287 drivers/ata/sata_mv.c 	int start_port, num_ports, p, start_hc, num_hcs, hc;
p                1315 drivers/ata/sata_mv.c 	for (p = start_port; p < start_port + num_ports; p++) {
p                1316 drivers/ata/sata_mv.c 		port_base = mv_port_base(mmio_base, p);
p                1317 drivers/ata/sata_mv.c 		DPRINTK("EDMA regs (port %i):\n", p);
p                1319 drivers/ata/sata_mv.c 		DPRINTK("SATA regs (port %i):\n", p);
p                2893 drivers/ata/sata_mv.c 		unsigned int p, shift, hardport, port_cause;
p                2925 drivers/ata/sata_mv.c 			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
p                2926 drivers/ata/sata_mv.c 				if ((port + p) >= hpriv->n_ports)
p                2928 drivers/ata/sata_mv.c 				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
p                2930 drivers/ata/sata_mv.c 					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
p                1397 drivers/atm/firestream.c 	struct FS_QENTRY *p;
p                1404 drivers/atm/firestream.c 	p = aligned_kmalloc (sz, GFP_KERNEL, 0x10);
p                1405 drivers/atm/firestream.c 	fs_dprintk (FS_DEBUG_ALLOC, "Alloc queue: %p(%d)\n", p, sz);
p                1407 drivers/atm/firestream.c 	if (!p) return 0;
p                1409 drivers/atm/firestream.c 	write_fs (dev, Q_SA(queue), virt_to_bus(p));
p                1410 drivers/atm/firestream.c 	write_fs (dev, Q_EA(queue), virt_to_bus(p+nentries-1));
p                1411 drivers/atm/firestream.c 	write_fs (dev, Q_WP(queue), virt_to_bus(p));
p                1412 drivers/atm/firestream.c 	write_fs (dev, Q_RP(queue), virt_to_bus(p));
p                1420 drivers/atm/firestream.c 	txq->sa = p;
p                1421 drivers/atm/firestream.c 	txq->ea = p;
p                2670 drivers/atm/idt77252.c 		char *p;
p                2684 drivers/atm/idt77252.c 		p = page;
p                2685 drivers/atm/idt77252.c 		p += sprintf(p, "  %4u: %u.%u: ", i, vcc->vpi, vcc->vci);
p                2689 drivers/atm/idt77252.c 			p += sprintf(p, " %08x", read_sram(card, tct + i));
p                2690 drivers/atm/idt77252.c 		p += sprintf(p, "\n");
p                2691 drivers/atm/idt77252.c 		return p - page;
p                 115 drivers/atm/nicstar.c #define scq_virt_to_bus(scq, p) \
p                 116 drivers/atm/nicstar.c 		(scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org))
p                  94 drivers/auxdisplay/charlcd.c #define charlcd_to_priv(p)	container_of(p, struct charlcd_priv, lcd)
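The charlcd_to_priv() macro above is a thin wrapper around container_of(): given a pointer to an embedded member, recover the address of the structure that contains it. A minimal userspace definition is just offsetof() arithmetic on a char pointer (the kernel's version adds type checking); the struct names below are made up for the example.

        #include <stddef.h>
        #include <stdio.h>

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct lcd_device { int rows, cols; };

        struct charlcd_priv_like {
                unsigned long flags;
                struct lcd_device lcd;  /* embedded member handed to callers */
        };

        int main(void)
        {
                struct charlcd_priv_like priv = { .flags = 1, .lcd = { 2, 16 } };
                struct lcd_device *lcd = &priv.lcd;
                struct charlcd_priv_like *back =
                        container_of(lcd, struct charlcd_priv_like, lcd);

                printf("recovered flags: %lu\n", back->flags);
                return 0;
        }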
p                  44 drivers/base/bus.c 		kset_get(&bus->p->subsys);
p                  53 drivers/base/bus.c 		kset_put(&bus->p->subsys);
p                 134 drivers/base/bus.c 		error = sysfs_create_file(&bus->p->subsys.kobj, &attr->attr);
p                 145 drivers/base/bus.c 		sysfs_remove_file(&bus->p->subsys.kobj, &attr->attr);
p                 157 drivers/base/bus.c 	bus->p = NULL;
p                 231 drivers/base/bus.c 	return sprintf(buf, "%d\n", bus->p->drivers_autoprobe);
p                 238 drivers/base/bus.c 		bus->p->drivers_autoprobe = 0;
p                 240 drivers/base/bus.c 		bus->p->drivers_autoprobe = 1;
p                 298 drivers/base/bus.c 	if (!bus || !bus->p)
p                 301 drivers/base/bus.c 	klist_iter_init_node(&bus->p->klist_devices, &i,
p                 302 drivers/base/bus.c 			     (start ? &start->p->knode_bus : NULL));
p                 332 drivers/base/bus.c 	if (!bus || !bus->p)
p                 335 drivers/base/bus.c 	klist_iter_init_node(&bus->p->klist_devices, &i,
p                 336 drivers/base/bus.c 			     (start ? &start->p->knode_bus : NULL));
p                 365 drivers/base/bus.c 		klist_iter_init_node(&subsys->p->klist_devices, &i, &hint->p->knode_bus);
p                 374 drivers/base/bus.c 	klist_iter_init_node(&subsys->p->klist_devices, &i, NULL);
p                 427 drivers/base/bus.c 	klist_iter_init_node(&bus->p->klist_drivers, &i,
p                 428 drivers/base/bus.c 			     start ? &start->p->knode_bus : NULL);
p                 454 drivers/base/bus.c 		error = sysfs_create_link(&bus->p->devices_kset->kobj,
p                 459 drivers/base/bus.c 				&dev->bus->p->subsys.kobj, "subsystem");
p                 462 drivers/base/bus.c 		klist_add_tail(&dev->p->knode_bus, &bus->p->klist_devices);
p                 467 drivers/base/bus.c 	sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev));
p                 489 drivers/base/bus.c 	if (bus->p->drivers_autoprobe)
p                 492 drivers/base/bus.c 	mutex_lock(&bus->p->mutex);
p                 493 drivers/base/bus.c 	list_for_each_entry(sif, &bus->p->interfaces, node)
p                 496 drivers/base/bus.c 	mutex_unlock(&bus->p->mutex);
p                 517 drivers/base/bus.c 	mutex_lock(&bus->p->mutex);
p                 518 drivers/base/bus.c 	list_for_each_entry(sif, &bus->p->interfaces, node)
p                 521 drivers/base/bus.c 	mutex_unlock(&bus->p->mutex);
p                 524 drivers/base/bus.c 	sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
p                 527 drivers/base/bus.c 	if (klist_node_attached(&dev->p->knode_bus))
p                 528 drivers/base/bus.c 		klist_del(&dev->p->knode_bus);
p                 584 drivers/base/bus.c 	rc = kobject_synth_uevent(&drv->p->kobj, buf, count);
p                 612 drivers/base/bus.c 	drv->p = priv;
p                 613 drivers/base/bus.c 	priv->kobj.kset = bus->p->drivers_kset;
p                 619 drivers/base/bus.c 	klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
p                 620 drivers/base/bus.c 	if (drv->bus->p->drivers_autoprobe) {
p                 653 drivers/base/bus.c 	drv->p = NULL;
p                 676 drivers/base/bus.c 	klist_remove(&drv->p->knode_bus);
p                 680 drivers/base/bus.c 	kobject_put(&drv->p->kobj);
p                 751 drivers/base/bus.c 	return sysfs_create_groups(&bus->p->subsys.kobj, groups);
p                 757 drivers/base/bus.c 	sysfs_remove_groups(&bus->p->subsys.kobj, groups);
p                 781 drivers/base/bus.c 	rc = kobject_synth_uevent(&bus->p->subsys.kobj, buf, count);
p                 812 drivers/base/bus.c 	bus->p = priv;
p                 865 drivers/base/bus.c 	kset_unregister(bus->p->drivers_kset);
p                 867 drivers/base/bus.c 	kset_unregister(bus->p->devices_kset);
p                 871 drivers/base/bus.c 	kset_unregister(&bus->p->subsys);
p                 873 drivers/base/bus.c 	kfree(bus->p);
p                 874 drivers/base/bus.c 	bus->p = NULL;
p                 893 drivers/base/bus.c 	kset_unregister(bus->p->drivers_kset);
p                 894 drivers/base/bus.c 	kset_unregister(bus->p->devices_kset);
p                 896 drivers/base/bus.c 	kset_unregister(&bus->p->subsys);
p                 902 drivers/base/bus.c 	return blocking_notifier_chain_register(&bus->p->bus_notifier, nb);
p                 908 drivers/base/bus.c 	return blocking_notifier_chain_unregister(&bus->p->bus_notifier, nb);
p                 914 drivers/base/bus.c 	return &bus->p->subsys;
p                 920 drivers/base/bus.c 	return &bus->p->klist_devices;
p                 943 drivers/base/bus.c 			list_move_tail(&a->p->knode_bus.n_node,
p                 944 drivers/base/bus.c 				       &b->p->knode_bus.n_node);
p                 948 drivers/base/bus.c 	list_move_tail(&a->p->knode_bus.n_node, list);
p                 992 drivers/base/bus.c 		start_knode = &start->p->knode_bus;
p                 993 drivers/base/bus.c 	klist_iter_init_node(&subsys->p->klist_devices, &iter->ki, start_knode);
p                1052 drivers/base/bus.c 	mutex_lock(&subsys->p->mutex);
p                1053 drivers/base/bus.c 	list_add_tail(&sif->node, &subsys->p->interfaces);
p                1060 drivers/base/bus.c 	mutex_unlock(&subsys->p->mutex);
p                1077 drivers/base/bus.c 	mutex_lock(&subsys->p->mutex);
p                1085 drivers/base/bus.c 	mutex_unlock(&subsys->p->mutex);
p                  93 drivers/base/class.c 		error = sysfs_create_file_ns(&cls->p->subsys.kobj,
p                 104 drivers/base/class.c 		sysfs_remove_file_ns(&cls->p->subsys.kobj, &attr->attr, ns);
p                 110 drivers/base/class.c 		kset_get(&cls->p->subsys);
p                 117 drivers/base/class.c 		kset_put(&cls->p->subsys);
p                 122 drivers/base/class.c 	struct device_private *p = to_device_private_class(n);
p                 123 drivers/base/class.c 	return p->device;
p                 143 drivers/base/class.c 	return sysfs_create_groups(&cls->p->subsys.kobj, groups);
p                 149 drivers/base/class.c 	return sysfs_remove_groups(&cls->p->subsys.kobj, groups);
p                 185 drivers/base/class.c 	cls->p = cp;
p                 202 drivers/base/class.c 	kset_unregister(&cls->p->subsys);
p                 286 drivers/base/class.c 		start_knode = &start->p->knode_class;
p                 287 drivers/base/class.c 	klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode);
p                 360 drivers/base/class.c 	if (!class->p) {
p                 407 drivers/base/class.c 	if (!class->p) {
p                 439 drivers/base/class.c 	mutex_lock(&parent->p->mutex);
p                 440 drivers/base/class.c 	list_add_tail(&class_intf->node, &parent->p->interfaces);
p                 447 drivers/base/class.c 	mutex_unlock(&parent->p->mutex);
p                 461 drivers/base/class.c 	mutex_lock(&parent->p->mutex);
p                 469 drivers/base/class.c 	mutex_unlock(&parent->p->mutex);
p                1083 drivers/base/core.c 	struct device_private *p = dev->p;
p                1105 drivers/base/core.c 	kfree(p);
p                1663 drivers/base/core.c 	struct device_private *p = to_device_private_parent(n);
p                1664 drivers/base/core.c 	struct device *dev = p->device;
p                1671 drivers/base/core.c 	struct device_private *p = to_device_private_parent(n);
p                1672 drivers/base/core.c 	struct device *dev = p->device;
p                1770 drivers/base/core.c 	dir->kobj.kset = &class->p->glue_dirs;
p                1795 drivers/base/core.c 			return &block_class.p->subsys.kobj;
p                1814 drivers/base/core.c 		spin_lock(&dev->class->p->glue_dirs.list_lock);
p                1815 drivers/base/core.c 		list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
p                1820 drivers/base/core.c 		spin_unlock(&dev->class->p->glue_dirs.list_lock);
p                1846 drivers/base/core.c 	    kobj->kset != &dev->class->p->glue_dirs)
p                1941 drivers/base/core.c 				  &dev->class->p->subsys.kobj,
p                1960 drivers/base/core.c 	error = sysfs_create_link(&dev->class->p->subsys.kobj,
p                1992 drivers/base/core.c 	sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
p                2062 drivers/base/core.c 	dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
p                2063 drivers/base/core.c 	if (!dev->p)
p                2065 drivers/base/core.c 	dev->p->device = dev;
p                2066 drivers/base/core.c 	klist_init(&dev->p->klist_children, klist_children_get,
p                2068 drivers/base/core.c 	INIT_LIST_HEAD(&dev->p->deferred_probe);
p                2111 drivers/base/core.c 	if (!dev->p) {
p                2198 drivers/base/core.c 		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
p                2204 drivers/base/core.c 		klist_add_tail(&dev->p->knode_parent,
p                2205 drivers/base/core.c 			       &parent->p->klist_children);
p                2208 drivers/base/core.c 		mutex_lock(&dev->class->p->mutex);
p                2210 drivers/base/core.c 		klist_add_tail(&dev->p->knode_class,
p                2211 drivers/base/core.c 			       &dev->class->p->klist_devices);
p                2215 drivers/base/core.c 				    &dev->class->p->interfaces, node)
p                2218 drivers/base/core.c 		mutex_unlock(&dev->class->p->mutex);
p                2248 drivers/base/core.c 	kfree(dev->p);
p                2249 drivers/base/core.c 	dev->p = NULL;
p                2316 drivers/base/core.c 	if (dev->p->dead)
p                2318 drivers/base/core.c 	dev->p->dead = true;
p                2350 drivers/base/core.c 		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
p                2355 drivers/base/core.c 		klist_del(&dev->p->knode_parent);
p                2364 drivers/base/core.c 		mutex_lock(&dev->class->p->mutex);
p                2367 drivers/base/core.c 				    &dev->class->p->interfaces, node)
p                2371 drivers/base/core.c 		klist_del(&dev->p->knode_class);
p                2372 drivers/base/core.c 		mutex_unlock(&dev->class->p->mutex);
p                2384 drivers/base/core.c 		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
p                2417 drivers/base/core.c 	struct device_private *p;
p                2420 drivers/base/core.c 		p = to_device_private_parent(n);
p                2421 drivers/base/core.c 		dev = p->device;
p                2430 drivers/base/core.c 	struct device_private *p;
p                2433 drivers/base/core.c 		p = to_device_private_parent(n);
p                2434 drivers/base/core.c 		dev = p->device;
p                2503 drivers/base/core.c 	if (!parent->p)
p                2506 drivers/base/core.c 	klist_iter_init(&parent->p->klist_children, &i);
p                2533 drivers/base/core.c 	if (!parent->p)
p                2536 drivers/base/core.c 	klist_iter_init(&parent->p->klist_children, &i);
p                2570 drivers/base/core.c 	klist_iter_init(&parent->p->klist_children, &i);
p                2598 drivers/base/core.c 	klist_iter_init(&parent->p->klist_children, &i);
p                3050 drivers/base/core.c 		error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
p                3121 drivers/base/core.c 		klist_remove(&dev->p->knode_parent);
p                3123 drivers/base/core.c 		klist_add_tail(&dev->p->knode_parent,
p                3124 drivers/base/core.c 			       &new_parent->p->klist_children);
p                3135 drivers/base/core.c 					klist_remove(&dev->p->knode_parent);
p                3138 drivers/base/core.c 					klist_add_tail(&dev->p->knode_parent,
p                3139 drivers/base/core.c 						       &old_parent->p->klist_children);
p                  93 drivers/base/dd.c 					typeof(*dev->p), deferred_probe);
p                 126 drivers/base/dd.c 	if (list_empty(&dev->p->deferred_probe)) {
p                 128 drivers/base/dd.c 		list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
p                 136 drivers/base/dd.c 	if (!list_empty(&dev->p->deferred_probe)) {
p                 138 drivers/base/dd.c 		list_del_init(&dev->p->deferred_probe);
p                 303 drivers/base/dd.c 	struct device_private *private, *p;
p                 309 drivers/base/dd.c 	list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
p                 364 drivers/base/dd.c 	return dev->p && klist_node_attached(&dev->p->knode_driver);
p                 378 drivers/base/dd.c 	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
p                 391 drivers/base/dd.c 		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
p                 413 drivers/base/dd.c 		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
p                 416 drivers/base/dd.c 	ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
p                 421 drivers/base/dd.c 	ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
p                 433 drivers/base/dd.c 	sysfs_remove_link(&dev->driver->p->kobj,
p                 447 drivers/base/dd.c 		sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
p                 474 drivers/base/dd.c 		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
p                 604 drivers/base/dd.c 		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
p                 851 drivers/base/dd.c 	if (dev->p->dead || dev->driver)
p                 997 drivers/base/dd.c 	if (!dev->p->dead && !dev->driver)
p                1013 drivers/base/dd.c 	drv = dev->p->async_driver;
p                1019 drivers/base/dd.c 	if (!dev->p->dead && !dev->driver)
p                1068 drivers/base/dd.c 			dev->p->async_driver = drv;
p                1126 drivers/base/dd.c 			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
p                1150 drivers/base/dd.c 		klist_remove(&dev->p->knode_driver);
p                1153 drivers/base/dd.c 			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
p                1220 drivers/base/dd.c 		spin_lock(&drv->p->klist_devices.k_lock);
p                1221 drivers/base/dd.c 		if (list_empty(&drv->p->klist_devices.k_list)) {
p                1222 drivers/base/dd.c 			spin_unlock(&drv->p->klist_devices.k_lock);
p                1225 drivers/base/dd.c 		dev_prv = list_entry(drv->p->klist_devices.k_list.prev,
p                1230 drivers/base/dd.c 		spin_unlock(&drv->p->klist_devices.k_lock);
p                 698 drivers/base/devres.c static int devm_action_match(struct device *dev, void *res, void *p)
p                 701 drivers/base/devres.c 	struct action_devres *target = p;
p                 892 drivers/base/devres.c 	char *p;
p                 899 drivers/base/devres.c 	p = devm_kmalloc(dev, len+1, gfp);
p                 900 drivers/base/devres.c 	if (!p)
p                 903 drivers/base/devres.c 	vsnprintf(p, len+1, fmt, ap);
p                 905 drivers/base/devres.c 	return p;
p                 923 drivers/base/devres.c 	char *p;
p                 926 drivers/base/devres.c 	p = devm_kvasprintf(dev, gfp, fmt, ap);
p                 929 drivers/base/devres.c 	return p;
p                 940 drivers/base/devres.c void devm_kfree(struct device *dev, const void *p)
p                 948 drivers/base/devres.c 	if (unlikely(is_kernel_rodata((unsigned long)p)))
p                 952 drivers/base/devres.c 			    devm_kmalloc_match, (void *)p);
p                 968 drivers/base/devres.c 	void *p;
p                 970 drivers/base/devres.c 	p = devm_kmalloc(dev, len, gfp);
p                 971 drivers/base/devres.c 	if (p)
p                 972 drivers/base/devres.c 		memcpy(p, src, len);
p                 974 drivers/base/devres.c 	return p;
p                 983 drivers/base/devres.c static int devm_pages_match(struct device *dev, void *res, void *p)
p                 986 drivers/base/devres.c 	struct pages_devres *target = p;
p                1056 drivers/base/devres.c 	void __percpu *p;
p                1058 drivers/base/devres.c 	p = *(void __percpu **)pdata;
p                1059 drivers/base/devres.c 	free_percpu(p);
p                1062 drivers/base/devres.c static int devm_percpu_match(struct device *dev, void *data, void *p)
p                1066 drivers/base/devres.c 	return *(void **)devr->data == p;
p                1084 drivers/base/devres.c 	void *p;
p                1091 drivers/base/devres.c 	p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
p                1092 drivers/base/devres.c 	if (!p) {
p                1097 drivers/base/devres.c 	*(void __percpu **)p = pcpu;
p                1099 drivers/base/devres.c 	devres_add(dev, p);
p                 325 drivers/base/devtmpfs.c 		struct path p = {.mnt = parent.mnt, .dentry = dentry};
p                 326 drivers/base/devtmpfs.c 		err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
p                 391 drivers/base/devtmpfs.c static int devtmpfsd(void *p)
p                 393 drivers/base/devtmpfs.c 	int *err = p;
p                  51 drivers/base/driver.c 	klist_iter_init_node(&drv->p->klist_devices, &i,
p                  52 drivers/base/driver.c 			     start ? &start->p->knode_driver : NULL);
p                  82 drivers/base/driver.c 	if (!drv || !drv->p)
p                  85 drivers/base/driver.c 	klist_iter_init_node(&drv->p->klist_devices, &i,
p                  86 drivers/base/driver.c 			     (start ? &start->p->knode_driver : NULL));
p                 106 drivers/base/driver.c 		error = sysfs_create_file(&drv->p->kobj, &attr->attr);
p                 122 drivers/base/driver.c 		sysfs_remove_file(&drv->p->kobj, &attr->attr);
p                 129 drivers/base/driver.c 	return sysfs_create_groups(&drv->p->kobj, groups);
p                 135 drivers/base/driver.c 	sysfs_remove_groups(&drv->p->kobj, groups);
p                 151 drivers/base/driver.c 	if (!drv->bus->p) {
p                 178 drivers/base/driver.c 	kobject_uevent(&drv->p->kobj, KOBJ_ADD);
p                 192 drivers/base/driver.c 	if (!drv || !drv->p) {
p                 215 drivers/base/driver.c 	struct kobject *k = kset_find_obj(bus->p->drivers_kset, name);
p                  39 drivers/base/map.c 	struct probe *p;
p                  44 drivers/base/map.c 	p = kmalloc_array(n, sizeof(struct probe), GFP_KERNEL);
p                  45 drivers/base/map.c 	if (p == NULL)
p                  48 drivers/base/map.c 	for (i = 0; i < n; i++, p++) {
p                  49 drivers/base/map.c 		p->owner = module;
p                  50 drivers/base/map.c 		p->get = probe;
p                  51 drivers/base/map.c 		p->lock = lock;
p                  52 drivers/base/map.c 		p->dev = dev;
p                  53 drivers/base/map.c 		p->range = range;
p                  54 drivers/base/map.c 		p->data = data;
p                  57 drivers/base/map.c 	for (i = 0, p -= n; i < n; i++, p++, index++) {
p                  61 drivers/base/map.c 		p->next = *s;
p                  62 drivers/base/map.c 		*s = p;
p                  82 drivers/base/map.c 			struct probe *p = *s;
p                  83 drivers/base/map.c 			if (p->dev == dev && p->range == range) {
p                  84 drivers/base/map.c 				*s = p->next;
p                  86 drivers/base/map.c 					found = p;
p                  98 drivers/base/map.c 	struct probe *p;
p                 103 drivers/base/map.c 	for (p = domain->probes[MAJOR(dev) % 255]; p; p = p->next) {
p                 108 drivers/base/map.c 		if (p->dev > dev || p->dev + p->range - 1 < dev)
p                 110 drivers/base/map.c 		if (p->range - 1 >= best)
p                 112 drivers/base/map.c 		if (!try_module_get(p->owner))
p                 114 drivers/base/map.c 		owner = p->owner;
p                 115 drivers/base/map.c 		data = p->data;
p                 116 drivers/base/map.c 		probe = p->get;
p                 117 drivers/base/map.c 		best = p->range - 1;
p                 118 drivers/base/map.c 		*index = dev - p->dev;
p                 119 drivers/base/map.c 		if (p->lock && p->lock(dev, data) < 0) {
p                 137 drivers/base/map.c 	struct kobj_map *p = kmalloc(sizeof(struct kobj_map), GFP_KERNEL);
p                 141 drivers/base/map.c 	if ((p == NULL) || (base == NULL)) {
p                 142 drivers/base/map.c 		kfree(p);
p                 151 drivers/base/map.c 		p->probes[i] = base;
p                 152 drivers/base/map.c 	p->lock = lock;
p                 153 drivers/base/map.c 	return p;
p                  52 drivers/base/module.c 			drv->p->mkobj = mk;
p                  62 drivers/base/module.c 	no_warn = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module");
p                  66 drivers/base/module.c 		no_warn = sysfs_create_link(mk->drivers_dir, &drv->p->kobj,
p                  80 drivers/base/module.c 	sysfs_remove_link(&drv->p->kobj, "module");
p                  84 drivers/base/module.c 	else if (drv->p->mkobj)
p                  85 drivers/base/module.c 		mk = drv->p->mkobj;
p                  32 drivers/base/pinctrl.c 	dev->pins->p = devm_pinctrl_get(dev);
p                  33 drivers/base/pinctrl.c 	if (IS_ERR(dev->pins->p)) {
p                  35 drivers/base/pinctrl.c 		ret = PTR_ERR(dev->pins->p);
p                  39 drivers/base/pinctrl.c 	dev->pins->default_state = pinctrl_lookup_state(dev->pins->p,
p                  47 drivers/base/pinctrl.c 	dev->pins->init_state = pinctrl_lookup_state(dev->pins->p,
p                  53 drivers/base/pinctrl.c 		ret = pinctrl_select_state(dev->pins->p,
p                  56 drivers/base/pinctrl.c 		ret = pinctrl_select_state(dev->pins->p, dev->pins->init_state);
p                  70 drivers/base/pinctrl.c 	dev->pins->sleep_state = pinctrl_lookup_state(dev->pins->p,
p                  76 drivers/base/pinctrl.c 	dev->pins->idle_state = pinctrl_lookup_state(dev->pins->p,
p                  91 drivers/base/pinctrl.c 	devm_pinctrl_put(dev->pins->p);
p                 511 drivers/base/platform.c 		struct resource *p, *r = &pdev->resource[i];
p                 516 drivers/base/platform.c 		p = r->parent;
p                 517 drivers/base/platform.c 		if (!p) {
p                 519 drivers/base/platform.c 				p = &iomem_resource;
p                 521 drivers/base/platform.c 				p = &ioport_resource;
p                 524 drivers/base/platform.c 		if (p) {
p                 525 drivers/base/platform.c 			ret = insert_resource(p, r);
p                 812 drivers/base/platform.c 	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
p                 814 drivers/base/platform.c 	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
p                 817 drivers/base/platform.c 	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);
p                 121 drivers/base/power/domain.c #define genpd_lock(p)			p->lock_ops->lock(p)
p                 122 drivers/base/power/domain.c #define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
p                 123 drivers/base/power/domain.c #define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
p                 124 drivers/base/power/domain.c #define genpd_unlock(p)			p->lock_ops->unlock(p)
p                2749 drivers/base/power/domain.c 	const char *p = "";
p                2752 drivers/base/power/domain.c 		p = "error";
p                2754 drivers/base/power/domain.c 		p = "unsupported";
p                2756 drivers/base/power/domain.c 		p = status_lookup[dev->power.runtime_status];
p                2760 drivers/base/power/domain.c 	seq_puts(s, p);
p                 150 drivers/base/power/sysfs.c 	const char *p;
p                 153 drivers/base/power/sysfs.c 		p = "error\n";
p                 155 drivers/base/power/sysfs.c 		p = "unsupported\n";
p                 159 drivers/base/power/sysfs.c 			p = "suspended\n";
p                 162 drivers/base/power/sysfs.c 			p = "suspending\n";
p                 165 drivers/base/power/sysfs.c 			p = "resuming\n";
p                 168 drivers/base/power/sysfs.c 			p = "active\n";
p                 174 drivers/base/power/sysfs.c 	return sprintf(buf, p);
p                 132 drivers/base/regmap/regcache-lzo.c 	const char *p, *end;
p                 175 drivers/base/regmap/regcache-lzo.c 	p = map->reg_defaults_raw;
p                 178 drivers/base/regmap/regcache-lzo.c 	for (i = 0; i < blkcount; i++, p += blksize) {
p                 179 drivers/base/regmap/regcache-lzo.c 		lzo_blocks[i]->src = p;
p                 180 drivers/base/regmap/regcache-lzo.c 		if (p + blksize > end)
p                 181 drivers/base/regmap/regcache-lzo.c 			lzo_blocks[i]->src_len = end - p;
p                 102 drivers/base/regmap/regmap-debugfs.c 	loff_t p = 0;
p                 122 drivers/base/regmap/regmap-debugfs.c 					c->max = p - 1;
p                 140 drivers/base/regmap/regmap-debugfs.c 				c->min = p;
p                 144 drivers/base/regmap/regmap-debugfs.c 			p += map->debugfs_tot_len;
p                 150 drivers/base/regmap/regmap-debugfs.c 		c->max = p - 1;
p                 221 drivers/base/regmap/regmap-debugfs.c 	loff_t p = *ppos;
p                 237 drivers/base/regmap/regmap-debugfs.c 	start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
p                 243 drivers/base/regmap/regmap-debugfs.c 		if (p >= *ppos) {
p                 265 drivers/base/regmap/regmap-debugfs.c 		p += map->debugfs_tot_len;
p                 364 drivers/base/regmap/regmap-debugfs.c 	loff_t p = 0;
p                 390 drivers/base/regmap/regmap-debugfs.c 	regmap_debugfs_get_dump_start(map, 0, *ppos, &p);
p                 394 drivers/base/regmap/regmap-debugfs.c 	p = 0;
p                 399 drivers/base/regmap/regmap-debugfs.c 		if (p >= *ppos) {
p                 405 drivers/base/regmap/regmap-debugfs.c 		p += entry_len;
p                3024 drivers/base/regmap/regmap.c 	struct reg_sequence *p;
p                3032 drivers/base/regmap/regmap.c 	p = krealloc(map->patch,
p                3035 drivers/base/regmap/regmap.c 	if (p) {
p                3036 drivers/base/regmap/regmap.c 		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
p                3037 drivers/base/regmap/regmap.c 		map->patch = p;
p                 126 drivers/base/soc.c 	if (!soc_bus_type.p) {
p                 322 drivers/base/swnode.c static void property_entry_free_data(const struct property_entry *p)
p                 324 drivers/base/swnode.c 	const void *pointer = property_get_pointer(p);
p                 327 drivers/base/swnode.c 	if (p->is_array) {
p                 328 drivers/base/swnode.c 		if (p->type == DEV_PROP_STRING && p->pointer.str) {
p                 329 drivers/base/swnode.c 			nval = p->length / sizeof(const char *);
p                 331 drivers/base/swnode.c 				kfree(p->pointer.str[i]);
p                 334 drivers/base/swnode.c 	} else if (p->type == DEV_PROP_STRING) {
p                 335 drivers/base/swnode.c 		kfree(p->value.str);
p                 337 drivers/base/swnode.c 	kfree(p->name);
p                 421 drivers/base/swnode.c 	struct property_entry *p;
p                 431 drivers/base/swnode.c 	p = kcalloc(n + 1, sizeof(*p), GFP_KERNEL);
p                 432 drivers/base/swnode.c 	if (!p)
p                 436 drivers/base/swnode.c 		ret = property_entry_copy_data(&p[i], &properties[i]);
p                 439 drivers/base/swnode.c 				property_entry_free_data(&p[i]);
p                 440 drivers/base/swnode.c 			kfree(p);
p                 445 drivers/base/swnode.c 	return p;
p                 458 drivers/base/swnode.c 	const struct property_entry *p;
p                 463 drivers/base/swnode.c 	for (p = properties; p->name; p++)
p                 464 drivers/base/swnode.c 		property_entry_free_data(p);
p                 533 drivers/base/swnode.c 	struct swnode *p = to_swnode(fwnode);
p                 536 drivers/base/swnode.c 	if (!p || list_empty(&p->children) ||
p                 537 drivers/base/swnode.c 	    (c && list_is_last(&c->entry, &p->children)))
p                 543 drivers/base/swnode.c 		c = list_first_entry(&p->children, struct swnode, entry);
p                 820 drivers/base/swnode.c 	struct swnode *p = NULL;
p                 828 drivers/base/swnode.c 		p = to_swnode(parent);
p                 841 drivers/base/swnode.c 	node->parent = p ? p->node : NULL;
p                 843 drivers/base/swnode.c 	return swnode_register(node, p, 1);
p                1538 drivers/block/amiflop.c 	struct amiga_floppy_struct *p = bdev->bd_disk->private_data;
p                1539 drivers/block/amiflop.c 	int drive = p - unit;
p                1563 drivers/block/amiflop.c 		if (param < p->type->tracks * p->type->heads)
p                1567 drivers/block/amiflop.c 				memset(p->trackbuf, FD_FILL_BYTE,
p                1568 drivers/block/amiflop.c 				       p->dtype->sects * p->type->sect_mult * 512);
p                1583 drivers/block/amiflop.c 		getprm.track=p->type->tracks;
p                1584 drivers/block/amiflop.c 		getprm.head=p->type->heads;
p                1585 drivers/block/amiflop.c 		getprm.sect=p->dtype->sects * p->type->sect_mult;
p                1586 drivers/block/amiflop.c 		getprm.size=p->blocks;
p                1599 drivers/block/amiflop.c 		if (copy_to_user(argp, raw_buf, p->type->read_size))
p                1602 drivers/block/amiflop.c 			return p->type->read_size;
p                1709 drivers/block/amiflop.c 	struct amiga_floppy_struct *p = disk->private_data;
p                1710 drivers/block/amiflop.c 	int drive = p - unit;
p                1736 drivers/block/amiflop.c 	struct amiga_floppy_struct *p = disk->private_data;
p                1737 drivers/block/amiflop.c 	int drive = p - unit;
p                1753 drivers/block/amiflop.c 		p->track = -1;
p                1754 drivers/block/amiflop.c 		p->dirty = 0;
p                  66 drivers/block/aoe/aoeblk.c 	char *p;
p                  89 drivers/block/aoe/aoeblk.c 	for (p = page; nd < ne; nd++)
p                  90 drivers/block/aoe/aoeblk.c 		p += snprintf(p, PAGE_SIZE - (p-page), "%s%s",
p                  91 drivers/block/aoe/aoeblk.c 			p == page ? "" : ",", (*nd)->name);
p                  92 drivers/block/aoe/aoeblk.c 	p += snprintf(p, PAGE_SIZE - (p-page), "\n");
p                  93 drivers/block/aoe/aoeblk.c 	return p-page;
p                 199 drivers/block/aoe/aoeblk.c 	char *p;
p                 203 drivers/block/aoe/aoeblk.c 	p = strchr(d->gd->disk_name, '/');
p                 204 drivers/block/aoe/aoeblk.c 	if (p == NULL)
p                 205 drivers/block/aoe/aoeblk.c 		p = d->gd->disk_name;
p                 207 drivers/block/aoe/aoeblk.c 		p++;
p                 208 drivers/block/aoe/aoeblk.c 	BUG_ON(*p == '\0');
p                 209 drivers/block/aoe/aoeblk.c 	d->debugfs = debugfs_create_file(p, 0444, aoe_debugfs_dir, d,
p                 528 drivers/block/aoe/aoecmd.c 	struct aoeif *p, *e;
p                 530 drivers/block/aoe/aoecmd.c 	p = t->ifs;
p                 531 drivers/block/aoe/aoecmd.c 	e = p + NAOEIFS;
p                 532 drivers/block/aoe/aoecmd.c 	for (; p < e; p++)
p                 533 drivers/block/aoe/aoecmd.c 		if (p->nd == nd)
p                 534 drivers/block/aoe/aoecmd.c 			return p;
p                1031 drivers/block/aoe/aoecmd.c 		char *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
p                1032 drivers/block/aoe/aoecmd.c 		skb_copy_bits(skb, soff, p, bv.bv_len);
p                1033 drivers/block/aoe/aoecmd.c 		kunmap_atomic(p);
p                1495 drivers/block/aoe/aoecmd.c 	struct aoeif *p, *e;
p                1500 drivers/block/aoe/aoecmd.c 	p = t->ifs;
p                1501 drivers/block/aoe/aoecmd.c 	e = p + NAOEIFS;
p                1502 drivers/block/aoe/aoecmd.c 	for (; p < e; p++) {
p                1503 drivers/block/aoe/aoecmd.c 		if (p->nd == NULL)
p                1505 drivers/block/aoe/aoecmd.c 		if (p->nd == nd) {
p                1506 drivers/block/aoe/aoecmd.c 			p->bcnt = bcnt;	/* we're updating */
p                1508 drivers/block/aoe/aoecmd.c 		} else if (minbcnt > p->bcnt)
p                1509 drivers/block/aoe/aoecmd.c 			minbcnt = p->bcnt; /* find the min interface */
p                1512 drivers/block/aoe/aoecmd.c 		if (p == e) {
p                1517 drivers/block/aoe/aoecmd.c 		p->nd = nd;
p                1518 drivers/block/aoe/aoecmd.c 		p->bcnt = bcnt;
p                1685 drivers/block/aoe/aoecmd.c 	void *p;
p                1690 drivers/block/aoe/aoecmd.c 	p = (void *) get_zeroed_page(GFP_KERNEL);
p                1691 drivers/block/aoe/aoecmd.c 	if (!p)
p                1693 drivers/block/aoe/aoecmd.c 	empty_page = virt_to_page(p);
p                 245 drivers/block/aoe/aoedev.c 	const char *p;
p                 250 drivers/block/aoe/aoedev.c 	p = kbasename(d->gd->disk_name);
p                 252 drivers/block/aoe/aoedev.c 	lim -= p - d->gd->disk_name;
p                 256 drivers/block/aoe/aoedev.c 	return !strncmp(s, p, lim);
p                  75 drivers/block/aoe/aoenet.c 	register char *p, *q;
p                  81 drivers/block/aoe/aoenet.c 	p = aoe_iflist + strspn(aoe_iflist, WHITESPACE);
p                  82 drivers/block/aoe/aoenet.c 	for (; *p; p = q + strspn(q, WHITESPACE)) {
p                  83 drivers/block/aoe/aoenet.c 		q = p + strcspn(p, WHITESPACE);
p                  84 drivers/block/aoe/aoenet.c 		if (q != p)
p                  85 drivers/block/aoe/aoenet.c 			len = q - p;
p                  87 drivers/block/aoe/aoenet.c 			len = strlen(p); /* last token in aoe_iflist */
p                  89 drivers/block/aoe/aoenet.c 		if (strlen(ifp->name) == len && !strncmp(ifp->name, p, len))
p                  91 drivers/block/aoe/aoenet.c 		if (q == p)
p                 720 drivers/block/ataflop.c 	memset( p, val, n );	\
p                 721 drivers/block/ataflop.c 	p += n;			\
p                 727 drivers/block/ataflop.c 	unsigned char	*p;
p                 756 drivers/block/ataflop.c 	p = TrackBuffer;
p                 767 drivers/block/ataflop.c 		*p++ = 0xfe;
p                 768 drivers/block/ataflop.c 		*p++ = desc->track;
p                 769 drivers/block/ataflop.c 		*p++ = desc->head;
p                 770 drivers/block/ataflop.c 		*p++ = (nsect + sect - desc->sect_offset) % nsect + 1;
p                 771 drivers/block/ataflop.c 		*p++ = 2;
p                 772 drivers/block/ataflop.c 		*p++ = 0xf7;
p                 776 drivers/block/ataflop.c 		*p++ = 0xfb;
p                 778 drivers/block/ataflop.c 		*p++ = 0xf7;
p                 781 drivers/block/ataflop.c 	FILL( TrackBuffer+BUFFER_SIZE-p, 0x4e );
p                1409 drivers/block/ataflop.c 	struct atari_floppy_struct *p = disk->private_data;
p                1410 drivers/block/ataflop.c 	unsigned int drive = p - unit;
p                1431 drivers/block/ataflop.c 	struct atari_floppy_struct *p = disk->private_data;
p                1432 drivers/block/ataflop.c 	unsigned int drive = p - unit;
p                1436 drivers/block/ataflop.c 	    p->disktype == 0) {
p                1891 drivers/block/ataflop.c 	struct atari_floppy_struct *p = bdev->bd_disk->private_data;
p                1895 drivers/block/ataflop.c 	if (p->ref && p->type != type)
p                1898 drivers/block/ataflop.c 	if (p->ref == -1 || (p->ref && mode & FMODE_EXCL))
p                1902 drivers/block/ataflop.c 		p->ref = -1;
p                1904 drivers/block/ataflop.c 		p->ref++;
p                1906 drivers/block/ataflop.c 	p->type = type;
p                1914 drivers/block/ataflop.c 			if (p->wpstat) {
p                1915 drivers/block/ataflop.c 				if (p->ref < 0)
p                1916 drivers/block/ataflop.c 					p->ref = 0;
p                1918 drivers/block/ataflop.c 					p->ref--;
p                1939 drivers/block/ataflop.c 	struct atari_floppy_struct *p = disk->private_data;
p                1941 drivers/block/ataflop.c 	if (p->ref < 0)
p                1942 drivers/block/ataflop.c 		p->ref = 0;
p                1943 drivers/block/ataflop.c 	else if (!p->ref--) {
p                1945 drivers/block/ataflop.c 		p->ref = 0;
p                 542 drivers/block/drbd/drbd_main.c 	struct task_struct *p = current;
p                 547 drivers/block/drbd/drbd_main.c 	set_cpus_allowed_ptr(p, resource->cpu_mask);
p                 622 drivers/block/drbd/drbd_main.c 	void *p;
p                 625 drivers/block/drbd/drbd_main.c 	p = __conn_prepare_command(connection, sock);
p                 626 drivers/block/drbd/drbd_main.c 	if (!p)
p                 629 drivers/block/drbd/drbd_main.c 	return p;
p                 721 drivers/block/drbd/drbd_main.c 	struct p_rs_param_95 *p;
p                 729 drivers/block/drbd/drbd_main.c 	p = drbd_prepare_command(peer_device, sock);
p                 730 drivers/block/drbd/drbd_main.c 	if (!p)
p                 745 drivers/block/drbd/drbd_main.c 	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
p                 749 drivers/block/drbd/drbd_main.c 		p->resync_rate = cpu_to_be32(dc->resync_rate);
p                 750 drivers/block/drbd/drbd_main.c 		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
p                 751 drivers/block/drbd/drbd_main.c 		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
p                 752 drivers/block/drbd/drbd_main.c 		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
p                 753 drivers/block/drbd/drbd_main.c 		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
p                 756 drivers/block/drbd/drbd_main.c 		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
p                 757 drivers/block/drbd/drbd_main.c 		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
p                 758 drivers/block/drbd/drbd_main.c 		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
p                 759 drivers/block/drbd/drbd_main.c 		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
p                 760 drivers/block/drbd/drbd_main.c 		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
p                 764 drivers/block/drbd/drbd_main.c 		strcpy(p->verify_alg, nc->verify_alg);
p                 766 drivers/block/drbd/drbd_main.c 		strcpy(p->csums_alg, nc->csums_alg);
p                 775 drivers/block/drbd/drbd_main.c 	struct p_protocol *p;
p                 780 drivers/block/drbd/drbd_main.c 	p = __conn_prepare_command(connection, sock);
p                 781 drivers/block/drbd/drbd_main.c 	if (!p)
p                 793 drivers/block/drbd/drbd_main.c 	size = sizeof(*p);
p                 797 drivers/block/drbd/drbd_main.c 	p->protocol      = cpu_to_be32(nc->wire_protocol);
p                 798 drivers/block/drbd/drbd_main.c 	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
p                 799 drivers/block/drbd/drbd_main.c 	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
p                 800 drivers/block/drbd/drbd_main.c 	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
p                 801 drivers/block/drbd/drbd_main.c 	p->two_primaries = cpu_to_be32(nc->two_primaries);
p                 807 drivers/block/drbd/drbd_main.c 	p->conn_flags    = cpu_to_be32(cf);
p                 810 drivers/block/drbd/drbd_main.c 		strcpy(p->integrity_alg, nc->integrity_alg);
p                 831 drivers/block/drbd/drbd_main.c 	struct p_uuids *p;
p                 838 drivers/block/drbd/drbd_main.c 	p = drbd_prepare_command(peer_device, sock);
p                 839 drivers/block/drbd/drbd_main.c 	if (!p) {
p                 845 drivers/block/drbd/drbd_main.c 		p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
p                 849 drivers/block/drbd/drbd_main.c 	p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
p                 855 drivers/block/drbd/drbd_main.c 	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
p                 858 drivers/block/drbd/drbd_main.c 	return drbd_send_command(peer_device, sock, P_UUIDS, sizeof(*p), NULL, 0);
p                 893 drivers/block/drbd/drbd_main.c 	struct p_rs_uuid *p;
p                 908 drivers/block/drbd/drbd_main.c 	p = drbd_prepare_command(peer_device, sock);
p                 909 drivers/block/drbd/drbd_main.c 	if (p) {
p                 910 drivers/block/drbd/drbd_main.c 		p->uuid = cpu_to_be64(uuid);
p                 911 drivers/block/drbd/drbd_main.c 		drbd_send_command(peer_device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
p                 917 drivers/block/drbd/drbd_main.c assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p,
p                 921 drivers/block/drbd/drbd_main.c 		p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
p                 922 drivers/block/drbd/drbd_main.c 		p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
p                 923 drivers/block/drbd/drbd_main.c 		p->qlim->alignment_offset = cpu_to_be32(queue_alignment_offset(q));
p                 924 drivers/block/drbd/drbd_main.c 		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
p                 925 drivers/block/drbd/drbd_main.c 		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
p                 926 drivers/block/drbd/drbd_main.c 		p->qlim->discard_enabled = blk_queue_discard(q);
p                 927 drivers/block/drbd/drbd_main.c 		p->qlim->write_same_capable = !!q->limits.max_write_same_sectors;
p                 930 drivers/block/drbd/drbd_main.c 		p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
p                 931 drivers/block/drbd/drbd_main.c 		p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
p                 932 drivers/block/drbd/drbd_main.c 		p->qlim->alignment_offset = 0;
p                 933 drivers/block/drbd/drbd_main.c 		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
p                 934 drivers/block/drbd/drbd_main.c 		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
p                 935 drivers/block/drbd/drbd_main.c 		p->qlim->discard_enabled = 0;
p                 936 drivers/block/drbd/drbd_main.c 		p->qlim->write_same_capable = 0;
p                 944 drivers/block/drbd/drbd_main.c 	struct p_sizes *p;
p                 951 drivers/block/drbd/drbd_main.c 	p = drbd_prepare_command(peer_device, sock);
p                 952 drivers/block/drbd/drbd_main.c 	if (!p)
p                 955 drivers/block/drbd/drbd_main.c 	packet_size = sizeof(*p);
p                 957 drivers/block/drbd/drbd_main.c 		packet_size += sizeof(p->qlim[0]);
p                 959 drivers/block/drbd/drbd_main.c 	memset(p, 0, packet_size);
p                 969 drivers/block/drbd/drbd_main.c 		assign_p_sizes_qlim(device, p, q);
p                 976 drivers/block/drbd/drbd_main.c 		assign_p_sizes_qlim(device, p, NULL);
p                 984 drivers/block/drbd/drbd_main.c 	p->d_size = cpu_to_be64(d_size);
p                 985 drivers/block/drbd/drbd_main.c 	p->u_size = cpu_to_be64(u_size);
p                 986 drivers/block/drbd/drbd_main.c 	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
p                 987 drivers/block/drbd/drbd_main.c 	p->max_bio_size = cpu_to_be32(max_bio_size);
p                 988 drivers/block/drbd/drbd_main.c 	p->queue_order_type = cpu_to_be16(q_order_type);
p                 989 drivers/block/drbd/drbd_main.c 	p->dds_flags = cpu_to_be16(flags);
p                1001 drivers/block/drbd/drbd_main.c 	struct p_state *p;
p                1004 drivers/block/drbd/drbd_main.c 	p = drbd_prepare_command(peer_device, sock);
p                1005 drivers/block/drbd/drbd_main.c 	if (!p)
p                1007 drivers/block/drbd/drbd_main.c 	p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */
p                1008 drivers/block/drbd/drbd_main.c 	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
p                1024 drivers/block/drbd/drbd_main.c 	struct p_state *p;
p                1027 drivers/block/drbd/drbd_main.c 	p = drbd_prepare_command(peer_device, sock);
p                1028 drivers/block/drbd/drbd_main.c 	if (!p)
p                1030 drivers/block/drbd/drbd_main.c 	p->state = cpu_to_be32(state.i); /* Within the send mutex */
p                1031 drivers/block/drbd/drbd_main.c 	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
p                1037 drivers/block/drbd/drbd_main.c 	struct p_req_state *p;
p                1040 drivers/block/drbd/drbd_main.c 	p = drbd_prepare_command(peer_device, sock);
p                1041 drivers/block/drbd/drbd_main.c 	if (!p)
p                1043 drivers/block/drbd/drbd_main.c 	p->mask = cpu_to_be32(mask.i);
p                1044 drivers/block/drbd/drbd_main.c 	p->val = cpu_to_be32(val.i);
p                1045 drivers/block/drbd/drbd_main.c 	return drbd_send_command(peer_device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
p                1052 drivers/block/drbd/drbd_main.c 	struct p_req_state *p;
p                1056 drivers/block/drbd/drbd_main.c 	p = conn_prepare_command(connection, sock);
p                1057 drivers/block/drbd/drbd_main.c 	if (!p)
p                1059 drivers/block/drbd/drbd_main.c 	p->mask = cpu_to_be32(mask.i);
p                1060 drivers/block/drbd/drbd_main.c 	p->val = cpu_to_be32(val.i);
p                1061 drivers/block/drbd/drbd_main.c 	return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
p                1067 drivers/block/drbd/drbd_main.c 	struct p_req_state_reply *p;
p                1070 drivers/block/drbd/drbd_main.c 	p = drbd_prepare_command(peer_device, sock);
p                1071 drivers/block/drbd/drbd_main.c 	if (p) {
p                1072 drivers/block/drbd/drbd_main.c 		p->retcode = cpu_to_be32(retcode);
p                1073 drivers/block/drbd/drbd_main.c 		drbd_send_command(peer_device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
p                1080 drivers/block/drbd/drbd_main.c 	struct p_req_state_reply *p;
p                1084 drivers/block/drbd/drbd_main.c 	p = conn_prepare_command(connection, sock);
p                1085 drivers/block/drbd/drbd_main.c 	if (p) {
p                1086 drivers/block/drbd/drbd_main.c 		p->retcode = cpu_to_be32(retcode);
p                1087 drivers/block/drbd/drbd_main.c 		conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
p                1091 drivers/block/drbd/drbd_main.c static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
p                1094 drivers/block/drbd/drbd_main.c 	p->encoding = (p->encoding & ~0xf) | code;
p                1097 drivers/block/drbd/drbd_main.c static void dcbp_set_start(struct p_compressed_bm *p, int set)
p                1099 drivers/block/drbd/drbd_main.c 	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
p                1102 drivers/block/drbd/drbd_main.c static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
p                1105 drivers/block/drbd/drbd_main.c 	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
p                1109 drivers/block/drbd/drbd_main.c 			 struct p_compressed_bm *p,
p                1132 drivers/block/drbd/drbd_main.c 	bitstream_init(&bs, p->code, size, 0);
p                1133 drivers/block/drbd/drbd_main.c 	memset(p->code, 0, size);
p                1155 drivers/block/drbd/drbd_main.c 				dcbp_set_start(p, 1);
p                1160 drivers/block/drbd/drbd_main.c 			dcbp_set_start(p, 0);
p                1184 drivers/block/drbd/drbd_main.c 	len = bs.cur.b - p->code + !!bs.cur.bit;
p                1200 drivers/block/drbd/drbd_main.c 	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
p                1216 drivers/block/drbd/drbd_main.c 	struct p_compressed_bm *p = sock->sbuf + header_size;
p                1219 drivers/block/drbd/drbd_main.c 	len = fill_bitmap_rle_bits(device, p,
p                1220 drivers/block/drbd/drbd_main.c 			DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
p                1225 drivers/block/drbd/drbd_main.c 		dcbp_set_code(p, RLE_VLI_Bits);
p                1227 drivers/block/drbd/drbd_main.c 				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
p                1230 drivers/block/drbd/drbd_main.c 		c->bytes[0] += header_size + sizeof(*p) + len;
p                1239 drivers/block/drbd/drbd_main.c 		unsigned long *p = sock->sbuf + header_size;
p                1242 drivers/block/drbd/drbd_main.c 		num_words = min_t(size_t, data_size / sizeof(*p),
p                1244 drivers/block/drbd/drbd_main.c 		len = num_words * sizeof(*p);
p                1246 drivers/block/drbd/drbd_main.c 			drbd_bm_get_lel(device, c->word_offset, num_words, p);
p                1320 drivers/block/drbd/drbd_main.c 	struct p_barrier_ack *p;
p                1326 drivers/block/drbd/drbd_main.c 	p = conn_prepare_command(connection, sock);
p                1327 drivers/block/drbd/drbd_main.c 	if (!p)
p                1329 drivers/block/drbd/drbd_main.c 	p->barrier = barrier_nr;
p                1330 drivers/block/drbd/drbd_main.c 	p->set_size = cpu_to_be32(set_size);
p                1331 drivers/block/drbd/drbd_main.c 	conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
p                1346 drivers/block/drbd/drbd_main.c 	struct p_block_ack *p;
p                1352 drivers/block/drbd/drbd_main.c 	p = drbd_prepare_command(peer_device, sock);
p                1353 drivers/block/drbd/drbd_main.c 	if (!p)
p                1355 drivers/block/drbd/drbd_main.c 	p->sector = sector;
p                1356 drivers/block/drbd/drbd_main.c 	p->block_id = block_id;
p                1357 drivers/block/drbd/drbd_main.c 	p->blksize = blksize;
p                1358 drivers/block/drbd/drbd_main.c 	p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
p                1359 drivers/block/drbd/drbd_main.c 	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
p                1410 drivers/block/drbd/drbd_main.c 	struct p_block_desc *p;
p                1413 drivers/block/drbd/drbd_main.c 	p = drbd_prepare_command(peer_device, sock);
p                1414 drivers/block/drbd/drbd_main.c 	if (!p)
p                1416 drivers/block/drbd/drbd_main.c 	p->sector = cpu_to_be64(peer_req->i.sector);
p                1417 drivers/block/drbd/drbd_main.c 	p->blksize = cpu_to_be32(peer_req->i.size);
p                1418 drivers/block/drbd/drbd_main.c 	p->pad = 0;
p                1419 drivers/block/drbd/drbd_main.c 	return drbd_send_command(peer_device, sock, P_RS_DEALLOCATED, sizeof(*p), NULL, 0);
p                1426 drivers/block/drbd/drbd_main.c 	struct p_block_req *p;
p                1429 drivers/block/drbd/drbd_main.c 	p = drbd_prepare_command(peer_device, sock);
p                1430 drivers/block/drbd/drbd_main.c 	if (!p)
p                1432 drivers/block/drbd/drbd_main.c 	p->sector = cpu_to_be64(sector);
p                1433 drivers/block/drbd/drbd_main.c 	p->block_id = block_id;
p                1434 drivers/block/drbd/drbd_main.c 	p->blksize = cpu_to_be32(size);
p                1435 drivers/block/drbd/drbd_main.c 	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
p                1442 drivers/block/drbd/drbd_main.c 	struct p_block_req *p;
p                1447 drivers/block/drbd/drbd_main.c 	p = drbd_prepare_command(peer_device, sock);
p                1448 drivers/block/drbd/drbd_main.c 	if (!p)
p                1450 drivers/block/drbd/drbd_main.c 	p->sector = cpu_to_be64(sector);
p                1451 drivers/block/drbd/drbd_main.c 	p->block_id = ID_SYNCER /* unused */;
p                1452 drivers/block/drbd/drbd_main.c 	p->blksize = cpu_to_be32(size);
p                1453 drivers/block/drbd/drbd_main.c 	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), digest, digest_size);
p                1459 drivers/block/drbd/drbd_main.c 	struct p_block_req *p;
p                1462 drivers/block/drbd/drbd_main.c 	p = drbd_prepare_command(peer_device, sock);
p                1463 drivers/block/drbd/drbd_main.c 	if (!p)
p                1465 drivers/block/drbd/drbd_main.c 	p->sector = cpu_to_be64(sector);
p                1466 drivers/block/drbd/drbd_main.c 	p->block_id = ID_SYNCER /* unused */;
p                1467 drivers/block/drbd/drbd_main.c 	p->blksize = cpu_to_be32(size);
p                1468 drivers/block/drbd/drbd_main.c 	return drbd_send_command(peer_device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
p                1676 drivers/block/drbd/drbd_main.c 	struct p_data *p;
p                1684 drivers/block/drbd/drbd_main.c 	p = drbd_prepare_command(peer_device, sock);
p                1688 drivers/block/drbd/drbd_main.c 	if (!p)
p                1690 drivers/block/drbd/drbd_main.c 	p->sector = cpu_to_be64(req->i.sector);
p                1691 drivers/block/drbd/drbd_main.c 	p->block_id = (unsigned long)req;
p                1692 drivers/block/drbd/drbd_main.c 	p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
p                1706 drivers/block/drbd/drbd_main.c 	p->dp_flags = cpu_to_be32(dp_flags);
p                1710 drivers/block/drbd/drbd_main.c 		struct p_trim *t = (struct p_trim*)p;
p                1719 drivers/block/drbd/drbd_main.c 		wsame = (struct p_wsame*)p;
p                1723 drivers/block/drbd/drbd_main.c 		digest_out = p + 1;
p                1737 drivers/block/drbd/drbd_main.c 				   sizeof(*p) + digest_size, NULL, req->i.size);
p                1761 drivers/block/drbd/drbd_main.c 			if (memcmp(p + 1, digest, digest_size)) {
p                1785 drivers/block/drbd/drbd_main.c 	struct p_data *p;
p                1790 drivers/block/drbd/drbd_main.c 	p = drbd_prepare_command(peer_device, sock);
p                1795 drivers/block/drbd/drbd_main.c 	if (!p)
p                1797 drivers/block/drbd/drbd_main.c 	p->sector = cpu_to_be64(peer_req->i.sector);
p                1798 drivers/block/drbd/drbd_main.c 	p->block_id = peer_req->block_id;
p                1799 drivers/block/drbd/drbd_main.c 	p->seq_num = 0;  /* unused */
p                1800 drivers/block/drbd/drbd_main.c 	p->dp_flags = 0;
p                1802 drivers/block/drbd/drbd_main.c 		drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
p                1803 drivers/block/drbd/drbd_main.c 	err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NULL, peer_req->i.size);
p                1814 drivers/block/drbd/drbd_main.c 	struct p_block_desc *p;
p                1817 drivers/block/drbd/drbd_main.c 	p = drbd_prepare_command(peer_device, sock);
p                1818 drivers/block/drbd/drbd_main.c 	if (!p)
p                1820 drivers/block/drbd/drbd_main.c 	p->sector = cpu_to_be64(req->i.sector);
p                1821 drivers/block/drbd/drbd_main.c 	p->blksize = cpu_to_be32(req->i.size);
p                1822 drivers/block/drbd/drbd_main.c 	return drbd_send_command(peer_device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
p                1772 drivers/block/drbd/drbd_receiver.c 	struct p_barrier *p = pi->data;
p                1778 drivers/block/drbd/drbd_receiver.c 	connection->current_epoch->barrier_nr = p->barrier;
p                2149 drivers/block/drbd/drbd_receiver.c 	struct p_data *p = pi->data;
p                2156 drivers/block/drbd/drbd_receiver.c 	sector = be64_to_cpu(p->sector);
p                2159 drivers/block/drbd/drbd_receiver.c 	req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
p                2183 drivers/block/drbd/drbd_receiver.c 	struct p_data *p = pi->data;
p                2190 drivers/block/drbd/drbd_receiver.c 	sector = be64_to_cpu(p->sector);
p                2191 drivers/block/drbd/drbd_receiver.c 	D_ASSERT(device, p->block_id == ID_SYNCER);
p                2204 drivers/block/drbd/drbd_receiver.c 		drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
p                2592 drivers/block/drbd/drbd_receiver.c 	struct p_data *p = pi->data;
p                2593 drivers/block/drbd/drbd_receiver.c 	u32 peer_seq = be32_to_cpu(p->seq_num);
p                2607 drivers/block/drbd/drbd_receiver.c 		drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
p                2621 drivers/block/drbd/drbd_receiver.c 	sector = be64_to_cpu(p->sector);
p                2622 drivers/block/drbd/drbd_receiver.c 	peer_req = read_in_block(peer_device, p->block_id, sector, pi);
p                2632 drivers/block/drbd/drbd_receiver.c 	dp_flags = be32_to_cpu(p->dp_flags);
p                2845 drivers/block/drbd/drbd_receiver.c 	struct p_block_req *p =	pi->data;
p                2853 drivers/block/drbd/drbd_receiver.c 	sector = be64_to_cpu(p->sector);
p                2854 drivers/block/drbd/drbd_receiver.c 	size   = be32_to_cpu(p->blksize);
p                2871 drivers/block/drbd/drbd_receiver.c 			drbd_send_ack_rp(peer_device, P_NEG_DREPLY, p);
p                2877 drivers/block/drbd/drbd_receiver.c 			drbd_send_ack_rp(peer_device, P_NEG_RS_DREPLY , p);
p                2898 drivers/block/drbd/drbd_receiver.c 	peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size,
p                3668 drivers/block/drbd/drbd_receiver.c 	struct p_protocol *p = pi->data;
p                3676 drivers/block/drbd/drbd_receiver.c 	p_proto		= be32_to_cpu(p->protocol);
p                3677 drivers/block/drbd/drbd_receiver.c 	p_after_sb_0p	= be32_to_cpu(p->after_sb_0p);
p                3678 drivers/block/drbd/drbd_receiver.c 	p_after_sb_1p	= be32_to_cpu(p->after_sb_1p);
p                3679 drivers/block/drbd/drbd_receiver.c 	p_after_sb_2p	= be32_to_cpu(p->after_sb_2p);
p                3680 drivers/block/drbd/drbd_receiver.c 	p_two_primaries = be32_to_cpu(p->two_primaries);
p                3681 drivers/block/drbd/drbd_receiver.c 	cf		= be32_to_cpu(p->conn_flags);
p                3882 drivers/block/drbd/drbd_receiver.c 	struct p_rs_param_95 *p;
p                3924 drivers/block/drbd/drbd_receiver.c 	p = pi->data;
p                3925 drivers/block/drbd/drbd_receiver.c 	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
p                3927 drivers/block/drbd/drbd_receiver.c 	err = drbd_recv_all(peer_device->connection, p, header_size);
p                3945 drivers/block/drbd/drbd_receiver.c 		new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
p                3958 drivers/block/drbd/drbd_receiver.c 			err = drbd_recv_all(peer_device->connection, p->verify_alg, data_size);
p                3963 drivers/block/drbd/drbd_receiver.c 			D_ASSERT(device, p->verify_alg[data_size-1] == 0);
p                3964 drivers/block/drbd/drbd_receiver.c 			p->verify_alg[data_size-1] = 0;
p                3969 drivers/block/drbd/drbd_receiver.c 			D_ASSERT(device, p->verify_alg[SHARED_SECRET_MAX-1] == 0);
p                3970 drivers/block/drbd/drbd_receiver.c 			D_ASSERT(device, p->csums_alg[SHARED_SECRET_MAX-1] == 0);
p                3971 drivers/block/drbd/drbd_receiver.c 			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
p                3972 drivers/block/drbd/drbd_receiver.c 			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
p                3975 drivers/block/drbd/drbd_receiver.c 		if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
p                3978 drivers/block/drbd/drbd_receiver.c 				    old_net_conf->verify_alg, p->verify_alg);
p                3982 drivers/block/drbd/drbd_receiver.c 					p->verify_alg, "verify-alg");
p                3989 drivers/block/drbd/drbd_receiver.c 		if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
p                3992 drivers/block/drbd/drbd_receiver.c 				    old_net_conf->csums_alg, p->csums_alg);
p                3996 drivers/block/drbd/drbd_receiver.c 					p->csums_alg, "csums-alg");
p                4004 drivers/block/drbd/drbd_receiver.c 			new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
p                4005 drivers/block/drbd/drbd_receiver.c 			new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
p                4006 drivers/block/drbd/drbd_receiver.c 			new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
p                4007 drivers/block/drbd/drbd_receiver.c 			new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
p                4030 drivers/block/drbd/drbd_receiver.c 				strcpy(new_net_conf->verify_alg, p->verify_alg);
p                4031 drivers/block/drbd/drbd_receiver.c 				new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
p                4034 drivers/block/drbd/drbd_receiver.c 				drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
p                4037 drivers/block/drbd/drbd_receiver.c 				strcpy(new_net_conf->csums_alg, p->csums_alg);
p                4038 drivers/block/drbd/drbd_receiver.c 				new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
p                4041 drivers/block/drbd/drbd_receiver.c 				drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
p                4107 drivers/block/drbd/drbd_receiver.c 	struct p_sizes *p = pi->data;
p                4108 drivers/block/drbd/drbd_receiver.c 	struct o_qlim *o = (connection->agreed_features & DRBD_FF_WSAME) ? p->qlim : NULL;
p                4121 drivers/block/drbd/drbd_receiver.c 	p_size = be64_to_cpu(p->d_size);
p                4122 drivers/block/drbd/drbd_receiver.c 	p_usize = be64_to_cpu(p->u_size);
p                4123 drivers/block/drbd/drbd_receiver.c 	p_csize = be64_to_cpu(p->c_size);
p                4185 drivers/block/drbd/drbd_receiver.c 	device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
p                4191 drivers/block/drbd/drbd_receiver.c 	ddsf = be16_to_cpu(p->dds_flags);
p                4254 drivers/block/drbd/drbd_receiver.c 		if (be64_to_cpu(p->c_size) !=
p                4280 drivers/block/drbd/drbd_receiver.c 	struct p_uuids *p = pi->data;
p                4296 drivers/block/drbd/drbd_receiver.c 		p_uuid[i] = be64_to_cpu(p->uuid[i]);
p                4387 drivers/block/drbd/drbd_receiver.c 	struct p_req_state *p = pi->data;
p                4396 drivers/block/drbd/drbd_receiver.c 	mask.i = be32_to_cpu(p->mask);
p                4397 drivers/block/drbd/drbd_receiver.c 	val.i = be32_to_cpu(p->val);
p                4418 drivers/block/drbd/drbd_receiver.c 	struct p_req_state *p = pi->data;
p                4422 drivers/block/drbd/drbd_receiver.c 	mask.i = be32_to_cpu(p->mask);
p                4423 drivers/block/drbd/drbd_receiver.c 	val.i = be32_to_cpu(p->val);
p                4444 drivers/block/drbd/drbd_receiver.c 	struct p_state *p = pi->data;
p                4455 drivers/block/drbd/drbd_receiver.c 	peer_state.i = be32_to_cpu(p->state);
p                4641 drivers/block/drbd/drbd_receiver.c 	struct p_rs_uuid *p = pi->data;
p                4659 drivers/block/drbd/drbd_receiver.c 		_drbd_uuid_set(device, UI_CURRENT, be64_to_cpu(p->uuid));
p                4680 drivers/block/drbd/drbd_receiver.c 		     unsigned long *p, struct bm_xfer_ctx *c)
p                4684 drivers/block/drbd/drbd_receiver.c 	unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
p                4686 drivers/block/drbd/drbd_receiver.c 	unsigned int want = num_words * sizeof(*p);
p                4695 drivers/block/drbd/drbd_receiver.c 	err = drbd_recv_all(peer_device->connection, p, want);
p                4699 drivers/block/drbd/drbd_receiver.c 	drbd_bm_merge_lel(peer_device->device, c->word_offset, num_words, p);
p                4709 drivers/block/drbd/drbd_receiver.c static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
p                4711 drivers/block/drbd/drbd_receiver.c 	return (enum drbd_bitmap_code)(p->encoding & 0x0f);
p                4714 drivers/block/drbd/drbd_receiver.c static int dcbp_get_start(struct p_compressed_bm *p)
p                4716 drivers/block/drbd/drbd_receiver.c 	return (p->encoding & 0x80) != 0;
p                4719 drivers/block/drbd/drbd_receiver.c static int dcbp_get_pad_bits(struct p_compressed_bm *p)
p                4721 drivers/block/drbd/drbd_receiver.c 	return (p->encoding >> 4) & 0x7;
p                4732 drivers/block/drbd/drbd_receiver.c 		struct p_compressed_bm *p,
p                4742 drivers/block/drbd/drbd_receiver.c 	int toggle = dcbp_get_start(p);
p                4746 drivers/block/drbd/drbd_receiver.c 	bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
p                4769 drivers/block/drbd/drbd_receiver.c 				(unsigned int)(bs.cur.b - p->code),
p                4801 drivers/block/drbd/drbd_receiver.c 		struct p_compressed_bm *p,
p                4805 drivers/block/drbd/drbd_receiver.c 	if (dcbp_get_code(p) == RLE_VLI_Bits)
p                4806 drivers/block/drbd/drbd_receiver.c 		return recv_bm_rle_bits(peer_device, p, c, len - sizeof(*p));
p                4812 drivers/block/drbd/drbd_receiver.c 	drbd_err(peer_device, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
p                4888 drivers/block/drbd/drbd_receiver.c 			struct p_compressed_bm *p = pi->data;
p                4895 drivers/block/drbd/drbd_receiver.c 			if (pi->size <= sizeof(*p)) {
p                4900 drivers/block/drbd/drbd_receiver.c 			err = drbd_recv_all(peer_device->connection, p, pi->size);
p                4903 drivers/block/drbd/drbd_receiver.c 			err = decode_bitmap_c(peer_device, p, &c, pi->size);
p                4970 drivers/block/drbd/drbd_receiver.c 	struct p_block_desc *p = pi->data;
p                4987 drivers/block/drbd/drbd_receiver.c 	drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
p                4995 drivers/block/drbd/drbd_receiver.c 	struct p_block_desc *p = pi->data;
p                5005 drivers/block/drbd/drbd_receiver.c 	sector = be64_to_cpu(p->sector);
p                5006 drivers/block/drbd/drbd_receiver.c 	size = be32_to_cpu(p->blksize);
p                5308 drivers/block/drbd/drbd_receiver.c 	struct p_connection_features *p;
p                5311 drivers/block/drbd/drbd_receiver.c 	p = conn_prepare_command(connection, sock);
p                5312 drivers/block/drbd/drbd_receiver.c 	if (!p)
p                5314 drivers/block/drbd/drbd_receiver.c 	memset(p, 0, sizeof(*p));
p                5315 drivers/block/drbd/drbd_receiver.c 	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
p                5316 drivers/block/drbd/drbd_receiver.c 	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
p                5317 drivers/block/drbd/drbd_receiver.c 	p->feature_flags = cpu_to_be32(PRO_FEATURES);
p                5318 drivers/block/drbd/drbd_receiver.c 	return conn_send_command(connection, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
p                5331 drivers/block/drbd/drbd_receiver.c 	struct p_connection_features *p;
p                5356 drivers/block/drbd/drbd_receiver.c 	p = pi.data;
p                5357 drivers/block/drbd/drbd_receiver.c 	err = drbd_recv_all_warn(connection, p, expect);
p                5361 drivers/block/drbd/drbd_receiver.c 	p->protocol_min = be32_to_cpu(p->protocol_min);
p                5362 drivers/block/drbd/drbd_receiver.c 	p->protocol_max = be32_to_cpu(p->protocol_max);
p                5363 drivers/block/drbd/drbd_receiver.c 	if (p->protocol_max == 0)
p                5364 drivers/block/drbd/drbd_receiver.c 		p->protocol_max = p->protocol_min;
p                5366 drivers/block/drbd/drbd_receiver.c 	if (PRO_VERSION_MAX < p->protocol_min ||
p                5367 drivers/block/drbd/drbd_receiver.c 	    PRO_VERSION_MIN > p->protocol_max)
p                5370 drivers/block/drbd/drbd_receiver.c 	connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
p                5371 drivers/block/drbd/drbd_receiver.c 	connection->agreed_features = PRO_FEATURES & be32_to_cpu(p->feature_flags);
p                5390 drivers/block/drbd/drbd_receiver.c 	    p->protocol_min, p->protocol_max);
p                5625 drivers/block/drbd/drbd_receiver.c 	struct p_req_state_reply *p = pi->data;
p                5626 drivers/block/drbd/drbd_receiver.c 	int retcode = be32_to_cpu(p->retcode);
p                5644 drivers/block/drbd/drbd_receiver.c 	struct p_req_state_reply *p = pi->data;
p                5645 drivers/block/drbd/drbd_receiver.c 	int retcode = be32_to_cpu(p->retcode);
p                5689 drivers/block/drbd/drbd_receiver.c 	struct p_block_ack *p = pi->data;
p                5690 drivers/block/drbd/drbd_receiver.c 	sector_t sector = be64_to_cpu(p->sector);
p                5691 drivers/block/drbd/drbd_receiver.c 	int blksize = be32_to_cpu(p->blksize);
p                5700 drivers/block/drbd/drbd_receiver.c 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
p                5741 drivers/block/drbd/drbd_receiver.c 	struct p_block_ack *p = pi->data;
p                5742 drivers/block/drbd/drbd_receiver.c 	sector_t sector = be64_to_cpu(p->sector);
p                5743 drivers/block/drbd/drbd_receiver.c 	int blksize = be32_to_cpu(p->blksize);
p                5751 drivers/block/drbd/drbd_receiver.c 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
p                5753 drivers/block/drbd/drbd_receiver.c 	if (p->block_id == ID_SYNCER) {
p                5778 drivers/block/drbd/drbd_receiver.c 	return validate_req_change_req_state(device, p->block_id, sector,
p                5787 drivers/block/drbd/drbd_receiver.c 	struct p_block_ack *p = pi->data;
p                5788 drivers/block/drbd/drbd_receiver.c 	sector_t sector = be64_to_cpu(p->sector);
p                5789 drivers/block/drbd/drbd_receiver.c 	int size = be32_to_cpu(p->blksize);
p                5797 drivers/block/drbd/drbd_receiver.c 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
p                5799 drivers/block/drbd/drbd_receiver.c 	if (p->block_id == ID_SYNCER) {
p                5805 drivers/block/drbd/drbd_receiver.c 	err = validate_req_change_req_state(device, p->block_id, sector,
p                5823 drivers/block/drbd/drbd_receiver.c 	struct p_block_ack *p = pi->data;
p                5824 drivers/block/drbd/drbd_receiver.c 	sector_t sector = be64_to_cpu(p->sector);
p                5831 drivers/block/drbd/drbd_receiver.c 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
p                5834 drivers/block/drbd/drbd_receiver.c 	    (unsigned long long)sector, be32_to_cpu(p->blksize));
p                5836 drivers/block/drbd/drbd_receiver.c 	return validate_req_change_req_state(device, p->block_id, sector,
p                5847 drivers/block/drbd/drbd_receiver.c 	struct p_block_ack *p = pi->data;
p                5854 drivers/block/drbd/drbd_receiver.c 	sector = be64_to_cpu(p->sector);
p                5855 drivers/block/drbd/drbd_receiver.c 	size = be32_to_cpu(p->blksize);
p                5857 drivers/block/drbd/drbd_receiver.c 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
p                5879 drivers/block/drbd/drbd_receiver.c 	struct p_barrier_ack *p = pi->data;
p                5883 drivers/block/drbd/drbd_receiver.c 	tl_release(connection, p->barrier, be32_to_cpu(p->set_size));
p                5905 drivers/block/drbd/drbd_receiver.c 	struct p_block_ack *p = pi->data;
p                5915 drivers/block/drbd/drbd_receiver.c 	sector = be64_to_cpu(p->sector);
p                5916 drivers/block/drbd/drbd_receiver.c 	size = be32_to_cpu(p->blksize);
p                5918 drivers/block/drbd/drbd_receiver.c 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
p                5920 drivers/block/drbd/drbd_receiver.c 	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
p                 572 drivers/block/drbd/drbd_req.c 	int p, rv = 0;
p                 594 drivers/block/drbd/drbd_req.c 		p = nc->wire_protocol;
p                 597 drivers/block/drbd/drbd_req.c 			p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
p                 598 drivers/block/drbd/drbd_req.c 			p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
p                 707 drivers/block/drbd/drbd_req.c 		p = nc->max_epoch_size;
p                 709 drivers/block/drbd/drbd_req.c 		if (connection->current_tle_writes >= p)
p                 241 drivers/block/drbd/drbd_state.c 		struct drbd_peer_device_state_change *p =
p                 244 drivers/block/drbd/drbd_state.c 		OLD_TO_NEW(p->disk_state);
p                 245 drivers/block/drbd/drbd_state.c 		OLD_TO_NEW(p->repl_state);
p                 246 drivers/block/drbd/drbd_state.c 		OLD_TO_NEW(p->resync_susp_user);
p                 247 drivers/block/drbd/drbd_state.c 		OLD_TO_NEW(p->resync_susp_peer);
p                 248 drivers/block/drbd/drbd_state.c 		OLD_TO_NEW(p->resync_susp_dependency);
p                1585 drivers/block/drbd/drbd_state.c 				     struct drbd_peer_device_state_change *p,
p                1588 drivers/block/drbd/drbd_state.c 	struct drbd_peer_device *peer_device = p->peer_device;
p                1590 drivers/block/drbd/drbd_state.c 		.peer_repl_state = p->repl_state[NEW],
p                1591 drivers/block/drbd/drbd_state.c 		.peer_disk_state = p->disk_state[NEW],
p                1592 drivers/block/drbd/drbd_state.c 		.peer_resync_susp_user = p->resync_susp_user[NEW],
p                1593 drivers/block/drbd/drbd_state.c 		.peer_resync_susp_peer = p->resync_susp_peer[NEW],
p                1594 drivers/block/drbd/drbd_state.c 		.peer_resync_susp_dependency = p->resync_susp_dependency[NEW],
p                1653 drivers/block/drbd/drbd_state.c 		struct drbd_peer_device_state_change *p =
p                1656 drivers/block/drbd/drbd_state.c 		if (HAS_CHANGED(p->disk_state) ||
p                1657 drivers/block/drbd/drbd_state.c 		    HAS_CHANGED(p->repl_state) ||
p                1658 drivers/block/drbd/drbd_state.c 		    HAS_CHANGED(p->resync_susp_user) ||
p                1659 drivers/block/drbd/drbd_state.c 		    HAS_CHANGED(p->resync_susp_peer) ||
p                1660 drivers/block/drbd/drbd_state.c 		    HAS_CHANGED(p->resync_susp_dependency))
p                1662 drivers/block/drbd/drbd_state.c 					      p, NOTIFY_CHANGE);
p                1368 drivers/block/drbd/drbd_worker.c 	struct p_barrier *p;
p                1372 drivers/block/drbd/drbd_worker.c 	p = conn_prepare_command(connection, sock);
p                1373 drivers/block/drbd/drbd_worker.c 	if (!p)
p                1375 drivers/block/drbd/drbd_worker.c 	p->barrier = connection->send.current_epoch_nr;
p                1376 drivers/block/drbd/drbd_worker.c 	p->pad = 0;
p                1380 drivers/block/drbd/drbd_worker.c 	return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0);
p                3706 drivers/block/floppy.c 	struct floppy_struct *p;
p                3711 drivers/block/floppy.c 	err = get_floppy_geometry(drive, ITYPE(UDRS->fd_device), &p);
p                3716 drivers/block/floppy.c 	memcpy(&v, p, offsetof(struct floppy_struct, name));
p                4469 drivers/block/floppy.c 	struct platform_device *p = to_platform_device(dev);
p                4472 drivers/block/floppy.c 	drive = p->id;
p                4779 drivers/block/floppy.c static void floppy_release_allocated_regions(int fdc, const struct io_region *p)
p                4781 drivers/block/floppy.c 	while (p != io_regions) {
p                4782 drivers/block/floppy.c 		p--;
p                4783 drivers/block/floppy.c 		release_region(FDCS->address + p->offset, p->size);
p                4791 drivers/block/floppy.c 	const struct io_region *p;
p                4793 drivers/block/floppy.c 	for (p = io_regions; p < ARRAY_END(io_regions); p++) {
p                4794 drivers/block/floppy.c 		if (!request_region(FDCS->address + p->offset,
p                4795 drivers/block/floppy.c 				    p->size, "floppy")) {
p                4797 drivers/block/floppy.c 			       FDCS->address + p->offset);
p                4798 drivers/block/floppy.c 			floppy_release_allocated_regions(fdc, p);
p                 776 drivers/block/loop.c 	char *p = NULL;
p                 780 drivers/block/loop.c 		p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
p                 783 drivers/block/loop.c 	if (IS_ERR_OR_NULL(p))
p                 784 drivers/block/loop.c 		ret = PTR_ERR(p);
p                 786 drivers/block/loop.c 		ret = strlen(p);
p                 787 drivers/block/loop.c 		memmove(buf, p, ret);
p                3169 drivers/block/mtip32xx/mtip32xx.c 	char *p;
p                3172 drivers/block/mtip32xx/mtip32xx.c 	p = end - 1;
p                3173 drivers/block/mtip32xx/mtip32xx.c 	*p = '\0';
p                3176 drivers/block/mtip32xx/mtip32xx.c 		if (p == begin)
p                3178 drivers/block/mtip32xx/mtip32xx.c 		*--p = 'a' + (index % unit);
p                3182 drivers/block/mtip32xx/mtip32xx.c 	memmove(begin, p, end - p);
p                 350 drivers/block/paride/bpck.c {       int i, j, k, p, v, f, om, od;
p                 363 drivers/block/paride/bpck.c 	    p = 0x100;
p                 365 drivers/block/paride/bpck.c 		f = (((i + 0x180) & p) != 0) * 2;
p                 369 drivers/block/paride/bpck.c 		p = (p >> 1);
p                 213 drivers/block/paride/bpck6.c 	Interface *p = kzalloc(sizeof(Interface), GFP_KERNEL);
p                 215 drivers/block/paride/bpck6.c 	if (p) {
p                 216 drivers/block/paride/bpck6.c 		pi->private = (unsigned long)p;
p                  73 drivers/block/paride/paride.c static void pi_wake_up(void *p)
p                  75 drivers/block/paride/paride.c 	PIA *pi = (PIA *) p;
p                 354 drivers/block/paride/paride.c 	int p, k, s, e;
p                 373 drivers/block/paride/paride.c 	for (p = s; p < e; p++) {
p                 374 drivers/block/paride/paride.c 		struct pi_protocol *proto = protocols[p];
p                 379 drivers/block/paride/pcd.c 	int j, r, e, s, p;
p                 389 drivers/block/paride/pcd.c 		p = read_reg(cd, 2);
p                 395 drivers/block/paride/pcd.c 			       cd->name, fun, msg, r, s, e, j, p);
p                 434 drivers/block/paride/pcd.c 	int r, d, p, n, k, j;
p                 446 drivers/block/paride/pcd.c 			p = read_reg(cd, 2) & 3;
p                 448 drivers/block/paride/pcd.c 			if ((p == 2) && (n > 0) && (j == 0)) {
p                 459 drivers/block/paride/pcd.c 					     cd->name, fun, p, d, k);
p                 602 drivers/block/paride/pcd.c 	int k, p;
p                 608 drivers/block/paride/pcd.c 		p = cd->last_sense;
p                 609 drivers/block/paride/pcd.c 		if (!p)
p                 611 drivers/block/paride/pcd.c 		if (!(((p & 0xffff) == 0x0402) || ((p & 0xff) == 6)))
p                 612 drivers/block/paride/pcd.c 			return p;
p                 840 drivers/block/paride/pd.c static void pd_release(struct gendisk *p, fmode_t mode)
p                 842 drivers/block/paride/pd.c 	struct pd_unit *disk = p->private_data;
p                 850 drivers/block/paride/pd.c static unsigned int pd_check_events(struct gendisk *p, unsigned int clearing)
p                 852 drivers/block/paride/pd.c 	struct pd_unit *disk = p->private_data;
p                 862 drivers/block/paride/pd.c static int pd_revalidate(struct gendisk *p)
p                 864 drivers/block/paride/pd.c 	struct pd_unit *disk = p->private_data;
p                 866 drivers/block/paride/pd.c 		set_capacity(p, disk->capacity);
p                 868 drivers/block/paride/pd.c 		set_capacity(p, 0);
p                 890 drivers/block/paride/pd.c 	struct gendisk *p;
p                 892 drivers/block/paride/pd.c 	p = alloc_disk(1 << PD_BITS);
p                 893 drivers/block/paride/pd.c 	if (!p)
p                 896 drivers/block/paride/pd.c 	strcpy(p->disk_name, disk->name);
p                 897 drivers/block/paride/pd.c 	p->fops = &pd_fops;
p                 898 drivers/block/paride/pd.c 	p->major = major;
p                 899 drivers/block/paride/pd.c 	p->first_minor = (disk - pd) << PD_BITS;
p                 900 drivers/block/paride/pd.c 	p->events = DISK_EVENT_MEDIA_CHANGE;
p                 901 drivers/block/paride/pd.c 	disk->gd = p;
p                 902 drivers/block/paride/pd.c 	p->private_data = disk;
p                 916 drivers/block/paride/pd.c 	p->queue = blk_mq_init_queue(&disk->tag_set);
p                 917 drivers/block/paride/pd.c 	if (IS_ERR(p->queue)) {
p                 919 drivers/block/paride/pd.c 		p->queue = NULL;
p                 923 drivers/block/paride/pd.c 	p->queue->queuedata = disk;
p                 924 drivers/block/paride/pd.c 	blk_queue_max_hw_sectors(p->queue, cluster);
p                 925 drivers/block/paride/pd.c 	blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);
p                 934 drivers/block/paride/pd.c 	put_disk(p);
p                1028 drivers/block/paride/pd.c 		struct gendisk *p = disk->gd;
p                1029 drivers/block/paride/pd.c 		if (p) {
p                1031 drivers/block/paride/pd.c 			del_gendisk(p);
p                1032 drivers/block/paride/pd.c 			blk_cleanup_queue(p->queue);
p                1034 drivers/block/paride/pd.c 			put_disk(p);
p                 428 drivers/block/paride/pf.c 	int j, r, e, s, p;
p                 438 drivers/block/paride/pf.c 		p = read_reg(pf, 2);
p                 444 drivers/block/paride/pf.c 			       pf->name, fun, msg, r, s, e, j, p);
p                 290 drivers/block/paride/pg.c 	int j, r, e, s, p, to;
p                 308 drivers/block/paride/pg.c 		p = read_reg(dev, 2);
p                 311 drivers/block/paride/pg.c 			       dev->name, msg, s, e, p, to ? " timeout" : "");
p                 359 drivers/block/paride/pg.c 	int r, d, n, p;
p                 369 drivers/block/paride/pg.c 		p = read_reg(dev, 2) & 3;
p                 370 drivers/block/paride/pg.c 		if (p == 0)
p                 372 drivers/block/paride/pg.c 		if (p == 2)
p                 376 drivers/block/paride/pg.c 			       p ? "Read" : "Write", n);
p                 377 drivers/block/paride/pg.c 		dev->dlen += (1 - p) * d;
p                 274 drivers/block/paride/pt.c 	int j, r, e, s, p;
p                 285 drivers/block/paride/pt.c 		p = read_reg(pi, 2);
p                 291 drivers/block/paride/pt.c 			       tape->name, fun, msg, r, s, e, j, p);
p                 332 drivers/block/paride/pt.c 	int r, s, n, p;
p                 340 drivers/block/paride/pt.c 		p = read_reg(pi, 2) & 3;
p                 341 drivers/block/paride/pt.c 		if (p == 0)
p                 343 drivers/block/paride/pt.c 		if (p == 2)
p                 487 drivers/block/paride/pt.c 	int k, p;
p                 493 drivers/block/paride/pt.c 		p = tape->last_sense;
p                 494 drivers/block/paride/pt.c 		if (!p)
p                 496 drivers/block/paride/pt.c 		if (!(((p & 0xffff) == 0x0402) || ((p & 0xff) == 6)))
p                 497 drivers/block/paride/pt.c 			return p;
p                 708 drivers/block/paride/pt.c 	struct mtop __user *p = (void __user *)arg;
p                 713 drivers/block/paride/pt.c 		if (copy_from_user(&mtop, p, sizeof(struct mtop)))
p                 770 drivers/block/paride/pt.c 	int k, n, r, p, s, t, b;
p                 827 drivers/block/paride/pt.c 			p = (read_reg(pi, 2) & 3);
p                 828 drivers/block/paride/pt.c 			if (p != 2) {
p                 831 drivers/block/paride/pt.c 				       p);
p                 867 drivers/block/paride/pt.c 	int k, n, r, p, s, t, b;
p                 928 drivers/block/paride/pt.c 			p = (read_reg(pi, 2) & 3);
p                 929 drivers/block/paride/pt.c 			if (p != 0) {
p                 932 drivers/block/paride/pt.c 				       tape->name, p);
p                 109 drivers/block/pktcdvd.c static int pkt_seq_show(struct seq_file *m, void *p);
p                 124 drivers/block/pktcdvd.c 	struct pktcdvd_kobj *p;
p                 127 drivers/block/pktcdvd.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 128 drivers/block/pktcdvd.c 	if (!p)
p                 130 drivers/block/pktcdvd.c 	p->pd = pd;
p                 131 drivers/block/pktcdvd.c 	error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
p                 133 drivers/block/pktcdvd.c 		kobject_put(&p->kobj);
p                 136 drivers/block/pktcdvd.c 	kobject_uevent(&p->kobj, KOBJ_ADD);
p                 137 drivers/block/pktcdvd.c 	return p;
p                 142 drivers/block/pktcdvd.c static void pkt_kobj_remove(struct pktcdvd_kobj *p)
p                 144 drivers/block/pktcdvd.c 	if (p)
p                 145 drivers/block/pktcdvd.c 		kobject_put(&p->kobj);
p                 455 drivers/block/pktcdvd.c static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
p                 457 drivers/block/pktcdvd.c 	return pkt_seq_show(m, p);
p                 678 drivers/block/pktcdvd.c 	struct rb_node **p = &pd->bio_queue.rb_node;
p                 683 drivers/block/pktcdvd.c 	while (*p) {
p                 684 drivers/block/pktcdvd.c 		parent = *p;
p                 687 drivers/block/pktcdvd.c 			p = &(*p)->rb_left;
p                 689 drivers/block/pktcdvd.c 			p = &(*p)->rb_right;
p                 691 drivers/block/pktcdvd.c 	rb_link_node(&node->rb_node, parent, p);
p                1024 drivers/block/pktcdvd.c 		int p, offset;
p                1036 drivers/block/pktcdvd.c 		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
p                1039 drivers/block/pktcdvd.c 			f, pkt->pages[p], offset);
p                1040 drivers/block/pktcdvd.c 		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
p                1164 drivers/block/pktcdvd.c 	struct packet_data *pkt, *p;
p                1192 drivers/block/pktcdvd.c 		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
p                1193 drivers/block/pktcdvd.c 			if (p->sector == zone) {
p                2502 drivers/block/pktcdvd.c static int pkt_seq_show(struct seq_file *m, void *p)
p                 339 drivers/block/ps3disk.c 	unsigned char *p;
p                 345 drivers/block/ps3disk.c 	p = s + strnlen(s, len - 1);
p                 346 drivers/block/ps3disk.c 	while (p > s && p[-1] == ' ')
p                 347 drivers/block/ps3disk.c 		p--;
p                 348 drivers/block/ps3disk.c 	*p = '\0';
p                1813 drivers/block/rbd.c 	u8 *p;
p                1819 drivers/block/rbd.c 	p = &rbd_dev->object_map[index];
p                1820 drivers/block/rbd.c 	*p = (*p & ~(OBJ_MASK << shift)) | (val << shift);
p                1934 drivers/block/rbd.c static int decode_object_map_header(void **p, void *end, u64 *object_map_size)
p                1942 drivers/block/rbd.c 	ceph_decode_32_safe(p, end, header_len, e_inval);
p                1943 drivers/block/rbd.c 	header_end = *p + header_len;
p                1945 drivers/block/rbd.c 	ret = ceph_start_decoding(p, end, 1, "BitVector header", &struct_v,
p                1950 drivers/block/rbd.c 	ceph_decode_64_safe(p, end, *object_map_size, e_inval);
p                1952 drivers/block/rbd.c 	*p = header_end;
p                1964 drivers/block/rbd.c 	void *p, *end;
p                1991 drivers/block/rbd.c 	p = page_address(pages[0]);
p                1992 drivers/block/rbd.c 	end = p + min(reply_len, (size_t)PAGE_SIZE);
p                1993 drivers/block/rbd.c 	ret = decode_object_map_header(&p, end, &object_map_size);
p                2004 drivers/block/rbd.c 	if (offset_in_page(p) + object_map_bytes > reply_len) {
p                2017 drivers/block/rbd.c 				   offset_in_page(p), object_map_bytes);
p                2092 drivers/block/rbd.c 	void *p;
p                2110 drivers/block/rbd.c 	p = page_address(osd_data->pages[0]);
p                2111 drivers/block/rbd.c 	objno = ceph_decode_64(&p);
p                2113 drivers/block/rbd.c 	rbd_assert(ceph_decode_64(&p) == objno + 1);
p                2114 drivers/block/rbd.c 	new_state = ceph_decode_8(&p);
p                2115 drivers/block/rbd.c 	has_current_state = ceph_decode_8(&p);
p                2117 drivers/block/rbd.c 		current_state = ceph_decode_8(&p);
p                2158 drivers/block/rbd.c 	void *p, *start;
p                2169 drivers/block/rbd.c 	p = start = page_address(pages[0]);
p                2170 drivers/block/rbd.c 	ceph_encode_64(&p, objno);
p                2171 drivers/block/rbd.c 	ceph_encode_64(&p, objno + 1);
p                2172 drivers/block/rbd.c 	ceph_encode_8(&p, new_state);
p                2174 drivers/block/rbd.c 		ceph_encode_8(&p, 1);
p                2175 drivers/block/rbd.c 		ceph_encode_8(&p, *current_state);
p                2177 drivers/block/rbd.c 		ceph_encode_8(&p, 0);
p                2180 drivers/block/rbd.c 	osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0,
p                3848 drivers/block/rbd.c 	void *p = buf;
p                3853 drivers/block/rbd.c 	ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
p                3854 drivers/block/rbd.c 	ceph_encode_32(&p, notify_op);
p                3855 drivers/block/rbd.c 	ceph_encode_64(&p, cid.gid);
p                3856 drivers/block/rbd.c 	ceph_encode_64(&p, cid.handle);
p                3906 drivers/block/rbd.c 		void *p = page_address(reply_pages[0]);
p                3907 drivers/block/rbd.c 		void *const end = p + reply_len;
p                3910 drivers/block/rbd.c 		ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
p                3915 drivers/block/rbd.c 			ceph_decode_need(&p, end, 8 + 8, e_inval);
p                3916 drivers/block/rbd.c 			p += 8 + 8; /* skip gid and cookie */
p                3918 drivers/block/rbd.c 			ceph_decode_32_safe(&p, end, len, e_inval);
p                3930 drivers/block/rbd.c 			ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
p                3939 drivers/block/rbd.c 			ret = ceph_decode_32(&p);
p                4331 drivers/block/rbd.c 				     void **p)
p                4336 drivers/block/rbd.c 		cid.gid = ceph_decode_64(p);
p                4337 drivers/block/rbd.c 		cid.handle = ceph_decode_64(p);
p                4364 drivers/block/rbd.c 				     void **p)
p                4369 drivers/block/rbd.c 		cid.gid = ceph_decode_64(p);
p                4370 drivers/block/rbd.c 		cid.handle = ceph_decode_64(p);
p                4400 drivers/block/rbd.c 				   void **p)
p                4407 drivers/block/rbd.c 		cid.gid = ceph_decode_64(p);
p                4408 drivers/block/rbd.c 		cid.handle = ceph_decode_64(p);
p                4455 drivers/block/rbd.c 		void *p = buf;
p                4458 drivers/block/rbd.c 		ceph_start_encoding(&p, 1, 1,
p                4460 drivers/block/rbd.c 		ceph_encode_32(&p, *result);
p                4490 drivers/block/rbd.c 	void *p = data;
p                4491 drivers/block/rbd.c 	void *const end = p + data_len;
p                4500 drivers/block/rbd.c 		ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
p                4508 drivers/block/rbd.c 		notify_op = ceph_decode_32(&p);
p                4518 drivers/block/rbd.c 		rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
p                4522 drivers/block/rbd.c 		rbd_handle_released_lock(rbd_dev, struct_v, &p);
p                4526 drivers/block/rbd.c 		ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
p                5683 drivers/block/rbd.c 	void *p;
p                5698 drivers/block/rbd.c 	p = reply_buf;
p                5699 drivers/block/rbd.c 	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
p                5700 drivers/block/rbd.c 						p + ret, NULL, GFP_NOIO);
p                5798 drivers/block/rbd.c static int decode_parent_image_spec(void **p, void *end,
p                5805 drivers/block/rbd.c 	ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
p                5810 drivers/block/rbd.c 	ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
p                5811 drivers/block/rbd.c 	pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
p                5817 drivers/block/rbd.c 	pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
p                5823 drivers/block/rbd.c 	ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
p                5837 drivers/block/rbd.c 	void *p, *end;
p                5846 drivers/block/rbd.c 	p = page_address(reply_page);
p                5847 drivers/block/rbd.c 	end = p + reply_len;
p                5848 drivers/block/rbd.c 	ret = decode_parent_image_spec(&p, end, pii);
p                5858 drivers/block/rbd.c 	p = page_address(reply_page);
p                5859 drivers/block/rbd.c 	end = p + reply_len;
p                5860 drivers/block/rbd.c 	ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
p                5862 drivers/block/rbd.c 		ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
p                5880 drivers/block/rbd.c 	void *p, *end;
p                5889 drivers/block/rbd.c 	p = page_address(reply_page);
p                5890 drivers/block/rbd.c 	end = p + reply_len;
p                5891 drivers/block/rbd.c 	ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
p                5892 drivers/block/rbd.c 	pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
p                5898 drivers/block/rbd.c 	ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
p                5900 drivers/block/rbd.c 	ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
p                5912 drivers/block/rbd.c 	void *p;
p                5925 drivers/block/rbd.c 	p = page_address(req_page);
p                5926 drivers/block/rbd.c 	ceph_encode_64(&p, rbd_dev->spec->snap_id);
p                6040 drivers/block/rbd.c 	void *p;
p                6052 drivers/block/rbd.c 	p = &striping_info_buf;
p                6053 drivers/block/rbd.c 	rbd_dev->header.stripe_unit = ceph_decode_64(&p);
p                6054 drivers/block/rbd.c 	rbd_dev->header.stripe_count = ceph_decode_64(&p);
p                6081 drivers/block/rbd.c 	void *p;
p                6097 drivers/block/rbd.c 	p = image_id;
p                6099 drivers/block/rbd.c 	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
p                6112 drivers/block/rbd.c 	p = reply_buf;
p                6115 drivers/block/rbd.c 	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
p                6269 drivers/block/rbd.c 	void *p;
p                6295 drivers/block/rbd.c 	p = reply_buf;
p                6298 drivers/block/rbd.c 	ceph_decode_64_safe(&p, end, seq, out);
p                6299 drivers/block/rbd.c 	ceph_decode_32_safe(&p, end, snap_count, out);
p                6312 drivers/block/rbd.c 	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
p                6323 drivers/block/rbd.c 		snapc->snaps[i] = ceph_decode_64(&p);
p                6343 drivers/block/rbd.c 	void *p;
p                6362 drivers/block/rbd.c 	p = reply_buf;
p                6364 drivers/block/rbd.c 	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
p                6732 drivers/block/rbd.c 		void *p = response;
p                6734 drivers/block/rbd.c 		image_id = ceph_extract_encoded_string(&p, p + ret,
p                  50 drivers/block/rsxx/core.c static int rsxx_attr_pci_regs_show(struct seq_file *m, void *p)
p                 112 drivers/block/rsxx/core.c static int rsxx_attr_stats_show(struct seq_file *m, void *p)
p                 190 drivers/block/umem.c 	unsigned char *p;
p                 193 drivers/block/umem.c 	p = card->csr_remap;
p                 195 drivers/block/umem.c 		printk(KERN_DEBUG "%p   ", p);
p                 198 drivers/block/umem.c 			printk("%02x ", *p++);
p                 338 drivers/block/umem.c 	struct mm_page *p;
p                 370 drivers/block/umem.c 	p = &card->mm_pages[card->Ready];
p                 371 drivers/block/umem.c 	desc = &p->desc[p->cnt];
p                 372 drivers/block/umem.c 	p->cnt++;
p                 373 drivers/block/umem.c 	if (p->bio == NULL)
p                 374 drivers/block/umem.c 		p->iter = card->current_iter;
p                 375 drivers/block/umem.c 	if ((p->biotail) != &bio->bi_next) {
p                 376 drivers/block/umem.c 		*(p->biotail) = bio;
p                 377 drivers/block/umem.c 		p->biotail = &(bio->bi_next);
p                 386 drivers/block/umem.c 	offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc));
p                 387 drivers/block/umem.c 	desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
p                 389 drivers/block/umem.c 	offset = (((char *)(desc+1)) - ((char *)p->desc));
p                 390 drivers/block/umem.c 	desc->next_desc_addr = cpu_to_le64(p->page_dma+offset);
p                 638 drivers/block/virtio_blk.c 	char *p;
p                 641 drivers/block/virtio_blk.c 	p = end - 1;
p                 642 drivers/block/virtio_blk.c 	*p = '\0';
p                 645 drivers/block/virtio_blk.c 		if (p == begin)
p                 647 drivers/block/virtio_blk.c 		*--p = 'a' + (index % unit);
p                 651 drivers/block/virtio_blk.c 	memmove(begin, p, end - p);
p                 418 drivers/block/zram/zram_drv.c 	char *p;
p                 429 drivers/block/zram/zram_drv.c 	p = file_path(file, buf, PAGE_SIZE - 1);
p                 430 drivers/block/zram/zram_drv.c 	if (IS_ERR(p)) {
p                 431 drivers/block/zram/zram_drv.c 		ret = PTR_ERR(p);
p                 435 drivers/block/zram/zram_drv.c 	ret = strlen(p);
p                 436 drivers/block/zram/zram_drv.c 	memmove(buf, p, ret);
p                 122 drivers/bluetooth/btmrvl_sdio.h #define ALIGN_SZ(p, a)	\
p                 123 drivers/bluetooth/btmrvl_sdio.h 	(((p) + ((a) - 1)) & ~((a) - 1))
p                 126 drivers/bluetooth/btmrvl_sdio.h #define ALIGN_ADDR(p, a)	\
p                 127 drivers/bluetooth/btmrvl_sdio.h 	((((unsigned long)(p)) + (((unsigned long)(a)) - 1)) & \
p                 193 drivers/bluetooth/hci_bcm.c 	struct list_head *p;
p                 201 drivers/bluetooth/hci_bcm.c 	list_for_each(p, &bcm_device_list) {
p                 202 drivers/bluetooth/hci_bcm.c 		struct bcm_device *dev = list_entry(p, struct bcm_device, list);
p                 404 drivers/bluetooth/hci_bcm.c 	struct list_head *p;
p                 430 drivers/bluetooth/hci_bcm.c 	list_for_each(p, &bcm_device_list) {
p                 431 drivers/bluetooth/hci_bcm.c 		struct bcm_device *dev = list_entry(p, struct bcm_device, list);
p                 291 drivers/bluetooth/hci_intel.c 	struct list_head *p;
p                 299 drivers/bluetooth/hci_intel.c 	list_for_each(p, &intel_device_list) {
p                 300 drivers/bluetooth/hci_intel.c 		struct intel_device *idev = list_entry(p, struct intel_device,
p                 365 drivers/bluetooth/hci_intel.c 	struct list_head *p;
p                 374 drivers/bluetooth/hci_intel.c 	list_for_each(p, &intel_device_list) {
p                 375 drivers/bluetooth/hci_intel.c 		struct intel_device *idev = list_entry(p, struct intel_device,
p                 536 drivers/bluetooth/hci_intel.c 	struct list_head *p;
p                 842 drivers/bluetooth/hci_intel.c 	list_for_each(p, &intel_device_list) {
p                 843 drivers/bluetooth/hci_intel.c 		struct intel_device *dev = list_entry(p, struct intel_device,
p                1002 drivers/bluetooth/hci_intel.c 	struct list_head *p;
p                1013 drivers/bluetooth/hci_intel.c 	list_for_each(p, &intel_device_list) {
p                1014 drivers/bluetooth/hci_intel.c 		struct intel_device *idev = list_entry(p, struct intel_device,
p                  42 drivers/bluetooth/hci_ldisc.c int hci_uart_register_proto(const struct hci_uart_proto *p)
p                  44 drivers/bluetooth/hci_ldisc.c 	if (p->id >= HCI_UART_MAX_PROTO)
p                  47 drivers/bluetooth/hci_ldisc.c 	if (hup[p->id])
p                  50 drivers/bluetooth/hci_ldisc.c 	hup[p->id] = p;
p                  52 drivers/bluetooth/hci_ldisc.c 	BT_INFO("HCI UART protocol %s registered", p->name);
p                  57 drivers/bluetooth/hci_ldisc.c int hci_uart_unregister_proto(const struct hci_uart_proto *p)
p                  59 drivers/bluetooth/hci_ldisc.c 	if (p->id >= HCI_UART_MAX_PROTO)
p                  62 drivers/bluetooth/hci_ldisc.c 	if (!hup[p->id])
p                  65 drivers/bluetooth/hci_ldisc.c 	hup[p->id] = NULL;
p                 697 drivers/bluetooth/hci_ldisc.c 	const struct hci_uart_proto *p;
p                 700 drivers/bluetooth/hci_ldisc.c 	p = hci_uart_get_proto(id);
p                 701 drivers/bluetooth/hci_ldisc.c 	if (!p)
p                 704 drivers/bluetooth/hci_ldisc.c 	hu->proto = p;
p                 269 drivers/bluetooth/hci_serdev.c 			     const struct hci_uart_proto *p)
p                 282 drivers/bluetooth/hci_serdev.c 	err = p->open(hu);
p                 286 drivers/bluetooth/hci_serdev.c 	hu->proto = p;
p                 348 drivers/bluetooth/hci_serdev.c 	p->close(hu);
p                  97 drivers/bluetooth/hci_uart.h int hci_uart_register_proto(const struct hci_uart_proto *p);
p                  98 drivers/bluetooth/hci_uart.h int hci_uart_unregister_proto(const struct hci_uart_proto *p);
p                  99 drivers/bluetooth/hci_uart.h int hci_uart_register_device(struct hci_uart *hu, const struct hci_uart_proto *p);
p                 266 drivers/bus/brcmstb_gisb.c 			   void *p);
p                 277 drivers/bus/brcmstb_gisb.c 			   void *p)
p                  83 drivers/bus/imx-weim.c 	const __be32 *p;
p                 102 drivers/bus/imx-weim.c 	of_property_for_each_u32(np, "ranges", prop, p, val) {
p                 496 drivers/bus/moxtet.c 	u8 *p = hex;
p                 502 drivers/bus/moxtet.c 		p = hex_byte_pack(p, moxtet->tx[moxtet->count - i]);
p                 506 drivers/bus/moxtet.c 	*p++ = '\n';
p                 508 drivers/bus/moxtet.c 	return simple_read_from_buffer(buf, len, ppos, hex, p - hex);
p                 655 drivers/bus/moxtet.c static void moxtet_irq_print_chip(struct irq_data *d, struct seq_file *p)
p                 663 drivers/bus/moxtet.c 	seq_printf(p, " moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
p                1755 drivers/bus/ti-sysc.c 	const __be32 *p;
p                1758 drivers/bus/ti-sysc.c 	of_property_for_each_u32(np, name, prop, p, val) {
p                 172 drivers/char/agp/agp.h #define PGE_EMPTY(b, p)	(!(p) || (p) == (unsigned long) (b)->scratch_page)
p                  26 drivers/char/hw_random/cavium-rng-vf.c 	struct cavium_rng *p = container_of(rng, struct cavium_rng, ops);
p                  30 drivers/char/hw_random/cavium-rng-vf.c 		*((u64 *)dat) = readq(p->result);
p                  35 drivers/char/hw_random/cavium-rng-vf.c 		*((u8 *)dat) = readb(p->result);
p                  23 drivers/char/hw_random/hisi-rng.c #define to_hisi_rng(p)	container_of(p, struct hisi_rng, rng)
p                  33 drivers/char/hw_random/mtk-rng.c #define to_mtk_rng(p)	container_of(p, struct mtk_rng, rng)
p                  31 drivers/char/hw_random/octeon-rng.c 	struct octeon_rng *p = container_of(rng, struct octeon_rng, ops);
p                  36 drivers/char/hw_random/octeon-rng.c 	cvmx_write_csr((u64)p->control_status, ctl.u64);
p                  43 drivers/char/hw_random/octeon-rng.c 	struct octeon_rng *p = container_of(rng, struct octeon_rng, ops);
p                  47 drivers/char/hw_random/octeon-rng.c 	cvmx_write_csr((u64)p->control_status, ctl.u64);
p                  52 drivers/char/hw_random/octeon-rng.c 	struct octeon_rng *p = container_of(rng, struct octeon_rng, ops);
p                  54 drivers/char/hw_random/octeon-rng.c 	*data = cvmx_read64_uint32((u64)p->result);
p                  59 drivers/char/hw_random/s390-trng.c 	u8 *p = buf;
p                  70 drivers/char/hw_random/s390-trng.c 		p = (u8 *) __get_free_page(GFP_KERNEL);
p                  71 drivers/char/hw_random/s390-trng.c 		if (!p)
p                  85 drivers/char/hw_random/s390-trng.c 		cpacf_trng(NULL, 0, p, n);
p                  87 drivers/char/hw_random/s390-trng.c 		if (copy_to_user(ubuf, p, n)) {
p                  96 drivers/char/hw_random/s390-trng.c 	if (p != buf)
p                  97 drivers/char/hw_random/s390-trng.c 		free_page((unsigned long) p);
p                  44 drivers/char/ipmi/ipmi_dmi.c 	struct ipmi_plat_data p;
p                  46 drivers/char/ipmi/ipmi_dmi.c 	memset(&p, 0, sizeof(p));
p                  49 drivers/char/ipmi/ipmi_dmi.c 	p.iftype = IPMI_PLAT_IF_SI;
p                  53 drivers/char/ipmi/ipmi_dmi.c 		p.iftype = IPMI_PLAT_IF_SSIF;
p                  54 drivers/char/ipmi/ipmi_dmi.c 		p.type = SI_TYPE_INVALID;
p                  57 drivers/char/ipmi/ipmi_dmi.c 		p.type = SI_BT;
p                  60 drivers/char/ipmi/ipmi_dmi.c 		p.type = SI_KCS;
p                  63 drivers/char/ipmi/ipmi_dmi.c 		p.type = SI_SMIC;
p                  70 drivers/char/ipmi/ipmi_dmi.c 	p.addr = base_addr;
p                  71 drivers/char/ipmi/ipmi_dmi.c 	p.space = space;
p                  72 drivers/char/ipmi/ipmi_dmi.c 	p.regspacing = offset;
p                  73 drivers/char/ipmi/ipmi_dmi.c 	p.irq = irq;
p                  74 drivers/char/ipmi/ipmi_dmi.c 	p.slave_addr = slave_addr;
p                  75 drivers/char/ipmi/ipmi_dmi.c 	p.addr_source = SI_SMBIOS;
p                  81 drivers/char/ipmi/ipmi_dmi.c 		info->si_type = p.type;
p                  89 drivers/char/ipmi/ipmi_dmi.c 	if (ipmi_platform_add(name, ipmi_dmi_nr, &p))
p                4923 drivers/char/ipmi/ipmi_msghandler.c 	char *p = str;
p                5042 drivers/char/ipmi/ipmi_msghandler.c 	while (*p) {
p                5043 drivers/char/ipmi/ipmi_msghandler.c 		int size = strlen(p);
p                5056 drivers/char/ipmi/ipmi_msghandler.c 		strncpy(data+5, p, 11);
p                5057 drivers/char/ipmi/ipmi_msghandler.c 		p += size;
p                  12 drivers/char/ipmi/ipmi_plat_data.c 					  struct ipmi_plat_data *p)
p                  24 drivers/char/ipmi/ipmi_plat_data.c 	if (p->iftype == IPMI_PLAT_IF_SI) {
p                  25 drivers/char/ipmi/ipmi_plat_data.c 		if (p->type == SI_BT)
p                  27 drivers/char/ipmi/ipmi_plat_data.c 		else if (p->type != SI_TYPE_INVALID)
p                  30 drivers/char/ipmi/ipmi_plat_data.c 		if (p->regsize == 0)
p                  31 drivers/char/ipmi/ipmi_plat_data.c 			p->regsize = DEFAULT_REGSIZE;
p                  32 drivers/char/ipmi/ipmi_plat_data.c 		if (p->regspacing == 0)
p                  33 drivers/char/ipmi/ipmi_plat_data.c 			p->regspacing = p->regsize;
p                  35 drivers/char/ipmi/ipmi_plat_data.c 		pr[pidx++] = PROPERTY_ENTRY_U8("ipmi-type", p->type);
p                  36 drivers/char/ipmi/ipmi_plat_data.c 	} else if (p->iftype == IPMI_PLAT_IF_SSIF) {
p                  37 drivers/char/ipmi/ipmi_plat_data.c 		pr[pidx++] = PROPERTY_ENTRY_U16("i2c-addr", p->addr);
p                  40 drivers/char/ipmi/ipmi_plat_data.c 	if (p->slave_addr)
p                  41 drivers/char/ipmi/ipmi_plat_data.c 		pr[pidx++] = PROPERTY_ENTRY_U8("slave-addr", p->slave_addr);
p                  42 drivers/char/ipmi/ipmi_plat_data.c 	pr[pidx++] = PROPERTY_ENTRY_U8("addr-source", p->addr_source);
p                  43 drivers/char/ipmi/ipmi_plat_data.c 	if (p->regshift)
p                  44 drivers/char/ipmi/ipmi_plat_data.c 		pr[pidx++] = PROPERTY_ENTRY_U8("reg-shift", p->regshift);
p                  45 drivers/char/ipmi/ipmi_plat_data.c 	pr[pidx++] = PROPERTY_ENTRY_U8("reg-size", p->regsize);
p                  64 drivers/char/ipmi/ipmi_plat_data.c 	if (p->space == IPMI_IO_ADDR_SPACE)
p                  69 drivers/char/ipmi/ipmi_plat_data.c 	r[0].start = p->addr;
p                  70 drivers/char/ipmi/ipmi_plat_data.c 	r[0].end = r[0].start + p->regsize - 1;
p                  75 drivers/char/ipmi/ipmi_plat_data.c 		r[1].start = r[0].start + p->regspacing;
p                  76 drivers/char/ipmi/ipmi_plat_data.c 		r[1].end = r[1].start + p->regsize - 1;
p                  83 drivers/char/ipmi/ipmi_plat_data.c 		r[2].start = r[1].start + p->regspacing;
p                  84 drivers/char/ipmi/ipmi_plat_data.c 		r[2].end = r[2].start + p->regsize - 1;
p                  90 drivers/char/ipmi/ipmi_plat_data.c 	if (p->irq) {
p                  91 drivers/char/ipmi/ipmi_plat_data.c 		r[num_r].start = p->irq;
p                  92 drivers/char/ipmi/ipmi_plat_data.c 		r[num_r].end = p->irq;
p                  25 drivers/char/ipmi/ipmi_plat_data.h 					  struct ipmi_plat_data *p);
p                  82 drivers/char/ipmi/ipmi_si_hardcode.c 	struct ipmi_plat_data p;
p                  84 drivers/char/ipmi/ipmi_si_hardcode.c 	memset(&p, 0, sizeof(p));
p                  86 drivers/char/ipmi/ipmi_si_hardcode.c 	p.iftype = IPMI_PLAT_IF_SI;
p                  88 drivers/char/ipmi/ipmi_si_hardcode.c 		p.type = SI_KCS;
p                  90 drivers/char/ipmi/ipmi_si_hardcode.c 		p.type = SI_SMIC;
p                  92 drivers/char/ipmi/ipmi_si_hardcode.c 		p.type = SI_BT;
p                  98 drivers/char/ipmi/ipmi_si_hardcode.c 		p.type = SI_TYPE_INVALID;
p                 105 drivers/char/ipmi/ipmi_si_hardcode.c 	p.regsize = regsizes[i];
p                 106 drivers/char/ipmi/ipmi_si_hardcode.c 	p.slave_addr = slave_addrs[i];
p                 107 drivers/char/ipmi/ipmi_si_hardcode.c 	p.addr_source = SI_HARDCODED;
p                 108 drivers/char/ipmi/ipmi_si_hardcode.c 	p.regshift = regshifts[i];
p                 109 drivers/char/ipmi/ipmi_si_hardcode.c 	p.regsize = regsizes[i];
p                 110 drivers/char/ipmi/ipmi_si_hardcode.c 	p.addr = addr;
p                 111 drivers/char/ipmi/ipmi_si_hardcode.c 	p.space = addr_space;
p                 113 drivers/char/ipmi/ipmi_si_hardcode.c 	ipmi_platform_add("hardcode-ipmi-si", i, &p);
p                 114 drivers/char/mem.c 	phys_addr_t p = *ppos;
p                 120 drivers/char/mem.c 	if (p != *ppos)
p                 123 drivers/char/mem.c 	if (!valid_phys_addr_range(p, count))
p                 128 drivers/char/mem.c 	if (p < PAGE_SIZE) {
p                 129 drivers/char/mem.c 		sz = size_inside_page(p, count);
p                 134 drivers/char/mem.c 			p += sz;
p                 149 drivers/char/mem.c 		sz = size_inside_page(p, count);
p                 152 drivers/char/mem.c 		allowed = page_is_allowed(p >> PAGE_SHIFT);
p                 166 drivers/char/mem.c 			ptr = xlate_dev_mem_ptr(p);
p                 171 drivers/char/mem.c 			unxlate_dev_mem_ptr(p, ptr);
p                 182 drivers/char/mem.c 		p += sz;
p                 201 drivers/char/mem.c 	phys_addr_t p = *ppos;
p                 206 drivers/char/mem.c 	if (p != *ppos)
p                 209 drivers/char/mem.c 	if (!valid_phys_addr_range(p, count))
p                 216 drivers/char/mem.c 	if (p < PAGE_SIZE) {
p                 217 drivers/char/mem.c 		sz = size_inside_page(p, count);
p                 220 drivers/char/mem.c 		p += sz;
p                 229 drivers/char/mem.c 		sz = size_inside_page(p, count);
p                 231 drivers/char/mem.c 		allowed = page_is_allowed(p >> PAGE_SHIFT);
p                 242 drivers/char/mem.c 			ptr = xlate_dev_mem_ptr(p);
p                 250 drivers/char/mem.c 			unxlate_dev_mem_ptr(p, ptr);
p                 260 drivers/char/mem.c 		p += sz;
p                 439 drivers/char/mem.c 	unsigned long p = *ppos;
p                 445 drivers/char/mem.c 	if (p < (unsigned long) high_memory) {
p                 447 drivers/char/mem.c 		if (count > (unsigned long)high_memory - p)
p                 448 drivers/char/mem.c 			low_count = (unsigned long)high_memory - p;
p                 452 drivers/char/mem.c 		if (p < PAGE_SIZE && low_count > 0) {
p                 453 drivers/char/mem.c 			sz = size_inside_page(p, low_count);
p                 457 drivers/char/mem.c 			p += sz;
p                 464 drivers/char/mem.c 			sz = size_inside_page(p, low_count);
p                 471 drivers/char/mem.c 			kbuf = xlate_dev_kmem_ptr((void *)p);
p                 478 drivers/char/mem.c 			p += sz;
p                 494 drivers/char/mem.c 			sz = size_inside_page(p, count);
p                 495 drivers/char/mem.c 			if (!is_vmalloc_or_module_addr((void *)p)) {
p                 499 drivers/char/mem.c 			sz = vread(kbuf, (char *)p, sz);
p                 509 drivers/char/mem.c 			p += sz;
p                 515 drivers/char/mem.c 	*ppos = p;
p                 520 drivers/char/mem.c static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
p                 529 drivers/char/mem.c 	if (p < PAGE_SIZE) {
p                 530 drivers/char/mem.c 		sz = size_inside_page(p, count);
p                 533 drivers/char/mem.c 		p += sz;
p                 542 drivers/char/mem.c 		sz = size_inside_page(p, count);
p                 549 drivers/char/mem.c 		ptr = xlate_dev_kmem_ptr((void *)p);
p                 561 drivers/char/mem.c 		p += sz;
p                 578 drivers/char/mem.c 	unsigned long p = *ppos;
p                 584 drivers/char/mem.c 	if (p < (unsigned long) high_memory) {
p                 586 drivers/char/mem.c 					       (unsigned long)high_memory - p);
p                 587 drivers/char/mem.c 		wrote = do_write_kmem(p, buf, to_write, ppos);
p                 590 drivers/char/mem.c 		p += wrote;
p                 600 drivers/char/mem.c 			unsigned long sz = size_inside_page(p, count);
p                 603 drivers/char/mem.c 			if (!is_vmalloc_or_module_addr((void *)p)) {
p                 612 drivers/char/mem.c 			vwrite(kbuf, (char *)p, sz);
p                 616 drivers/char/mem.c 			p += sz;
p                 623 drivers/char/mem.c 	*ppos = p;
p                  85 drivers/char/misc.c 	const struct miscdevice *p = list_entry(v, struct miscdevice, list);
p                  87 drivers/char/misc.c 	seq_printf(seq, "%3i %s\n", p->minor, p->name ? p->name : "");
p                 171 drivers/char/nvram.c 	char *p = buf;
p                 179 drivers/char/nvram.c 	for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
p                 180 drivers/char/nvram.c 		*p = __nvram_read_byte(i);
p                 184 drivers/char/nvram.c 	return p - buf;
p                 189 drivers/char/nvram.c 	char *p = buf;
p                 197 drivers/char/nvram.c 	for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
p                 198 drivers/char/nvram.c 		__nvram_write_byte(*p, i);
p                 203 drivers/char/nvram.c 	return p - buf;
p                  46 drivers/char/nwflash.c static int write_block(unsigned long p, const char __user *buf, int count);
p                 146 drivers/char/nwflash.c 	unsigned long p = *ppos;
p                 154 drivers/char/nwflash.c 		       p, buf, count);
p                 159 drivers/char/nwflash.c 	if (p < 64 * 1024 && (!gbWriteBase64Enable))
p                 165 drivers/char/nwflash.c 	if (p >= gbFlashSize)
p                 168 drivers/char/nwflash.c 	if (count > gbFlashSize - p)
p                 169 drivers/char/nwflash.c 		count = gbFlashSize - p;
p                 182 drivers/char/nwflash.c 	nBlock = (int) p >> 16;	//block # of 64K bytes
p                 187 drivers/char/nwflash.c 	temp = ((int) (p + count) >> 16) - nBlock + 1;
p                 192 drivers/char/nwflash.c 	if (((int) (p + count) & 0xFFFF) == 0)
p                 220 drivers/char/nwflash.c 			       "from buf %p, bytes left %X.\n", p, buf,
p                 226 drivers/char/nwflash.c 		rc = write_block(p, buf, count - written);
p                 249 drivers/char/nwflash.c 		p += rc;
p                 397 drivers/char/nwflash.c static int write_block(unsigned long p, const char __user *buf, int count)
p                 407 drivers/char/nwflash.c 	pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p));
p                 412 drivers/char/nwflash.c 	offset = p & 0xFFFF;
p                 532 drivers/char/nwflash.c 	pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p));
p                 932 drivers/char/random.c 	char *p;
p                 940 drivers/char/random.c 	p = (unsigned char *) &primary_crng.state[4];
p                 942 drivers/char/random.c 		p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
p                1901 drivers/char/random.c 	char *p = buf;
p                1911 drivers/char/random.c 		memcpy(p, &v, chunk);
p                1912 drivers/char/random.c 		p += chunk;
p                2066 drivers/char/random.c 	const char __user *p = buffer;
p                2072 drivers/char/random.c 		if (copy_from_user(&buf, p, bytes))
p                2082 drivers/char/random.c 		p += bytes;
p                2106 drivers/char/random.c 	int __user *p = (int __user *)arg;
p                2113 drivers/char/random.c 		if (put_user(ent_count, p))
p                2119 drivers/char/random.c 		if (get_user(ent_count, p))
p                2125 drivers/char/random.c 		if (get_user(ent_count, p++))
p                2129 drivers/char/random.c 		if (get_user(size, p++))
p                2131 drivers/char/random.c 		retval = write_pool(&input_pool, (const char __user *)p,
p                 127 drivers/char/tpm/tpm_nsc.c 	u8 data, *p;
p                 147 drivers/char/tpm/tpm_nsc.c 	for (p = buffer; p < &buffer[count]; p++) {
p                 156 drivers/char/tpm/tpm_nsc.c 		*p = inb(priv->base + NSC_DATA);
p                  12 drivers/clk/at91/at91sam9260.c 	char *p;
p                  74 drivers/clk/at91/at91sam9260.c 	{ .n = "uhpck", .p = "usbck",    .id = 6 },
p                  75 drivers/clk/at91/at91sam9260.c 	{ .n = "udpck", .p = "usbck",    .id = 7 },
p                  76 drivers/clk/at91/at91sam9260.c 	{ .n = "pck0",  .p = "prog0",    .id = 8 },
p                  77 drivers/clk/at91/at91sam9260.c 	{ .n = "pck1",  .p = "prog1",    .id = 9 },
p                 215 drivers/clk/at91/at91sam9260.c 	{ .n = "uhpck", .p = "usbck",    .id = 6 },
p                 216 drivers/clk/at91/at91sam9260.c 	{ .n = "udpck", .p = "usbck",    .id = 7 },
p                 217 drivers/clk/at91/at91sam9260.c 	{ .n = "pck0",  .p = "prog0",    .id = 8 },
p                 218 drivers/clk/at91/at91sam9260.c 	{ .n = "pck1",  .p = "prog1",    .id = 9 },
p                 219 drivers/clk/at91/at91sam9260.c 	{ .n = "pck2",  .p = "prog2",    .id = 10 },
p                 220 drivers/clk/at91/at91sam9260.c 	{ .n = "pck3",  .p = "prog3",    .id = 11 },
p                 221 drivers/clk/at91/at91sam9260.c 	{ .n = "hclk0", .p = "masterck", .id = 16 },
p                 222 drivers/clk/at91/at91sam9260.c 	{ .n = "hclk1", .p = "masterck", .id = 17 },
p                 279 drivers/clk/at91/at91sam9260.c 	{ .n = "uhpck", .p = "usbck",    .id = 6 },
p                 280 drivers/clk/at91/at91sam9260.c 	{ .n = "udpck", .p = "usbck",    .id = 7 },
p                 281 drivers/clk/at91/at91sam9260.c 	{ .n = "pck0",  .p = "prog0",    .id = 8 },
p                 282 drivers/clk/at91/at91sam9260.c 	{ .n = "pck1",  .p = "prog1",    .id = 9 },
p                 283 drivers/clk/at91/at91sam9260.c 	{ .n = "pck2",  .p = "prog2",    .id = 10 },
p                 284 drivers/clk/at91/at91sam9260.c 	{ .n = "pck3",  .p = "prog3",    .id = 11 },
p                 441 drivers/clk/at91/at91sam9260.c 					      data->sck[i].p,
p                  31 drivers/clk/at91/at91sam9rl.c 	char *p;
p                  34 drivers/clk/at91/at91sam9rl.c 	{ .n = "pck0",  .p = "prog0",    .id = 8 },
p                  35 drivers/clk/at91/at91sam9rl.c 	{ .n = "pck1",  .p = "prog1",    .id = 9 },
p                 145 drivers/clk/at91/at91sam9rl.c 					      at91sam9rl_systemck[i].p,
p                  41 drivers/clk/at91/at91sam9x5.c 	char *p;
p                  44 drivers/clk/at91/at91sam9x5.c 	{ .n = "ddrck", .p = "masterck", .id = 2 },
p                  45 drivers/clk/at91/at91sam9x5.c 	{ .n = "smdck", .p = "smdclk",   .id = 4 },
p                  46 drivers/clk/at91/at91sam9x5.c 	{ .n = "uhpck", .p = "usbck",    .id = 6 },
p                  47 drivers/clk/at91/at91sam9x5.c 	{ .n = "udpck", .p = "usbck",    .id = 7 },
p                  48 drivers/clk/at91/at91sam9x5.c 	{ .n = "pck0",  .p = "prog0",    .id = 8 },
p                  49 drivers/clk/at91/at91sam9x5.c 	{ .n = "pck1",  .p = "prog1",    .id = 9 },
p                 234 drivers/clk/at91/at91sam9x5.c 					      at91sam9x5_systemck[i].p,
p                  62 drivers/clk/at91/sam9x60.c 	char *p;
p                  65 drivers/clk/at91/sam9x60.c 	{ .n = "ddrck",  .p = "masterck", .id = 2 },
p                  66 drivers/clk/at91/sam9x60.c 	{ .n = "uhpck",  .p = "usbck",    .id = 6 },
p                  67 drivers/clk/at91/sam9x60.c 	{ .n = "pck0",   .p = "prog0",    .id = 8 },
p                  68 drivers/clk/at91/sam9x60.c 	{ .n = "pck1",   .p = "prog1",    .id = 9 },
p                  69 drivers/clk/at91/sam9x60.c 	{ .n = "qspick", .p = "masterck", .id = 19 },
p                 265 drivers/clk/at91/sam9x60.c 					      sam9x60_systemck[i].p,
p                  40 drivers/clk/at91/sama5d2.c 	char *p;
p                  43 drivers/clk/at91/sama5d2.c 	{ .n = "ddrck", .p = "masterck", .id = 2 },
p                  44 drivers/clk/at91/sama5d2.c 	{ .n = "lcdck", .p = "masterck", .id = 3 },
p                  45 drivers/clk/at91/sama5d2.c 	{ .n = "uhpck", .p = "usbck",    .id = 6 },
p                  46 drivers/clk/at91/sama5d2.c 	{ .n = "udpck", .p = "usbck",    .id = 7 },
p                  47 drivers/clk/at91/sama5d2.c 	{ .n = "pck0",  .p = "prog0",    .id = 8 },
p                  48 drivers/clk/at91/sama5d2.c 	{ .n = "pck1",  .p = "prog1",    .id = 9 },
p                  49 drivers/clk/at91/sama5d2.c 	{ .n = "pck2",  .p = "prog2",    .id = 10 },
p                  50 drivers/clk/at91/sama5d2.c 	{ .n = "iscck", .p = "masterck", .id = 18 },
p                 274 drivers/clk/at91/sama5d2.c 					      sama5d2_systemck[i].p,
p                  39 drivers/clk/at91/sama5d4.c 	char *p;
p                  42 drivers/clk/at91/sama5d4.c 	{ .n = "ddrck", .p = "masterck", .id = 2 },
p                  43 drivers/clk/at91/sama5d4.c 	{ .n = "lcdck", .p = "masterck", .id = 3 },
p                  44 drivers/clk/at91/sama5d4.c 	{ .n = "smdck", .p = "smdclk",   .id = 4 },
p                  45 drivers/clk/at91/sama5d4.c 	{ .n = "uhpck", .p = "usbck",    .id = 6 },
p                  46 drivers/clk/at91/sama5d4.c 	{ .n = "udpck", .p = "usbck",    .id = 7 },
p                  47 drivers/clk/at91/sama5d4.c 	{ .n = "pck0",  .p = "prog0",    .id = 8 },
p                  48 drivers/clk/at91/sama5d4.c 	{ .n = "pck1",  .p = "prog1",    .id = 9 },
p                  49 drivers/clk/at91/sama5d4.c 	{ .n = "pck2",  .p = "prog2",    .id = 10 },
p                 231 drivers/clk/at91/sama5d4.c 					      sama5d4_systemck[i].p,
p                 157 drivers/clk/clk-aspeed.c 		u32 p = (val >> 13) & 0x3f;
p                 162 drivers/clk/clk-aspeed.c 		div = p + 1;
p                  68 drivers/clk/clk-aspeed.h #define to_aspeed_reset(p) container_of((p), struct aspeed_reset, rcdev)
p                 169 drivers/clk/clk-ast2600.c 		u32 p = (val >> 19) & 0xf;
p                 171 drivers/clk/clk-ast2600.c 		div = (p + 1);
p                 186 drivers/clk/clk-cdce925.c 	u8 p;
p                 191 drivers/clk/clk-cdce925.c 	p = 4;
p                 194 drivers/clk/clk-cdce925.c 		--p;
p                 196 drivers/clk/clk-cdce925.c 	return p;
p                 224 drivers/clk/clk-cdce925.c 	u8 p;
p                 237 drivers/clk/clk-cdce925.c 		p = cdce925_pll_calc_p(n, m);
p                 239 drivers/clk/clk-cdce925.c 		nn = n * BIT(p);
p                 252 drivers/clk/clk-cdce925.c 			n, m, p, q, r);
p                 257 drivers/clk/clk-cdce925.c 		pll[3] = ((q & 0x07) << 5) | (p << 2) |
p                 117 drivers/clk/clk-gemini.c #define to_gemini_reset(p) container_of((p), struct gemini_reset, rcdev)
p                  42 drivers/clk/clk-highbank.c #define to_hb_clk(p) container_of(p, struct hb_clk, hw)
p                 782 drivers/clk/clk-qoriq.c #define to_mux_hwclock(p)	container_of(p, struct mux_hwclock, hw)
p                1162 drivers/clk/clk-si5351.c 	const __be32 *p;
p                1177 drivers/clk/clk-si5351.c 	of_property_for_each_u32(np, "silabs,pll-source", prop, p, num) {
p                1184 drivers/clk/clk-si5351.c 		p = of_prop_next_u32(prop, p, &val);
p                1185 drivers/clk/clk-si5351.c 		if (!p) {
p                2860 drivers/clk/clk.c bool clk_is_match(const struct clk *p, const struct clk *q)
p                2863 drivers/clk/clk.c 	if (p == q)
p                2867 drivers/clk/clk.c 	if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
p                2868 drivers/clk/clk.c 		if (p->core == q->core)
p                  38 drivers/clk/clkdev.c 	struct clk_lookup *p, *cl = NULL;
p                  48 drivers/clk/clkdev.c 	list_for_each_entry(p, &clocks, node) {
p                  50 drivers/clk/clkdev.c 		if (p->dev_id) {
p                  51 drivers/clk/clkdev.c 			if (!dev_id || strcmp(p->dev_id, dev_id))
p                  55 drivers/clk/clkdev.c 		if (p->con_id) {
p                  56 drivers/clk/clkdev.c 			if (!con_id || strcmp(p->con_id, con_id))
p                  62 drivers/clk/clkdev.c 			cl = p;
p                  70 drivers/clk/davinci/pll.h #define SYSCLK(i, n, p, w, f)				\
p                  73 drivers/clk/davinci/pll.h 	.parent_name	= #p,				\
p                  69 drivers/clk/davinci/psc.h #define LPSC(m, d, n, p, c, f)	\
p                  72 drivers/clk/davinci/psc.h 	.parent	= #p,		\
p                 391 drivers/clk/keystone/sci-clk.c static int ti_sci_init_clocks(struct sci_clk_provider *p)
p                 396 drivers/clk/keystone/sci-clk.c 	for (i = 0; i < p->num_clocks; i++) {
p                 397 drivers/clk/keystone/sci-clk.c 		ret = _sci_clk_build(p, p->clocks[i]);
p                  38 drivers/clk/meson/clk-dualdiv.c 			const struct meson_clk_dualdiv_param *p)
p                  40 drivers/clk/meson/clk-dualdiv.c 	if (!p->dual)
p                  41 drivers/clk/meson/clk-dualdiv.c 		return DIV_ROUND_CLOSEST(parent_rate, p->n1);
p                  43 drivers/clk/meson/clk-dualdiv.c 	return DIV_ROUND_CLOSEST(parent_rate * (p->m1 + p->m2),
p                  44 drivers/clk/meson/clk-dualdiv.c 				 p->n1 * p->m1 + p->n2 * p->m2);
p                  22 drivers/clk/meson/parm.h #define MESON_PARM_APPLICABLE(p)		(!!((p)->width))
p                  30 drivers/clk/meson/parm.h static inline unsigned int meson_parm_read(struct regmap *map, struct parm *p)
p                  34 drivers/clk/meson/parm.h 	regmap_read(map, p->reg_off, &val);
p                  35 drivers/clk/meson/parm.h 	return PARM_GET(p->width, p->shift, val);
p                  38 drivers/clk/meson/parm.h static inline void meson_parm_write(struct regmap *map, struct parm *p,
p                  41 drivers/clk/meson/parm.h 	regmap_update_bits(map, p->reg_off, SETPMASK(p->width, p->shift),
p                  42 drivers/clk/meson/parm.h 			   val << p->shift);
p                  77 drivers/clk/mvebu/clk-corediv.c #define to_corediv_clk(p) container_of(p, struct clk_corediv, hw)
p                  46 drivers/clk/mvebu/clk-cpu.c #define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)
p                 587 drivers/clk/nxp/clk-lpc32xx.c 	u64 m = 0, n = 0, p = 0;
p                 611 drivers/clk/nxp/clk-lpc32xx.c 				p = p_i;
p                 625 drivers/clk/nxp/clk-lpc32xx.c 	clk->p_div = p;
p                 628 drivers/clk/nxp/clk-lpc32xx.c 	if (!p)
p                 633 drivers/clk/nxp/clk-lpc32xx.c 	o = div64_u64(i * m, n * (1 << p));
p                 637 drivers/clk/nxp/clk-lpc32xx.c 			 clk_hw_get_name(hw), rate, m, n, p);
p                 640 drivers/clk/nxp/clk-lpc32xx.c 			 clk_hw_get_name(hw), rate, m, n, p, o);
p                 172 drivers/clk/pistachio/clk-pistachio.c 	struct pistachio_clk_provider *p;
p                 175 drivers/clk/pistachio/clk-pistachio.c 	p = pistachio_clk_alloc_provider(np, CLK_NR_CLKS);
p                 176 drivers/clk/pistachio/clk-pistachio.c 	if (!p)
p                 179 drivers/clk/pistachio/clk-pistachio.c 	pistachio_clk_register_pll(p, pistachio_plls,
p                 181 drivers/clk/pistachio/clk-pistachio.c 	pistachio_clk_register_mux(p, pistachio_muxes,
p                 183 drivers/clk/pistachio/clk-pistachio.c 	pistachio_clk_register_div(p, pistachio_divs,
p                 185 drivers/clk/pistachio/clk-pistachio.c 	pistachio_clk_register_fixed_factor(p, pistachio_ffs,
p                 187 drivers/clk/pistachio/clk-pistachio.c 	pistachio_clk_register_gate(p, pistachio_gates,
p                 193 drivers/clk/pistachio/clk-pistachio.c 					   p->base + 0x200, 18, 0x1f, 0,
p                 195 drivers/clk/pistachio/clk-pistachio.c 	p->clk_data.clks[CLK_DEBUG_MUX] = debug_clk;
p                 197 drivers/clk/pistachio/clk-pistachio.c 	pistachio_clk_register_provider(p);
p                 199 drivers/clk/pistachio/clk-pistachio.c 	pistachio_clk_force_enable(p, pistachio_critical_clks_core,
p                 255 drivers/clk/pistachio/clk-pistachio.c 	struct pistachio_clk_provider *p;
p                 257 drivers/clk/pistachio/clk-pistachio.c 	p = pistachio_clk_alloc_provider(np, PERIPH_CLK_NR_CLKS);
p                 258 drivers/clk/pistachio/clk-pistachio.c 	if (!p)
p                 261 drivers/clk/pistachio/clk-pistachio.c 	pistachio_clk_register_div(p, pistachio_periph_divs,
p                 263 drivers/clk/pistachio/clk-pistachio.c 	pistachio_clk_register_gate(p, pistachio_periph_gates,
p                 266 drivers/clk/pistachio/clk-pistachio.c 	pistachio_clk_register_provider(p);
p                 268 drivers/clk/pistachio/clk-pistachio.c 	pistachio_clk_force_enable(p, pistachio_critical_clks_sys,
p                 302 drivers/clk/pistachio/clk-pistachio.c 	struct pistachio_clk_provider *p;
p                 304 drivers/clk/pistachio/clk-pistachio.c 	p = pistachio_clk_alloc_provider(np, SYS_CLK_NR_CLKS);
p                 305 drivers/clk/pistachio/clk-pistachio.c 	if (!p)
p                 308 drivers/clk/pistachio/clk-pistachio.c 	pistachio_clk_register_gate(p, pistachio_sys_gates,
p                 311 drivers/clk/pistachio/clk-pistachio.c 	pistachio_clk_register_provider(p);
p                 323 drivers/clk/pistachio/clk-pistachio.c 	struct pistachio_clk_provider *p;
p                 325 drivers/clk/pistachio/clk-pistachio.c 	p = pistachio_clk_alloc_provider(np, EXT_CLK_NR_CLKS);
p                 326 drivers/clk/pistachio/clk-pistachio.c 	if (!p)
p                 329 drivers/clk/pistachio/clk-pistachio.c 	pistachio_clk_register_gate(p, pistachio_ext_gates,
p                 332 drivers/clk/pistachio/clk-pistachio.c 	pistachio_clk_register_provider(p);
p                 496 drivers/clk/pistachio/clk-pll.c void pistachio_clk_register_pll(struct pistachio_clk_provider *p,
p                 505 drivers/clk/pistachio/clk-pll.c 				   0, p->base + pll[i].reg_base,
p                 508 drivers/clk/pistachio/clk-pll.c 		p->clk_data.clks[pll[i].id] = clk;
p                  18 drivers/clk/pistachio/clk.c 	struct pistachio_clk_provider *p;
p                  20 drivers/clk/pistachio/clk.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                  21 drivers/clk/pistachio/clk.c 	if (!p)
p                  22 drivers/clk/pistachio/clk.c 		return p;
p                  24 drivers/clk/pistachio/clk.c 	p->clk_data.clks = kcalloc(num_clks, sizeof(struct clk *), GFP_KERNEL);
p                  25 drivers/clk/pistachio/clk.c 	if (!p->clk_data.clks)
p                  27 drivers/clk/pistachio/clk.c 	p->clk_data.clk_num = num_clks;
p                  28 drivers/clk/pistachio/clk.c 	p->node = node;
p                  29 drivers/clk/pistachio/clk.c 	p->base = of_iomap(node, 0);
p                  30 drivers/clk/pistachio/clk.c 	if (!p->base) {
p                  35 drivers/clk/pistachio/clk.c 	return p;
p                  38 drivers/clk/pistachio/clk.c 	kfree(p->clk_data.clks);
p                  40 drivers/clk/pistachio/clk.c 	kfree(p);
p                  44 drivers/clk/pistachio/clk.c void pistachio_clk_register_provider(struct pistachio_clk_provider *p)
p                  48 drivers/clk/pistachio/clk.c 	for (i = 0; i < p->clk_data.clk_num; i++) {
p                  49 drivers/clk/pistachio/clk.c 		if (IS_ERR(p->clk_data.clks[i]))
p                  51 drivers/clk/pistachio/clk.c 				PTR_ERR(p->clk_data.clks[i]));
p                  54 drivers/clk/pistachio/clk.c 	of_clk_add_provider(p->node, of_clk_src_onecell_get, &p->clk_data);
p                  57 drivers/clk/pistachio/clk.c void pistachio_clk_register_gate(struct pistachio_clk_provider *p,
p                  67 drivers/clk/pistachio/clk.c 					p->base + gate[i].reg, gate[i].shift,
p                  69 drivers/clk/pistachio/clk.c 		p->clk_data.clks[gate[i].id] = clk;
p                  73 drivers/clk/pistachio/clk.c void pistachio_clk_register_mux(struct pistachio_clk_provider *p,
p                  84 drivers/clk/pistachio/clk.c 				       p->base + mux[i].reg, mux[i].shift,
p                  87 drivers/clk/pistachio/clk.c 		p->clk_data.clks[mux[i].id] = clk;
p                  91 drivers/clk/pistachio/clk.c void pistachio_clk_register_div(struct pistachio_clk_provider *p,
p                 100 drivers/clk/pistachio/clk.c 					   0, p->base + div[i].reg, 0,
p                 103 drivers/clk/pistachio/clk.c 		p->clk_data.clks[div[i].id] = clk;
p                 107 drivers/clk/pistachio/clk.c void pistachio_clk_register_fixed_factor(struct pistachio_clk_provider *p,
p                 117 drivers/clk/pistachio/clk.c 		p->clk_data.clks[ff[i].id] = clk;
p                 121 drivers/clk/pistachio/clk.c void pistachio_clk_force_enable(struct pistachio_clk_provider *p,
p                 128 drivers/clk/pistachio/clk.c 		struct clk *clk = p->clk_data.clks[clk_ids[i]];
p                 149 drivers/clk/pistachio/clk.h extern void pistachio_clk_register_provider(struct pistachio_clk_provider *p);
p                 151 drivers/clk/pistachio/clk.h extern void pistachio_clk_register_gate(struct pistachio_clk_provider *p,
p                 154 drivers/clk/pistachio/clk.h extern void pistachio_clk_register_mux(struct pistachio_clk_provider *p,
p                 157 drivers/clk/pistachio/clk.h extern void pistachio_clk_register_div(struct pistachio_clk_provider *p,
p                 161 drivers/clk/pistachio/clk.h pistachio_clk_register_fixed_factor(struct pistachio_clk_provider *p,
p                 164 drivers/clk/pistachio/clk.h extern void pistachio_clk_register_pll(struct pistachio_clk_provider *p,
p                 168 drivers/clk/pistachio/clk.h extern void pistachio_clk_force_enable(struct pistachio_clk_provider *p,
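The pistachio clk.c lines above outline a common allocate-and-unwind sequence: allocate the provider, then the clk array, then map the registers, and undo each earlier step if a later one fails. An illustrative reconstruction using libc allocators and a hypothetical iomap stub in place of kzalloc/kcalloc/of_iomap:

/*
 * Sketch only: mirrors the structure of pistachio_clk_alloc_provider(),
 * with plain calloc/free standing in for the kernel allocators.
 */
#include <stdlib.h>
#include <stdio.h>

struct clk;

struct clk_provider {
	struct clk **clks;
	unsigned int clk_num;
	void *base;
};

static void *iomap_registers(void)	/* stand-in for of_iomap() */
{
	return malloc(0x1000);
}

static struct clk_provider *provider_alloc(unsigned int num_clks)
{
	struct clk_provider *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;

	p->clks = calloc(num_clks, sizeof(*p->clks));
	if (!p->clks)
		goto free_provider;
	p->clk_num = num_clks;

	p->base = iomap_registers();
	if (!p->base)
		goto free_clks;

	return p;

free_clks:		/* unwind in reverse order of allocation */
	free(p->clks);
free_provider:
	free(p);
	return NULL;
}

int main(void)
{
	struct clk_provider *p = provider_alloc(32);

	if (p)
		printf("provider with %u clock slots\n", p->clk_num);
	else
		printf("allocation failed\n");
	return 0;
}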
p                  15 drivers/clk/qcom/clk-alpha-pll.c #define PLL_MODE(p)		((p)->offset + 0x0)
p                  34 drivers/clk/qcom/clk-alpha-pll.c #define PLL_L_VAL(p)		((p)->offset + (p)->regs[PLL_OFF_L_VAL])
p                  35 drivers/clk/qcom/clk-alpha-pll.c #define PLL_CAL_L_VAL(p)	((p)->offset + (p)->regs[PLL_OFF_CAL_L_VAL])
p                  36 drivers/clk/qcom/clk-alpha-pll.c #define PLL_ALPHA_VAL(p)	((p)->offset + (p)->regs[PLL_OFF_ALPHA_VAL])
p                  37 drivers/clk/qcom/clk-alpha-pll.c #define PLL_ALPHA_VAL_U(p)	((p)->offset + (p)->regs[PLL_OFF_ALPHA_VAL_U])
p                  39 drivers/clk/qcom/clk-alpha-pll.c #define PLL_USER_CTL(p)		((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
p                  41 drivers/clk/qcom/clk-alpha-pll.c # define PLL_POST_DIV_MASK(p)	GENMASK((p)->width, 0)
p                  47 drivers/clk/qcom/clk-alpha-pll.c #define PLL_USER_CTL_U(p)	((p)->offset + (p)->regs[PLL_OFF_USER_CTL_U])
p                  48 drivers/clk/qcom/clk-alpha-pll.c #define PLL_USER_CTL_U1(p)	((p)->offset + (p)->regs[PLL_OFF_USER_CTL_U1])
p                  50 drivers/clk/qcom/clk-alpha-pll.c #define PLL_CONFIG_CTL(p)	((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL])
p                  51 drivers/clk/qcom/clk-alpha-pll.c #define PLL_CONFIG_CTL_U(p)	((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U])
p                  52 drivers/clk/qcom/clk-alpha-pll.c #define PLL_CONFIG_CTL_U1(p)	((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U1])
p                  53 drivers/clk/qcom/clk-alpha-pll.c #define PLL_TEST_CTL(p)		((p)->offset + (p)->regs[PLL_OFF_TEST_CTL])
p                  54 drivers/clk/qcom/clk-alpha-pll.c #define PLL_TEST_CTL_U(p)	((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U])
p                  55 drivers/clk/qcom/clk-alpha-pll.c #define PLL_STATUS(p)		((p)->offset + (p)->regs[PLL_OFF_STATUS])
p                  56 drivers/clk/qcom/clk-alpha-pll.c #define PLL_OPMODE(p)		((p)->offset + (p)->regs[PLL_OFF_OPMODE])
p                  57 drivers/clk/qcom/clk-alpha-pll.c #define PLL_FRAC(p)		((p)->offset + (p)->regs[PLL_OFF_FRAC])
p                  58 drivers/clk/qcom/clk-alpha-pll.c #define PLL_CAL_VAL(p)		((p)->offset + (p)->regs[PLL_OFF_CAL_VAL])
p                 147 drivers/clk/qcom/clk-alpha-pll.c #define pll_alpha_width(p)					\
p                 148 drivers/clk/qcom/clk-alpha-pll.c 		((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ?	\
p                 151 drivers/clk/qcom/clk-alpha-pll.c #define pll_has_64bit_config(p)	((PLL_CONFIG_CTL_U(p) - PLL_CONFIG_CTL(p)) == 4)
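The clk-alpha-pll.c macros above all follow one addressing pattern: each PLL carries a base offset plus a per-variant table of register offsets, and derived properties such as the alpha width fall out of the register spacing. A small sketch of that pattern in plain C (the offset values are made up for the example, not taken from any real PLL variant):

/*
 * Illustrative sketch of per-variant register offset tables; not the
 * driver itself.
 */
#include <stdio.h>

enum { PLL_OFF_L_VAL, PLL_OFF_ALPHA_VAL, PLL_OFF_ALPHA_VAL_U, PLL_OFF_USER_CTL, PLL_OFF_MAX };

struct alpha_pll {
	unsigned int offset;		/* base of this PLL in the register block */
	const unsigned char *regs;	/* variant-specific register offsets */
};

#define PLL_L_VAL(p)       ((p)->offset + (p)->regs[PLL_OFF_L_VAL])
#define PLL_ALPHA_VAL(p)   ((p)->offset + (p)->regs[PLL_OFF_ALPHA_VAL])
#define PLL_ALPHA_VAL_U(p) ((p)->offset + (p)->regs[PLL_OFF_ALPHA_VAL_U])

/* 40-bit alpha when the upper word sits directly after the lower word */
#define pll_alpha_width(p) \
	((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ? 40 : 32)

int main(void)
{
	static const unsigned char example_regs[PLL_OFF_MAX] = {
		[PLL_OFF_L_VAL] = 0x04, [PLL_OFF_ALPHA_VAL] = 0x08,
		[PLL_OFF_ALPHA_VAL_U] = 0x0c, [PLL_OFF_USER_CTL] = 0x10,
	};
	struct alpha_pll pll = { .offset = 0x1000, .regs = example_regs };

	printf("L_VAL at %#x, alpha width %d bits\n",
	       PLL_L_VAL(&pll), pll_alpha_width(&pll));
	return 0;
}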
p                 203 drivers/clk/qcom/clk-pll.c 	struct clk_pll *p = to_clk_pll(clk_hw_get_parent(hw));
p                 209 drivers/clk/qcom/clk-pll.c 	return wait_for_pll(p);
p                 113 drivers/clk/qcom/clk-rcg.c static u32 ns_to_pre_div(struct pre_div *p, u32 ns)
p                 115 drivers/clk/qcom/clk-rcg.c 	ns >>= p->pre_div_shift;
p                 116 drivers/clk/qcom/clk-rcg.c 	ns &= BIT(p->pre_div_width) - 1;
p                 120 drivers/clk/qcom/clk-rcg.c static u32 pre_div_to_ns(struct pre_div *p, u8 pre_div, u32 ns)
p                 124 drivers/clk/qcom/clk-rcg.c 	mask = BIT(p->pre_div_width) - 1;
p                 125 drivers/clk/qcom/clk-rcg.c 	mask <<= p->pre_div_shift;
p                 128 drivers/clk/qcom/clk-rcg.c 	ns |= pre_div << p->pre_div_shift;
p                 203 drivers/clk/qcom/clk-rcg.c 	struct pre_div *p;
p                 208 drivers/clk/qcom/clk-rcg.c 	bool banked_p = !!rcg->p[1].pre_div_width;
p                 266 drivers/clk/qcom/clk-rcg.c 		p = &rcg->p[new_bank];
p                 267 drivers/clk/qcom/clk-rcg.c 		ns = pre_div_to_ns(p, f->pre_div - 1, ns);
p                 298 drivers/clk/qcom/clk-rcg.c 	bool banked_p = !!rcg->p[1].pre_div_width;
p                 312 drivers/clk/qcom/clk-rcg.c 		f.pre_div = ns_to_pre_div(&rcg->p[bank], ns) + 1;
p                 349 drivers/clk/qcom/clk-rcg.c 	pre_div = ns_to_pre_div(&rcg->p, ns);
p                 373 drivers/clk/qcom/clk-rcg.c 	bool banked_p = !!rcg->p[1].pre_div_width;
p                 394 drivers/clk/qcom/clk-rcg.c 		pre_div = ns_to_pre_div(&rcg->p[bank], ns);
p                 404 drivers/clk/qcom/clk-rcg.c 	struct clk_hw *p;
p                 416 drivers/clk/qcom/clk-rcg.c 	p = clk_hw_get_parent_by_index(hw, index);
p                 426 drivers/clk/qcom/clk-rcg.c 		rate =  clk_hw_get_rate(p);
p                 428 drivers/clk/qcom/clk-rcg.c 	req->best_parent_hw = p;
p                 464 drivers/clk/qcom/clk-rcg.c 	struct clk_hw *p;
p                 467 drivers/clk/qcom/clk-rcg.c 	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
p                 468 drivers/clk/qcom/clk-rcg.c 	req->best_parent_rate = clk_hw_round_rate(p, req->rate);
p                 508 drivers/clk/qcom/clk-rcg.c 	ns = pre_div_to_ns(&rcg->p, f->pre_div - 1, ns);
p                 540 drivers/clk/qcom/clk-rcg.c 	struct clk_hw *p;
p                 542 drivers/clk/qcom/clk-rcg.c 	p = req->best_parent_hw;
p                 543 drivers/clk/qcom/clk-rcg.c 	req->best_parent_rate = clk_hw_round_rate(p, req->rate);
p                 562 drivers/clk/qcom/clk-rcg.c 	f.pre_div = ns_to_pre_div(&rcg->p, ns) + 1;
p                 671 drivers/clk/qcom/clk-rcg.c 	int pre_div_max = BIT(rcg->p.pre_div_width);
p                 696 drivers/clk/qcom/clk-rcg.c 	int pre_div_max = BIT(rcg->p.pre_div_width);
p                  80 drivers/clk/qcom/clk-rcg.h 	struct pre_div	p;
p                 118 drivers/clk/qcom/clk-rcg.h 	struct pre_div	p[2];
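The ns_to_pre_div()/pre_div_to_ns() lines from clk-rcg.c above show the NS-register bitfield handling: the pre-divider occupies pre_div_width bits at pre_div_shift, and the helpers extract or re-insert that field. A self-contained sketch of the same pack/unpack (field position here is invented for the demo):

/*
 * Standalone bitfield pack/unpack sketch following the clk-rcg.c lines;
 * not the driver itself.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

struct pre_div {
	uint8_t pre_div_shift;
	uint8_t pre_div_width;
};

static uint32_t ns_to_pre_div(const struct pre_div *p, uint32_t ns)
{
	ns >>= p->pre_div_shift;
	ns &= BIT(p->pre_div_width) - 1;
	return ns;
}

static uint32_t pre_div_to_ns(const struct pre_div *p, uint8_t pre_div, uint32_t ns)
{
	uint32_t mask = (BIT(p->pre_div_width) - 1) << p->pre_div_shift;

	ns &= ~mask;					/* clear the old field */
	ns |= (uint32_t)pre_div << p->pre_div_shift;	/* insert the new value */
	return ns;
}

int main(void)
{
	struct pre_div p = { .pre_div_shift = 3, .pre_div_width = 2 }; /* hypothetical */
	uint32_t ns = pre_div_to_ns(&p, 2, 0);

	printf("ns=%#x pre_div=%u\n", ns, ns_to_pre_div(&p, ns));
	return 0;
}

Note that the surrounding clk-rcg.c lines pass f->pre_div - 1 into the write helper and add 1 after the read helper, i.e. the hardware field appears to store the divider minus one.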
p                 196 drivers/clk/qcom/clk-rcg2.c 	struct clk_hw *p;
p                 219 drivers/clk/qcom/clk-rcg2.c 	p = clk_hw_get_parent_by_index(hw, index);
p                 220 drivers/clk/qcom/clk-rcg2.c 	if (!p)
p                 239 drivers/clk/qcom/clk-rcg2.c 		rate =  clk_hw_get_rate(p);
p                 241 drivers/clk/qcom/clk-rcg2.c 	req->best_parent_hw = p;
p                 516 drivers/clk/qcom/clk-rcg2.c 	struct clk_hw *p;
p                 521 drivers/clk/qcom/clk-rcg2.c 	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
p                 522 drivers/clk/qcom/clk-rcg2.c 	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);
p                 572 drivers/clk/qcom/clk-rcg2.c 	struct clk_hw *p;
p                 578 drivers/clk/qcom/clk-rcg2.c 	p = req->best_parent_hw;
p                 579 drivers/clk/qcom/clk-rcg2.c 	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);
p                 956 drivers/clk/qcom/clk-rcg2.c 	struct clk_hw *p;
p                 975 drivers/clk/qcom/clk-rcg2.c 			p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
p                 976 drivers/clk/qcom/clk-rcg2.c 			prate = clk_hw_get_rate(p);
p                 213 drivers/clk/qcom/clk-regmap-mux-div.c 			struct clk_hw *p = clk_hw_get_parent_by_index(hw, i);
p                 214 drivers/clk/qcom/clk-regmap-mux-div.c 			unsigned long parent_rate = clk_hw_get_rate(p);
p                 202 drivers/clk/qcom/common.c 	const __be32 *p;
p                 205 drivers/clk/qcom/common.c 	of_property_for_each_u32(np, "protected-clocks", prop, p, i) {
p                 351 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                 402 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                 453 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                 504 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                 555 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                 606 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                 670 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                 719 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                 768 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                 817 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                 866 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                 915 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                1065 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                1114 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                1163 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                1218 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                1277 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                1325 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                1408 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                1570 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                1662 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                1754 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                1851 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                1988 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                2058 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                2128 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                2192 drivers/clk/qcom/gcc-ipq806x.c 	.p = {
p                2323 drivers/clk/qcom/gcc-ipq806x.c 	.p[0] = {
p                2327 drivers/clk/qcom/gcc-ipq806x.c 	.p[1] = {
p                2395 drivers/clk/qcom/gcc-ipq806x.c 	.p[0] = {
p                2399 drivers/clk/qcom/gcc-ipq806x.c 	.p[1] = {
p                2467 drivers/clk/qcom/gcc-ipq806x.c 	.p[0] = {
p                2471 drivers/clk/qcom/gcc-ipq806x.c 	.p[1] = {
p                2539 drivers/clk/qcom/gcc-ipq806x.c 	.p[0] = {
p                2543 drivers/clk/qcom/gcc-ipq806x.c 	.p[1] = {
p                2599 drivers/clk/qcom/gcc-ipq806x.c 	.p[0] = {
p                2603 drivers/clk/qcom/gcc-ipq806x.c 	.p[1] = {
p                2677 drivers/clk/qcom/gcc-ipq806x.c 	.p[0] = {
p                2681 drivers/clk/qcom/gcc-ipq806x.c 	.p[1] = {
p                2730 drivers/clk/qcom/gcc-ipq806x.c 	.p[0] = {
p                2734 drivers/clk/qcom/gcc-ipq806x.c 	.p[1] = {
p                 195 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                 246 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                 297 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                 348 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                 399 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                 462 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                 511 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                 560 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                 609 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                 658 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                 713 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                 762 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                 811 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                 866 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                 925 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                 973 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                1026 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                1075 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                1130 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                1185 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                1240 drivers/clk/qcom/gcc-mdm9615.c 	.p = {
p                 111 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                 162 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                 213 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                 264 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                 315 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                 366 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                 417 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                 468 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                 517 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                 566 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                 615 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                 664 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                 726 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                 775 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                 824 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                 873 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                 922 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                 971 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                1020 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                1069 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                1118 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                1167 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                1216 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                1265 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                1327 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                1376 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                1425 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                1480 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                1536 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                1584 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                1632 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                1680 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                1728 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                1781 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                1835 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                1884 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                1951 drivers/clk/qcom/gcc-msm8660.c 	.p = {
p                 337 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                 388 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                 439 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                 490 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                 541 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                 592 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                 643 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                 694 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                 743 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                 792 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                 841 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                 890 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                 952 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1001 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1050 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1099 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1148 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1197 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1246 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1295 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1344 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1393 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1442 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1491 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1553 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1602 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1651 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1706 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1765 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1813 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1861 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1909 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                1957 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                2010 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                2064 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                2113 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                2162 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                2211 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                2306 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                2373 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                2861 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                2923 drivers/clk/qcom/gcc-msm8960.c 	.p = {
p                 238 drivers/clk/qcom/krait-cc.c 	void *p = NULL;
p                 244 drivers/clk/qcom/krait-cc.c 		s = p = kasprintf(GFP_KERNEL, "%d", id);
p                 266 drivers/clk/qcom/krait-cc.c 	kfree(p);
p                 119 drivers/clk/qcom/lcc-ipq806x.c 	.p = {
p                 233 drivers/clk/qcom/lcc-ipq806x.c 	.p = {
p                 313 drivers/clk/qcom/lcc-ipq806x.c 	.p = {
p                 372 drivers/clk/qcom/lcc-ipq806x.c 	.p = {
p                 102 drivers/clk/qcom/lcc-mdm9615.c 	.p = {
p                 208 drivers/clk/qcom/lcc-mdm9615.c 	.p = {							\
p                 352 drivers/clk/qcom/lcc-mdm9615.c 	.p = {
p                 420 drivers/clk/qcom/lcc-mdm9615.c 	.p = {
p                 100 drivers/clk/qcom/lcc-msm8960.c 	.p = {
p                 206 drivers/clk/qcom/lcc-msm8960.c 	.p = {							\
p                 350 drivers/clk/qcom/lcc-msm8960.c 	.p = {
p                 418 drivers/clk/qcom/lcc-msm8960.c 	.p = {
p                 181 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                 230 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                 279 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                 334 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                 398 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                 462 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                 543 drivers/clk/qcom/mmcc-msm8960.c 		struct clk_hw *p = clk_hw_get_parent_by_index(hw, i);
p                 544 drivers/clk/qcom/mmcc-msm8960.c 		ret = clk_prepare_enable(p->clk);
p                 573 drivers/clk/qcom/mmcc-msm8960.c 		struct clk_hw *p = clk_hw_get_parent_by_index(hw, i);
p                 574 drivers/clk/qcom/mmcc-msm8960.c 		clk_disable_unprepare(p->clk);
p                 714 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                1142 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                1190 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                1360 drivers/clk/qcom/mmcc-msm8960.c 	.p[0] = {
p                1364 drivers/clk/qcom/mmcc-msm8960.c 	.p[1] = {
p                1432 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                1654 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                1722 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                2057 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                2105 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                2144 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                2183 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                2222 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                2260 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                2307 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                2354 drivers/clk/qcom/mmcc-msm8960.c 	.p = {
p                 593 drivers/clk/renesas/r9a06g032-clocks.c 			unsigned long p =
p                 600 drivers/clk/renesas/r9a06g032-clocks.c 			div = p >= m ? clk->table[i] : clk->table[i + 1];
p                 935 drivers/clk/samsung/clk-pll.c 	u32 r, p, m, s, pll_stat;
p                 942 drivers/clk/samsung/clk-pll.c 	p = (pll_stat >> PLL2550X_P_SHIFT) & PLL2550X_P_MASK;
p                 947 drivers/clk/samsung/clk-pll.c 	do_div(fvco, (p << s));
p                  16 drivers/clk/socfpga/clk-gate-a10.c #define to_socfpga_gate_clk(p) container_of(p, struct socfpga_gate_clk, hw.hw)
p                  12 drivers/clk/socfpga/clk-gate-s10.c #define to_socfpga_gate_clk(p) container_of(p, struct socfpga_gate_clk, hw.hw)
p                  24 drivers/clk/socfpga/clk-gate.c #define to_socfpga_gate_clk(p) container_of(p, struct socfpga_gate_clk, hw.hw)
p                  18 drivers/clk/socfpga/clk-periph-a10.c #define to_socfpga_periph_clk(p) container_of(p, struct socfpga_periph_clk, hw.hw)
p                  16 drivers/clk/socfpga/clk-periph-s10.c #define to_periph_clk(p) container_of(p, struct socfpga_periph_clk, hw.hw)
p                  15 drivers/clk/socfpga/clk-periph.c #define to_socfpga_periph_clk(p) container_of(p, struct socfpga_periph_clk, hw.hw)
p                  30 drivers/clk/socfpga/clk-pll-a10.c #define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw)
p                  28 drivers/clk/socfpga/clk-pll-s10.c #define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw)
p                  34 drivers/clk/socfpga/clk-pll.c #define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw)
p                  79 drivers/clk/spear/clk-vco-pll.c 		*pll_rate = (rate / (1 << rtbl[index].p)) * 10000;
p                 129 drivers/clk/spear/clk-vco-pll.c 	unsigned int p;
p                 134 drivers/clk/spear/clk-vco-pll.c 	p = readl_relaxed(pll->vco->cfg_reg);
p                 139 drivers/clk/spear/clk-vco-pll.c 	p = (p >> PLL_DIV_P_SHIFT) & PLL_DIV_P_MASK;
p                 141 drivers/clk/spear/clk-vco-pll.c 	return parent_rate / (1 << p);
p                 159 drivers/clk/spear/clk-vco-pll.c 	val |= (rtbl[i].p & PLL_DIV_P_MASK) << PLL_DIV_P_SHIFT;
p                  90 drivers/clk/spear/clk.h 	u8 p;
p                 235 drivers/clk/spear/spear1310_clock.c 	{.mode = 0, .m = 0x83, .n = 0x04, .p = 0x5}, /* vco 1572, pll 49.125 MHz */
p                 236 drivers/clk/spear/spear1310_clock.c 	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x3}, /* vco 1000, pll 125 MHz */
p                 237 drivers/clk/spear/spear1310_clock.c 	{.mode = 0, .m = 0x64, .n = 0x06, .p = 0x1}, /* vco 800, pll 400 MHz */
p                 238 drivers/clk/spear/spear1310_clock.c 	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x1}, /* vco 1000, pll 500 MHz */
p                 239 drivers/clk/spear/spear1310_clock.c 	{.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x1}, /* vco 1328, pll 664 MHz */
p                 240 drivers/clk/spear/spear1310_clock.c 	{.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x1}, /* vco 1600, pll 800 MHz */
p                 241 drivers/clk/spear/spear1310_clock.c 	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1, pll 1 GHz */
p                 246 drivers/clk/spear/spear1310_clock.c 	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x2}, /* vco 1000, pll 250 MHz */
p                 247 drivers/clk/spear/spear1310_clock.c 	{.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x2}, /* vco 1328, pll 332 MHz */
p                 248 drivers/clk/spear/spear1310_clock.c 	{.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x2}, /* vco 1600, pll 400 MHz */
p                 249 drivers/clk/spear/spear1310_clock.c 	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1, pll 1 GHz */
p                 168 drivers/clk/spear/spear1340_clock.c 	{.mode = 0, .m = 0x83, .n = 0x04, .p = 0x5}, /* vco 1572, pll 49.125 MHz */
p                 169 drivers/clk/spear/spear1340_clock.c 	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x3}, /* vco 1000, pll 125 MHz */
p                 170 drivers/clk/spear/spear1340_clock.c 	{.mode = 0, .m = 0x64, .n = 0x06, .p = 0x1}, /* vco 800, pll 400 MHz */
p                 171 drivers/clk/spear/spear1340_clock.c 	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x1}, /* vco 1000, pll 500 MHz */
p                 172 drivers/clk/spear/spear1340_clock.c 	{.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x1}, /* vco 1328, pll 664 MHz */
p                 173 drivers/clk/spear/spear1340_clock.c 	{.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x1}, /* vco 1600, pll 800 MHz */
p                 174 drivers/clk/spear/spear1340_clock.c 	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1, pll 1 GHz */
p                 175 drivers/clk/spear/spear1340_clock.c 	{.mode = 0, .m = 0x96, .n = 0x06, .p = 0x0}, /* vco 1200, pll 1200 MHz */
p                 180 drivers/clk/spear/spear1340_clock.c 	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x2}, /* vco 1000, pll 250 MHz */
p                 181 drivers/clk/spear/spear1340_clock.c 	{.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x2}, /* vco 1328, pll 332 MHz */
p                 182 drivers/clk/spear/spear1340_clock.c 	{.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x2}, /* vco 1600, pll 400 MHz */
p                 183 drivers/clk/spear/spear1340_clock.c 	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1, pll 1 GHz */
p                 101 drivers/clk/spear/spear3xx_clock.c 	{.mode = 0, .m = 0x53, .n = 0x0C, .p = 0x1}, /* vco 332 & pll 166 MHz */
p                 102 drivers/clk/spear/spear3xx_clock.c 	{.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* vco 532 & pll 266 MHz */
p                 103 drivers/clk/spear/spear3xx_clock.c 	{.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* vco 664 & pll 332 MHz */
p                  85 drivers/clk/spear/spear6xx_clock.c 	{.mode = 0, .m = 0x53, .n = 0x0F, .p = 0x1}, /* vco 332 & pll 166 MHz */
p                  86 drivers/clk/spear/spear6xx_clock.c 	{.mode = 0, .m = 0x85, .n = 0x0F, .p = 0x1}, /* vco 532 & pll 266 MHz */
p                  87 drivers/clk/spear/spear6xx_clock.c 	{.mode = 0, .m = 0xA6, .n = 0x0F, .p = 0x1}, /* vco 664 & pll 332 MHz */
p                 585 drivers/clk/st/clkgen-fsyn.c 		signed long input, unsigned long output, uint64_t *p,
p                 594 drivers/clk/st/clkgen-fsyn.c 	*p = (uint64_t)input * P20 - (32LL  + (uint64_t)m) * val * (P20 / 32LL);
p                 596 drivers/clk/st/clkgen-fsyn.c 	*p = div64_u64(*p, val);
p                 598 drivers/clk/st/clkgen-fsyn.c 	if (*p > 32767LL)
p                 602 drivers/clk/st/clkgen-fsyn.c 	fs_tmp.pe = (unsigned long)*p;
p                 612 drivers/clk/st/clkgen-fsyn.c 		fs->pe = (unsigned long)*p;
p                 628 drivers/clk/st/clkgen-fsyn.c 	uint64_t p, p1, p2;	/* pe value */
p                 648 drivers/clk/st/clkgen-fsyn.c 					input, output, &p, fs);
p                  33 drivers/clk/sunxi-ng/ccu-sun4i-a10.c 	.p		= _SUNXI_CCU_DIV(16, 2),
p                 101 drivers/clk/sunxi-ng/ccu-sun4i-a10.c 	.p		= _SUNXI_CCU_DIV(16, 2),
p                 608 drivers/clk/sunxi-ng/ccu-sun4i-a10.c 	.p		= _SUNXI_CCU_DIV(16, 2),
p                 824 drivers/clk/sunxi-ng/ccu-sun4i-a10.c 	.p		= _SUNXI_CCU_DIV(20, 2),
p                 843 drivers/clk/sunxi-ng/ccu-sun4i-a10.c 	.p		= _SUNXI_CCU_DIV(20, 2),
p                  32 drivers/clk/sunxi-ng/ccu-sun50i-a64.c 	.p		= _SUNXI_CCU_DIV_MAX(16, 2, 4),
p                  55 drivers/clk/sunxi-ng/ccu-sun50i-h6.c 	.p		= _SUNXI_CCU_DIV(0, 1), /* output divider */
p                  70 drivers/clk/sunxi-ng/ccu-sun50i-h6.c 	.p		= _SUNXI_CCU_DIV(0, 1), /* output divider */
p                  87 drivers/clk/sunxi-ng/ccu-sun50i-h6.c 	.p		= _SUNXI_CCU_DIV(0, 1), /* output divider */
p                 104 drivers/clk/sunxi-ng/ccu-sun50i-h6.c 	.p		= _SUNXI_CCU_DIV(0, 1), /* output divider */
p                 159 drivers/clk/sunxi-ng/ccu-sun50i-h6.c 	.p		= _SUNXI_CCU_DIV(0, 1), /* output divider */
p                 174 drivers/clk/sunxi-ng/ccu-sun50i-h6.c 	.p		= _SUNXI_CCU_DIV(0, 1), /* output divider */
p                 189 drivers/clk/sunxi-ng/ccu-sun50i-h6.c 	.p		= _SUNXI_CCU_DIV(0, 1), /* output divider */
p                  31 drivers/clk/sunxi-ng/ccu-sun5i.c 	.p		= _SUNXI_CCU_DIV(16, 2),
p                 103 drivers/clk/sunxi-ng/ccu-sun5i.c 	.p		= _SUNXI_CCU_DIV(16, 2),
p                 412 drivers/clk/sunxi-ng/ccu-sun5i.c 	.p		= _SUNXI_CCU_DIV(20, 2),
p                 745 drivers/clk/sunxi-ng/ccu-sun6i-a31.c 	.p		= _SUNXI_CCU_DIV(20, 2),
p                 766 drivers/clk/sunxi-ng/ccu-sun6i-a31.c 	.p		= _SUNXI_CCU_DIV(20, 2),
p                 787 drivers/clk/sunxi-ng/ccu-sun6i-a31.c 	.p		= _SUNXI_CCU_DIV(20, 2),
p                  34 drivers/clk/sunxi-ng/ccu-sun8i-a23.c 	.p	= _SUNXI_CCU_DIV_MAX(16, 2, 4),
p                  32 drivers/clk/sunxi-ng/ccu-sun8i-a33.c 	.p	= _SUNXI_CCU_DIV_MAX(16, 2, 4),
p                 103 drivers/clk/sunxi-ng/ccu-sun8i-a83t.c 	.p		= _SUNXI_CCU_DIV(0, 2), /* output divider */
p                 120 drivers/clk/sunxi-ng/ccu-sun8i-a83t.c 	.p		= _SUNXI_CCU_DIV(18, 1), /* output divider */
p                 136 drivers/clk/sunxi-ng/ccu-sun8i-a83t.c 	.p		= _SUNXI_CCU_DIV(18, 1), /* output divider */
p                 152 drivers/clk/sunxi-ng/ccu-sun8i-a83t.c 	.p		= _SUNXI_CCU_DIV(18, 1), /* output divider */
p                 168 drivers/clk/sunxi-ng/ccu-sun8i-a83t.c 	.p		= _SUNXI_CCU_DIV(18, 1), /* output divider */
p                 184 drivers/clk/sunxi-ng/ccu-sun8i-a83t.c 	.p		= _SUNXI_CCU_DIV(18, 1), /* output divider */
p                 200 drivers/clk/sunxi-ng/ccu-sun8i-a83t.c 	.p		= _SUNXI_CCU_DIV(18, 1), /* output divider */
p                 216 drivers/clk/sunxi-ng/ccu-sun8i-a83t.c 	.p		= _SUNXI_CCU_DIV(0, 2), /* external divider p */
p                  97 drivers/clk/sunxi-ng/ccu-sun8i-r.c 	.p	= _SUNXI_CCU_DIV(16, 2),
p                  33 drivers/clk/sunxi-ng/ccu-sun8i-r40.c 	.p		= _SUNXI_CCU_DIV_MAX(16, 2, 4),
p                 581 drivers/clk/sunxi-ng/ccu-sun8i-r40.c 	.p	= _SUNXI_CCU_DIV(16, 2),
p                 753 drivers/clk/sunxi-ng/ccu-sun8i-r40.c 	.p	= _SUNXI_CCU_DIV(20, 2),
p                 772 drivers/clk/sunxi-ng/ccu-sun8i-r40.c 	.p	= _SUNXI_CCU_DIV(20, 2),
p                  90 drivers/clk/sunxi-ng/ccu-sun9i-a80.c 	.p		= _SUNXI_CCU_DIV(18, 1), /* output divider */
p                 106 drivers/clk/sunxi-ng/ccu-sun9i-a80.c 	.p		= _SUNXI_CCU_DIV(18, 1), /* output divider */
p                 122 drivers/clk/sunxi-ng/ccu-sun9i-a80.c 	.p		= _SUNXI_CCU_DIV(18, 1), /* output divider */
p                 153 drivers/clk/sunxi-ng/ccu-sun9i-a80.c 	.p		= _SUNXI_CCU_DIV(0, 2), /* external divider p */
p                 169 drivers/clk/sunxi-ng/ccu-sun9i-a80.c 	.p		= _SUNXI_CCU_DIV(18, 1), /* output divider */
p                 185 drivers/clk/sunxi-ng/ccu-sun9i-a80.c 	.p		= _SUNXI_CCU_DIV(18, 1), /* output divider */
p                 201 drivers/clk/sunxi-ng/ccu-sun9i-a80.c 	.p		= _SUNXI_CCU_DIV(18, 1), /* output divider */
p                 217 drivers/clk/sunxi-ng/ccu-sun9i-a80.c 	.p		= _SUNXI_CCU_DIV(18, 1), /* output divider */
p                 353 drivers/clk/sunxi-ng/ccu-sun9i-a80.c 	.p		= _SUNXI_CCU_DIV(20, 2),
p                 373 drivers/clk/sunxi-ng/ccu-sun9i-a80.c 	.p		= _SUNXI_CCU_DIV(20, 2),
p                 490 drivers/clk/sunxi-ng/ccu-sun9i-a80.c 	.p		= _SUNXI_CCU_DIV(16, 2),
p                 685 drivers/clk/sunxi-ng/ccu-sun9i-a80.c 	.p		= _SUNXI_CCU_DIV(16, 2),
p                 701 drivers/clk/sunxi-ng/ccu-sun9i-a80.c 	.p		= _SUNXI_CCU_DIV(16, 2),
p                  34 drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c 	.p	= _SUNXI_CCU_DIV_MAX(16, 2, 4),
p                  15 drivers/clk/sunxi-ng/ccu_mp.c 			     unsigned int *m, unsigned int *p)
p                  37 drivers/clk/sunxi-ng/ccu_mp.c 	*p = best_p;
p                 103 drivers/clk/sunxi-ng/ccu_mp.c 	unsigned int m, p;
p                 109 drivers/clk/sunxi-ng/ccu_mp.c 	max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
p                 112 drivers/clk/sunxi-ng/ccu_mp.c 		ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p);
p                 113 drivers/clk/sunxi-ng/ccu_mp.c 		rate = *parent_rate / p / m;
p                 151 drivers/clk/sunxi-ng/ccu_mp.c 	unsigned int m, p;
p                 166 drivers/clk/sunxi-ng/ccu_mp.c 	p = reg >> cmp->p.shift;
p                 167 drivers/clk/sunxi-ng/ccu_mp.c 	p &= (1 << cmp->p.width) - 1;
p                 169 drivers/clk/sunxi-ng/ccu_mp.c 	rate = (parent_rate >> p) / m;
p                 191 drivers/clk/sunxi-ng/ccu_mp.c 	unsigned int m, p;
p                 199 drivers/clk/sunxi-ng/ccu_mp.c 	max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
p                 205 drivers/clk/sunxi-ng/ccu_mp.c 	ccu_mp_find_best(parent_rate, rate, max_m, max_p, &m, &p);
p                 211 drivers/clk/sunxi-ng/ccu_mp.c 	reg &= ~GENMASK(cmp->p.width + cmp->p.shift - 1, cmp->p.shift);
p                 213 drivers/clk/sunxi-ng/ccu_mp.c 	reg |= ilog2(p) << cmp->p.shift;
p                  26 drivers/clk/sunxi-ng/ccu_mp.h 	struct ccu_div_internal		p;
p                  42 drivers/clk/sunxi-ng/ccu_mp.h 		.p	= _SUNXI_CCU_DIV(_pshift, _pwidth),		\
p                  63 drivers/clk/sunxi-ng/ccu_mp.h 		.p	= _SUNXI_CCU_DIV(_pshift, _pwidth),		\
p                 108 drivers/clk/sunxi-ng/ccu_mp.h 		.p	= _SUNXI_CCU_DIV(16, 2),			\
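The ccu_mp.c lines above imply the M/P divider maths: M divides linearly, P divides by a power of two and is stored as its log2 in the register, so the achieved rate is (parent >> p_log2) / m, and the largest P factor with a width-bit field is 2^(2^width - 1). The sketch below shows one plausible brute-force search in that spirit; it is not the driver's ccu_mp_find_best(), whose body is not shown in the lines above, and the field widths are assumptions:

/*
 * Userspace sketch: pick the largest rate not above the target by trying
 * every power-of-two P and linear M, as an M/P divider would allow.
 */
#include <stdio.h>

static unsigned long mp_find_best(unsigned long parent, unsigned long rate,
				  unsigned int max_m, unsigned int max_p,
				  unsigned int *m_out, unsigned int *p_out)
{
	unsigned long best_rate = 0;
	unsigned int best_m = 1, best_p = 1, m, p;

	for (p = 1; p <= max_p; p <<= 1) {
		for (m = 1; m <= max_m; m++) {
			unsigned long tmp = parent / p / m;

			if (tmp <= rate && tmp > best_rate) {
				best_rate = tmp;
				best_m = m;
				best_p = p;
			}
		}
	}

	*m_out = best_m;
	*p_out = best_p;
	return best_rate;
}

int main(void)
{
	/* assumed field widths: 5-bit m (max 32), 2-bit p (max divide by 8) */
	unsigned int max_m = 1 << 5;
	unsigned int max_p = 1 << ((1 << 2) - 1);
	unsigned int m, p;
	unsigned long rate = mp_find_best(600000000UL, 33000000UL,
					  max_m, max_p, &m, &p);

	printf("m=%u p=%u -> %lu Hz\n", m, p, rate);
	return 0;
}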
p                  17 drivers/clk/sunxi-ng/ccu_nkmp.c 	unsigned long	p, min_p, max_p;
p                  22 drivers/clk/sunxi-ng/ccu_nkmp.c 					unsigned long m, unsigned long p)
p                  27 drivers/clk/sunxi-ng/ccu_nkmp.c 	do_div(rate, m * p);
p                  67 drivers/clk/sunxi-ng/ccu_nkmp.c 	nkmp->p = best_p;
p                  95 drivers/clk/sunxi-ng/ccu_nkmp.c 	unsigned long n, m, k, p, rate;
p                 118 drivers/clk/sunxi-ng/ccu_nkmp.c 	p = reg >> nkmp->p.shift;
p                 119 drivers/clk/sunxi-ng/ccu_nkmp.c 	p &= (1 << nkmp->p.width) - 1;
p                 121 drivers/clk/sunxi-ng/ccu_nkmp.c 	rate = ccu_nkmp_calc_rate(parent_rate, n, k, m, 1 << p);
p                 151 drivers/clk/sunxi-ng/ccu_nkmp.c 	_nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);
p                 156 drivers/clk/sunxi-ng/ccu_nkmp.c 				  _nkmp.m, _nkmp.p);
p                 182 drivers/clk/sunxi-ng/ccu_nkmp.c 	_nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);
p                 201 drivers/clk/sunxi-ng/ccu_nkmp.c 	if (nkmp->p.width)
p                 202 drivers/clk/sunxi-ng/ccu_nkmp.c 		p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1,
p                 203 drivers/clk/sunxi-ng/ccu_nkmp.c 				 nkmp->p.shift);
p                 213 drivers/clk/sunxi-ng/ccu_nkmp.c 	reg |= (ilog2(_nkmp.p) << nkmp->p.shift) & p_mask;
p                  27 drivers/clk/sunxi-ng/ccu_nkmp.h 	struct ccu_div_internal		p;
p                  47 drivers/clk/sunxi-ng/ccu_nkmp.h 		.p		= _SUNXI_CCU_DIV(_pshift, _pwidth),	\
p                  43 drivers/clk/sunxi/clk-factors.c 	u8 n = 1, k = 0, p = 0, m = 0;
p                  60 drivers/clk/sunxi/clk-factors.c 		p = FACTOR_GET(config->pshift, config->pwidth, reg);
p                  68 drivers/clk/sunxi/clk-factors.c 			.p = p,
p                  83 drivers/clk/sunxi/clk-factors.c 	rate = (parent_rate * (n + config->n_start) * (k + 1) >> p) / (m + 1);
p                 156 drivers/clk/sunxi/clk-factors.c 	reg = FACTOR_SET(config->pshift, config->pwidth, reg, req.p);
p                  29 drivers/clk/sunxi/clk-factors.h 	u8 p;
p                  47 drivers/clk/sunxi/clk-mod0.c 	req->p = calcp;
p                  28 drivers/clk/sunxi/clk-simple-gates.c 	const __be32 *p;
p                  50 drivers/clk/sunxi/clk-simple-gates.c 	of_property_for_each_u32(node, "clock-indices", prop, p, index) {
p                  52 drivers/clk/sunxi/clk-sun6i-ar100.c 	req->p = shift;
p                  31 drivers/clk/sunxi/clk-sun8i-bus-gates.c 	const __be32 *p;
p                  61 drivers/clk/sunxi/clk-sun8i-bus-gates.c 	of_property_for_each_u32(node, "clock-indices", prop, p, index) {
p                  30 drivers/clk/sunxi/clk-sun9i-core.c 	int p = 1;
p                  43 drivers/clk/sunxi/clk-sun9i-core.c 		p = 0;
p                  53 drivers/clk/sunxi/clk-sun9i-core.c 	req->rate = ((24000000 * n) >> p) / (m + 1);
p                  56 drivers/clk/sunxi/clk-sun9i-core.c 	req->p = p;
p                 168 drivers/clk/sunxi/clk-sun9i-core.c 	req->p = _p;
p                 247 drivers/clk/sunxi/clk-sun9i-core.c 	req->p = order_base_2(div);
p                 248 drivers/clk/sunxi/clk-sun9i-core.c 	req->m = (req->parent_rate >> req->p) - 1;
p                 249 drivers/clk/sunxi/clk-sun9i-core.c 	req->rate = (req->parent_rate >> req->p) / (req->m + 1);
p                  53 drivers/clk/sunxi/clk-sunxi.c 		req->p = 3;
p                  57 drivers/clk/sunxi/clk-sunxi.c 		req->p = 2;
p                  62 drivers/clk/sunxi/clk-sunxi.c 		req->p = 1;
p                  66 drivers/clk/sunxi/clk-sunxi.c 		req->p = 0;
p                  69 drivers/clk/sunxi/clk-sunxi.c 	div <<= req->p;
p                 177 drivers/clk/sunxi/clk-sunxi.c 		req->p = 2;
p                 182 drivers/clk/sunxi/clk-sunxi.c 		req->p = 1;
p                 186 drivers/clk/sunxi/clk-sunxi.c 		req->p = 0;
p                 189 drivers/clk/sunxi/clk-sunxi.c 	div <<= req->p;
p                 274 drivers/clk/sunxi/clk-sunxi.c 	req->p = div;
p                 319 drivers/clk/sunxi/clk-sunxi.c 	req->p = calcp;
p                 336 drivers/clk/sunxi/clk-sunxi.c 	req->rate >>= req->p;
p                 372 drivers/clk/sunxi/clk-sunxi.c 	req->p = calcp;
p                 408 drivers/clk/sunxi/clk-sunxi.c 	req->p = calcp;
p                  14 drivers/clk/tegra/clk-divider.c #define pll_out_override(p) (BIT((p->shift - 6)))
p                 199 drivers/clk/tegra/clk-periph.c 	return _tegra_clk_register_periph(init->name, init->p.parent_names,
p                  15 drivers/clk/tegra/clk-pll-out.c #define pll_out_enb(p) (BIT(p->enb_bit_idx))
p                  16 drivers/clk/tegra/clk-pll-out.c #define pll_out_rst(p) (BIT(p->rst_bit_idx))
p                 230 drivers/clk/tegra/clk-pll.c #define pll_readl(offset, p) readl_relaxed(p->clk_base + offset)
p                 231 drivers/clk/tegra/clk-pll.c #define pll_readl_base(p) pll_readl(p->params->base_reg, p)
p                 232 drivers/clk/tegra/clk-pll.c #define pll_readl_misc(p) pll_readl(p->params->misc_reg, p)
p                 233 drivers/clk/tegra/clk-pll.c #define pll_override_readl(offset, p) readl_relaxed(p->pmc + offset)
p                 234 drivers/clk/tegra/clk-pll.c #define pll_readl_sdm_din(p) pll_readl(p->params->sdm_din_reg, p)
p                 235 drivers/clk/tegra/clk-pll.c #define pll_readl_sdm_ctrl(p) pll_readl(p->params->sdm_ctrl_reg, p)
p                 237 drivers/clk/tegra/clk-pll.c #define pll_writel(val, offset, p) writel_relaxed(val, p->clk_base + offset)
p                 238 drivers/clk/tegra/clk-pll.c #define pll_writel_base(val, p) pll_writel(val, p->params->base_reg, p)
p                 239 drivers/clk/tegra/clk-pll.c #define pll_writel_misc(val, p) pll_writel(val, p->params->misc_reg, p)
p                 240 drivers/clk/tegra/clk-pll.c #define pll_override_writel(val, offset, p) writel(val, p->pmc + offset)
p                 241 drivers/clk/tegra/clk-pll.c #define pll_writel_sdm_din(val, p) pll_writel(val, p->params->sdm_din_reg, p)
p                 242 drivers/clk/tegra/clk-pll.c #define pll_writel_sdm_ctrl(val, p) pll_writel(val, p->params->sdm_ctrl_reg, p)
p                 245 drivers/clk/tegra/clk-pll.c #define divm_mask(p) mask(p->params->div_nmp->divm_width)
p                 246 drivers/clk/tegra/clk-pll.c #define divn_mask(p) mask(p->params->div_nmp->divn_width)
p                 247 drivers/clk/tegra/clk-pll.c #define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\
p                 248 drivers/clk/tegra/clk-pll.c 		      mask(p->params->div_nmp->divp_width))
p                 249 drivers/clk/tegra/clk-pll.c #define sdm_din_mask(p) p->params->sdm_din_mask
p                 250 drivers/clk/tegra/clk-pll.c #define sdm_en_mask(p) p->params->sdm_ctrl_en_mask
p                 252 drivers/clk/tegra/clk-pll.c #define divm_shift(p) (p)->params->div_nmp->divm_shift
p                 253 drivers/clk/tegra/clk-pll.c #define divn_shift(p) (p)->params->div_nmp->divn_shift
p                 254 drivers/clk/tegra/clk-pll.c #define divp_shift(p) (p)->params->div_nmp->divp_shift
p                 256 drivers/clk/tegra/clk-pll.c #define divm_mask_shifted(p) (divm_mask(p) << divm_shift(p))
p                 257 drivers/clk/tegra/clk-pll.c #define divn_mask_shifted(p) (divn_mask(p) << divn_shift(p))
p                 258 drivers/clk/tegra/clk-pll.c #define divp_mask_shifted(p) (divp_mask(p) << divp_shift(p))
p                 260 drivers/clk/tegra/clk-pll.c #define divm_max(p) (divm_mask(p))
p                 261 drivers/clk/tegra/clk-pll.c #define divn_max(p) (divn_mask(p))
p                 262 drivers/clk/tegra/clk-pll.c #define divp_max(p) (1 << (divp_mask(p)))
p                 514 drivers/clk/tegra/clk-pll.c 	int p;
p                 525 drivers/clk/tegra/clk-pll.c 		p = _p_div_to_hw(hw, sel->p);
p                 526 drivers/clk/tegra/clk-pll.c 		if (p < 0)
p                 527 drivers/clk/tegra/clk-pll.c 			return p;
p                 529 drivers/clk/tegra/clk-pll.c 		p = ilog2(sel->p);
p                 536 drivers/clk/tegra/clk-pll.c 	cfg->p = p;
p                 599 drivers/clk/tegra/clk-pll.c 			cfg->p = ret;
p                 601 drivers/clk/tegra/clk-pll.c 		cfg->p = p_div;
p                 654 drivers/clk/tegra/clk-pll.c 		val |= cfg->p << div_nmp->override_divp_shift;
p                 671 drivers/clk/tegra/clk-pll.c 		       (cfg->p << divp_shift(pll));
p                 692 drivers/clk/tegra/clk-pll.c 		cfg->p = (val >> div_nmp->override_divp_shift) & divp_mask(pll);
p                 702 drivers/clk/tegra/clk-pll.c 		cfg->p = (val >> div_nmp->divp_shift) & divp_mask(pll);
p                 750 drivers/clk/tegra/clk-pll.c 			(cfg->m == old_cfg.m) && (cfg->p == old_cfg.p)) {
p                 808 drivers/clk/tegra/clk-pll.c 		cfg.p = old_cfg.p;
p                 810 drivers/clk/tegra/clk-pll.c 	if (old_cfg.m != cfg.m || old_cfg.n != cfg.n || old_cfg.p != cfg.p ||
p                 872 drivers/clk/tegra/clk-pll.c 		pdiv = _hw_to_p_div(hw, cfg.p);
p                 875 drivers/clk/tegra/clk-pll.c 			     clk_hw_get_name(hw), cfg.p);
p                 969 drivers/clk/tegra/clk-pll.c 		val |= sel.p << divp_shift(pll);
p                1179 drivers/clk/tegra/clk-pll.c 	unsigned int p;
p                1185 drivers/clk/tegra/clk-pll.c 	p = DIV_ROUND_UP(pll->params->vco_min, rate);
p                1187 drivers/clk/tegra/clk-pll.c 	cfg->output_rate = rate * p;
p                1191 drivers/clk/tegra/clk-pll.c 	p_div = _p_div_to_hw(hw, p);
p                1195 drivers/clk/tegra/clk-pll.c 	cfg->p = p_div;
p                1275 drivers/clk/tegra/clk-pll.c 	if (cfg->p >  pll->params->max_p)
p                1299 drivers/clk/tegra/clk-pll.c 		cfg.p = old_cfg.p;
p                1301 drivers/clk/tegra/clk-pll.c 	if (old_cfg.m != cfg.m || old_cfg.n != cfg.n || old_cfg.p != cfg.p)
p                1322 drivers/clk/tegra/clk-pll.c 	p_div = _hw_to_p_div(hw, cfg.p);
p                1462 drivers/clk/tegra/clk-pll.c 	if (old_cfg.n == cfg.n && old_cfg.p == cfg.p)
p                1519 drivers/clk/tegra/clk-pll.c 	cfg.p = old_cfg.p;
p                2180 drivers/clk/tegra/clk-pll.c 			cfg.p = p_tohw->hw_val;
p                2327 drivers/clk/tegra/clk-pll.c 	cfg.p = pll_params->pdiv_tohw[i-1].hw_val;
p                 234 drivers/clk/tegra/clk-tegra-periph.c 		.p.parent_name = _parent_name,				\
p                 244 drivers/clk/tegra/clk-tegra-periph.c 		.p.parent_name = _parent_name,				\
p                 916 drivers/clk/tegra/clk-tegra-periph.c 				data->p.parent_name, data->periph.gate.flags,
p                 941 drivers/clk/tegra/clk-tegra-periph.c 				data->p.parent_name, clk_base + data->offset,
p                 882 drivers/clk/tegra/clk-tegra20.c 					data->p.parent_names,
p                1322 drivers/clk/tegra/clk-tegra210.c #define divm_mask(p) mask(p->params->div_nmp->divm_width)
p                1323 drivers/clk/tegra/clk-tegra210.c #define divn_mask(p) mask(p->params->div_nmp->divn_width)
p                1324 drivers/clk/tegra/clk-tegra210.c #define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\
p                1325 drivers/clk/tegra/clk-tegra210.c 		      mask(p->params->div_nmp->divp_width))
p                1327 drivers/clk/tegra/clk-tegra210.c #define divm_shift(p) ((p)->params->div_nmp->divm_shift)
p                1328 drivers/clk/tegra/clk-tegra210.c #define divn_shift(p) ((p)->params->div_nmp->divn_shift)
p                1329 drivers/clk/tegra/clk-tegra210.c #define divp_shift(p) ((p)->params->div_nmp->divp_shift)
p                1331 drivers/clk/tegra/clk-tegra210.c #define divm_mask_shifted(p) (divm_mask(p) << divm_shift(p))
p                1332 drivers/clk/tegra/clk-tegra210.c #define divn_mask_shifted(p) (divn_mask(p) << divn_shift(p))
p                1333 drivers/clk/tegra/clk-tegra210.c #define divp_mask_shifted(p) (divp_mask(p) << divp_shift(p))
p                1386 drivers/clk/tegra/clk-tegra210.c 		 __clk_get_name(pllx->hw.clk), cfg->m, cfg->n, cfg->p,
p                1388 drivers/clk/tegra/clk-tegra210.c 		 pllx->params->pdiv_tohw[cfg->p].pdiv / 1000);
p                1407 drivers/clk/tegra/clk-tegra210.c 	int p;
p                1415 drivers/clk/tegra/clk-tegra210.c 		p = DIV_ROUND_UP(params->vco_min, rate);
p                1416 drivers/clk/tegra/clk-tegra210.c 		p = params->round_p_to_pdiv(p, &pdiv);
p                1418 drivers/clk/tegra/clk-tegra210.c 		p = rate >= params->vco_min ? 1 : -EINVAL;
p                1421 drivers/clk/tegra/clk-tegra210.c 	if (p < 0)
p                1425 drivers/clk/tegra/clk-tegra210.c 	cfg->p = p;
p                1428 drivers/clk/tegra/clk-tegra210.c 	cfg->p = tegra_pll_p_div_to_hw(pll, cfg->p);
p                1430 drivers/clk/tegra/clk-tegra210.c 	p_rate = rate * p;
p                1449 drivers/clk/tegra/clk-tegra210.c 		cfg->output_rate /= p * cfg->m * PLL_SDM_COEFF;
p                1452 drivers/clk/tegra/clk-tegra210.c 		cfg->output_rate /= p * cfg->m;
p                1522 drivers/clk/tegra/clk-tegra210.c static u32 pll_qlin_p_to_pdiv(u32 p, u32 *pdiv)
p                1526 drivers/clk/tegra/clk-tegra210.c 	if (p) {
p                1528 drivers/clk/tegra/clk-tegra210.c 			if (p <= pll_qlin_pdiv_to_hw[i].pdiv) {
p                1551 drivers/clk/tegra/clk-tegra210.c static u32 pll_expo_p_to_pdiv(u32 p, u32 *pdiv)
p                1553 drivers/clk/tegra/clk-tegra210.c 	if (p) {
p                1554 drivers/clk/tegra/clk-tegra210.c 		u32 i = fls(p);
p                1556 drivers/clk/tegra/clk-tegra210.c 		if (i == ffs(p))
p                2850 drivers/clk/tegra/clk-tegra210.c 	reg |= fentry->p << 16;
p                1076 drivers/clk/tegra/clk-tegra30.c 					data->p.parent_names,
p                 110 drivers/clk/tegra/clk.h 	u8		p;
p                 276 drivers/clk/tegra/clk.h 	u32	(*round_p_to_pdiv)(u32 p, u32 *pdiv);
p                 615 drivers/clk/tegra/clk.h 	} p;
p                 632 drivers/clk/tegra/clk.h 		.p.parent_names = _parent_names,			\
p                  46 drivers/clk/uniphier/clk-uniphier-core.c 	const struct uniphier_clk_data *p, *data;
p                  64 drivers/clk/uniphier/clk-uniphier-core.c 	for (p = data; p->name; p++)
p                  65 drivers/clk/uniphier/clk-uniphier-core.c 		clk_num = max(clk_num, p->idx + 1);
p                  79 drivers/clk/uniphier/clk-uniphier-core.c 	for (p = data; p->name; p++) {
p                  82 drivers/clk/uniphier/clk-uniphier-core.c 		dev_dbg(dev, "register %s (index=%d)\n", p->name, p->idx);
p                  83 drivers/clk/uniphier/clk-uniphier-core.c 		hw = uniphier_clk_register(dev, regmap, p);
p                  84 drivers/clk/uniphier/clk-uniphier-core.c 		if (WARN(IS_ERR(hw), "failed to register %s", p->name))
p                  87 drivers/clk/uniphier/clk-uniphier-core.c 		if (p->idx >= 0)
p                  88 drivers/clk/uniphier/clk-uniphier-core.c 			hw_data->hws[p->idx] = hw;
p                  27 drivers/clk/versatile/icst.c unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
p                  29 drivers/clk/versatile/icst.c 	u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
p                  30 drivers/clk/versatile/icst.c 	u32 divisor = (vco.r + 2) * p->s2div[vco.s];
p                  47 drivers/clk/versatile/icst.c icst_hz_to_vco(const struct icst_params *p, unsigned long freq)
p                  49 drivers/clk/versatile/icst.c 	struct icst_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
p                  58 drivers/clk/versatile/icst.c 		f = freq * p->s2div[p->idx2s[i]];
p                  60 drivers/clk/versatile/icst.c 		if (f > p->vco_min && f <= p->vco_max)
p                  68 drivers/clk/versatile/icst.c 	vco.s = p->idx2s[i];
p                  74 drivers/clk/versatile/icst.c 	for (rd = p->rd_min; rd <= p->rd_max; rd++) {
p                  79 drivers/clk/versatile/icst.c 		fref_div = (2 * p->ref) / rd;
p                  82 drivers/clk/versatile/icst.c 		if (vd < p->vd_min || vd > p->vd_max)
p                  30 drivers/clk/versatile/icst.h unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco);
p                  31 drivers/clk/versatile/icst.h struct icst_vco icst_hz_to_vco(const struct icst_params *p, unsigned long freq);
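The icst.c lines above give the ICST VCO frequency relation directly: f = ref * 2 * (v + 8) / ((r + 2) * s2div[s]), where v and r are the programmed multiplier/divider fields and s selects a fixed output divider. A tiny sketch of just that arithmetic (the divider table and register values below are assumptions for the demo, not taken from a board file):

/*
 * Sketch of the ICST rate formula; inputs are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct icst_vco { uint16_t v; uint8_t r; uint8_t s; };

static unsigned long icst_hz(unsigned long ref, const uint8_t *s2div,
			     struct icst_vco vco)
{
	uint64_t dividend = (uint64_t)ref * 2 * (vco.v + 8);
	uint32_t divisor = (vco.r + 2) * s2div[vco.s];

	return (unsigned long)(dividend / divisor);
}

int main(void)
{
	/* hypothetical output-divider table and programming */
	static const uint8_t s2div[8] = { 10, 2, 8, 4, 5, 7, 3, 6 };
	struct icst_vco vco = { .v = 117, .r = 22, .s = 1 };

	printf("%lu Hz\n", icst_hz(24000000UL, s2div, vco));	/* 125 MHz */
	return 0;
}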
p                  53 drivers/clocksource/em_sti.c static inline unsigned long em_sti_read(struct em_sti_priv *p, int offs)
p                  55 drivers/clocksource/em_sti.c 	return ioread32(p->base + offs);
p                  58 drivers/clocksource/em_sti.c static inline void em_sti_write(struct em_sti_priv *p, int offs,
p                  61 drivers/clocksource/em_sti.c 	iowrite32(value, p->base + offs);
p                  64 drivers/clocksource/em_sti.c static int em_sti_enable(struct em_sti_priv *p)
p                  69 drivers/clocksource/em_sti.c 	ret = clk_enable(p->clk);
p                  71 drivers/clocksource/em_sti.c 		dev_err(&p->pdev->dev, "cannot enable clock\n");
p                  76 drivers/clocksource/em_sti.c 	em_sti_write(p, STI_SET_H, 0x40000000);
p                  77 drivers/clocksource/em_sti.c 	em_sti_write(p, STI_SET_L, 0x00000000);
p                  80 drivers/clocksource/em_sti.c 	em_sti_write(p, STI_INTENCLR, 3);
p                  81 drivers/clocksource/em_sti.c 	em_sti_write(p, STI_INTFFCLR, 3);
p                  84 drivers/clocksource/em_sti.c 	em_sti_write(p, STI_CONTROL, 1);
p                  89 drivers/clocksource/em_sti.c static void em_sti_disable(struct em_sti_priv *p)
p                  92 drivers/clocksource/em_sti.c 	em_sti_write(p, STI_INTENCLR, 3);
p                  95 drivers/clocksource/em_sti.c 	clk_disable(p->clk);
p                  98 drivers/clocksource/em_sti.c static u64 em_sti_count(struct em_sti_priv *p)
p                 108 drivers/clocksource/em_sti.c 	raw_spin_lock_irqsave(&p->lock, flags);
p                 109 drivers/clocksource/em_sti.c 	ticks = (u64)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32;
p                 110 drivers/clocksource/em_sti.c 	ticks |= em_sti_read(p, STI_COUNT_L);
p                 111 drivers/clocksource/em_sti.c 	raw_spin_unlock_irqrestore(&p->lock, flags);
p                 116 drivers/clocksource/em_sti.c static u64 em_sti_set_next(struct em_sti_priv *p, u64 next)
p                 120 drivers/clocksource/em_sti.c 	raw_spin_lock_irqsave(&p->lock, flags);
p                 123 drivers/clocksource/em_sti.c 	em_sti_write(p, STI_INTENCLR, 1);
p                 126 drivers/clocksource/em_sti.c 	em_sti_write(p, STI_COMPA_H, next >> 32);
p                 127 drivers/clocksource/em_sti.c 	em_sti_write(p, STI_COMPA_L, next & 0xffffffff);
p                 130 drivers/clocksource/em_sti.c 	em_sti_write(p, STI_INTFFCLR, 1);
p                 133 drivers/clocksource/em_sti.c 	em_sti_write(p, STI_INTENSET, 1);
p                 135 drivers/clocksource/em_sti.c 	raw_spin_unlock_irqrestore(&p->lock, flags);
p                 142 drivers/clocksource/em_sti.c 	struct em_sti_priv *p = dev_id;
p                 144 drivers/clocksource/em_sti.c 	p->ced.event_handler(&p->ced);
p                 148 drivers/clocksource/em_sti.c static int em_sti_start(struct em_sti_priv *p, unsigned int user)
p                 154 drivers/clocksource/em_sti.c 	raw_spin_lock_irqsave(&p->lock, flags);
p                 155 drivers/clocksource/em_sti.c 	used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
p                 157 drivers/clocksource/em_sti.c 		ret = em_sti_enable(p);
p                 160 drivers/clocksource/em_sti.c 		p->active[user] = 1;
p                 161 drivers/clocksource/em_sti.c 	raw_spin_unlock_irqrestore(&p->lock, flags);
p                 166 drivers/clocksource/em_sti.c static void em_sti_stop(struct em_sti_priv *p, unsigned int user)
p                 171 drivers/clocksource/em_sti.c 	raw_spin_lock_irqsave(&p->lock, flags);
p                 172 drivers/clocksource/em_sti.c 	used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
p                 173 drivers/clocksource/em_sti.c 	p->active[user] = 0;
p                 174 drivers/clocksource/em_sti.c 	used_after = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
p                 177 drivers/clocksource/em_sti.c 		em_sti_disable(p);
p                 178 drivers/clocksource/em_sti.c 	raw_spin_unlock_irqrestore(&p->lock, flags);
p                 193 drivers/clocksource/em_sti.c 	struct em_sti_priv *p = cs_to_em_sti(cs);
p                 195 drivers/clocksource/em_sti.c 	return em_sti_start(p, USER_CLOCKSOURCE);
p                 208 drivers/clocksource/em_sti.c static int em_sti_register_clocksource(struct em_sti_priv *p)
p                 210 drivers/clocksource/em_sti.c 	struct clocksource *cs = &p->cs;
p                 212 drivers/clocksource/em_sti.c 	cs->name = dev_name(&p->pdev->dev);
p                 222 drivers/clocksource/em_sti.c 	dev_info(&p->pdev->dev, "used as clock source\n");
p                 224 drivers/clocksource/em_sti.c 	clocksource_register_hz(cs, p->rate);
p                 235 drivers/clocksource/em_sti.c 	struct em_sti_priv *p = ced_to_em_sti(ced);
p                 236 drivers/clocksource/em_sti.c 	em_sti_stop(p, USER_CLOCKEVENT);
p                 242 drivers/clocksource/em_sti.c 	struct em_sti_priv *p = ced_to_em_sti(ced);
p                 244 drivers/clocksource/em_sti.c 	dev_info(&p->pdev->dev, "used for oneshot clock events\n");
p                 245 drivers/clocksource/em_sti.c 	em_sti_start(p, USER_CLOCKEVENT);
p                 252 drivers/clocksource/em_sti.c 	struct em_sti_priv *p = ced_to_em_sti(ced);
p                 256 drivers/clocksource/em_sti.c 	next = em_sti_set_next(p, em_sti_count(p) + delta);
p                 257 drivers/clocksource/em_sti.c 	safe = em_sti_count(p) < (next - 1);
p                 262 drivers/clocksource/em_sti.c static void em_sti_register_clockevent(struct em_sti_priv *p)
p                 264 drivers/clocksource/em_sti.c 	struct clock_event_device *ced = &p->ced;
p                 266 drivers/clocksource/em_sti.c 	ced->name = dev_name(&p->pdev->dev);
p                 274 drivers/clocksource/em_sti.c 	dev_info(&p->pdev->dev, "used for clock events\n");
p                 276 drivers/clocksource/em_sti.c 	clockevents_config_and_register(ced, p->rate, 2, 0xffffffff);
p                 281 drivers/clocksource/em_sti.c 	struct em_sti_priv *p;
p                 286 drivers/clocksource/em_sti.c 	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
p                 287 drivers/clocksource/em_sti.c 	if (p == NULL)
p                 290 drivers/clocksource/em_sti.c 	p->pdev = pdev;
p                 291 drivers/clocksource/em_sti.c 	platform_set_drvdata(pdev, p);
p                 299 drivers/clocksource/em_sti.c 	p->base = devm_ioremap_resource(&pdev->dev, res);
p                 300 drivers/clocksource/em_sti.c 	if (IS_ERR(p->base))
p                 301 drivers/clocksource/em_sti.c 		return PTR_ERR(p->base);
p                 305 drivers/clocksource/em_sti.c 			       dev_name(&pdev->dev), p);
p                 312 drivers/clocksource/em_sti.c 	p->clk = devm_clk_get(&pdev->dev, "sclk");
p                 313 drivers/clocksource/em_sti.c 	if (IS_ERR(p->clk)) {
p                 315 drivers/clocksource/em_sti.c 		return PTR_ERR(p->clk);
p                 318 drivers/clocksource/em_sti.c 	ret = clk_prepare(p->clk);
p                 324 drivers/clocksource/em_sti.c 	ret = clk_enable(p->clk);
p                 326 drivers/clocksource/em_sti.c 		dev_err(&p->pdev->dev, "cannot enable clock\n");
p                 327 drivers/clocksource/em_sti.c 		clk_unprepare(p->clk);
p                 330 drivers/clocksource/em_sti.c 	p->rate = clk_get_rate(p->clk);
p                 331 drivers/clocksource/em_sti.c 	clk_disable(p->clk);
p                 333 drivers/clocksource/em_sti.c 	raw_spin_lock_init(&p->lock);
p                 334 drivers/clocksource/em_sti.c 	em_sti_register_clockevent(p);
p                 335 drivers/clocksource/em_sti.c 	em_sti_register_clocksource(p);
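The em_sti entries above share one timer block between a clocksource and a clockevent user: em_sti_start()/em_sti_stop() keep per-user active flags under the spinlock and only touch the hardware when the first user arrives or the last one leaves. A minimal standalone sketch of that first-on/last-off pattern (locking omitted; hw_enable()/hw_disable() are illustrative stubs, not the driver's functions):

    #include <stdbool.h>

    enum { USER_CLOCKSOURCE, USER_CLOCKEVENT, USER_NR };

    struct shared_timer {
        bool active[USER_NR];   /* the driver guards this with a raw spinlock */
    };

    /* Stubs standing in for clk_enable() plus the register setup/teardown. */
    static int hw_enable(struct shared_timer *t) { (void)t; return 0; }
    static void hw_disable(struct shared_timer *t) { (void)t; }

    /* Enable the hardware only when the first user shows up. */
    static int shared_timer_start(struct shared_timer *t, int user)
    {
        bool used_before = t->active[USER_CLOCKSOURCE] ||
                           t->active[USER_CLOCKEVENT];
        int ret = 0;

        if (!used_before)
            ret = hw_enable(t);
        if (!ret)
            t->active[user] = true;
        return ret;
    }

    /* Disable it again once the last user is gone. */
    static void shared_timer_stop(struct shared_timer *t, int user)
    {
        bool used_before = t->active[USER_CLOCKSOURCE] ||
                           t->active[USER_CLOCKEVENT];
        bool used_after;

        t->active[user] = false;
        used_after = t->active[USER_CLOCKSOURCE] || t->active[USER_CLOCKEVENT];

        if (used_before && !used_after)
            hw_disable(t);
    }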
p                  37 drivers/clocksource/h8300_timer16.c static unsigned long timer16_get_counter(struct timer16_priv *p)
p                  42 drivers/clocksource/h8300_timer16.c 	o1 = ioread8(p->mapcommon + TISRC) & p->ovf;
p                  47 drivers/clocksource/h8300_timer16.c 		v1 = ioread16be(p->mapbase + TCNT);
p                  48 drivers/clocksource/h8300_timer16.c 		v2 = ioread16be(p->mapbase + TCNT);
p                  49 drivers/clocksource/h8300_timer16.c 		v3 = ioread16be(p->mapbase + TCNT);
p                  50 drivers/clocksource/h8300_timer16.c 		o1 = ioread8(p->mapcommon + TISRC) & p->ovf;
p                  63 drivers/clocksource/h8300_timer16.c 	struct timer16_priv *p = (struct timer16_priv *)dev_id;
p                  65 drivers/clocksource/h8300_timer16.c 	bclr(p->ovf, p->mapcommon + TISRC);
p                  66 drivers/clocksource/h8300_timer16.c 	p->total_cycles += 0x10000;
p                  78 drivers/clocksource/h8300_timer16.c 	struct timer16_priv *p = cs_to_priv(cs);
p                  81 drivers/clocksource/h8300_timer16.c 	value = p->total_cycles;
p                  82 drivers/clocksource/h8300_timer16.c 	raw = timer16_get_counter(p);
p                  89 drivers/clocksource/h8300_timer16.c 	struct timer16_priv *p = cs_to_priv(cs);
p                  91 drivers/clocksource/h8300_timer16.c 	WARN_ON(p->cs_enabled);
p                  93 drivers/clocksource/h8300_timer16.c 	p->total_cycles = 0;
p                  94 drivers/clocksource/h8300_timer16.c 	iowrite16be(0x0000, p->mapbase + TCNT);
p                  95 drivers/clocksource/h8300_timer16.c 	iowrite8(0x83, p->mapbase + TCR);
p                  96 drivers/clocksource/h8300_timer16.c 	bset(p->ovie, p->mapcommon + TISRC);
p                  97 drivers/clocksource/h8300_timer16.c 	bset(p->enb, p->mapcommon + TSTR);
p                  99 drivers/clocksource/h8300_timer16.c 	p->cs_enabled = true;
p                 105 drivers/clocksource/h8300_timer16.c 	struct timer16_priv *p = cs_to_priv(cs);
p                 107 drivers/clocksource/h8300_timer16.c 	WARN_ON(!p->cs_enabled);
p                 109 drivers/clocksource/h8300_timer16.c 	bclr(p->ovie, p->mapcommon + TISRC);
p                 110 drivers/clocksource/h8300_timer16.c 	bclr(p->enb, p->mapcommon + TSTR);
p                 112 drivers/clocksource/h8300_timer16.c 	p->cs_enabled = false;
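The h8300_timer16 entries extend a 16-bit hardware counter to 64 bits: the overflow interrupt adds 0x10000 to total_cycles, and the read path samples the counter together with the overflow state so a wrap racing with the read can be caught. A simplified standalone sketch of the idea (the driver additionally consults the hardware overflow flag; here only the software-accumulator race is handled, and the accessor name is illustrative):

    #include <stdint.h>

    /* Bumped by 0x10000 from the counter-overflow interrupt handler. */
    static volatile uint64_t total_cycles;

    /* Stand-in for ioread16be(mapbase + TCNT). */
    static uint16_t read_hw_count(void) { return 0; }

    /*
     * Snapshot (total_cycles, raw counter) consistently: if the overflow
     * handler ran between the two reads of total_cycles, retry so the
     * accumulator and the raw count belong to the same counter epoch.
     */
    static uint64_t read_extended_counter(void)
    {
        uint64_t total;
        uint16_t raw;

        do {
            total = total_cycles;
            raw = read_hw_count();
        } while (total != total_cycles);

        return total + raw;
    }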
p                  47 drivers/clocksource/h8300_timer8.c 	struct timer8_priv *p = dev_id;
p                  49 drivers/clocksource/h8300_timer8.c 	if (clockevent_state_oneshot(&p->ced))
p                  50 drivers/clocksource/h8300_timer8.c 		iowrite16be(0x0000, p->mapbase + _8TCR);
p                  52 drivers/clocksource/h8300_timer8.c 	p->ced.event_handler(&p->ced);
p                  54 drivers/clocksource/h8300_timer8.c 	bclr(CMFA, p->mapbase + _8TCSR);
p                  59 drivers/clocksource/h8300_timer8.c static void timer8_set_next(struct timer8_priv *p, unsigned long delta)
p                  63 drivers/clocksource/h8300_timer8.c 	bclr(CMIEA, p->mapbase + _8TCR);
p                  64 drivers/clocksource/h8300_timer8.c 	iowrite16be(delta, p->mapbase + TCORA);
p                  65 drivers/clocksource/h8300_timer8.c 	iowrite16be(0x0000, p->mapbase + _8TCNT);
p                  66 drivers/clocksource/h8300_timer8.c 	bclr(CMFA, p->mapbase + _8TCSR);
p                  67 drivers/clocksource/h8300_timer8.c 	bset(CMIEA, p->mapbase + _8TCR);
p                  70 drivers/clocksource/h8300_timer8.c static int timer8_enable(struct timer8_priv *p)
p                  72 drivers/clocksource/h8300_timer8.c 	iowrite16be(0xffff, p->mapbase + TCORA);
p                  73 drivers/clocksource/h8300_timer8.c 	iowrite16be(0x0000, p->mapbase + _8TCNT);
p                  74 drivers/clocksource/h8300_timer8.c 	iowrite16be(0x0c02, p->mapbase + _8TCR);
p                  79 drivers/clocksource/h8300_timer8.c static int timer8_start(struct timer8_priv *p)
p                  83 drivers/clocksource/h8300_timer8.c 	if ((p->flags & FLAG_STARTED))
p                  86 drivers/clocksource/h8300_timer8.c 	ret = timer8_enable(p);
p                  88 drivers/clocksource/h8300_timer8.c 		p->flags |= FLAG_STARTED;
p                  93 drivers/clocksource/h8300_timer8.c static void timer8_stop(struct timer8_priv *p)
p                  95 drivers/clocksource/h8300_timer8.c 	iowrite16be(0x0000, p->mapbase + _8TCR);
p                 103 drivers/clocksource/h8300_timer8.c static void timer8_clock_event_start(struct timer8_priv *p, unsigned long delta)
p                 105 drivers/clocksource/h8300_timer8.c 	timer8_start(p);
p                 106 drivers/clocksource/h8300_timer8.c 	timer8_set_next(p, delta);
p                 117 drivers/clocksource/h8300_timer8.c 	struct timer8_priv *p = ced_to_priv(ced);
p                 120 drivers/clocksource/h8300_timer8.c 	timer8_stop(p);
p                 121 drivers/clocksource/h8300_timer8.c 	timer8_clock_event_start(p, (p->rate + HZ/2) / HZ);
p                 128 drivers/clocksource/h8300_timer8.c 	struct timer8_priv *p = ced_to_priv(ced);
p                 131 drivers/clocksource/h8300_timer8.c 	timer8_stop(p);
p                 132 drivers/clocksource/h8300_timer8.c 	timer8_clock_event_start(p, 0x10000);
p                 140 drivers/clocksource/h8300_timer8.c 	struct timer8_priv *p = ced_to_priv(ced);
p                 143 drivers/clocksource/h8300_timer8.c 	timer8_set_next(p, delta - 1);
p                  33 drivers/clocksource/h8300_tpu.c static inline unsigned long read_tcnt32(struct tpu_priv *p)
p                  37 drivers/clocksource/h8300_tpu.c 	tcnt = ioread16be(p->mapbase1 + TCNT) << 16;
p                  38 drivers/clocksource/h8300_tpu.c 	tcnt |= ioread16be(p->mapbase2 + TCNT);
p                  42 drivers/clocksource/h8300_tpu.c static int tpu_get_counter(struct tpu_priv *p, unsigned long long *val)
p                  47 drivers/clocksource/h8300_tpu.c 	o1 = ioread8(p->mapbase1 + TSR) & TCFV;
p                  52 drivers/clocksource/h8300_tpu.c 		v1 = read_tcnt32(p);
p                  53 drivers/clocksource/h8300_tpu.c 		v2 = read_tcnt32(p);
p                  54 drivers/clocksource/h8300_tpu.c 		v3 = read_tcnt32(p);
p                  55 drivers/clocksource/h8300_tpu.c 		o1 = ioread8(p->mapbase1 + TSR) & TCFV;
p                  70 drivers/clocksource/h8300_tpu.c 	struct tpu_priv *p = cs_to_priv(cs);
p                  74 drivers/clocksource/h8300_tpu.c 	raw_spin_lock_irqsave(&p->lock, flags);
p                  75 drivers/clocksource/h8300_tpu.c 	if (tpu_get_counter(p, &value))
p                  77 drivers/clocksource/h8300_tpu.c 	raw_spin_unlock_irqrestore(&p->lock, flags);
p                  84 drivers/clocksource/h8300_tpu.c 	struct tpu_priv *p = cs_to_priv(cs);
p                  86 drivers/clocksource/h8300_tpu.c 	WARN_ON(p->cs_enabled);
p                  88 drivers/clocksource/h8300_tpu.c 	iowrite16be(0, p->mapbase1 + TCNT);
p                  89 drivers/clocksource/h8300_tpu.c 	iowrite16be(0, p->mapbase2 + TCNT);
p                  90 drivers/clocksource/h8300_tpu.c 	iowrite8(0x0f, p->mapbase1 + TCR);
p                  91 drivers/clocksource/h8300_tpu.c 	iowrite8(0x03, p->mapbase2 + TCR);
p                  93 drivers/clocksource/h8300_tpu.c 	p->cs_enabled = true;
p                  99 drivers/clocksource/h8300_tpu.c 	struct tpu_priv *p = cs_to_priv(cs);
p                 101 drivers/clocksource/h8300_tpu.c 	WARN_ON(!p->cs_enabled);
p                 103 drivers/clocksource/h8300_tpu.c 	iowrite8(0, p->mapbase1 + TCR);
p                 104 drivers/clocksource/h8300_tpu.c 	iowrite8(0, p->mapbase2 + TCR);
p                 105 drivers/clocksource/h8300_tpu.c 	p->cs_enabled = false;
p                 467 drivers/counter/counter.c 	struct counter_device_attr *p, *n;
p                 469 drivers/counter/counter.c 	list_for_each_entry_safe(p, n, attr_list, l) {
p                 471 drivers/counter/counter.c 		kfree(p->dev_attr.attr.name);
p                 472 drivers/counter/counter.c 		kfree(p->component);
p                 473 drivers/counter/counter.c 		list_del(&p->l);
p                 474 drivers/counter/counter.c 		kfree(p);
p                1332 drivers/counter/counter.c 	struct counter_device_attr *p;
p                1355 drivers/counter/counter.c 		list_for_each_entry(p, &group->attr_list, l)
p                1356 drivers/counter/counter.c 			group->attr_group.attrs[j++] = &p->dev_attr.attr;
p                 960 drivers/cpufreq/intel_pstate.c 	char *p = memchr(buf, '\n', count);
p                 964 drivers/cpufreq/intel_pstate.c 	ret = intel_pstate_update_status(buf, p ? p - buf : count);
p                 122 drivers/cpufreq/pmac64-cpufreq.c 	args.u[0].p = &slew;
p                 133 drivers/cpufreq/pmac64-cpufreq.c 		args.u[0].p = &done;
p                 277 drivers/cpufreq/pmac64-cpufreq.c 		args.u[0].p = &done;
p                 302 drivers/cpufreq/pmac64-cpufreq.c 	args.u[0].p = &val;
p                 452 drivers/cpufreq/powernow-k7.c 	unsigned char *p;
p                 460 drivers/cpufreq/powernow-k7.c 		p = phys_to_virt(i);
p                 462 drivers/cpufreq/powernow-k7.c 		if (memcmp(p, "AMDK7PNOW!",  10) == 0) {
p                 463 drivers/cpufreq/powernow-k7.c 			pr_debug("Found PSB header at %p\n", p);
p                 464 drivers/cpufreq/powernow-k7.c 			psb = (struct psb_s *) p;
p                 489 drivers/cpufreq/powernow-k7.c 			p += sizeof(*psb);
p                 491 drivers/cpufreq/powernow-k7.c 			pst = (struct pst_s *) p;
p                 494 drivers/cpufreq/powernow-k7.c 				pst = (struct pst_s *) p;
p                 502 drivers/cpufreq/powernow-k7.c 					p = (char *)pst + sizeof(*pst);
p                 503 drivers/cpufreq/powernow-k7.c 					ret = get_ranges(p);
p                 507 drivers/cpufreq/powernow-k7.c 					p = (char *)pst + sizeof(*pst);
p                 509 drivers/cpufreq/powernow-k7.c 						p += 2;
p                 518 drivers/cpufreq/powernow-k7.c 		p++;
p                  36 drivers/cpufreq/s3c24xx-cpufreq-debugfs.c static int board_show(struct seq_file *seq, void *p)
p                  65 drivers/cpufreq/s3c24xx-cpufreq-debugfs.c static int info_show(struct seq_file *seq, void *p)
p                  96 drivers/cpufreq/s3c24xx-cpufreq-debugfs.c static int io_show(struct seq_file *seq, void *p)
p                5370 drivers/crypto/caam/caamalg_qi2.c 		struct caam_hash_alg *t_hash_alg, *p;
p                5372 drivers/crypto/caam/caamalg_qi2.c 		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
p                 472 drivers/crypto/caam/caampkc.c 	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
p                 543 drivers/crypto/caam/caampkc.c 	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
p                 795 drivers/crypto/caam/caampkc.c 	kzfree(key->p);
p                 920 drivers/crypto/caam/caampkc.c 	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
p                 921 drivers/crypto/caam/caampkc.c 	if (!rsa_key->p)
p                 968 drivers/crypto/caam/caampkc.c 	kzfree(rsa_key->p);
p                  73 drivers/crypto/caam/caampkc.h 	u8 *p;
p                  40 drivers/crypto/caam/qi.c 	struct qman_portal *p;
p                 236 drivers/crypto/caam/qi.c 		struct qman_portal *p;
p                 238 drivers/crypto/caam/qi.c 		p = qman_get_affine_portal(smp_processor_id());
p                 239 drivers/crypto/caam/qi.c 		qman_p_poll_dqrr(p, 16);
p                 477 drivers/crypto/caam/qi.c 	int cleaned = qman_p_poll_dqrr(np->p, budget);
p                 481 drivers/crypto/caam/qi.c 		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
p                 541 drivers/crypto/caam/qi.c static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
p                 550 drivers/crypto/caam/qi.c 		qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
p                 551 drivers/crypto/caam/qi.c 		np->p = p;
p                 558 drivers/crypto/caam/qi.c static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
p                 569 drivers/crypto/caam/qi.c 	if (caam_qi_napi_schedule(p, caam_napi))
p                 137 drivers/crypto/ccp/ccp-dev-v5.c #define	CCP_AES_SIZE(p)		((p)->aes.size)
p                 138 drivers/crypto/ccp/ccp-dev-v5.c #define	CCP_AES_ENCRYPT(p)	((p)->aes.encrypt)
p                 139 drivers/crypto/ccp/ccp-dev-v5.c #define	CCP_AES_MODE(p)		((p)->aes.mode)
p                 140 drivers/crypto/ccp/ccp-dev-v5.c #define	CCP_AES_TYPE(p)		((p)->aes.type)
p                 141 drivers/crypto/ccp/ccp-dev-v5.c #define	CCP_XTS_SIZE(p)		((p)->aes_xts.size)
p                 142 drivers/crypto/ccp/ccp-dev-v5.c #define	CCP_XTS_TYPE(p)		((p)->aes_xts.type)
p                 143 drivers/crypto/ccp/ccp-dev-v5.c #define	CCP_XTS_ENCRYPT(p)	((p)->aes_xts.encrypt)
p                 144 drivers/crypto/ccp/ccp-dev-v5.c #define	CCP_DES3_SIZE(p)	((p)->des3.size)
p                 145 drivers/crypto/ccp/ccp-dev-v5.c #define	CCP_DES3_ENCRYPT(p)	((p)->des3.encrypt)
p                 146 drivers/crypto/ccp/ccp-dev-v5.c #define	CCP_DES3_MODE(p)	((p)->des3.mode)
p                 147 drivers/crypto/ccp/ccp-dev-v5.c #define	CCP_DES3_TYPE(p)	((p)->des3.type)
p                 148 drivers/crypto/ccp/ccp-dev-v5.c #define	CCP_SHA_TYPE(p)		((p)->sha.type)
p                 149 drivers/crypto/ccp/ccp-dev-v5.c #define	CCP_RSA_SIZE(p)		((p)->rsa.size)
p                 150 drivers/crypto/ccp/ccp-dev-v5.c #define	CCP_PT_BYTESWAP(p)	((p)->pt.byteswap)
p                 151 drivers/crypto/ccp/ccp-dev-v5.c #define	CCP_PT_BITWISE(p)	((p)->pt.bitwise)
p                 152 drivers/crypto/ccp/ccp-dev-v5.c #define	CCP_ECC_MODE(p)		((p)->ecc.mode)
p                 153 drivers/crypto/ccp/ccp-dev-v5.c #define	CCP_ECC_AFFINE(p)	((p)->ecc.one)
p                 156 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_DW0(p)		((p)->dw0)
p                 157 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_SOC(p)		(CCP5_CMD_DW0(p).soc)
p                 158 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_IOC(p)		(CCP5_CMD_DW0(p).ioc)
p                 159 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_INIT(p)	(CCP5_CMD_DW0(p).init)
p                 160 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_EOM(p)		(CCP5_CMD_DW0(p).eom)
p                 161 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_FUNCTION(p)	(CCP5_CMD_DW0(p).function)
p                 162 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_ENGINE(p)	(CCP5_CMD_DW0(p).engine)
p                 163 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_PROT(p)	(CCP5_CMD_DW0(p).prot)
p                 166 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_DW1(p)		((p)->length)
p                 167 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_LEN(p)		(CCP5_CMD_DW1(p))
p                 170 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_DW2(p)		((p)->src_lo)
p                 171 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_SRC_LO(p)	(CCP5_CMD_DW2(p))
p                 174 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_DW3(p)		((p)->dw3)
p                 175 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_SRC_MEM(p)	((p)->dw3.src_mem)
p                 176 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_SRC_HI(p)	((p)->dw3.src_hi)
p                 177 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_LSB_ID(p)	((p)->dw3.lsb_cxt_id)
p                 178 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_FIX_SRC(p)	((p)->dw3.fixed)
p                 181 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_DW4(p)		((p)->dw4)
p                 182 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_DST_LO(p)	(CCP5_CMD_DW4(p).dst_lo)
p                 183 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_DW5(p)		((p)->dw5.fields.dst_hi)
p                 184 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_DST_HI(p)	(CCP5_CMD_DW5(p))
p                 185 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_DST_MEM(p)	((p)->dw5.fields.dst_mem)
p                 186 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_FIX_DST(p)	((p)->dw5.fields.fixed)
p                 187 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_SHA_LO(p)	((p)->dw4.sha_len_lo)
p                 188 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_SHA_HI(p)	((p)->dw5.sha_len_hi)
p                 191 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_DW6(p)		((p)->key_lo)
p                 192 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_KEY_LO(p)	(CCP5_CMD_DW6(p))
p                 193 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_DW7(p)		((p)->dw7)
p                 194 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_KEY_HI(p)	((p)->dw7.key_hi)
p                 195 drivers/crypto/ccp/ccp-dev-v5.c #define CCP5_CMD_KEY_MEM(p)	((p)->dw7.key_mem)
p                 206 drivers/crypto/ccp/ccp-ops.c 	u8 *p, *q;
p                 213 drivers/crypto/ccp/ccp-ops.c 	p = wa->address + wa_offset;
p                 214 drivers/crypto/ccp/ccp-ops.c 	q = p + len - 1;
p                 215 drivers/crypto/ccp/ccp-ops.c 	while (p < q) {
p                 216 drivers/crypto/ccp/ccp-ops.c 		*p = *p ^ *q;
p                 217 drivers/crypto/ccp/ccp-ops.c 		*q = *p ^ *q;
p                 218 drivers/crypto/ccp/ccp-ops.c 		*p = *p ^ *q;
p                 219 drivers/crypto/ccp/ccp-ops.c 		p++;
p                 231 drivers/crypto/ccp/ccp-ops.c 	u8 *p, *q;
p                 233 drivers/crypto/ccp/ccp-ops.c 	p = wa->address + wa_offset;
p                 234 drivers/crypto/ccp/ccp-ops.c 	q = p + len - 1;
p                 235 drivers/crypto/ccp/ccp-ops.c 	while (p < q) {
p                 236 drivers/crypto/ccp/ccp-ops.c 		*p = *p ^ *q;
p                 237 drivers/crypto/ccp/ccp-ops.c 		*q = *p ^ *q;
p                 238 drivers/crypto/ccp/ccp-ops.c 		*p = *p ^ *q;
p                 239 drivers/crypto/ccp/ccp-ops.c 		p++;
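The ccp-ops.c entries above reverse a byte buffer in place by XOR-swapping from both ends toward the middle (the matching decrement of q naturally does not show up in an index of p). A small standalone version of the same idiom, with illustrative names:

    #include <stdio.h>
    #include <string.h>

    /* Reverse len bytes in place with the XOR-swap trick; no temporary byte. */
    static void reverse_bytes(unsigned char *buf, size_t len)
    {
        unsigned char *p = buf;
        unsigned char *q;

        if (len < 2)
            return;
        q = buf + len - 1;

        while (p < q) {
            *p = *p ^ *q;
            *q = *p ^ *q;
            *p = *p ^ *q;
            p++;
            q--;
        }
    }

    int main(void)
    {
        unsigned char b[] = "abcdef";

        reverse_bytes(b, strlen((char *)b));
        printf("%s\n", b);      /* prints fedcba */
        return 0;
    }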
p                 488 drivers/crypto/ccp/psp-dev.c 	struct page *p;
p                 506 drivers/crypto/ccp/psp-dev.c 	p = alloc_pages(GFP_KERNEL, order);
p                 507 drivers/crypto/ccp/psp-dev.c 	if (!p) {
p                 516 drivers/crypto/ccp/psp-dev.c 	data = page_address(p);
p                 517 drivers/crypto/ccp/psp-dev.c 	memcpy(page_address(p) + data_size, firmware->data, firmware->size);
p                 519 drivers/crypto/ccp/psp-dev.c 	data->address = __psp_pa(page_address(p) + data_size);
p                 528 drivers/crypto/ccp/psp-dev.c 	__free_pages(p, order);
p                  74 drivers/crypto/ccree/cc_sram_mgr.c 	cc_sram_addr_t p;
p                  87 drivers/crypto/ccree/cc_sram_mgr.c 	p = smgr_ctx->sram_free_offset;
p                  89 drivers/crypto/ccree/cc_sram_mgr.c 	dev_dbg(dev, "Allocated %u B @ %u\n", size, (unsigned int)p);
p                  90 drivers/crypto/ccree/cc_sram_mgr.c 	return p;
p                 504 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct listen_info *p = kmalloc(sizeof(*p), GFP_KERNEL);
p                 506 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (p) {
p                 509 drivers/crypto/chelsio/chtls/chtls_cm.c 		p->sk = sk;
p                 510 drivers/crypto/chelsio/chtls/chtls_cm.c 		p->stid = stid;
p                 512 drivers/crypto/chelsio/chtls/chtls_cm.c 		p->next = cdev->listen_hash_tab[key];
p                 513 drivers/crypto/chelsio/chtls/chtls_cm.c 		cdev->listen_hash_tab[key] = p;
p                 516 drivers/crypto/chelsio/chtls/chtls_cm.c 	return p;
p                 522 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct listen_info *p;
p                 529 drivers/crypto/chelsio/chtls/chtls_cm.c 	for (p = cdev->listen_hash_tab[key]; p; p = p->next)
p                 530 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (p->sk == sk) {
p                 531 drivers/crypto/chelsio/chtls/chtls_cm.c 			stid = p->stid;
p                 541 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct listen_info *p, **prev;
p                 549 drivers/crypto/chelsio/chtls/chtls_cm.c 	for (p = *prev; p; prev = &p->next, p = p->next)
p                 550 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (p->sk == sk) {
p                 551 drivers/crypto/chelsio/chtls/chtls_cm.c 			stid = p->stid;
p                 552 drivers/crypto/chelsio/chtls/chtls_cm.c 			*prev = p->next;
p                 553 drivers/crypto/chelsio/chtls/chtls_cm.c 			kfree(p);
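The chtls_cm.c entries implement a small chained hash table for listening sockets; note how the removal path walks a pointer-to-pointer (prev), so unlinking the head of a bucket and unlinking an interior node are the same single assignment. A standalone sketch of that unlink idiom (the struct, hash function and return values are illustrative, not the driver's):

    #include <stdlib.h>

    #define HASH_SIZE 64

    struct item {
        struct item *next;
        int key;
    };

    static struct item *hash_tab[HASH_SIZE];

    static unsigned int hashfn(int key)
    {
        return (unsigned int)key % HASH_SIZE;
    }

    /* Insert at the head of the bucket's chain. */
    static struct item *hash_add(int key)
    {
        struct item *p = malloc(sizeof(*p));

        if (p) {
            unsigned int slot = hashfn(key);

            p->key = key;
            p->next = hash_tab[slot];
            hash_tab[slot] = p;
        }
        return p;
    }

    /*
     * Remove by advancing a pointer to the link that points at the current
     * node; rewriting *prev unlinks head and interior entries identically.
     */
    static int hash_del(int key)
    {
        unsigned int slot = hashfn(key);
        struct item **prev = &hash_tab[slot];
        struct item *p;

        for (p = *prev; p; prev = &p->next, p = p->next)
            if (p->key == key) {
                *prev = p->next;
                free(p);
                return 0;
            }
        return -1;      /* not found */
    }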
p                 370 drivers/crypto/hifn_795x.c 	volatile __le32		p;
p                 886 drivers/crypto/hifn_795x.c 		dma->cmdr[i].p = __cpu_to_le32(dptr +
p                 889 drivers/crypto/hifn_795x.c 		dma->resr[i].p = __cpu_to_le32(dptr +
p                 893 drivers/crypto/hifn_795x.c 	dma->cmdr[HIFN_D_CMD_RSIZE].p = __cpu_to_le32(dptr +
p                 895 drivers/crypto/hifn_795x.c 	dma->srcr[HIFN_D_SRC_RSIZE].p = __cpu_to_le32(dptr +
p                 897 drivers/crypto/hifn_795x.c 	dma->dstr[HIFN_D_DST_RSIZE].p = __cpu_to_le32(dptr +
p                 899 drivers/crypto/hifn_795x.c 	dma->resr[HIFN_D_RES_RSIZE].p = __cpu_to_le32(dptr +
p                1241 drivers/crypto/hifn_795x.c 	dma->srcr[idx].p = __cpu_to_le32(addr);
p                1298 drivers/crypto/hifn_795x.c 	dma->dstr[idx].p = __cpu_to_le32(addr);
p                 418 drivers/crypto/inside-secure/safexcel.h #define EIP197_TRC_FREECHAIN_HEAD_PTR(p)	(p)
p                 419 drivers/crypto/inside-secure/safexcel.h #define EIP197_TRC_FREECHAIN_TAIL_PTR(p)	((p) << 16)
p                 422 drivers/crypto/inside-secure/safexcel.h #define EIP197_TRC_PARAMS2_HTABLE_PTR(p)	(p)
p                1376 drivers/crypto/n2_core.c 	struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
p                1380 drivers/crypto/n2_core.c 	if (!p)
p                1383 drivers/crypto/n2_core.c 	alg = &p->alg;
p                1391 drivers/crypto/n2_core.c 	p->enc_type = tmpl->enc_type;
p                1398 drivers/crypto/n2_core.c 	list_add(&p->entry, &cipher_algs);
p                1402 drivers/crypto/n2_core.c 		list_del(&p->entry);
p                1403 drivers/crypto/n2_core.c 		kfree(p);
p                1412 drivers/crypto/n2_core.c 	struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
p                1417 drivers/crypto/n2_core.c 	if (!p)
p                1420 drivers/crypto/n2_core.c 	p->child_alg = n2ahash->alg.halg.base.cra_name;
p                1421 drivers/crypto/n2_core.c 	memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
p                1422 drivers/crypto/n2_core.c 	INIT_LIST_HEAD(&p->derived.entry);
p                1424 drivers/crypto/n2_core.c 	ahash = &p->derived.alg;
p                1429 drivers/crypto/n2_core.c 	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
p                1430 drivers/crypto/n2_core.c 	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
p                1436 drivers/crypto/n2_core.c 	list_add(&p->derived.entry, &hmac_algs);
p                1440 drivers/crypto/n2_core.c 		list_del(&p->derived.entry);
p                1441 drivers/crypto/n2_core.c 		kfree(p);
p                1450 drivers/crypto/n2_core.c 	struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
p                1456 drivers/crypto/n2_core.c 	if (!p)
p                1459 drivers/crypto/n2_core.c 	p->hash_zero = tmpl->hash_zero;
p                1460 drivers/crypto/n2_core.c 	p->hash_init = tmpl->hash_init;
p                1461 drivers/crypto/n2_core.c 	p->auth_type = tmpl->auth_type;
p                1462 drivers/crypto/n2_core.c 	p->hmac_type = tmpl->hmac_type;
p                1463 drivers/crypto/n2_core.c 	p->hw_op_hashsz = tmpl->hw_op_hashsz;
p                1464 drivers/crypto/n2_core.c 	p->digest_size = tmpl->digest_size;
p                1466 drivers/crypto/n2_core.c 	ahash = &p->alg;
p                1490 drivers/crypto/n2_core.c 	list_add(&p->entry, &ahash_algs);
p                1494 drivers/crypto/n2_core.c 		list_del(&p->entry);
p                1495 drivers/crypto/n2_core.c 		kfree(p);
p                1499 drivers/crypto/n2_core.c 	if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
p                1500 drivers/crypto/n2_core.c 		err = __n2_register_one_hmac(p);
p                1579 drivers/crypto/n2_core.c 		       const char *irq_name, struct spu_queue *p,
p                1585 drivers/crypto/n2_core.c 	herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
p                1589 drivers/crypto/n2_core.c 	index = find_devino_index(dev, ip, p->devino);
p                1593 drivers/crypto/n2_core.c 	p->irq = dev->archdata.irqs[index];
p                1595 drivers/crypto/n2_core.c 	sprintf(p->irq_name, "%s-%d", irq_name, index);
p                1597 drivers/crypto/n2_core.c 	return request_irq(p->irq, handler, 0, p->irq_name, p);
p                1607 drivers/crypto/n2_core.c static void free_queue(void *p, unsigned long q_type)
p                1609 drivers/crypto/n2_core.c 	kmem_cache_free(queue_cache[q_type - 1], p);
p                1648 drivers/crypto/n2_core.c 	struct spu_queue *p = qr->queue;
p                1652 drivers/crypto/n2_core.c 	hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
p                1653 drivers/crypto/n2_core.c 				 CWQ_NUM_ENTRIES, &p->qhandle);
p                1655 drivers/crypto/n2_core.c 		sun4v_ncs_sethead_marker(p->qhandle, 0);
p                1660 drivers/crypto/n2_core.c static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
p                1662 drivers/crypto/n2_core.c 	int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
p                1663 drivers/crypto/n2_core.c 	struct spu_qreg qr = { .queue = p, .type = q_type };
p                1668 drivers/crypto/n2_core.c static int spu_queue_setup(struct spu_queue *p)
p                1672 drivers/crypto/n2_core.c 	p->q = new_queue(p->q_type);
p                1673 drivers/crypto/n2_core.c 	if (!p->q)
p                1676 drivers/crypto/n2_core.c 	err = spu_queue_register(p, p->q_type);
p                1678 drivers/crypto/n2_core.c 		free_queue(p->q, p->q_type);
p                1679 drivers/crypto/n2_core.c 		p->q = NULL;
p                1685 drivers/crypto/n2_core.c static void spu_queue_destroy(struct spu_queue *p)
p                1689 drivers/crypto/n2_core.c 	if (!p->q)
p                1692 drivers/crypto/n2_core.c 	hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
p                1695 drivers/crypto/n2_core.c 		free_queue(p->q, p->q_type);
p                1700 drivers/crypto/n2_core.c 	struct spu_queue *p, *n;
p                1702 drivers/crypto/n2_core.c 	list_for_each_entry_safe(p, n, list, list) {
p                1706 drivers/crypto/n2_core.c 			if (cpu_to_cwq[i] == p)
p                1710 drivers/crypto/n2_core.c 		if (p->irq) {
p                1711 drivers/crypto/n2_core.c 			free_irq(p->irq, p);
p                1712 drivers/crypto/n2_core.c 			p->irq = 0;
p                1714 drivers/crypto/n2_core.c 		spu_queue_destroy(p);
p                1715 drivers/crypto/n2_core.c 		list_del(&p->list);
p                1716 drivers/crypto/n2_core.c 		kfree(p);
p                1725 drivers/crypto/n2_core.c 			       u64 node, struct spu_queue *p,
p                1743 drivers/crypto/n2_core.c 		cpumask_set_cpu(*id, &p->sharing);
p                1744 drivers/crypto/n2_core.c 		table[*id] = p;
p                1755 drivers/crypto/n2_core.c 	struct spu_queue *p;
p                1758 drivers/crypto/n2_core.c 	p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
p                1759 drivers/crypto/n2_core.c 	if (!p) {
p                1765 drivers/crypto/n2_core.c 	cpumask_clear(&p->sharing);
p                1766 drivers/crypto/n2_core.c 	spin_lock_init(&p->lock);
p                1767 drivers/crypto/n2_core.c 	p->q_type = q_type;
p                1768 drivers/crypto/n2_core.c 	INIT_LIST_HEAD(&p->jobs);
p                1769 drivers/crypto/n2_core.c 	list_add(&p->list, list);
p                1771 drivers/crypto/n2_core.c 	err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
p                1775 drivers/crypto/n2_core.c 	err = spu_queue_setup(p);
p                1779 drivers/crypto/n2_core.c 	return spu_map_ino(dev, ip, iname, p, handler);
p                 855 drivers/crypto/nx/nx-842-pseries.c 	int p = 0;							\
p                 859 drivers/crypto/nx/nx-842-pseries.c 		p = snprintf(buf, PAGE_SIZE, "%lld\n",			\
p                 862 drivers/crypto/nx/nx-842-pseries.c 	return p;							\
p                 888 drivers/crypto/nx/nx-842-pseries.c 	char *p = buf;
p                 912 drivers/crypto/nx/nx-842-pseries.c 		bytes = snprintf(p, bytes_remain, "%u-%uus:\t%lld\n",
p                 916 drivers/crypto/nx/nx-842-pseries.c 		p += bytes;
p                 920 drivers/crypto/nx/nx-842-pseries.c 	bytes = snprintf(p, bytes_remain, "%uus - :\t%lld\n",
p                 923 drivers/crypto/nx/nx-842-pseries.c 	p += bytes;
p                 926 drivers/crypto/nx/nx-842-pseries.c 	return p - buf;
p                  87 drivers/crypto/nx/nx-842.c static int update_param(struct nx842_crypto_param *p,
p                  90 drivers/crypto/nx/nx-842.c 	if (p->iremain < slen)
p                  92 drivers/crypto/nx/nx-842.c 	if (p->oremain < dlen)
p                  95 drivers/crypto/nx/nx-842.c 	p->in += slen;
p                  96 drivers/crypto/nx/nx-842.c 	p->iremain -= slen;
p                  97 drivers/crypto/nx/nx-842.c 	p->out += dlen;
p                  98 drivers/crypto/nx/nx-842.c 	p->oremain -= dlen;
p                  99 drivers/crypto/nx/nx-842.c 	p->ototal += dlen;
p                 159 drivers/crypto/nx/nx-842.c 		    struct nx842_crypto_param *p,
p                 165 drivers/crypto/nx/nx-842.c 	unsigned int slen = p->iremain, dlen = p->oremain, tmplen;
p                 167 drivers/crypto/nx/nx-842.c 	u8 *src = p->in, *dst = p->out;
p                 171 drivers/crypto/nx/nx-842.c 	if (p->iremain == 0)
p                 174 drivers/crypto/nx/nx-842.c 	if (p->oremain == 0 || hdrsize + c->minimum > dlen)
p                 207 drivers/crypto/nx/nx-842.c 		dlen = min(p->oremain, BOUNCE_BUFFER_SIZE);
p                 232 drivers/crypto/nx/nx-842.c 		memcpy(p->out + dskip, dst, dlen);
p                 238 drivers/crypto/nx/nx-842.c 	if (p->iremain < slen) {
p                 239 drivers/crypto/nx/nx-842.c 		*ignore = slen - p->iremain;
p                 240 drivers/crypto/nx/nx-842.c 		slen = p->iremain;
p                 246 drivers/crypto/nx/nx-842.c 	return update_param(p, slen, dskip + dlen);
p                 255 drivers/crypto/nx/nx-842.c 	struct nx842_crypto_param p;
p                 264 drivers/crypto/nx/nx-842.c 	p.in = (u8 *)src;
p                 265 drivers/crypto/nx/nx-842.c 	p.iremain = slen;
p                 266 drivers/crypto/nx/nx-842.c 	p.out = dst;
p                 267 drivers/crypto/nx/nx-842.c 	p.oremain = *dlen;
p                 268 drivers/crypto/nx/nx-842.c 	p.ototal = 0;
p                 273 drivers/crypto/nx/nx-842.c 		       DIV_ROUND_UP(p.iremain, c.maximum));
p                 279 drivers/crypto/nx/nx-842.c 	add_header = (p.iremain % c.multiple	||
p                 280 drivers/crypto/nx/nx-842.c 		      p.iremain < c.minimum	||
p                 281 drivers/crypto/nx/nx-842.c 		      p.iremain > c.maximum	||
p                 282 drivers/crypto/nx/nx-842.c 		      (u64)p.in % c.alignment	||
p                 283 drivers/crypto/nx/nx-842.c 		      p.oremain % c.multiple	||
p                 284 drivers/crypto/nx/nx-842.c 		      p.oremain < c.minimum	||
p                 285 drivers/crypto/nx/nx-842.c 		      p.oremain > c.maximum	||
p                 286 drivers/crypto/nx/nx-842.c 		      (u64)p.out % c.alignment);
p                 292 drivers/crypto/nx/nx-842.c 	while (p.iremain > 0) {
p                 304 drivers/crypto/nx/nx-842.c 		ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
p                 325 drivers/crypto/nx/nx-842.c 	*dlen = p.ototal;
p                 336 drivers/crypto/nx/nx-842.c 		      struct nx842_crypto_param *p,
p                 343 drivers/crypto/nx/nx-842.c 	unsigned int dlen = p->oremain, tmplen;
p                 345 drivers/crypto/nx/nx-842.c 	u8 *src = p->in, *dst = p->out;
p                 353 drivers/crypto/nx/nx-842.c 	if (p->iremain <= 0 || padding + slen > p->iremain)
p                 356 drivers/crypto/nx/nx-842.c 	if (p->oremain <= 0 || required_len - ignore > p->oremain)
p                 402 drivers/crypto/nx/nx-842.c 		src = p->in + padding;
p                 405 drivers/crypto/nx/nx-842.c 		dst = p->out;
p                 406 drivers/crypto/nx/nx-842.c 		dlen = p->oremain;
p                 424 drivers/crypto/nx/nx-842.c 		memcpy(p->out, dst, dlen);
p                 429 drivers/crypto/nx/nx-842.c 	return update_param(p, slen + padding, dlen);
p                 438 drivers/crypto/nx/nx-842.c 	struct nx842_crypto_param p;
p                 445 drivers/crypto/nx/nx-842.c 	p.in = (u8 *)src;
p                 446 drivers/crypto/nx/nx-842.c 	p.iremain = slen;
p                 447 drivers/crypto/nx/nx-842.c 	p.out = dst;
p                 448 drivers/crypto/nx/nx-842.c 	p.oremain = *dlen;
p                 449 drivers/crypto/nx/nx-842.c 	p.ototal = 0;
p                 463 drivers/crypto/nx/nx-842.c 			.compressed_length =	cpu_to_be32(p.iremain),
p                 464 drivers/crypto/nx/nx-842.c 			.uncompressed_length =	cpu_to_be32(p.oremain),
p                 467 drivers/crypto/nx/nx-842.c 		ret = decompress(ctx, &p, &g, &c, 0);
p                 500 drivers/crypto/nx/nx-842.c 		ret = decompress(ctx, &p, &hdr->group[n], &c, ignore);
p                 506 drivers/crypto/nx/nx-842.c 	*dlen = p.ototal;
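The nx-842.c entries drive compression in chunks through a cursor structure: the input/output pointers and remaining counts advance only after checking that the processed chunk actually fits, and the total output length is accumulated alongside. A minimal standalone sketch of that bounds-checked cursor (field names follow the listed lines; the error returns are illustrative):

    struct crypto_param {
        unsigned char *in;
        unsigned char *out;
        unsigned int iremain;   /* input bytes left */
        unsigned int oremain;   /* output space left */
        unsigned int ototal;    /* output bytes produced so far */
    };

    /* Advance both cursors by one processed chunk, refusing to overrun. */
    static int update_param(struct crypto_param *p,
                            unsigned int slen, unsigned int dlen)
    {
        if (p->iremain < slen)
            return -1;          /* would read past the input */
        if (p->oremain < dlen)
            return -1;          /* would overflow the output */

        p->in += slen;
        p->iremain -= slen;
        p->out += dlen;
        p->oremain -= dlen;
        p->ototal += dlen;
        return 0;
    }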
p                 327 drivers/crypto/nx/nx.c 			       struct property *p,
p                 330 drivers/crypto/nx/nx.c 	if (!strncmp(p->value, "okay", p->length)) {
p                 335 drivers/crypto/nx/nx.c 			 (char *)p->value);
p                 340 drivers/crypto/nx/nx.c 			       struct property *p,
p                 343 drivers/crypto/nx/nx.c 	if (p->length != sizeof(props->max_sg_len)) {
p                 348 drivers/crypto/nx/nx.c 			p->length, sizeof(props->max_sg_len));
p                 352 drivers/crypto/nx/nx.c 	props->max_sg_len = *(u32 *)p->value;
p                 357 drivers/crypto/nx/nx.c 			     struct property *p,
p                 364 drivers/crypto/nx/nx.c 	msc = (struct max_sync_cop *)p->value;
p                 365 drivers/crypto/nx/nx.c 	lenp = p->length;
p                 465 drivers/crypto/nx/nx.c 	struct property *p;
p                 467 drivers/crypto/nx/nx.c 	p = of_find_property(base_node, "status", NULL);
p                 468 drivers/crypto/nx/nx.c 	if (!p)
p                 471 drivers/crypto/nx/nx.c 		nx_of_update_status(dev, p, props);
p                 473 drivers/crypto/nx/nx.c 	p = of_find_property(base_node, "ibm,max-sg-len", NULL);
p                 474 drivers/crypto/nx/nx.c 	if (!p)
p                 478 drivers/crypto/nx/nx.c 		nx_of_update_sglen(dev, p, props);
p                 480 drivers/crypto/nx/nx.c 	p = of_find_property(base_node, "ibm,max-sync-cop", NULL);
p                 481 drivers/crypto/nx/nx.c 	if (!p)
p                 485 drivers/crypto/nx/nx.c 		nx_of_update_msc(dev, p, props);
p                  69 drivers/crypto/picoxcell_crypto.c 	dma_addr_t	p;
p                 264 drivers/crypto/picoxcell_crypto.c 	ddt->p = phys;
p                  81 drivers/crypto/qat/qat_common/qat_asym_algs.c 			dma_addr_t p;
p                 107 drivers/crypto/qat/qat_common/qat_asym_algs.c 	char *p;
p                 130 drivers/crypto/qat/qat_common/qat_asym_algs.c 			dma_addr_t p;
p                 134 drivers/crypto/qat/qat_common/qat_asym_algs.c 			dma_addr_t p;
p                 150 drivers/crypto/qat/qat_common/qat_asym_algs.c 	char *p;
p                 298 drivers/crypto/qat/qat_common/qat_asym_algs.c 		qat_req->in.dh.in.p = ctx->dma_p;
p                 303 drivers/crypto/qat/qat_common/qat_asym_algs.c 			qat_req->in.dh.in_g2.p = ctx->dma_p;
p                 308 drivers/crypto/qat/qat_common/qat_asym_algs.c 			qat_req->in.dh.in.p = ctx->dma_p;
p                 450 drivers/crypto/qat/qat_common/qat_asym_algs.c 	ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
p                 451 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (!ctx->p)
p                 453 drivers/crypto/qat/qat_common/qat_asym_algs.c 	memcpy(ctx->p, params->p, ctx->p_size);
p                 480 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (ctx->p) {
p                 481 drivers/crypto/qat/qat_common/qat_asym_algs.c 		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
p                 482 drivers/crypto/qat/qat_common/qat_asym_algs.c 		ctx->p = NULL;
p                 856 drivers/crypto/qat/qat_common/qat_asym_algs.c 		qat_req->in.rsa.dec_crt.p = ctx->dma_p;
p                1075 drivers/crypto/qat/qat_common/qat_asym_algs.c 	ptr = rsa_key->p;
p                1080 drivers/crypto/qat/qat_common/qat_asym_algs.c 	ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
p                1081 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (!ctx->p)
p                1083 drivers/crypto/qat/qat_common/qat_asym_algs.c 	memcpy(ctx->p + (half_key_sz - len), ptr, len);
p                1148 drivers/crypto/qat/qat_common/qat_asym_algs.c 	memset(ctx->p, '\0', half_key_sz);
p                1149 drivers/crypto/qat/qat_common/qat_asym_algs.c 	dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
p                1150 drivers/crypto/qat/qat_common/qat_asym_algs.c 	ctx->p = NULL;
p                1168 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (ctx->p) {
p                1169 drivers/crypto/qat/qat_common/qat_asym_algs.c 		memset(ctx->p, '\0', half_key_sz);
p                1170 drivers/crypto/qat/qat_common/qat_asym_algs.c 		dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
p                1192 drivers/crypto/qat/qat_common/qat_asym_algs.c 	ctx->p = NULL;
p                 139 drivers/crypto/sahara.c 	u32	p;
p                 434 drivers/crypto/sahara.c 		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
p                 504 drivers/crypto/sahara.c 		dev->hw_link[i]->p = sg->dma_address;
p                 518 drivers/crypto/sahara.c 		dev->hw_link[j]->p = sg->dma_address;
p                 822 drivers/crypto/sahara.c 		dev->hw_link[i]->p = sg->dma_address;
p                 868 drivers/crypto/sahara.c 	dev->hw_link[i]->p = dev->context_phys_base;
p                 900 drivers/crypto/sahara.c 	dev->hw_link[index]->p = dev->context_phys_base;
p                 249 drivers/dma-buf/sw_sync.c 		struct rb_node **p = &obj->pt_tree.rb_node;
p                 252 drivers/dma-buf/sw_sync.c 		while (*p) {
p                 256 drivers/dma-buf/sw_sync.c 			parent = *p;
p                 260 drivers/dma-buf/sw_sync.c 				p = &parent->rb_right;
p                 262 drivers/dma-buf/sw_sync.c 				p = &parent->rb_left;
p                 270 drivers/dma-buf/sw_sync.c 				p = &parent->rb_left;
p                 273 drivers/dma-buf/sw_sync.c 		rb_link_node(&pt->node, parent, p);
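The sw_sync.c entries show the usual rbtree insertion walk: p always holds the address of the child link being followed, so once the descent reaches an empty slot the new node is hooked in by writing through p (what rb_link_node() does) before rebalancing. A plain binary-search-tree version of the same pointer-to-link descent, without the red-black rebalancing and with illustrative names:

    #include <stdlib.h>

    struct node {
        struct node *left, *right;
        unsigned int value;
    };

    /*
     * Descend while keeping the address of the child link we followed;
     * when *link is NULL, that is exactly where the new node attaches.
     * Returns NULL if the value is already present or on allocation failure.
     */
    static struct node *tree_insert(struct node **root, unsigned int value)
    {
        struct node **link = root;
        struct node *parent = NULL;
        struct node *n;

        while (*link) {
            parent = *link;
            if (value > parent->value)
                link = &parent->right;
            else if (value < parent->value)
                link = &parent->left;
            else
                return NULL;    /* duplicate key */
        }

        n = calloc(1, sizeof(*n));
        if (n) {
            n->value = value;
            *link = n;          /* the rbtree API also records the parent here */
        }
        return n;
    }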
p                 906 drivers/dma/amba-pl08x.c 	struct pl08x_dma_chan *p, *next;
p                 917 drivers/dma/amba-pl08x.c 	list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
p                 918 drivers/dma/amba-pl08x.c 		if (p->state == PL08X_CHAN_WAITING &&
p                 919 drivers/dma/amba-pl08x.c 		    p->waiting_at <= waiting_at) {
p                 920 drivers/dma/amba-pl08x.c 			next = p;
p                 921 drivers/dma/amba-pl08x.c 			waiting_at = p->waiting_at;
p                 925 drivers/dma/amba-pl08x.c 		list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
p                 926 drivers/dma/amba-pl08x.c 			if (p->state == PL08X_CHAN_WAITING &&
p                 927 drivers/dma/amba-pl08x.c 			    p->waiting_at <= waiting_at) {
p                 928 drivers/dma/amba-pl08x.c 				next = p;
p                 929 drivers/dma/amba-pl08x.c 				waiting_at = p->waiting_at;
p                1803 drivers/dma/coh901318.c 	const struct coh901318_params *p;
p                1808 drivers/dma/coh901318.c 		p = param;
p                1810 drivers/dma/coh901318.c 		p = cohc_chan_param(cohc);
p                1823 drivers/dma/coh901318.c 	coh901318_set_conf(cohc, p->config);
p                1824 drivers/dma/coh901318.c 	coh901318_set_ctrl(cohc, p->ctrl_lli_last);
p                1149 drivers/dma/dmaengine.c 	void *p;
p                1152 drivers/dma/dmaengine.c 	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
p                1153 drivers/dma/dmaengine.c 	if (!p)
p                1158 drivers/dma/dmaengine.c 		*(struct dma_device **)p = device;
p                1159 drivers/dma/dmaengine.c 		devres_add(device->dev, p);
p                1161 drivers/dma/dmaengine.c 		devres_free(p);
p                1243 drivers/dma/dmaengine.c 		struct dmaengine_unmap_pool *p = &unmap_pool[i];
p                1245 drivers/dma/dmaengine.c 		mempool_destroy(p->pool);
p                1246 drivers/dma/dmaengine.c 		p->pool = NULL;
p                1247 drivers/dma/dmaengine.c 		kmem_cache_destroy(p->cache);
p                1248 drivers/dma/dmaengine.c 		p->cache = NULL;
p                1257 drivers/dma/dmaengine.c 		struct dmaengine_unmap_pool *p = &unmap_pool[i];
p                1261 drivers/dma/dmaengine.c 		       sizeof(dma_addr_t) * p->size;
p                1263 drivers/dma/dmaengine.c 		p->cache = kmem_cache_create(p->name, size, 0,
p                1265 drivers/dma/dmaengine.c 		if (!p->cache)
p                1267 drivers/dma/dmaengine.c 		p->pool = mempool_create_slab_pool(1, p->cache);
p                1268 drivers/dma/dmaengine.c 		if (!p->pool)
p                 413 drivers/dma/fsl_raid.c 	u8 *p;
p                 475 drivers/dma/fsl_raid.c 	p = pq->gfm_q1;
p                 478 drivers/dma/fsl_raid.c 		p[i] = 1;
p                 484 drivers/dma/fsl_raid.c 	p += gfmq_len;
p                 486 drivers/dma/fsl_raid.c 		p[i] = scf[i];
p                 503 drivers/dma/fsl_raid.c 			p[save_src_cnt] = 0;
p                 504 drivers/dma/fsl_raid.c 			p[save_src_cnt + 1] = 0;
p                 505 drivers/dma/fsl_raid.c 			p[save_src_cnt + 2] = 1;
p                 195 drivers/dma/fsldma.h #define fsl_ioread32(p)		in_le32(p)
p                 196 drivers/dma/fsldma.h #define fsl_ioread32be(p)	in_be32(p)
p                 197 drivers/dma/fsldma.h #define fsl_iowrite32(v, p)	out_le32(p, v)
p                 198 drivers/dma/fsldma.h #define fsl_iowrite32be(v, p)	out_be32(p, v)
p                 201 drivers/dma/fsldma.h #define fsl_ioread64(p)		in_le64(p)
p                 202 drivers/dma/fsldma.h #define fsl_ioread64be(p)	in_be64(p)
p                 203 drivers/dma/fsldma.h #define fsl_iowrite64(v, p)	out_le64(p, v)
p                 204 drivers/dma/fsldma.h #define fsl_iowrite64be(v, p)	out_be64(p, v)
p                 237 drivers/dma/fsldma.h #define fsl_ioread32(p)		ioread32(p)
p                 238 drivers/dma/fsldma.h #define fsl_ioread32be(p)	ioread32be(p)
p                 239 drivers/dma/fsldma.h #define fsl_iowrite32(v, p)	iowrite32(v, p)
p                 240 drivers/dma/fsldma.h #define fsl_iowrite32be(v, p)	iowrite32be(v, p)
p                 241 drivers/dma/fsldma.h #define fsl_ioread64(p)		ioread64(p)
p                 242 drivers/dma/fsldma.h #define fsl_ioread64be(p)	ioread64be(p)
p                 243 drivers/dma/fsldma.h #define fsl_iowrite64(v, p)	iowrite64(v, p)
p                 244 drivers/dma/fsldma.h #define fsl_iowrite64be(v, p)	iowrite64be(v, p)
p                 213 drivers/dma/k3dma.c 	struct k3_dma_phy *p;
p                 228 drivers/dma/k3dma.c 			p = &d->phy[i];
p                 229 drivers/dma/k3dma.c 			c = p->vchan;
p                 232 drivers/dma/k3dma.c 				if (p->ds_run != NULL) {
p                 233 drivers/dma/k3dma.c 					vchan_cookie_complete(&p->ds_run->vd);
p                 234 drivers/dma/k3dma.c 					p->ds_done = p->ds_run;
p                 235 drivers/dma/k3dma.c 					p->ds_run = NULL;
p                 241 drivers/dma/k3dma.c 				if (p->ds_run != NULL)
p                 242 drivers/dma/k3dma.c 					vchan_cyclic_callback(&p->ds_run->vd);
p                 303 drivers/dma/k3dma.c 	struct k3_dma_phy *p;
p                 310 drivers/dma/k3dma.c 		p = c->phy;
p                 311 drivers/dma/k3dma.c 		if (p && p->ds_done) {
p                 314 drivers/dma/k3dma.c 				dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
p                 317 drivers/dma/k3dma.c 				p->vchan = NULL;
p                 329 drivers/dma/k3dma.c 		p = &d->phy[pch];
p                 331 drivers/dma/k3dma.c 		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
p                 338 drivers/dma/k3dma.c 			p->vchan = c;
p                 339 drivers/dma/k3dma.c 			c->phy = p;
p                 350 drivers/dma/k3dma.c 			p = &d->phy[pch];
p                 351 drivers/dma/k3dma.c 			c = p->vchan;
p                 380 drivers/dma/k3dma.c 	struct k3_dma_phy *p;
p                 391 drivers/dma/k3dma.c 	p = c->phy;
p                 401 drivers/dma/k3dma.c 	} else if ((!p) || (!p->ds_run)) {
p                 404 drivers/dma/k3dma.c 		struct k3_dma_desc_sw *ds = p->ds_run;
p                 407 drivers/dma/k3dma.c 		bytes = k3_dma_get_curr_cnt(d, p);
p                 408 drivers/dma/k3dma.c 		clli = k3_dma_get_curr_lli(p);
p                 725 drivers/dma/k3dma.c 	struct k3_dma_phy *p = c->phy;
p                 739 drivers/dma/k3dma.c 	if (p) {
p                 741 drivers/dma/k3dma.c 		k3_dma_terminate_chan(p, d);
p                 743 drivers/dma/k3dma.c 		p->vchan = NULL;
p                 744 drivers/dma/k3dma.c 		if (p->ds_run) {
p                 745 drivers/dma/k3dma.c 			vchan_terminate_vdesc(&p->ds_run->vd);
p                 746 drivers/dma/k3dma.c 			p->ds_run = NULL;
p                 748 drivers/dma/k3dma.c 		p->ds_done = NULL;
p                 767 drivers/dma/k3dma.c 	struct k3_dma_phy *p = c->phy;
p                 772 drivers/dma/k3dma.c 		if (p) {
p                 773 drivers/dma/k3dma.c 			k3_dma_pause_dma(p, false);
p                 788 drivers/dma/k3dma.c 	struct k3_dma_phy *p = c->phy;
p                 795 drivers/dma/k3dma.c 		if (p) {
p                 796 drivers/dma/k3dma.c 			k3_dma_pause_dma(p, true);
p                 907 drivers/dma/k3dma.c 		struct k3_dma_phy *p;
p                 912 drivers/dma/k3dma.c 		p = &d->phy[i];
p                 913 drivers/dma/k3dma.c 		p->idx = i;
p                 914 drivers/dma/k3dma.c 		p->base = d->base + i * 0x40;
p                 234 drivers/dma/ppc4xx/adma.c 	struct xor_cb *p;
p                 238 drivers/dma/ppc4xx/adma.c 		p = desc->hw_desc;
p                 241 drivers/dma/ppc4xx/adma.c 		p->cbc = XOR_CBCR_CBCE_BIT;
p                 903 drivers/dma/ppc4xx/adma.c 	u8 *p = chan->device->dma_desc_pool_virt;
p                 914 drivers/dma/ppc4xx/adma.c 			cdb = (struct dma_cdb *)&p[i -
p                 188 drivers/dma/pxa_dma.c static int requester_chan_show(struct seq_file *s, void *p)
p                 219 drivers/dma/pxa_dma.c static int descriptors_show(struct seq_file *s, void *p)
p                 262 drivers/dma/pxa_dma.c static int chan_state_show(struct seq_file *s, void *p)
p                 305 drivers/dma/pxa_dma.c static int state_show(struct seq_file *s, void *p)
p                1457 drivers/dma/pxa_dma.c 	struct pxad_param *p = param;
p                1462 drivers/dma/pxa_dma.c 	c->drcmr = p->drcmr;
p                1463 drivers/dma/pxa_dma.c 	c->prio = p->prio;
p                 584 drivers/dma/s3c24xx-dma.c 	struct s3c24xx_dma_chan *p, *next;
p                 590 drivers/dma/s3c24xx-dma.c 	list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node)
p                 591 drivers/dma/s3c24xx-dma.c 		if (p->state == S3C24XX_DMA_CHAN_WAITING) {
p                 592 drivers/dma/s3c24xx-dma.c 			next = p;
p                 597 drivers/dma/s3c24xx-dma.c 		list_for_each_entry(p, &s3cdma->slave.channels,
p                 599 drivers/dma/s3c24xx-dma.c 			if (p->state == S3C24XX_DMA_CHAN_WAITING &&
p                 600 drivers/dma/s3c24xx-dma.c 				      s3c24xx_dma_phy_valid(p, s3cchan->phy)) {
p                 601 drivers/dma/s3c24xx-dma.c 				next = p;
p                 148 drivers/dma/sa11x0-dma.c static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
p                 151 drivers/dma/sa11x0-dma.c 	p->txd_load = txd;
p                 152 drivers/dma/sa11x0-dma.c 	p->sg_load = 0;
p                 154 drivers/dma/sa11x0-dma.c 	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
p                 155 drivers/dma/sa11x0-dma.c 		p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
p                 158 drivers/dma/sa11x0-dma.c static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
p                 161 drivers/dma/sa11x0-dma.c 	struct sa11x0_dma_desc *txd = p->txd_load;
p                 163 drivers/dma/sa11x0-dma.c 	void __iomem *base = p->base;
p                 176 drivers/dma/sa11x0-dma.c 	if (p->sg_load == txd->sglen) {
p                 187 drivers/dma/sa11x0-dma.c 				sa11x0_dma_start_desc(p, txn);
p                 189 drivers/dma/sa11x0-dma.c 				p->txd_load = NULL;
p                 194 drivers/dma/sa11x0-dma.c 			p->sg_load = 0;
p                 198 drivers/dma/sa11x0-dma.c 	sg = &txd->sg[p->sg_load++];
p                 216 drivers/dma/sa11x0-dma.c 	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
p                 217 drivers/dma/sa11x0-dma.c 		p->num, dcsr,
p                 222 drivers/dma/sa11x0-dma.c static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
p                 225 drivers/dma/sa11x0-dma.c 	struct sa11x0_dma_desc *txd = p->txd_done;
p                 227 drivers/dma/sa11x0-dma.c 	if (++p->sg_done == txd->sglen) {
p                 231 drivers/dma/sa11x0-dma.c 			p->sg_done = 0;
p                 232 drivers/dma/sa11x0-dma.c 			p->txd_done = p->txd_load;
p                 234 drivers/dma/sa11x0-dma.c 			if (!p->txd_done)
p                 235 drivers/dma/sa11x0-dma.c 				tasklet_schedule(&p->dev->task);
p                 237 drivers/dma/sa11x0-dma.c 			if ((p->sg_done % txd->period) == 0)
p                 241 drivers/dma/sa11x0-dma.c 			p->sg_done = 0;
p                 245 drivers/dma/sa11x0-dma.c 	sa11x0_dma_start_sg(p, c);
p                 250 drivers/dma/sa11x0-dma.c 	struct sa11x0_dma_phy *p = dev_id;
p                 251 drivers/dma/sa11x0-dma.c 	struct sa11x0_dma_dev *d = p->dev;
p                 255 drivers/dma/sa11x0-dma.c 	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
p                 261 drivers/dma/sa11x0-dma.c 		p->base + DMA_DCSR_C);
p                 263 drivers/dma/sa11x0-dma.c 	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);
p                 267 drivers/dma/sa11x0-dma.c 			p->num, dcsr,
p                 268 drivers/dma/sa11x0-dma.c 			readl_relaxed(p->base + DMA_DDAR),
p                 269 drivers/dma/sa11x0-dma.c 			readl_relaxed(p->base + DMA_DBSA),
p                 270 drivers/dma/sa11x0-dma.c 			readl_relaxed(p->base + DMA_DBTA),
p                 271 drivers/dma/sa11x0-dma.c 			readl_relaxed(p->base + DMA_DBSB),
p                 272 drivers/dma/sa11x0-dma.c 			readl_relaxed(p->base + DMA_DBTB));
p                 275 drivers/dma/sa11x0-dma.c 	c = p->vchan;
p                 287 drivers/dma/sa11x0-dma.c 		if (c->phy == p) {
p                 289 drivers/dma/sa11x0-dma.c 				sa11x0_dma_complete(p, c);
p                 291 drivers/dma/sa11x0-dma.c 				sa11x0_dma_complete(p, c);
p                 305 drivers/dma/sa11x0-dma.c 		struct sa11x0_dma_phy *p = c->phy;
p                 307 drivers/dma/sa11x0-dma.c 		sa11x0_dma_start_desc(p, txd);
p                 308 drivers/dma/sa11x0-dma.c 		p->txd_done = txd;
p                 309 drivers/dma/sa11x0-dma.c 		p->sg_done = 0;
p                 312 drivers/dma/sa11x0-dma.c 		WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
p                 317 drivers/dma/sa11x0-dma.c 			       p->base + DMA_DCSR_C);
p                 318 drivers/dma/sa11x0-dma.c 		writel_relaxed(txd->ddar, p->base + DMA_DDAR);
p                 321 drivers/dma/sa11x0-dma.c 		sa11x0_dma_start_sg(p, c);
p                 322 drivers/dma/sa11x0-dma.c 		sa11x0_dma_start_sg(p, c);
p                 329 drivers/dma/sa11x0-dma.c 	struct sa11x0_dma_phy *p;
p                 337 drivers/dma/sa11x0-dma.c 		p = c->phy;
p                 338 drivers/dma/sa11x0-dma.c 		if (p && !p->txd_done) {
p                 340 drivers/dma/sa11x0-dma.c 			if (!p->txd_done) {
p                 342 drivers/dma/sa11x0-dma.c 				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);
p                 346 drivers/dma/sa11x0-dma.c 				p->vchan = NULL;
p                 354 drivers/dma/sa11x0-dma.c 		p = &d->phy[pch];
p                 356 drivers/dma/sa11x0-dma.c 		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
p                 364 drivers/dma/sa11x0-dma.c 			p->vchan = c;
p                 373 drivers/dma/sa11x0-dma.c 			p = &d->phy[pch];
p                 374 drivers/dma/sa11x0-dma.c 			c = p->vchan;
p                 377 drivers/dma/sa11x0-dma.c 			c->phy = p;
p                 401 drivers/dma/sa11x0-dma.c static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
p                 406 drivers/dma/sa11x0-dma.c 	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
p                 414 drivers/dma/sa11x0-dma.c 	return readl_relaxed(p->base + reg);
p                 422 drivers/dma/sa11x0-dma.c 	struct sa11x0_dma_phy *p;
p                 435 drivers/dma/sa11x0-dma.c 	p = c->phy;
p                 444 drivers/dma/sa11x0-dma.c 	} else if (!p) {
p                 450 drivers/dma/sa11x0-dma.c 		if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
p                 451 drivers/dma/sa11x0-dma.c 			txd = p->txd_done;
p                 452 drivers/dma/sa11x0-dma.c 		else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
p                 453 drivers/dma/sa11x0-dma.c 			txd = p->txd_load;
p                 459 drivers/dma/sa11x0-dma.c 			dma_addr_t addr = sa11x0_dma_pos(p);
p                 704 drivers/dma/sa11x0-dma.c 	struct sa11x0_dma_phy *p;
p                 712 drivers/dma/sa11x0-dma.c 		p = c->phy;
p                 713 drivers/dma/sa11x0-dma.c 		if (p) {
p                 714 drivers/dma/sa11x0-dma.c 			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
p                 730 drivers/dma/sa11x0-dma.c 	struct sa11x0_dma_phy *p;
p                 738 drivers/dma/sa11x0-dma.c 		p = c->phy;
p                 739 drivers/dma/sa11x0-dma.c 		if (p) {
p                 740 drivers/dma/sa11x0-dma.c 			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
p                 756 drivers/dma/sa11x0-dma.c 	struct sa11x0_dma_phy *p;
p                 765 drivers/dma/sa11x0-dma.c 	p = c->phy;
p                 766 drivers/dma/sa11x0-dma.c 	if (p) {
p                 767 drivers/dma/sa11x0-dma.c 		dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
p                 772 drivers/dma/sa11x0-dma.c 		       p->base + DMA_DCSR_C);
p                 774 drivers/dma/sa11x0-dma.c 		if (p->txd_load) {
p                 775 drivers/dma/sa11x0-dma.c 			if (p->txd_load != p->txd_done)
p                 776 drivers/dma/sa11x0-dma.c 				list_add_tail(&p->txd_load->vd.node, &head);
p                 777 drivers/dma/sa11x0-dma.c 			p->txd_load = NULL;
p                 779 drivers/dma/sa11x0-dma.c 		if (p->txd_done) {
p                 780 drivers/dma/sa11x0-dma.c 			list_add_tail(&p->txd_done->vd.node, &head);
p                 781 drivers/dma/sa11x0-dma.c 			p->txd_done = NULL;
p                 785 drivers/dma/sa11x0-dma.c 		p->vchan = NULL;
p                 830 drivers/dma/sa11x0-dma.c 	const char *p = param;
p                 832 drivers/dma/sa11x0-dma.c 	return !strcmp(c->name, p);
p                 934 drivers/dma/sa11x0-dma.c 		struct sa11x0_dma_phy *p = &d->phy[i];
p                 936 drivers/dma/sa11x0-dma.c 		p->dev = d;
p                 937 drivers/dma/sa11x0-dma.c 		p->num = i;
p                 938 drivers/dma/sa11x0-dma.c 		p->base = d->base + i * DMA_SIZE;
p                 941 drivers/dma/sa11x0-dma.c 			p->base + DMA_DCSR_C);
p                 942 drivers/dma/sa11x0-dma.c 		writel_relaxed(0, p->base + DMA_DDAR);
p                 944 drivers/dma/sa11x0-dma.c 		ret = sa11x0_dma_request_irq(pdev, i, p);
p                1010 drivers/dma/sa11x0-dma.c 		struct sa11x0_dma_phy *p = &d->phy[pch];
p                1013 drivers/dma/sa11x0-dma.c 		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
p                1015 drivers/dma/sa11x0-dma.c 			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
p                1016 drivers/dma/sa11x0-dma.c 			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
p                1021 drivers/dma/sa11x0-dma.c 			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
p                1022 drivers/dma/sa11x0-dma.c 			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
p                1023 drivers/dma/sa11x0-dma.c 			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
p                1024 drivers/dma/sa11x0-dma.c 			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
p                1028 drivers/dma/sa11x0-dma.c 			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
p                1029 drivers/dma/sa11x0-dma.c 			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
p                1030 drivers/dma/sa11x0-dma.c 			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
p                1031 drivers/dma/sa11x0-dma.c 			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
p                1034 drivers/dma/sa11x0-dma.c 		p->dcsr = saved_dcsr;
p                1036 drivers/dma/sa11x0-dma.c 		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
p                1048 drivers/dma/sa11x0-dma.c 		struct sa11x0_dma_phy *p = &d->phy[pch];
p                1050 drivers/dma/sa11x0-dma.c 		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);
p                1054 drivers/dma/sa11x0-dma.c 		if (p->txd_done)
p                1055 drivers/dma/sa11x0-dma.c 			txd = p->txd_done;
p                1056 drivers/dma/sa11x0-dma.c 		else if (p->txd_load)
p                1057 drivers/dma/sa11x0-dma.c 			txd = p->txd_load;
p                1062 drivers/dma/sa11x0-dma.c 		writel_relaxed(txd->ddar, p->base + DMA_DDAR);
p                1064 drivers/dma/sa11x0-dma.c 		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
p                1065 drivers/dma/sa11x0-dma.c 		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
p                1066 drivers/dma/sa11x0-dma.c 		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
p                1067 drivers/dma/sa11x0-dma.c 		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
p                1068 drivers/dma/sa11x0-dma.c 		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
p                 313 drivers/dma/ti/dma-crossbar.c static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p)
p                 316 drivers/dma/ti/dma-crossbar.c 		set_bit(offset + (len - 1), p);
p                1509 drivers/dma/ti/edma.c 	struct edmacc_param p;
p                1517 drivers/dma/ti/edma.c 	err = edma_read_slot(ecc, echan->slot[0], &p);
p                1531 drivers/dma/ti/edma.c 	if (err || (p.a_b_cnt == 0 && p.ccnt == 0)) {
p                1036 drivers/dma/xilinx/zynqmp_dma.c 	struct dma_device *p;
p                1049 drivers/dma/xilinx/zynqmp_dma.c 	p = &zdev->common;
p                1050 drivers/dma/xilinx/zynqmp_dma.c 	p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
p                1051 drivers/dma/xilinx/zynqmp_dma.c 	p->device_terminate_all = zynqmp_dma_device_terminate_all;
p                1052 drivers/dma/xilinx/zynqmp_dma.c 	p->device_issue_pending = zynqmp_dma_issue_pending;
p                1053 drivers/dma/xilinx/zynqmp_dma.c 	p->device_alloc_chan_resources = zynqmp_dma_alloc_chan_resources;
p                1054 drivers/dma/xilinx/zynqmp_dma.c 	p->device_free_chan_resources = zynqmp_dma_free_chan_resources;
p                1055 drivers/dma/xilinx/zynqmp_dma.c 	p->device_tx_status = dma_cookie_status;
p                1056 drivers/dma/xilinx/zynqmp_dma.c 	p->device_config = zynqmp_dma_device_config;
p                1057 drivers/dma/xilinx/zynqmp_dma.c 	p->dev = &pdev->dev;
p                1088 drivers/dma/xilinx/zynqmp_dma.c 	p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
p                1089 drivers/dma/xilinx/zynqmp_dma.c 	p->src_addr_widths = BIT(zdev->chan->bus_width / 8);
p                 219 drivers/dma/zx_dma.c 	struct zx_dma_phy *p;
p                 228 drivers/dma/zx_dma.c 		p = c->phy;
p                 229 drivers/dma/zx_dma.c 		if (p && p->ds_done && zx_dma_start_txd(c)) {
p                 231 drivers/dma/zx_dma.c 			dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
p                 234 drivers/dma/zx_dma.c 			p->vchan = NULL;
p                 244 drivers/dma/zx_dma.c 		p = &d->phy[c->id];
p                 245 drivers/dma/zx_dma.c 		if (!p->vchan) {
p                 250 drivers/dma/zx_dma.c 			p->vchan = c;
p                 251 drivers/dma/zx_dma.c 			c->phy = p;
p                 260 drivers/dma/zx_dma.c 			p = &d->phy[pch];
p                 261 drivers/dma/zx_dma.c 			c = p->vchan;
p                 274 drivers/dma/zx_dma.c 	struct zx_dma_phy *p;
p                 285 drivers/dma/zx_dma.c 		p = &d->phy[i];
p                 286 drivers/dma/zx_dma.c 		c = p->vchan;
p                 292 drivers/dma/zx_dma.c 				vchan_cyclic_callback(&p->ds_run->vd);
p                 294 drivers/dma/zx_dma.c 				vchan_cookie_complete(&p->ds_run->vd);
p                 295 drivers/dma/zx_dma.c 				p->ds_done = p->ds_run;
p                 336 drivers/dma/zx_dma.c 	struct zx_dma_phy *p;
p                 347 drivers/dma/zx_dma.c 	p = c->phy;
p                 357 drivers/dma/zx_dma.c 	} else if ((!p) || (!p->ds_run)) {
p                 360 drivers/dma/zx_dma.c 		struct zx_dma_desc_sw *ds = p->ds_run;
p                 364 drivers/dma/zx_dma.c 		clli = zx_dma_get_curr_lli(p);
p                 663 drivers/dma/zx_dma.c 	struct zx_dma_phy *p = c->phy;
p                 677 drivers/dma/zx_dma.c 	if (p) {
p                 679 drivers/dma/zx_dma.c 		zx_dma_terminate_chan(p, d);
p                 681 drivers/dma/zx_dma.c 		p->vchan = NULL;
p                 682 drivers/dma/zx_dma.c 		p->ds_run = NULL;
p                 683 drivers/dma/zx_dma.c 		p->ds_done = NULL;
p                 804 drivers/dma/zx_dma.c 		struct zx_dma_phy *p = &d->phy[i];
p                 806 drivers/dma/zx_dma.c 		p->idx = i;
p                 807 drivers/dma/zx_dma.c 		p->base = d->base + i * 0x40;
p                1224 drivers/edac/altera_edac.c static void ocram_free_mem(void *p, size_t size, void *other)
p                1226 drivers/edac/altera_edac.c 	gen_pool_free((struct gen_pool *)other, (unsigned long)p, size);
p                1314 drivers/edac/altera_edac.c static void l2_free_mem(void *p, size_t size, void *other)
p                1318 drivers/edac/altera_edac.c 	if (dev && p)
p                1319 drivers/edac/altera_edac.c 		devm_kfree(dev, p);
p                2117 drivers/edac/altera_edac.c #define to_a10edac(p, m) container_of(p, struct altr_arria10_edac, m)
p                 361 drivers/edac/altera_edac.h 	void (*free_mem)(void *p, size_t size, void *other);
p                  65 drivers/edac/edac_device.c 	void *pvt, *p;
p                  77 drivers/edac/edac_device.c 	p = NULL;
p                  78 drivers/edac/edac_device.c 	dev_ctl = edac_align_ptr(&p, sizeof(*dev_ctl), 1);
p                  83 drivers/edac/edac_device.c 	dev_inst = edac_align_ptr(&p, sizeof(*dev_inst), nr_instances);
p                  89 drivers/edac/edac_device.c 	dev_blk = edac_align_ptr(&p, sizeof(*dev_blk), count);
p                  97 drivers/edac/edac_device.c 	dev_attrib = edac_align_ptr(&p, sizeof(*dev_attrib), count);
p                 100 drivers/edac/edac_device.c 	pvt = edac_align_ptr(&p, sz_private, 1);
p                 122 drivers/edac/edac_mc.c 	char *p = buf;
p                 125 drivers/edac/edac_mc.c 		n = snprintf(p, len, "%s %d ",
p                 128 drivers/edac/edac_mc.c 		p += n;
p                 239 drivers/edac/edac_mc.c void *edac_align_ptr(void **p, unsigned int size, int n_elems)
p                 242 drivers/edac/edac_mc.c 	void *ptr = *p;
p                 244 drivers/edac/edac_mc.c 	*p += size * n_elems;
p                 266 drivers/edac/edac_mc.c 	r = (unsigned long)p % align;
p                 271 drivers/edac/edac_mc.c 	*p += align - r;
p                 319 drivers/edac/edac_mc.c 	void *pvt, *p, *ptr = NULL;
p                 446 drivers/edac/edac_mc.c 		p = dimm->label;
p                 447 drivers/edac/edac_mc.c 		n = snprintf(p, len, "mc#%u", mc_num);
p                 448 drivers/edac/edac_mc.c 		p += n;
p                 451 drivers/edac/edac_mc.c 			n = snprintf(p, len, "%s#%u",
p                 454 drivers/edac/edac_mc.c 			p += n;
p                 624 drivers/edac/edac_mc.c 	struct mem_ctl_info *p;
p                 628 drivers/edac/edac_mc.c 	p = __find_mci_by_dev(mci->pdev);
p                 629 drivers/edac/edac_mc.c 	if (unlikely(p != NULL))
p                 633 drivers/edac/edac_mc.c 		p = list_entry(item, struct mem_ctl_info, link);
p                 635 drivers/edac/edac_mc.c 		if (p->mc_idx >= mci->mc_idx) {
p                 636 drivers/edac/edac_mc.c 			if (unlikely(p->mc_idx == mci->mc_idx))
p                 649 drivers/edac/edac_mc.c 		"%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
p                 650 drivers/edac/edac_mc.c 		edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
p                 656 drivers/edac/edac_mc.c 		"    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
p                1087 drivers/edac/edac_mc.c 	char *p;
p                1144 drivers/edac/edac_mc.c 	p = e->label;
p                1145 drivers/edac/edac_mc.c 	*p = '\0';
p                1173 drivers/edac/edac_mc.c 			if (p != e->label) {
p                1174 drivers/edac/edac_mc.c 				strcpy(p, OTHER_LABEL);
p                1175 drivers/edac/edac_mc.c 				p += strlen(OTHER_LABEL);
p                1177 drivers/edac/edac_mc.c 			strcpy(p, dimm->label);
p                1178 drivers/edac/edac_mc.c 			p += strlen(p);
p                1179 drivers/edac/edac_mc.c 			*p = '\0';
p                1204 drivers/edac/edac_mc.c 		if (p == e->label)
p                1218 drivers/edac/edac_mc.c 	p = e->location;
p                1224 drivers/edac/edac_mc.c 		p += sprintf(p, "%s:%d ",
p                1228 drivers/edac/edac_mc.c 	if (p > e->location)
p                1229 drivers/edac/edac_mc.c 		*(p - 1) = '\0';
p                 832 drivers/edac/edac_mc_sysfs.c 	char *p = data;
p                 835 drivers/edac/edac_mc_sysfs.c 		p += sprintf(p, "%s %d ",
p                 840 drivers/edac/edac_mc_sysfs.c 	return p - data;
p                  63 drivers/edac/edac_module.h extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
p                  35 drivers/edac/edac_pci.c 	void *p = NULL, *pvt;
p                  40 drivers/edac/edac_pci.c 	pci = edac_align_ptr(&p, sizeof(*pci), 1);
p                  41 drivers/edac/edac_pci.c 	pvt = edac_align_ptr(&p, 1, sz_pvt);
p                 211 drivers/edac/ghes_edac.c 	char *p;
p                 264 drivers/edac/ghes_edac.c 		p = pvt->msg;
p                 267 drivers/edac/ghes_edac.c 			p += sprintf(p, "Unknown");
p                 270 drivers/edac/ghes_edac.c 			p += sprintf(p, "No error");
p                 273 drivers/edac/ghes_edac.c 			p += sprintf(p, "Single-bit ECC");
p                 276 drivers/edac/ghes_edac.c 			p += sprintf(p, "Multi-bit ECC");
p                 279 drivers/edac/ghes_edac.c 			p += sprintf(p, "Single-symbol ChipKill ECC");
p                 282 drivers/edac/ghes_edac.c 			p += sprintf(p, "Multi-symbol ChipKill ECC");
p                 285 drivers/edac/ghes_edac.c 			p += sprintf(p, "Master abort");
p                 288 drivers/edac/ghes_edac.c 			p += sprintf(p, "Target abort");
p                 291 drivers/edac/ghes_edac.c 			p += sprintf(p, "Parity Error");
p                 294 drivers/edac/ghes_edac.c 			p += sprintf(p, "Watchdog timeout");
p                 297 drivers/edac/ghes_edac.c 			p += sprintf(p, "Invalid address");
p                 300 drivers/edac/ghes_edac.c 			p += sprintf(p, "Mirror Broken");
p                 303 drivers/edac/ghes_edac.c 			p += sprintf(p, "Memory Sparing");
p                 306 drivers/edac/ghes_edac.c 			p += sprintf(p, "Scrub corrected error");
p                 309 drivers/edac/ghes_edac.c 			p += sprintf(p, "Scrub uncorrected error");
p                 312 drivers/edac/ghes_edac.c 			p += sprintf(p, "Physical Memory Map-out event");
p                 315 drivers/edac/ghes_edac.c 			p += sprintf(p, "reserved error (%d)",
p                 333 drivers/edac/ghes_edac.c 	p = e->location;
p                 335 drivers/edac/ghes_edac.c 		p += sprintf(p, "node:%d ", mem_err->node);
p                 337 drivers/edac/ghes_edac.c 		p += sprintf(p, "card:%d ", mem_err->card);
p                 339 drivers/edac/ghes_edac.c 		p += sprintf(p, "module:%d ", mem_err->module);
p                 341 drivers/edac/ghes_edac.c 		p += sprintf(p, "rank:%d ", mem_err->rank);
p                 343 drivers/edac/ghes_edac.c 		p += sprintf(p, "bank:%d ", mem_err->bank);
p                 345 drivers/edac/ghes_edac.c 		p += sprintf(p, "row:%d ", mem_err->row);
p                 347 drivers/edac/ghes_edac.c 		p += sprintf(p, "col:%d ", mem_err->column);
p                 349 drivers/edac/ghes_edac.c 		p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos);
p                 356 drivers/edac/ghes_edac.c 			p += sprintf(p, "DIMM location:%s %s ", bank, device);
p                 358 drivers/edac/ghes_edac.c 			p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
p                 368 drivers/edac/ghes_edac.c 	if (p > e->location)
p                 369 drivers/edac/ghes_edac.c 		*(p - 1) = '\0';
p                 372 drivers/edac/ghes_edac.c 	p = pvt->other_detail;
p                 376 drivers/edac/ghes_edac.c 		p += sprintf(p, "status(0x%016llx): ", (long long)status);
p                 379 drivers/edac/ghes_edac.c 			p += sprintf(p, "Error detected internal to the component ");
p                 382 drivers/edac/ghes_edac.c 			p += sprintf(p, "Error detected in the bus ");
p                 385 drivers/edac/ghes_edac.c 			p += sprintf(p, "Storage error in DRAM memory ");
p                 388 drivers/edac/ghes_edac.c 			p += sprintf(p, "Storage error in TLB ");
p                 391 drivers/edac/ghes_edac.c 			p += sprintf(p, "Storage error in cache ");
p                 394 drivers/edac/ghes_edac.c 			p += sprintf(p, "Error in one or more functional units ");
p                 397 drivers/edac/ghes_edac.c 			p += sprintf(p, "component failed self test ");
p                 400 drivers/edac/ghes_edac.c 			p += sprintf(p, "Overflow or undervalue of internal queue ");
p                 403 drivers/edac/ghes_edac.c 			p += sprintf(p, "Virtual address not found on IO-TLB or IO-PDIR ");
p                 406 drivers/edac/ghes_edac.c 			p += sprintf(p, "Improper access error ");
p                 409 drivers/edac/ghes_edac.c 			p += sprintf(p, "Access to a memory address which is not mapped to any component ");
p                 412 drivers/edac/ghes_edac.c 			p += sprintf(p, "Loss of Lockstep ");
p                 415 drivers/edac/ghes_edac.c 			p += sprintf(p, "Response not associated with a request ");
p                 418 drivers/edac/ghes_edac.c 			p += sprintf(p, "Bus parity error - must also set the A, C, or D Bits ");
p                 421 drivers/edac/ghes_edac.c 			p += sprintf(p, "Detection of a PATH_ERROR ");
p                 424 drivers/edac/ghes_edac.c 			p += sprintf(p, "Bus operation timeout ");
p                 427 drivers/edac/ghes_edac.c 			p += sprintf(p, "A read was issued to data that has been poisoned ");
p                 430 drivers/edac/ghes_edac.c 			p += sprintf(p, "reserved ");
p                 435 drivers/edac/ghes_edac.c 		p += sprintf(p, "requestorID: 0x%016llx ",
p                 438 drivers/edac/ghes_edac.c 		p += sprintf(p, "responderID: 0x%016llx ",
p                 441 drivers/edac/ghes_edac.c 		p += sprintf(p, "targetID: 0x%016llx ",
p                 443 drivers/edac/ghes_edac.c 	if (p > pvt->other_detail)
p                 444 drivers/edac/ghes_edac.c 		*(p - 1) = '\0';
p                1038 drivers/edac/i5000_edac.c 	char *p, *mem_buffer;
p                1043 drivers/edac/i5000_edac.c 	mem_buffer = p = kmalloc(space, GFP_KERNEL);
p                1044 drivers/edac/i5000_edac.c 	if (p == NULL) {
p                1060 drivers/edac/i5000_edac.c 			n = snprintf(p, space, "--------------------------"
p                1062 drivers/edac/i5000_edac.c 			p += n;
p                1065 drivers/edac/i5000_edac.c 			p = mem_buffer;
p                1068 drivers/edac/i5000_edac.c 		n = snprintf(p, space, "slot %2d    ", slot);
p                1069 drivers/edac/i5000_edac.c 		p += n;
p                1076 drivers/edac/i5000_edac.c 				n = snprintf(p, space, "%4d MB %dR| ",
p                1079 drivers/edac/i5000_edac.c 				n = snprintf(p, space, "%4d MB   | ", 0);
p                1080 drivers/edac/i5000_edac.c 			p += n;
p                1083 drivers/edac/i5000_edac.c 		p += n;
p                1086 drivers/edac/i5000_edac.c 		p = mem_buffer;
p                1091 drivers/edac/i5000_edac.c 	n = snprintf(p, space, "--------------------------"
p                1093 drivers/edac/i5000_edac.c 	p += n;
p                1096 drivers/edac/i5000_edac.c 	p = mem_buffer;
p                1100 drivers/edac/i5000_edac.c 	n = snprintf(p, space, "           ");
p                1101 drivers/edac/i5000_edac.c 	p += n;
p                1104 drivers/edac/i5000_edac.c 		n = snprintf(p, space, "channel %d | ", channel);
p                1105 drivers/edac/i5000_edac.c 		p += n;
p                1109 drivers/edac/i5000_edac.c 	p = mem_buffer;
p                1112 drivers/edac/i5000_edac.c 	n = snprintf(p, space, "           ");
p                1113 drivers/edac/i5000_edac.c 	p += n;
p                1115 drivers/edac/i5000_edac.c 		n = snprintf(p, space, "       branch %d       | ", branch);
p                1116 drivers/edac/i5000_edac.c 		p += n;
p                 960 drivers/edac/i5400_edac.c 	char *p, *mem_buffer;
p                 966 drivers/edac/i5400_edac.c 	mem_buffer = p = kmalloc(space, GFP_KERNEL);
p                 967 drivers/edac/i5400_edac.c 	if (p == NULL) {
p                 984 drivers/edac/i5400_edac.c 			n = snprintf(p, space, "---------------------------"
p                 986 drivers/edac/i5400_edac.c 			p += n;
p                 989 drivers/edac/i5400_edac.c 			p = mem_buffer;
p                 992 drivers/edac/i5400_edac.c 		n = snprintf(p, space, "dimm %2d    ", dimm);
p                 993 drivers/edac/i5400_edac.c 		p += n;
p                 999 drivers/edac/i5400_edac.c 			n = snprintf(p, space, "%4d MB   | ", dinfo->megabytes);
p                1000 drivers/edac/i5400_edac.c 			p += n;
p                1004 drivers/edac/i5400_edac.c 		p = mem_buffer;
p                1009 drivers/edac/i5400_edac.c 	n = snprintf(p, space, "---------------------------"
p                1011 drivers/edac/i5400_edac.c 	p += n;
p                1014 drivers/edac/i5400_edac.c 	p = mem_buffer;
p                1018 drivers/edac/i5400_edac.c 	n = snprintf(p, space, "           ");
p                1019 drivers/edac/i5400_edac.c 	p += n;
p                1022 drivers/edac/i5400_edac.c 		n = snprintf(p, space, "channel %d | ", channel);
p                1023 drivers/edac/i5400_edac.c 		p += n;
p                1029 drivers/edac/i5400_edac.c 	p = mem_buffer;
p                1032 drivers/edac/i5400_edac.c 	n = snprintf(p, space, "           ");
p                1033 drivers/edac/i5400_edac.c 	p += n;
p                1035 drivers/edac/i5400_edac.c 		n = snprintf(p, space, "       branch %d       | ", branch);
p                1036 drivers/edac/i5400_edac.c 		p += n;
p                 686 drivers/edac/i7300_edac.c 	char *p;
p                 691 drivers/edac/i7300_edac.c 	p = pvt->tmp_prt_buffer;
p                 693 drivers/edac/i7300_edac.c 	n = snprintf(p, space, "              ");
p                 694 drivers/edac/i7300_edac.c 	p += n;
p                 697 drivers/edac/i7300_edac.c 		n = snprintf(p, space, "channel %d | ", channel);
p                 698 drivers/edac/i7300_edac.c 		p += n;
p                 702 drivers/edac/i7300_edac.c 	p = pvt->tmp_prt_buffer;
p                 704 drivers/edac/i7300_edac.c 	n = snprintf(p, space, "-------------------------------"
p                 706 drivers/edac/i7300_edac.c 	p += n;
p                 709 drivers/edac/i7300_edac.c 	p = pvt->tmp_prt_buffer;
p                 713 drivers/edac/i7300_edac.c 		n = snprintf(p, space, "csrow/SLOT %d  ", slot);
p                 714 drivers/edac/i7300_edac.c 		p += n;
p                 719 drivers/edac/i7300_edac.c 			n = snprintf(p, space, "%4d MB   | ", dinfo->megabytes);
p                 720 drivers/edac/i7300_edac.c 			p += n;
p                 725 drivers/edac/i7300_edac.c 		p = pvt->tmp_prt_buffer;
p                 729 drivers/edac/i7300_edac.c 	n = snprintf(p, space, "-------------------------------"
p                 731 drivers/edac/i7300_edac.c 	p += n;
p                 734 drivers/edac/i7300_edac.c 	p = pvt->tmp_prt_buffer;
p                  41 drivers/edac/octeon_edac-pc.c 	struct co_cache_error *p = container_of(this, struct co_cache_error,
p                  57 drivers/edac/octeon_edac-pc.c 		edac_device_printk(p->ed, KERN_ERR,
p                  62 drivers/edac/octeon_edac-pc.c 		edac_device_handle_ce(p->ed, cpu, 1, "icache");
p                  65 drivers/edac/octeon_edac-pc.c 		edac_device_printk(p->ed, KERN_ERR,
p                  70 drivers/edac/octeon_edac-pc.c 			edac_device_handle_ue(p->ed, cpu, 0, "dcache");
p                  72 drivers/edac/octeon_edac-pc.c 			edac_device_handle_ce(p->ed, cpu, 0, "dcache");
p                  86 drivers/edac/octeon_edac-pc.c 	struct co_cache_error *p = devm_kzalloc(&pdev->dev, sizeof(*p),
p                  88 drivers/edac/octeon_edac-pc.c 	if (!p)
p                  91 drivers/edac/octeon_edac-pc.c 	p->notifier.notifier_call = co_cache_error_event;
p                  92 drivers/edac/octeon_edac-pc.c 	platform_set_drvdata(pdev, p);
p                  94 drivers/edac/octeon_edac-pc.c 	p->ed = edac_device_alloc_ctl_info(0, "cpu", num_possible_cpus(),
p                  97 drivers/edac/octeon_edac-pc.c 	if (!p->ed)
p                 100 drivers/edac/octeon_edac-pc.c 	p->ed->dev = &pdev->dev;
p                 102 drivers/edac/octeon_edac-pc.c 	p->ed->dev_name = dev_name(&pdev->dev);
p                 104 drivers/edac/octeon_edac-pc.c 	p->ed->mod_name = "octeon-cpu";
p                 105 drivers/edac/octeon_edac-pc.c 	p->ed->ctl_name = "cache";
p                 107 drivers/edac/octeon_edac-pc.c 	if (edac_device_add_device(p->ed)) {
p                 112 drivers/edac/octeon_edac-pc.c 	register_co_cache_error_notifier(&p->notifier);
p                 117 drivers/edac/octeon_edac-pc.c 	edac_device_free_ctl_info(p->ed);
p                 124 drivers/edac/octeon_edac-pc.c 	struct co_cache_error *p = platform_get_drvdata(pdev);
p                 126 drivers/edac/octeon_edac-pc.c 	unregister_co_cache_error_notifier(&p->notifier);
p                 128 drivers/edac/octeon_edac-pc.c 	edac_device_free_ctl_info(p->ed);
p                 375 drivers/edac/pnd2_edac.c static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
p                 379 drivers/edac/pnd2_edac.c 	if (!p->slice_0_mem_disabled)
p                 380 drivers/edac/pnd2_edac.c 		mask |= p->sym_slice0_channel_enabled;
p                 382 drivers/edac/pnd2_edac.c 	if (!p->slice_1_disabled)
p                 383 drivers/edac/pnd2_edac.c 		mask |= p->sym_slice1_channel_enabled << 2;
p                 385 drivers/edac/pnd2_edac.c 	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
p                 391 drivers/edac/pnd2_edac.c static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
p                 405 drivers/edac/pnd2_edac.c 	if (p->slice_0_mem_disabled)
p                 407 drivers/edac/pnd2_edac.c 	if (p->slice_1_disabled)
p                 409 drivers/edac/pnd2_edac.c 	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
p                  31 drivers/edac/sifive_edac.c 	struct sifive_edac_priv *p;
p                  33 drivers/edac/sifive_edac.c 	p = container_of(this, struct sifive_edac_priv, notifier);
p                  36 drivers/edac/sifive_edac.c 		edac_device_handle_ue(p->dci, 0, 0, msg);
p                  38 drivers/edac/sifive_edac.c 		edac_device_handle_ce(p->dci, 0, 0, msg);
p                  45 drivers/edac/sifive_edac.c 	struct sifive_edac_priv *p;
p                  47 drivers/edac/sifive_edac.c 	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
p                  48 drivers/edac/sifive_edac.c 	if (!p)
p                  51 drivers/edac/sifive_edac.c 	p->notifier.notifier_call = ecc_err_event;
p                  52 drivers/edac/sifive_edac.c 	platform_set_drvdata(pdev, p);
p                  54 drivers/edac/sifive_edac.c 	p->dci = edac_device_alloc_ctl_info(0, "sifive_ecc", 1, "sifive_ecc",
p                  57 drivers/edac/sifive_edac.c 	if (!p->dci)
p                  60 drivers/edac/sifive_edac.c 	p->dci->dev = &pdev->dev;
p                  61 drivers/edac/sifive_edac.c 	p->dci->mod_name = "Sifive ECC Manager";
p                  62 drivers/edac/sifive_edac.c 	p->dci->ctl_name = dev_name(&pdev->dev);
p                  63 drivers/edac/sifive_edac.c 	p->dci->dev_name = dev_name(&pdev->dev);
p                  65 drivers/edac/sifive_edac.c 	if (edac_device_add_device(p->dci)) {
p                  66 drivers/edac/sifive_edac.c 		dev_err(p->dci->dev, "failed to register with EDAC core\n");
p                  70 drivers/edac/sifive_edac.c 	register_sifive_l2_error_notifier(&p->notifier);
p                  75 drivers/edac/sifive_edac.c 	edac_device_free_ctl_info(p->dci);
p                  82 drivers/edac/sifive_edac.c 	struct sifive_edac_priv *p = platform_get_drvdata(pdev);
p                  84 drivers/edac/sifive_edac.c 	unregister_sifive_l2_error_notifier(&p->notifier);
p                  86 drivers/edac/sifive_edac.c 	edac_device_free_ctl_info(p->dci);
p                 359 drivers/edac/synopsys_edac.c 	struct synps_ecc_status *p;
p                 364 drivers/edac/synopsys_edac.c 	p = &priv->stat;
p                 370 drivers/edac/synopsys_edac.c 	p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT;
p                 371 drivers/edac/synopsys_edac.c 	p->ue_cnt = regval & STAT_UECNT_MASK;
p                 374 drivers/edac/synopsys_edac.c 	if (!(p->ce_cnt && (regval & LOG_VALID)))
p                 377 drivers/edac/synopsys_edac.c 	p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT;
p                 379 drivers/edac/synopsys_edac.c 	p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
p                 380 drivers/edac/synopsys_edac.c 	p->ceinfo.col = regval & ADDR_COL_MASK;
p                 381 drivers/edac/synopsys_edac.c 	p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
p                 382 drivers/edac/synopsys_edac.c 	p->ceinfo.data = readl(base + CE_DATA_31_0_OFST);
p                 383 drivers/edac/synopsys_edac.c 	edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos,
p                 384 drivers/edac/synopsys_edac.c 		 p->ceinfo.data);
p                 389 drivers/edac/synopsys_edac.c 	if (!(p->ue_cnt && (regval & LOG_VALID)))
p                 393 drivers/edac/synopsys_edac.c 	p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
p                 394 drivers/edac/synopsys_edac.c 	p->ueinfo.col = regval & ADDR_COL_MASK;
p                 395 drivers/edac/synopsys_edac.c 	p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
p                 396 drivers/edac/synopsys_edac.c 	p->ueinfo.data = readl(base + UE_DATA_31_0_OFST);
p                 414 drivers/edac/synopsys_edac.c 	struct synps_ecc_status *p;
p                 419 drivers/edac/synopsys_edac.c 	p = &priv->stat;
p                 425 drivers/edac/synopsys_edac.c 	p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT;
p                 426 drivers/edac/synopsys_edac.c 	p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT;
p                 427 drivers/edac/synopsys_edac.c 	if (!p->ce_cnt)
p                 430 drivers/edac/synopsys_edac.c 	p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
p                 433 drivers/edac/synopsys_edac.c 	p->ceinfo.row = (regval & ECC_CEADDR0_RW_MASK);
p                 435 drivers/edac/synopsys_edac.c 	p->ceinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
p                 437 drivers/edac/synopsys_edac.c 	p->ceinfo.bankgrpnr = (regval &	ECC_CEADDR1_BNKGRP_MASK) >>
p                 439 drivers/edac/synopsys_edac.c 	p->ceinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
p                 440 drivers/edac/synopsys_edac.c 	p->ceinfo.data = readl(base + ECC_CSYND0_OFST);
p                 445 drivers/edac/synopsys_edac.c 	if (!p->ue_cnt)
p                 449 drivers/edac/synopsys_edac.c 	p->ueinfo.row = (regval & ECC_CEADDR0_RW_MASK);
p                 451 drivers/edac/synopsys_edac.c 	p->ueinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
p                 453 drivers/edac/synopsys_edac.c 	p->ueinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
p                 455 drivers/edac/synopsys_edac.c 	p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
p                 456 drivers/edac/synopsys_edac.c 	p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
p                 473 drivers/edac/synopsys_edac.c static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
p                 478 drivers/edac/synopsys_edac.c 	if (p->ce_cnt) {
p                 479 drivers/edac/synopsys_edac.c 		pinf = &p->ceinfo;
p                 494 drivers/edac/synopsys_edac.c 				     p->ce_cnt, 0, 0, 0, 0, 0, -1,
p                 498 drivers/edac/synopsys_edac.c 	if (p->ue_cnt) {
p                 499 drivers/edac/synopsys_edac.c 		pinf = &p->ueinfo;
p                 512 drivers/edac/synopsys_edac.c 				     p->ue_cnt, 0, 0, 0, 0, 0, -1,
p                 516 drivers/edac/synopsys_edac.c 	memset(p, 0, sizeof(*p));
p                 630 drivers/firewire/core-card.c static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
p                 199 drivers/firewire/core-cdev.c 	struct fw_packet p;
p                 368 drivers/firewire/core-cdev.c static int schedule_reallocations(int id, void *p, void *data)
p                 370 drivers/firewire/core-cdev.c 	schedule_if_iso_resource(p);
p                1050 drivers/firewire/core-cdev.c 	struct fw_cdev_iso_packet __user *p, *end, *next;
p                1083 drivers/firewire/core-cdev.c 	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
p                1084 drivers/firewire/core-cdev.c 	if (!access_ok(p, a->size))
p                1087 drivers/firewire/core-cdev.c 	end = (void __user *)p + a->size;
p                1089 drivers/firewire/core-cdev.c 	while (p < end) {
p                1090 drivers/firewire/core-cdev.c 		if (get_user(control, &p->control))
p                1120 drivers/firewire/core-cdev.c 			&p->header[transmit_header_bytes / 4];
p                1124 drivers/firewire/core-cdev.c 		    (u.packet.header, p->header, transmit_header_bytes))
p                1136 drivers/firewire/core-cdev.c 		p = next;
p                1142 drivers/firewire/core-cdev.c 	a->size    -= uptr_to_u64(p) - a->packets;
p                1143 drivers/firewire/core-cdev.c 	a->packets  = uptr_to_u64(p);
p                1484 drivers/firewire/core-cdev.c 		container_of(packet, struct outbound_phy_packet_event, p);
p                1522 drivers/firewire/core-cdev.c 	e->p.speed		= SCODE_100;
p                1523 drivers/firewire/core-cdev.c 	e->p.generation		= a->generation;
p                1524 drivers/firewire/core-cdev.c 	e->p.header[0]		= TCODE_LINK_INTERNAL << 4;
p                1525 drivers/firewire/core-cdev.c 	e->p.header[1]		= a->data[0];
p                1526 drivers/firewire/core-cdev.c 	e->p.header[2]		= a->data[1];
p                1527 drivers/firewire/core-cdev.c 	e->p.header_length	= 12;
p                1528 drivers/firewire/core-cdev.c 	e->p.callback		= outbound_phy_packet_callback;
p                1534 drivers/firewire/core-cdev.c 	card->driver->send_request(card, &e->p);
p                1558 drivers/firewire/core-cdev.c void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
p                1575 drivers/firewire/core-cdev.c 		e->phy_packet.data[0]	= p->header[1];
p                1576 drivers/firewire/core-cdev.c 		e->phy_packet.data[1]	= p->header[2];
p                1707 drivers/firewire/core-cdev.c static int is_outbound_transaction_resource(int id, void *p, void *data)
p                1709 drivers/firewire/core-cdev.c 	struct client_resource *resource = p;
p                1726 drivers/firewire/core-cdev.c static int shutdown_resource(int id, void *p, void *data)
p                1728 drivers/firewire/core-cdev.c 	struct client_resource *resource = p;
p                  34 drivers/firewire/core-device.c void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p)
p                  36 drivers/firewire/core-device.c 	ci->p = p + 1;
p                  37 drivers/firewire/core-device.c 	ci->end = ci->p + (p[0] >> 16);
p                  43 drivers/firewire/core-device.c 	*key = *ci->p >> 24;
p                  44 drivers/firewire/core-device.c 	*value = *ci->p & 0xffffff;
p                  46 drivers/firewire/core-device.c 	return ci->p++ < ci->end;
p                  59 drivers/firewire/core-device.c 			return ci.p - 1 + value;
p                 455 drivers/firewire/core-device.c 		i += units_sprintf(&buf[i], ci.p + value - 1);
p                 709 drivers/firewire/core-device.c 		unit->directory = ci.p + value - 1;
p                 745 drivers/firewire/core-transaction.c 					   struct fw_packet *p)
p                 751 drivers/firewire/core-transaction.c 	request_tcode = HEADER_GET_TCODE(p->header[0]);
p                 754 drivers/firewire/core-transaction.c 		data = &p->header[3];
p                 760 drivers/firewire/core-transaction.c 		data = p->payload;
p                 761 drivers/firewire/core-transaction.c 		length = HEADER_GET_DATA_LENGTH(p->header[3]);
p                 771 drivers/firewire/core-transaction.c 		length = HEADER_GET_DATA_LENGTH(p->header[3]);
p                 776 drivers/firewire/core-transaction.c 			 p->header[0], p->header[1], p->header[2]);
p                 784 drivers/firewire/core-transaction.c 	request->response.speed = p->speed;
p                 786 drivers/firewire/core-transaction.c 			compute_split_timeout_timestamp(card, p->timestamp);
p                 787 drivers/firewire/core-transaction.c 	request->response.generation = p->generation;
p                 790 drivers/firewire/core-transaction.c 	request->ack = p->ack;
p                 795 drivers/firewire/core-transaction.c 	memcpy(request->request_header, p->header, sizeof(p->header));
p                 836 drivers/firewire/core-transaction.c 					    struct fw_packet *p,
p                 843 drivers/firewire/core-transaction.c 	destination = HEADER_GET_DESTINATION(p->header[0]);
p                 844 drivers/firewire/core-transaction.c 	source      = HEADER_GET_SOURCE(p->header[1]);
p                 845 drivers/firewire/core-transaction.c 	tcode       = HEADER_GET_TCODE(p->header[0]);
p                 847 drivers/firewire/core-transaction.c 		tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]);
p                 855 drivers/firewire/core-transaction.c 					  p->generation, offset,
p                 865 drivers/firewire/core-transaction.c 				      struct fw_packet *p,
p                 880 drivers/firewire/core-transaction.c 	tcode       = HEADER_GET_TCODE(p->header[0]);
p                 881 drivers/firewire/core-transaction.c 	destination = HEADER_GET_DESTINATION(p->header[0]);
p                 882 drivers/firewire/core-transaction.c 	source      = HEADER_GET_SOURCE(p->header[1]);
p                 896 drivers/firewire/core-transaction.c 						  p->generation, offset,
p                 906 drivers/firewire/core-transaction.c void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
p                 911 drivers/firewire/core-transaction.c 	if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
p                 914 drivers/firewire/core-transaction.c 	if (TCODE_IS_LINK_INTERNAL(HEADER_GET_TCODE(p->header[0]))) {
p                 915 drivers/firewire/core-transaction.c 		fw_cdev_handle_phy_packet(card, p);
p                 919 drivers/firewire/core-transaction.c 	request = allocate_request(card, p);
p                 925 drivers/firewire/core-transaction.c 	offset = ((u64)HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) |
p                 926 drivers/firewire/core-transaction.c 		p->header[2];
p                 929 drivers/firewire/core-transaction.c 		handle_exclusive_region_request(card, p, request, offset);
p                 931 drivers/firewire/core-transaction.c 		handle_fcp_region_request(card, p, request, offset);
p                 936 drivers/firewire/core-transaction.c void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
p                 944 drivers/firewire/core-transaction.c 	tcode	= HEADER_GET_TCODE(p->header[0]);
p                 945 drivers/firewire/core-transaction.c 	tlabel	= HEADER_GET_TLABEL(p->header[0]);
p                 946 drivers/firewire/core-transaction.c 	source	= HEADER_GET_SOURCE(p->header[1]);
p                 947 drivers/firewire/core-transaction.c 	rcode	= HEADER_GET_RCODE(p->header[1]);
p                 977 drivers/firewire/core-transaction.c 		data = (u32 *) &p->header[3];
p                 988 drivers/firewire/core-transaction.c 		data = p->payload;
p                 989 drivers/firewire/core-transaction.c 		data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
p                 130 drivers/firewire/core.h void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p);
p                 741 drivers/firewire/net.c static int gasp_source_id(__be32 *p)
p                 743 drivers/firewire/net.c 	return be32_to_cpu(p[0]) >> 16;
p                 746 drivers/firewire/net.c static u32 gasp_specifier_id(__be32 *p)
p                 748 drivers/firewire/net.c 	return (be32_to_cpu(p[0]) & 0xffff) << 8 |
p                 749 drivers/firewire/net.c 	       (be32_to_cpu(p[1]) & 0xff000000) >> 24;
p                 752 drivers/firewire/net.c static u32 gasp_version(__be32 *p)
p                 754 drivers/firewire/net.c 	return be32_to_cpu(p[1]) & 0xffffff;
p                 985 drivers/firewire/net.c 		u8 *p;
p                1005 drivers/firewire/net.c 		p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE);
p                1006 drivers/firewire/net.c 		put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
p                1008 drivers/firewire/net.c 						| sw_version, &p[4]);
p                 527 drivers/firewire/nosy.c 	u32 p, end;
p                 583 drivers/firewire/nosy.c 	p = lynx->rcv_buffer_bus + 2048;
p                 585 drivers/firewire/nosy.c 	for (i = 1; p < end; i++, p += 2048) {
p                 588 drivers/firewire/nosy.c 		lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p);
p                 801 drivers/firewire/ohci.c 	struct fw_packet p;
p                 805 drivers/firewire/ohci.c 	p.header[0] = cond_le32_to_cpu(buffer[0]);
p                 806 drivers/firewire/ohci.c 	p.header[1] = cond_le32_to_cpu(buffer[1]);
p                 807 drivers/firewire/ohci.c 	p.header[2] = cond_le32_to_cpu(buffer[2]);
p                 809 drivers/firewire/ohci.c 	tcode = (p.header[0] >> 4) & 0x0f;
p                 813 drivers/firewire/ohci.c 		p.header[3] = (__force __u32) buffer[3];
p                 814 drivers/firewire/ohci.c 		p.header_length = 16;
p                 815 drivers/firewire/ohci.c 		p.payload_length = 0;
p                 819 drivers/firewire/ohci.c 		p.header[3] = cond_le32_to_cpu(buffer[3]);
p                 820 drivers/firewire/ohci.c 		p.header_length = 16;
p                 821 drivers/firewire/ohci.c 		p.payload_length = 0;
p                 828 drivers/firewire/ohci.c 		p.header[3] = cond_le32_to_cpu(buffer[3]);
p                 829 drivers/firewire/ohci.c 		p.header_length = 16;
p                 830 drivers/firewire/ohci.c 		p.payload_length = p.header[3] >> 16;
p                 831 drivers/firewire/ohci.c 		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
p                 840 drivers/firewire/ohci.c 		p.header_length = 12;
p                 841 drivers/firewire/ohci.c 		p.payload_length = 0;
p                 849 drivers/firewire/ohci.c 	p.payload = (void *) buffer + p.header_length;
p                 852 drivers/firewire/ohci.c 	length = (p.header_length + p.payload_length + 3) / 4;
p                 856 drivers/firewire/ohci.c 	p.ack        = evt - 16;
p                 857 drivers/firewire/ohci.c 	p.speed      = (status >> 21) & 0x7;
p                 858 drivers/firewire/ohci.c 	p.timestamp  = status & 0xffff;
p                 859 drivers/firewire/ohci.c 	p.generation = ohci->request_generation;
p                 861 drivers/firewire/ohci.c 	log_ar_at_event(ohci, 'R', p.speed, p.header, evt);
p                 868 drivers/firewire/ohci.c 	    (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
p                 869 drivers/firewire/ohci.c 		p.ack = ACK_COMPLETE;
p                 886 drivers/firewire/ohci.c 			ohci->request_generation = (p.header[2] >> 16) & 0xff;
p                 888 drivers/firewire/ohci.c 		fw_core_handle_request(&ohci->card, &p);
p                 890 drivers/firewire/ohci.c 		fw_core_handle_response(&ohci->card, &p);
p                 896 drivers/firewire/ohci.c static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
p                 900 drivers/firewire/ohci.c 	while (p < end) {
p                 901 drivers/firewire/ohci.c 		next = handle_ar_packet(ctx, p);
p                 903 drivers/firewire/ohci.c 			return p;
p                 904 drivers/firewire/ohci.c 		p = next;
p                 907 drivers/firewire/ohci.c 	return p;
p                 928 drivers/firewire/ohci.c 	void *p, *end;
p                 930 drivers/firewire/ohci.c 	p = ctx->pointer;
p                 931 drivers/firewire/ohci.c 	if (!p)
p                 947 drivers/firewire/ohci.c 		p = handle_ar_packets(ctx, p, buffer_end);
p                 948 drivers/firewire/ohci.c 		if (p < buffer_end)
p                 951 drivers/firewire/ohci.c 		p -= AR_BUFFERS * PAGE_SIZE;
p                 954 drivers/firewire/ohci.c 	p = handle_ar_packets(ctx, p, end);
p                 955 drivers/firewire/ohci.c 	if (p != end) {
p                 956 drivers/firewire/ohci.c 		if (p > end)
p                 961 drivers/firewire/ohci.c 	ctx->pointer = p;
p                3198 drivers/firewire/ohci.c 	struct fw_iso_packet *p;
p                3205 drivers/firewire/ohci.c 	p = packet;
p                3208 drivers/firewire/ohci.c 	if (p->skip)
p                3212 drivers/firewire/ohci.c 	if (p->header_length > 0)
p                3216 drivers/firewire/ohci.c 	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
p                3217 drivers/firewire/ohci.c 	if (p->payload_length > 0)
p                3225 drivers/firewire/ohci.c 	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
p                3231 drivers/firewire/ohci.c 	if (!p->skip) {
p                3244 drivers/firewire/ohci.c 		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
p                3245 drivers/firewire/ohci.c 					IT_HEADER_TAG(p->tag) |
p                3250 drivers/firewire/ohci.c 			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
p                3251 drivers/firewire/ohci.c 							  p->payload_length));
p                3254 drivers/firewire/ohci.c 	if (p->header_length > 0) {
p                3255 drivers/firewire/ohci.c 		d[2].req_count    = cpu_to_le16(p->header_length);
p                3257 drivers/firewire/ohci.c 		memcpy(&d[z], p->header, p->header_length);
p                3261 drivers/firewire/ohci.c 	payload_end_index = payload_index + p->payload_length;
p                3280 drivers/firewire/ohci.c 	if (p->interrupt)
p                1055 drivers/firewire/sbp2.c 			sbp2_get_unit_unique_id(tgt, ci.p - 1 + value);
p                1060 drivers/firewire/sbp2.c 			if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0)
p                 105 drivers/firmware/arm_scmi/common.h 		       size_t tx_size, size_t rx_size, struct scmi_xfer **p);
p                 529 drivers/firmware/arm_scmi/driver.c 		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
p                 555 drivers/firmware/arm_scmi/driver.c 	*p = xfer;
p                 629 drivers/firmware/arm_scmi/driver.c 	struct list_head *p;
p                 634 drivers/firmware/arm_scmi/driver.c 	list_for_each(p, &scmi_list) {
p                 635 drivers/firmware/arm_scmi/driver.c 		info = list_entry(p, struct scmi_info, node);
p                 901 drivers/firmware/arm_scmi/driver.c static int scmi_mbox_free_channel(int id, void *p, void *data)
p                 903 drivers/firmware/arm_scmi/driver.c 	struct scmi_chan_info *cinfo = p;
p                 490 drivers/firmware/broadcom/bcm47xx_sprom.c 	u8 *p = mac + ETH_ALEN - 1;
p                 493 drivers/firmware/broadcom/bcm47xx_sprom.c 		(*p) += num;
p                 494 drivers/firmware/broadcom/bcm47xx_sprom.c 		if (*p > num)
p                 496 drivers/firmware/broadcom/bcm47xx_sprom.c 		p--;
p                 498 drivers/firmware/broadcom/bcm47xx_sprom.c 	} while (p != oui);
p                 500 drivers/firmware/broadcom/bcm47xx_sprom.c 	if (p == oui) {
p                  94 drivers/firmware/dmi-id.c 	char *p;
p                  98 drivers/firmware/dmi-id.c 	p = buffer + 3; left = buffer_size - 4;
p                 112 drivers/firmware/dmi-id.c 		l = scnprintf(p, left, ":%s%s", f->prefix, t);
p                 115 drivers/firmware/dmi-id.c 		p += l;
p                 119 drivers/firmware/dmi-id.c 	p[0] = ':';
p                 120 drivers/firmware/dmi-id.c 	p[1] = 0;
p                 122 drivers/firmware/dmi-id.c 	return p - buffer + 1;
p                 185 drivers/firmware/dmi-sysfs.c 	const char *p = (const char *)dh;
p                 187 drivers/firmware/dmi-sysfs.c 	p += dh->length;
p                 189 drivers/firmware/dmi-sysfs.c 	while (p[0] || p[1])
p                 190 drivers/firmware/dmi-sysfs.c 		p++;
p                 192 drivers/firmware/dmi-sysfs.c 	return 2 + p - (const char *)dh;
p                 172 drivers/firmware/dmi_scan.c 	const char *p;
p                 177 drivers/firmware/dmi_scan.c 	p = dmi_string(dm, d[string]);
p                 178 drivers/firmware/dmi_scan.c 	if (p == NULL)
p                 181 drivers/firmware/dmi_scan.c 	dmi_ident[slot] = p;
p                 485 drivers/firmware/dmi_scan.c 	const char *p;
p                 490 drivers/firmware/dmi_scan.c 	for (p = info; *p; p++)
p                 491 drivers/firmware/dmi_scan.c 		if (isprint(*p))
p                 492 drivers/firmware/dmi_scan.c 			c += scnprintf(buf + c, len - c, "%c", *p);
p                 494 drivers/firmware/dmi_scan.c 			c += scnprintf(buf + c, len - c, "\\x%02x", *p & 0xff);
p                 617 drivers/firmware/dmi_scan.c 	char __iomem *p, *q;
p                 635 drivers/firmware/dmi_scan.c 			p = dmi_early_remap(efi.smbios3, 32);
p                 636 drivers/firmware/dmi_scan.c 			if (p == NULL)
p                 638 drivers/firmware/dmi_scan.c 			memcpy_fromio(buf, p, 32);
p                 639 drivers/firmware/dmi_scan.c 			dmi_early_unmap(p, 32);
p                 653 drivers/firmware/dmi_scan.c 		p = dmi_early_remap(efi.smbios, 32);
p                 654 drivers/firmware/dmi_scan.c 		if (p == NULL)
p                 656 drivers/firmware/dmi_scan.c 		memcpy_fromio(buf, p, 32);
p                 657 drivers/firmware/dmi_scan.c 		dmi_early_unmap(p, 32);
p                 664 drivers/firmware/dmi_scan.c 		p = dmi_early_remap(0xF0000, 0x10000);
p                 665 drivers/firmware/dmi_scan.c 		if (p == NULL)
p                 672 drivers/firmware/dmi_scan.c 		memcpy_fromio(buf, p, 16);
p                 673 drivers/firmware/dmi_scan.c 		for (q = p + 16; q < p + 0x10000; q += 16) {
p                 677 drivers/firmware/dmi_scan.c 				dmi_early_unmap(p, 0x10000);
p                 691 drivers/firmware/dmi_scan.c 		for (q = p; q < p + 0x10000; q += 16) {
p                 695 drivers/firmware/dmi_scan.c 				dmi_early_unmap(p, 0x10000);
p                 700 drivers/firmware/dmi_scan.c 		dmi_early_unmap(p, 0x10000);
p                  45 drivers/firmware/edd.c #define left (PAGE_SIZE - (p - buf) - 1)
p                 124 drivers/firmware/edd.c 	char *p = buf;
p                 135 drivers/firmware/edd.c 			p += scnprintf(p, left, "%c", info->params.host_bus_type[i]);
p                 137 drivers/firmware/edd.c 			p += scnprintf(p, left, " ");
p                 142 drivers/firmware/edd.c 		p += scnprintf(p, left, "\tbase_address: %x\n",
p                 147 drivers/firmware/edd.c 		p += scnprintf(p, left,
p                 155 drivers/firmware/edd.c 		p += scnprintf(p, left,
p                 160 drivers/firmware/edd.c 		p += scnprintf(p, left, "\tunknown: %llx\n",
p                 163 drivers/firmware/edd.c 	return (p - buf);
p                 170 drivers/firmware/edd.c 	char *p = buf;
p                 181 drivers/firmware/edd.c 			p += scnprintf(p, left, "%c", info->params.interface_type[i]);
p                 183 drivers/firmware/edd.c 			p += scnprintf(p, left, " ");
p                 187 drivers/firmware/edd.c 		p += scnprintf(p, left, "\tdevice: %u  lun: %u\n",
p                 191 drivers/firmware/edd.c 		p += scnprintf(p, left, "\tdevice: %u\n",
p                 194 drivers/firmware/edd.c 		p += scnprintf(p, left, "\tid: %u  lun: %llu\n",
p                 198 drivers/firmware/edd.c 		p += scnprintf(p, left, "\tserial_number: %llx\n",
p                 201 drivers/firmware/edd.c 		p += scnprintf(p, left, "\teui: %llx\n",
p                 204 drivers/firmware/edd.c 		p += scnprintf(p, left, "\twwid: %llx lun: %llx\n",
p                 208 drivers/firmware/edd.c 		p += scnprintf(p, left, "\tidentity_tag: %llx\n",
p                 211 drivers/firmware/edd.c 		p += scnprintf(p, left, "\tidentity_tag: %x\n",
p                 214 drivers/firmware/edd.c 		p += scnprintf(p, left, "\tdevice: %u\n",
p                 217 drivers/firmware/edd.c 		p += scnprintf(p, left, "\tunknown: %llx %llx\n",
p                 222 drivers/firmware/edd.c 	return (p - buf);
p                 258 drivers/firmware/edd.c 	char *p = buf;
p                 265 drivers/firmware/edd.c 	p += scnprintf(p, left, "0x%02x\n", info->version);
p                 266 drivers/firmware/edd.c 	return (p - buf);
p                 272 drivers/firmware/edd.c 	char *p = buf;
p                 273 drivers/firmware/edd.c 	p += scnprintf(p, left, "0x%08x\n", edev->mbr_signature);
p                 274 drivers/firmware/edd.c 	return (p - buf);
p                 281 drivers/firmware/edd.c 	char *p = buf;
p                 289 drivers/firmware/edd.c 		p += scnprintf(p, left, "Fixed disk access\n");
p                 292 drivers/firmware/edd.c 		p += scnprintf(p, left, "Device locking and ejecting\n");
p                 295 drivers/firmware/edd.c 		p += scnprintf(p, left, "Enhanced Disk Drive support\n");
p                 298 drivers/firmware/edd.c 		p += scnprintf(p, left, "64-bit extensions\n");
p                 300 drivers/firmware/edd.c 	return (p - buf);
p                 307 drivers/firmware/edd.c 	char *p = buf;
p                 315 drivers/firmware/edd.c 		p += scnprintf(p, left, "DMA boundary error transparent\n");
p                 317 drivers/firmware/edd.c 		p += scnprintf(p, left, "geometry valid\n");
p                 319 drivers/firmware/edd.c 		p += scnprintf(p, left, "removable\n");
p                 321 drivers/firmware/edd.c 		p += scnprintf(p, left, "write verify\n");
p                 323 drivers/firmware/edd.c 		p += scnprintf(p, left, "media change notification\n");
p                 325 drivers/firmware/edd.c 		p += scnprintf(p, left, "lockable\n");
p                 327 drivers/firmware/edd.c 		p += scnprintf(p, left, "no media present\n");
p                 329 drivers/firmware/edd.c 		p += scnprintf(p, left, "use int13 fn50\n");
p                 330 drivers/firmware/edd.c 	return (p - buf);
p                 337 drivers/firmware/edd.c 	char *p = buf;
p                 344 drivers/firmware/edd.c 	p += snprintf(p, left, "%u\n", info->legacy_max_cylinder);
p                 345 drivers/firmware/edd.c 	return (p - buf);
p                 352 drivers/firmware/edd.c 	char *p = buf;
p                 359 drivers/firmware/edd.c 	p += snprintf(p, left, "%u\n", info->legacy_max_head);
p                 360 drivers/firmware/edd.c 	return (p - buf);
p                 367 drivers/firmware/edd.c 	char *p = buf;
p                 374 drivers/firmware/edd.c 	p += snprintf(p, left, "%u\n", info->legacy_sectors_per_track);
p                 375 drivers/firmware/edd.c 	return (p - buf);
p                 382 drivers/firmware/edd.c 	char *p = buf;
p                 389 drivers/firmware/edd.c 	p += scnprintf(p, left, "%u\n", info->params.num_default_cylinders);
p                 390 drivers/firmware/edd.c 	return (p - buf);
p                 397 drivers/firmware/edd.c 	char *p = buf;
p                 404 drivers/firmware/edd.c 	p += scnprintf(p, left, "%u\n", info->params.num_default_heads);
p                 405 drivers/firmware/edd.c 	return (p - buf);
p                 412 drivers/firmware/edd.c 	char *p = buf;
p                 419 drivers/firmware/edd.c 	p += scnprintf(p, left, "%u\n", info->params.sectors_per_track);
p                 420 drivers/firmware/edd.c 	return (p - buf);
p                 427 drivers/firmware/edd.c 	char *p = buf;
p                 434 drivers/firmware/edd.c 	p += scnprintf(p, left, "%llu\n", info->params.number_of_sectors);
p                 435 drivers/firmware/edd.c 	return (p - buf);
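
The edd.c excerpts above all follow the same show-function idiom: p walks forward through buf, the left macro computes how much room remains, and the function returns p - buf. A minimal userspace sketch of that append pattern follows; scn() is an invented stand-in for the kernel's scnprintf(), and BUF_SIZE stands in for PAGE_SIZE.

#include <stdio.h>
#include <stddef.h>

#define BUF_SIZE 4096				/* stand-in for PAGE_SIZE     */
#define left (BUF_SIZE - (p - buf) - 1)		/* space still left in buf    */

/* Rough userspace analog of scnprintf(): returns chars actually stored. */
static size_t scn(char *dst, size_t size, const char *fmt, unsigned long long v)
{
	int n;

	if (size == 0)
		return 0;
	n = snprintf(dst, size, fmt, v);
	if (n < 0)
		return 0;
	return (size_t)n < size ? (size_t)n : size - 1;
}

int main(void)
{
	char buf[BUF_SIZE];
	char *p = buf;

	p += scn(p, left, "\tdevice: %llu\n", 3ULL);
	p += scn(p, left, "\tbase_address: %llx\n", 0x1f0ULL);

	printf("%s(%ld bytes used)\n", buf, (long)(p - buf));
	return 0;
}
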
p                 300 drivers/firmware/efi/cper.c const char *cper_mem_err_unpack(struct trace_seq *p,
p                 303 drivers/firmware/efi/cper.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 306 drivers/firmware/efi/cper.c 		trace_seq_printf(p, "%s", rcd_decode_str);
p                 308 drivers/firmware/efi/cper.c 		trace_seq_printf(p, "%s", rcd_decode_str);
p                 309 drivers/firmware/efi/cper.c 	trace_seq_putc(p, '\0');
p                 373 drivers/firmware/efi/cper.c 		const __u8 *p;
p                 383 drivers/firmware/efi/cper.c 		p = pcie->device_id.class_code;
p                 384 drivers/firmware/efi/cper.c 		printk("%s""class_code: %02x%02x%02x\n", pfx, p[2], p[1], p[0]);
p                 592 drivers/firmware/efi/efi.c 			u8 *p;
p                 600 drivers/firmware/efi/efi.c 			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
p                 602 drivers/firmware/efi/efi.c 			if (p == NULL) {
p                 607 drivers/firmware/efi/efi.c 			rsv = (void *)(p + prsv % PAGE_SIZE);
p                 618 drivers/firmware/efi/efi.c 			early_memunmap(p, PAGE_SIZE);
p                  84 drivers/firmware/efi/fake_mem.c static int __init setup_fake_mem(char *p)
p                  89 drivers/firmware/efi/fake_mem.c 	if (!p)
p                  92 drivers/firmware/efi/fake_mem.c 	while (*p != '\0') {
p                  93 drivers/firmware/efi/fake_mem.c 		mem_size = memparse(p, &p);
p                  94 drivers/firmware/efi/fake_mem.c 		if (*p == '@')
p                  95 drivers/firmware/efi/fake_mem.c 			start = memparse(p+1, &p);
p                  99 drivers/firmware/efi/fake_mem.c 		if (*p == ':')
p                 100 drivers/firmware/efi/fake_mem.c 			attribute = simple_strtoull(p+1, &p, 0);
p                 112 drivers/firmware/efi/fake_mem.c 		if (*p == ',')
p                 113 drivers/firmware/efi/fake_mem.c 			p++;
p                 124 drivers/firmware/efi/fake_mem.c 	return *p == '\0' ? 0 : -EINVAL;
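
The setup_fake_mem() lines parse an option string of the form size@start:attribute, with entries separated by commas, advancing p past each delimiter. Below is a self-contained sketch of the same cursor-style parsing; parse_size() is an invented stand-in for the kernel's memparse() (which likewise accepts K/M/G suffixes), and the sample string is made up.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for memparse(): a number plus an optional K/M/G suffix. */
static unsigned long long parse_size(const char *s, char **end)
{
	unsigned long long v = strtoull(s, end, 0);

	switch (**end) {
	case 'G': case 'g': v <<= 10; /* fall through */
	case 'M': case 'm': v <<= 10; /* fall through */
	case 'K': case 'k': v <<= 10; (*end)++; break;
	}
	return v;
}

int main(void)
{
	char *p = "4G@9G:0x10000,2M@0x1000000:0x4";	/* made-up option string */

	while (*p != '\0') {
		unsigned long long size, start = 0, attr = 0;

		size = parse_size(p, &p);
		if (*p == '@')
			start = parse_size(p + 1, &p);
		if (*p == ':')
			attr = strtoull(p + 1, &p, 0);

		printf("size=%#llx start=%#llx attr=%#llx\n", size, start, attr);

		if (*p == ',')
			p++;
		else if (*p != '\0')
			return 1;	/* malformed option string */
	}
	return 0;
}
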
p                 565 drivers/firmware/efi/libstub/efi-stub-helper.c 		efi_char16_t *p;
p                 574 drivers/firmware/efi/libstub/efi-stub-helper.c 		p = filename_16;
p                 581 drivers/firmware/efi/libstub/efi-stub-helper.c 			if ((u8 *)p >= (u8 *)filename_16 + sizeof(filename_16))
p                 585 drivers/firmware/efi/libstub/efi-stub-helper.c 				*p++ = '\\';
p                 588 drivers/firmware/efi/libstub/efi-stub-helper.c 				*p++ = *str++;
p                 592 drivers/firmware/efi/libstub/efi-stub-helper.c 		*p = '\0';
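
The efi-stub-helper.c lines copy a file name into a fixed-size UTF-16 buffer, bounds-checking p against the end of filename_16 and, judging from the '\\' store shown above, converting '/' into the EFI path separator. A hedged userspace sketch with a made-up input path and buffer size:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const char *str = "boot/initrd.img";	/* hypothetical input path      */
	uint16_t filename_16[16];		/* fixed-size, like the stub's  */
	uint16_t *p = filename_16;

	while (*str) {
		/* Leave room for the terminating character written below. */
		if ((uint8_t *)p >= (uint8_t *)filename_16 + sizeof(filename_16) - 2)
			break;

		if (*str == '/') {
			*p++ = '\\';	/* EFI file paths use backslashes */
			str++;
		} else {
			*p++ = (uint8_t)*str++;
		}
	}
	*p = '\0';

	for (p = filename_16; *p; p++)	/* dump it back as ASCII */
		putchar((char)*p);
	putchar('\n');
	return 0;
}
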
p                 217 drivers/firmware/efi/libstub/fdt.c 	struct exit_boot_struct *p = priv;
p                 224 drivers/firmware/efi/libstub/fdt.c 			p->runtime_map, p->runtime_entry_count);
p                 226 drivers/firmware/efi/libstub/fdt.c 	return update_fdt_memmap(p->new_fdt_addr, map);
p                 344 drivers/firmware/efi/libstub/fdt.c 				efi_memory_desc_t *p = (void *)memory_map + l;
p                 346 drivers/firmware/efi/libstub/fdt.c 				if (p->attribute & EFI_MEMORY_RUNTIME)
p                 347 drivers/firmware/efi/libstub/fdt.c 					p->virt_addr = 0;
p                  24 drivers/firmware/efi/memmap.c 	struct page *p = alloc_pages(GFP_KERNEL, order);
p                  26 drivers/firmware/efi/memmap.c 	if (!p)
p                  29 drivers/firmware/efi/memmap.c 	return PFN_PHYS(page_to_pfn(p));
p                  24 drivers/firmware/pcdp.c 	static char options[64], *p = options;
p                  28 drivers/firmware/pcdp.c 	p += sprintf(p, "uart8250,%s,0x%llx",
p                  31 drivers/firmware/pcdp.c 		p += sprintf(p, ",%llu", uart->baud);
p                  38 drivers/firmware/pcdp.c 			p += sprintf(p, "%c%d", parity, uart->bits);
p                 498 drivers/firmware/qemu_fw_cfg.c 	char *name_copy, *p, *tok;
p                 504 drivers/firmware/qemu_fw_cfg.c 	name_copy = p = kstrdup(name, GFP_KERNEL);
p                 509 drivers/firmware/qemu_fw_cfg.c 	while ((tok = strsep(&p, "/")) && *tok) {
p                 512 drivers/firmware/qemu_fw_cfg.c 		if (!p || !*p) {
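
The qemu_fw_cfg.c lines duplicate the entry name and then split it on '/' with strsep(), treating the final component differently from the directory components. A userspace analog; the sample name is hypothetical:

#define _DEFAULT_SOURCE		/* for strsep() and strdup() in glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *name = "etc/acpi/tables";	/* hypothetical fw_cfg file name */
	char *name_copy, *p, *tok;

	/* strsep() chops the string up in place, so work on a private copy. */
	name_copy = p = strdup(name);
	if (!name_copy)
		return 1;

	while ((tok = strsep(&p, "/")) && *tok) {
		if (!p || !*p)
			printf("leaf entry: %s\n", tok);
		else
			printf("directory component: %s\n", tok);
	}

	free(name_copy);
	return 0;
}
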
p                 193 drivers/firmware/tegra/bpmp-debugfs.c static int debugfs_show(struct seq_file *m, void *p)
p                 141 drivers/firmware/tegra/bpmp-tegra210.c 	void *p;
p                 148 drivers/firmware/tegra/bpmp-tegra210.c 	p = devm_ioremap(bpmp->dev, address, 0x80);
p                 149 drivers/firmware/tegra/bpmp-tegra210.c 	if (!p)
p                 152 drivers/firmware/tegra/bpmp-tegra210.c 	channel->ib = p;
p                 153 drivers/firmware/tegra/bpmp-tegra210.c 	channel->ob = p;
p                3007 drivers/firmware/ti_sci.c 	struct list_head *p;
p                3022 drivers/firmware/ti_sci.c 	list_for_each(p, &ti_sci_list) {
p                3023 drivers/firmware/ti_sci.c 		info = list_entry(p, struct ti_sci_info, node);
p                3133 drivers/firmware/ti_sci.c 	struct list_head *p;
p                3145 drivers/firmware/ti_sci.c 	list_for_each(p, &ti_sci_list) {
p                3146 drivers/firmware/ti_sci.c 		info = list_entry(p, struct ti_sci_info, node);
p                 473 drivers/fpga/dfl.c 	struct dfl_feature_info *finfo, *p;
p                 521 drivers/fpga/dfl.c 	list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
p                 595 drivers/fpga/dfl.c 	struct dfl_feature_info *finfo, *p;
p                 605 drivers/fpga/dfl.c 		list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
p                 253 drivers/fpga/fpga-mgr.c 	const void *p;
p                 276 drivers/fpga/fpga-mgr.c 	p = buf - offset_in_page(buf);
p                 278 drivers/fpga/fpga-mgr.c 		if (is_vmalloc_addr(p))
p                 279 drivers/fpga/fpga-mgr.c 			pages[index] = vmalloc_to_page(p);
p                 281 drivers/fpga/fpga-mgr.c 			pages[index] = kmap_to_page((void *)p);
p                 286 drivers/fpga/fpga-mgr.c 		p += PAGE_SIZE;
p                 142 drivers/fpga/xilinx-pr-decoupler.c 	struct xlnx_pr_decoupler_data *p = bridge->priv;
p                 146 drivers/fpga/xilinx-pr-decoupler.c 	clk_unprepare(p->clk);
p                 443 drivers/fsi/fsi-master-ast-cf.c 	char *p;
p                 457 drivers/fsi/fsi-master-ast-cf.c 			p = trbuf;
p                 459 drivers/fsi/fsi-master-ast-cf.c 		p += sprintf(p, "%02x ", v);
p                 146 drivers/fsi/fsi-sbefifo.c 	char *p = ffdc_line;
p                 181 drivers/fsi/fsi-sbefifo.c 				p = ffdc_line;
p                 182 drivers/fsi/fsi-sbefifo.c 				p += sprintf(p, "| %04x:", i << 4);
p                 184 drivers/fsi/fsi-sbefifo.c 			p += sprintf(p, " %08x", be32_to_cpu(*(ffdc++)));
p                 188 drivers/fsi/fsi-sbefifo.c 					p += sprintf(p, "         ");
p                 665 drivers/gpio/gpio-aspeed.c 	unsigned int i, p, girq, banks;
p                 677 drivers/gpio/gpio-aspeed.c 		for_each_set_bit(p, &reg, 32) {
p                 678 drivers/gpio/gpio-aspeed.c 			girq = irq_find_mapping(gc->irq.domain, i * 32 + p);
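
gpio-aspeed.c (and sgpio-aspeed.c further down) walk a per-bank interrupt status word with for_each_set_bit() and map bank*32 + bit to a Linux IRQ. An open-coded, userspace equivalent of that bit walk:

#include <stdio.h>

int main(void)
{
	unsigned long reg = 0x80000005UL;	/* pretend IRQ status register */
	unsigned int bank = 1, p;

	/* Open-coded for_each_set_bit(p, &reg, 32): visits bits 0, 2 and 31. */
	for (p = 0; p < 32; p++) {
		if (!(reg & (1UL << p)))
			continue;
		/* The drivers look up the Linux IRQ for bank*32 + bit here. */
		printf("pending hwirq %u\n", bank * 32 + p);
	}
	return 0;
}
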
p                 614 drivers/gpio/gpio-brcmstb.c 	const __be32 *p;
p                 660 drivers/gpio/gpio-brcmstb.c 	of_property_for_each_u32(np, "brcm,gpio-bank-widths", prop, p,
p                  58 drivers/gpio/gpio-em.c static inline unsigned long em_gio_read(struct em_gio_priv *p, int offs)
p                  61 drivers/gpio/gpio-em.c 		return ioread32(p->base0 + offs);
p                  63 drivers/gpio/gpio-em.c 		return ioread32(p->base1 + (offs - GIO_IDT0));
p                  66 drivers/gpio/gpio-em.c static inline void em_gio_write(struct em_gio_priv *p, int offs,
p                  70 drivers/gpio/gpio-em.c 		iowrite32(value, p->base0 + offs);
p                  72 drivers/gpio/gpio-em.c 		iowrite32(value, p->base1 + (offs - GIO_IDT0));
p                  77 drivers/gpio/gpio-em.c 	struct em_gio_priv *p = irq_data_get_irq_chip_data(d);
p                  79 drivers/gpio/gpio-em.c 	em_gio_write(p, GIO_IDS, BIT(irqd_to_hwirq(d)));
p                  84 drivers/gpio/gpio-em.c 	struct em_gio_priv *p = irq_data_get_irq_chip_data(d);
p                  86 drivers/gpio/gpio-em.c 	em_gio_write(p, GIO_IEN, BIT(irqd_to_hwirq(d)));
p                  91 drivers/gpio/gpio-em.c 	struct em_gio_priv *p = irq_data_get_irq_chip_data(d);
p                  94 drivers/gpio/gpio-em.c 	ret = gpiochip_lock_as_irq(&p->gpio_chip, irqd_to_hwirq(d));
p                  96 drivers/gpio/gpio-em.c 		dev_err(p->gpio_chip.parent,
p                 106 drivers/gpio/gpio-em.c 	struct em_gio_priv *p = irq_data_get_irq_chip_data(d);
p                 108 drivers/gpio/gpio-em.c 	gpiochip_unlock_as_irq(&p->gpio_chip, irqd_to_hwirq(d));
p                 125 drivers/gpio/gpio-em.c 	struct em_gio_priv *p = irq_data_get_irq_chip_data(d);
p                 141 drivers/gpio/gpio-em.c 	spin_lock_irqsave(&p->sense_lock, flags);
p                 144 drivers/gpio/gpio-em.c 	tmp = em_gio_read(p, GIO_IIA);
p                 146 drivers/gpio/gpio-em.c 	em_gio_write(p, GIO_IIA, tmp);
p                 149 drivers/gpio/gpio-em.c 	tmp = em_gio_read(p, reg);
p                 152 drivers/gpio/gpio-em.c 	em_gio_write(p, reg, tmp);
p                 155 drivers/gpio/gpio-em.c 	em_gio_write(p, GIO_IIR, BIT(offset));
p                 158 drivers/gpio/gpio-em.c 	tmp = em_gio_read(p, GIO_IIA);
p                 160 drivers/gpio/gpio-em.c 	em_gio_write(p, GIO_IIA, tmp);
p                 162 drivers/gpio/gpio-em.c 	spin_unlock_irqrestore(&p->sense_lock, flags);
p                 169 drivers/gpio/gpio-em.c 	struct em_gio_priv *p = dev_id;
p                 173 drivers/gpio/gpio-em.c 	while ((pending = em_gio_read(p, GIO_MST))) {
p                 175 drivers/gpio/gpio-em.c 		em_gio_write(p, GIO_IIR, BIT(offset));
p                 176 drivers/gpio/gpio-em.c 		generic_handle_irq(irq_find_mapping(p->irq_domain, offset));
p                 248 drivers/gpio/gpio-em.c 	struct em_gio_priv *p = h->host_data;
p                 253 drivers/gpio/gpio-em.c 	irq_set_chip_and_handler(irq, &p->irq_chip, handle_level_irq);
p                 271 drivers/gpio/gpio-em.c 	struct em_gio_priv *p;
p                 280 drivers/gpio/gpio-em.c 	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
p                 281 drivers/gpio/gpio-em.c 	if (!p)
p                 284 drivers/gpio/gpio-em.c 	p->pdev = pdev;
p                 285 drivers/gpio/gpio-em.c 	platform_set_drvdata(pdev, p);
p                 286 drivers/gpio/gpio-em.c 	spin_lock_init(&p->sense_lock);
p                 298 drivers/gpio/gpio-em.c 	p->base0 = devm_ioremap_nocache(dev, io[0]->start,
p                 300 drivers/gpio/gpio-em.c 	if (!p->base0)
p                 303 drivers/gpio/gpio-em.c 	p->base1 = devm_ioremap_nocache(dev, io[1]->start,
p                 305 drivers/gpio/gpio-em.c 	if (!p->base1)
p                 313 drivers/gpio/gpio-em.c 	gpio_chip = &p->gpio_chip;
p                 328 drivers/gpio/gpio-em.c 	irq_chip = &p->irq_chip;
p                 337 drivers/gpio/gpio-em.c 	p->irq_domain = irq_domain_add_simple(dev->of_node, ngpios, 0,
p                 338 drivers/gpio/gpio-em.c 					      &em_gio_irq_domain_ops, p);
p                 339 drivers/gpio/gpio-em.c 	if (!p->irq_domain) {
p                 345 drivers/gpio/gpio-em.c 				       p->irq_domain);
p                 350 drivers/gpio/gpio-em.c 			     em_gio_irq_handler, 0, name, p)) {
p                 356 drivers/gpio/gpio-em.c 			     em_gio_irq_handler, 0, name, p)) {
p                 361 drivers/gpio/gpio-em.c 	ret = devm_gpiochip_add_data(dev, gpio_chip, p);
p                 121 drivers/gpio/gpio-exar.c 	void __iomem *p;
p                 128 drivers/gpio/gpio-exar.c 	p = pcim_iomap_table(pcidev)[0];
p                 129 drivers/gpio/gpio-exar.c 	if (!p)
p                 163 drivers/gpio/gpio-exar.c 	exar_gpio->regs = p;
p                  26 drivers/gpio/gpio-mxs.c #define PINCTRL_DOUT(p)		((is_imx23_gpio(p) ? 0x0500 : 0x0700) + (p->id) * 0x10)
p                  27 drivers/gpio/gpio-mxs.c #define PINCTRL_DIN(p)		((is_imx23_gpio(p) ? 0x0600 : 0x0900) + (p->id) * 0x10)
p                  28 drivers/gpio/gpio-mxs.c #define PINCTRL_DOE(p)		((is_imx23_gpio(p) ? 0x0700 : 0x0b00) + (p->id) * 0x10)
p                  29 drivers/gpio/gpio-mxs.c #define PINCTRL_PIN2IRQ(p)	((is_imx23_gpio(p) ? 0x0800 : 0x1000) + (p->id) * 0x10)
p                  30 drivers/gpio/gpio-mxs.c #define PINCTRL_IRQEN(p)	((is_imx23_gpio(p) ? 0x0900 : 0x1100) + (p->id) * 0x10)
p                  31 drivers/gpio/gpio-mxs.c #define PINCTRL_IRQLEV(p)	((is_imx23_gpio(p) ? 0x0a00 : 0x1200) + (p->id) * 0x10)
p                  32 drivers/gpio/gpio-mxs.c #define PINCTRL_IRQPOL(p)	((is_imx23_gpio(p) ? 0x0b00 : 0x1300) + (p->id) * 0x10)
p                  33 drivers/gpio/gpio-mxs.c #define PINCTRL_IRQSTAT(p)	((is_imx23_gpio(p) ? 0x0c00 : 0x1400) + (p->id) * 0x10)
p                1056 drivers/gpio/gpio-omap.c static void omap_gpio_init_context(struct gpio_bank *p)
p                1058 drivers/gpio/gpio-omap.c 	const struct omap_gpio_reg_offs *regs = p->regs;
p                1059 drivers/gpio/gpio-omap.c 	void __iomem *base = p->base;
p                1061 drivers/gpio/gpio-omap.c 	p->context.ctrl		= readl_relaxed(base + regs->ctrl);
p                1062 drivers/gpio/gpio-omap.c 	p->context.oe		= readl_relaxed(base + regs->direction);
p                1063 drivers/gpio/gpio-omap.c 	p->context.wake_en	= readl_relaxed(base + regs->wkup_en);
p                1064 drivers/gpio/gpio-omap.c 	p->context.leveldetect0	= readl_relaxed(base + regs->leveldetect0);
p                1065 drivers/gpio/gpio-omap.c 	p->context.leveldetect1	= readl_relaxed(base + regs->leveldetect1);
p                1066 drivers/gpio/gpio-omap.c 	p->context.risingdetect	= readl_relaxed(base + regs->risingdetect);
p                1067 drivers/gpio/gpio-omap.c 	p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
p                1068 drivers/gpio/gpio-omap.c 	p->context.irqenable1	= readl_relaxed(base + regs->irqenable);
p                1069 drivers/gpio/gpio-omap.c 	p->context.irqenable2	= readl_relaxed(base + regs->irqenable2);
p                1070 drivers/gpio/gpio-omap.c 	p->context.dataout	= readl_relaxed(base + regs->dataout);
p                1072 drivers/gpio/gpio-omap.c 	p->context_valid = true;
p                 162 drivers/gpio/gpio-pxa.c 	struct pxa_gpio_chip *p = gpiochip_get_data(c);
p                 163 drivers/gpio/gpio-pxa.c 	struct pxa_gpio_bank *bank = p->banks + (gpio / 32);
p                  64 drivers/gpio/gpio-rcar.c static inline u32 gpio_rcar_read(struct gpio_rcar_priv *p, int offs)
p                  66 drivers/gpio/gpio-rcar.c 	return ioread32(p->base + offs);
p                  69 drivers/gpio/gpio-rcar.c static inline void gpio_rcar_write(struct gpio_rcar_priv *p, int offs,
p                  72 drivers/gpio/gpio-rcar.c 	iowrite32(value, p->base + offs);
p                  75 drivers/gpio/gpio-rcar.c static void gpio_rcar_modify_bit(struct gpio_rcar_priv *p, int offs,
p                  78 drivers/gpio/gpio-rcar.c 	u32 tmp = gpio_rcar_read(p, offs);
p                  85 drivers/gpio/gpio-rcar.c 	gpio_rcar_write(p, offs, tmp);
p                  91 drivers/gpio/gpio-rcar.c 	struct gpio_rcar_priv *p = gpiochip_get_data(gc);
p                  93 drivers/gpio/gpio-rcar.c 	gpio_rcar_write(p, INTMSK, ~BIT(irqd_to_hwirq(d)));
p                  99 drivers/gpio/gpio-rcar.c 	struct gpio_rcar_priv *p = gpiochip_get_data(gc);
p                 101 drivers/gpio/gpio-rcar.c 	gpio_rcar_write(p, MSKCLR, BIT(irqd_to_hwirq(d)));
p                 104 drivers/gpio/gpio-rcar.c static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
p                 117 drivers/gpio/gpio-rcar.c 	spin_lock_irqsave(&p->lock, flags);
p                 120 drivers/gpio/gpio-rcar.c 	gpio_rcar_modify_bit(p, POSNEG, hwirq, !active_high_rising_edge);
p                 123 drivers/gpio/gpio-rcar.c 	gpio_rcar_modify_bit(p, EDGLEVEL, hwirq, !level_trigger);
p                 126 drivers/gpio/gpio-rcar.c 	if (p->has_both_edge_trigger)
p                 127 drivers/gpio/gpio-rcar.c 		gpio_rcar_modify_bit(p, BOTHEDGE, hwirq, both);
p                 130 drivers/gpio/gpio-rcar.c 	gpio_rcar_modify_bit(p, IOINTSEL, hwirq, true);
p                 134 drivers/gpio/gpio-rcar.c 		gpio_rcar_write(p, INTCLR, BIT(hwirq));
p                 136 drivers/gpio/gpio-rcar.c 	spin_unlock_irqrestore(&p->lock, flags);
p                 142 drivers/gpio/gpio-rcar.c 	struct gpio_rcar_priv *p = gpiochip_get_data(gc);
p                 145 drivers/gpio/gpio-rcar.c 	dev_dbg(p->dev, "sense irq = %d, type = %d\n", hwirq, type);
p                 149 drivers/gpio/gpio-rcar.c 		gpio_rcar_config_interrupt_input_mode(p, hwirq, true, true,
p                 153 drivers/gpio/gpio-rcar.c 		gpio_rcar_config_interrupt_input_mode(p, hwirq, false, true,
p                 157 drivers/gpio/gpio-rcar.c 		gpio_rcar_config_interrupt_input_mode(p, hwirq, true, false,
p                 161 drivers/gpio/gpio-rcar.c 		gpio_rcar_config_interrupt_input_mode(p, hwirq, false, false,
p                 165 drivers/gpio/gpio-rcar.c 		if (!p->has_both_edge_trigger)
p                 167 drivers/gpio/gpio-rcar.c 		gpio_rcar_config_interrupt_input_mode(p, hwirq, true, false,
p                 179 drivers/gpio/gpio-rcar.c 	struct gpio_rcar_priv *p = gpiochip_get_data(gc);
p                 182 drivers/gpio/gpio-rcar.c 	if (p->irq_parent) {
p                 183 drivers/gpio/gpio-rcar.c 		error = irq_set_irq_wake(p->irq_parent, on);
p                 185 drivers/gpio/gpio-rcar.c 			dev_dbg(p->dev, "irq %u doesn't support irq_set_wake\n",
p                 186 drivers/gpio/gpio-rcar.c 				p->irq_parent);
p                 187 drivers/gpio/gpio-rcar.c 			p->irq_parent = 0;
p                 192 drivers/gpio/gpio-rcar.c 		atomic_inc(&p->wakeup_path);
p                 194 drivers/gpio/gpio-rcar.c 		atomic_dec(&p->wakeup_path);
p                 201 drivers/gpio/gpio-rcar.c 	struct gpio_rcar_priv *p = dev_id;
p                 205 drivers/gpio/gpio-rcar.c 	while ((pending = gpio_rcar_read(p, INTDT) &
p                 206 drivers/gpio/gpio-rcar.c 			  gpio_rcar_read(p, INTMSK))) {
p                 208 drivers/gpio/gpio-rcar.c 		gpio_rcar_write(p, INTCLR, BIT(offset));
p                 209 drivers/gpio/gpio-rcar.c 		generic_handle_irq(irq_find_mapping(p->gpio_chip.irq.domain,
p                 221 drivers/gpio/gpio-rcar.c 	struct gpio_rcar_priv *p = gpiochip_get_data(chip);
p                 229 drivers/gpio/gpio-rcar.c 	spin_lock_irqsave(&p->lock, flags);
p                 232 drivers/gpio/gpio-rcar.c 	gpio_rcar_modify_bit(p, POSNEG, gpio, false);
p                 235 drivers/gpio/gpio-rcar.c 	gpio_rcar_modify_bit(p, IOINTSEL, gpio, false);
p                 238 drivers/gpio/gpio-rcar.c 	gpio_rcar_modify_bit(p, INOUTSEL, gpio, output);
p                 241 drivers/gpio/gpio-rcar.c 	if (p->has_outdtsel && output)
p                 242 drivers/gpio/gpio-rcar.c 		gpio_rcar_modify_bit(p, OUTDTSEL, gpio, false);
p                 244 drivers/gpio/gpio-rcar.c 	spin_unlock_irqrestore(&p->lock, flags);
p                 249 drivers/gpio/gpio-rcar.c 	struct gpio_rcar_priv *p = gpiochip_get_data(chip);
p                 252 drivers/gpio/gpio-rcar.c 	error = pm_runtime_get_sync(p->dev);
p                 258 drivers/gpio/gpio-rcar.c 		pm_runtime_put(p->dev);
p                 265 drivers/gpio/gpio-rcar.c 	struct gpio_rcar_priv *p = gpiochip_get_data(chip);
p                 275 drivers/gpio/gpio-rcar.c 	pm_runtime_put(p->dev);
p                 280 drivers/gpio/gpio-rcar.c 	struct gpio_rcar_priv *p = gpiochip_get_data(chip);
p                 282 drivers/gpio/gpio-rcar.c 	return !(gpio_rcar_read(p, INOUTSEL) & BIT(offset));
p                 305 drivers/gpio/gpio-rcar.c 	struct gpio_rcar_priv *p = gpiochip_get_data(chip);
p                 308 drivers/gpio/gpio-rcar.c 	spin_lock_irqsave(&p->lock, flags);
p                 309 drivers/gpio/gpio-rcar.c 	gpio_rcar_modify_bit(p, OUTDT, offset, value);
p                 310 drivers/gpio/gpio-rcar.c 	spin_unlock_irqrestore(&p->lock, flags);
p                 316 drivers/gpio/gpio-rcar.c 	struct gpio_rcar_priv *p = gpiochip_get_data(chip);
p                 327 drivers/gpio/gpio-rcar.c 	spin_lock_irqsave(&p->lock, flags);
p                 328 drivers/gpio/gpio-rcar.c 	val = gpio_rcar_read(p, OUTDT);
p                 331 drivers/gpio/gpio-rcar.c 	gpio_rcar_write(p, OUTDT, val);
p                 332 drivers/gpio/gpio-rcar.c 	spin_unlock_irqrestore(&p->lock, flags);
p                 407 drivers/gpio/gpio-rcar.c static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
p                 409 drivers/gpio/gpio-rcar.c 	struct device_node *np = p->dev->of_node;
p                 414 drivers/gpio/gpio-rcar.c 	info = of_device_get_match_data(p->dev);
p                 415 drivers/gpio/gpio-rcar.c 	p->has_outdtsel = info->has_outdtsel;
p                 416 drivers/gpio/gpio-rcar.c 	p->has_both_edge_trigger = info->has_both_edge_trigger;
p                 422 drivers/gpio/gpio-rcar.c 		dev_warn(p->dev, "Invalid number of gpio lines %u, using %u\n",
p                 432 drivers/gpio/gpio-rcar.c 	struct gpio_rcar_priv *p;
p                 441 drivers/gpio/gpio-rcar.c 	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
p                 442 drivers/gpio/gpio-rcar.c 	if (!p)
p                 445 drivers/gpio/gpio-rcar.c 	p->dev = dev;
p                 446 drivers/gpio/gpio-rcar.c 	spin_lock_init(&p->lock);
p                 449 drivers/gpio/gpio-rcar.c 	ret = gpio_rcar_parse_dt(p, &npins);
p                 453 drivers/gpio/gpio-rcar.c 	platform_set_drvdata(pdev, p);
p                 464 drivers/gpio/gpio-rcar.c 	p->base = devm_platform_ioremap_resource(pdev, 0);
p                 465 drivers/gpio/gpio-rcar.c 	if (IS_ERR(p->base)) {
p                 466 drivers/gpio/gpio-rcar.c 		ret = PTR_ERR(p->base);
p                 470 drivers/gpio/gpio-rcar.c 	gpio_chip = &p->gpio_chip;
p                 485 drivers/gpio/gpio-rcar.c 	irq_chip = &p->irq_chip;
p                 494 drivers/gpio/gpio-rcar.c 	ret = gpiochip_add_data(gpio_chip, p);
p                 507 drivers/gpio/gpio-rcar.c 	p->irq_parent = irq->start;
p                 509 drivers/gpio/gpio-rcar.c 			     IRQF_SHARED, name, p)) {
p                 528 drivers/gpio/gpio-rcar.c 	struct gpio_rcar_priv *p = platform_get_drvdata(pdev);
p                 530 drivers/gpio/gpio-rcar.c 	gpiochip_remove(&p->gpio_chip);
p                 539 drivers/gpio/gpio-rcar.c 	struct gpio_rcar_priv *p = dev_get_drvdata(dev);
p                 541 drivers/gpio/gpio-rcar.c 	p->bank_info.iointsel = gpio_rcar_read(p, IOINTSEL);
p                 542 drivers/gpio/gpio-rcar.c 	p->bank_info.inoutsel = gpio_rcar_read(p, INOUTSEL);
p                 543 drivers/gpio/gpio-rcar.c 	p->bank_info.outdt = gpio_rcar_read(p, OUTDT);
p                 544 drivers/gpio/gpio-rcar.c 	p->bank_info.intmsk = gpio_rcar_read(p, INTMSK);
p                 545 drivers/gpio/gpio-rcar.c 	p->bank_info.posneg = gpio_rcar_read(p, POSNEG);
p                 546 drivers/gpio/gpio-rcar.c 	p->bank_info.edglevel = gpio_rcar_read(p, EDGLEVEL);
p                 547 drivers/gpio/gpio-rcar.c 	if (p->has_both_edge_trigger)
p                 548 drivers/gpio/gpio-rcar.c 		p->bank_info.bothedge = gpio_rcar_read(p, BOTHEDGE);
p                 550 drivers/gpio/gpio-rcar.c 	if (atomic_read(&p->wakeup_path))
p                 558 drivers/gpio/gpio-rcar.c 	struct gpio_rcar_priv *p = dev_get_drvdata(dev);
p                 562 drivers/gpio/gpio-rcar.c 	for (offset = 0; offset < p->gpio_chip.ngpio; offset++) {
p                 563 drivers/gpio/gpio-rcar.c 		if (!gpiochip_line_is_valid(&p->gpio_chip, offset))
p                 568 drivers/gpio/gpio-rcar.c 		if (!(p->bank_info.iointsel & mask)) {
p                 569 drivers/gpio/gpio-rcar.c 			if (p->bank_info.inoutsel & mask)
p                 571 drivers/gpio/gpio-rcar.c 					&p->gpio_chip, offset,
p                 572 drivers/gpio/gpio-rcar.c 					!!(p->bank_info.outdt & mask));
p                 574 drivers/gpio/gpio-rcar.c 				gpio_rcar_direction_input(&p->gpio_chip,
p                 579 drivers/gpio/gpio-rcar.c 				p,
p                 581 drivers/gpio/gpio-rcar.c 				!(p->bank_info.posneg & mask),
p                 582 drivers/gpio/gpio-rcar.c 				!(p->bank_info.edglevel & mask),
p                 583 drivers/gpio/gpio-rcar.c 				!!(p->bank_info.bothedge & mask));
p                 585 drivers/gpio/gpio-rcar.c 			if (p->bank_info.intmsk & mask)
p                 586 drivers/gpio/gpio-rcar.c 				gpio_rcar_write(p, MSKCLR, mask);
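
The gpio-rcar.c helpers above wrap register access in gpio_rcar_read()/gpio_rcar_write() and do single-bit updates through gpio_rcar_modify_bit(); the set/clear step itself is not visible in the excerpt, but read-modify-write is the standard shape. A memory-backed sketch of that helper, where struct prv and its register array are invented stand-ins for the driver's ioremap()ed MMIO block:

#include <stdint.h>
#include <stdio.h>

struct prv {
	uint32_t regs[8];	/* in the driver this is MMIO behind p->base */
};

static uint32_t reg_read(struct prv *p, int offs)
{
	return p->regs[offs];		/* driver: ioread32(p->base + offs) */
}

static void reg_write(struct prv *p, int offs, uint32_t value)
{
	p->regs[offs] = value;		/* driver: iowrite32(value, ...) */
}

/* Read-modify-write of a single bit, mirroring gpio_rcar_modify_bit(). */
static void modify_bit(struct prv *p, int offs, int bit, int value)
{
	uint32_t tmp = reg_read(p, offs);

	if (value)
		tmp |= 1u << bit;
	else
		tmp &= ~(1u << bit);

	reg_write(p, offs, tmp);
}

int main(void)
{
	struct prv p = { { 0 } };

	modify_bit(&p, 0, 5, 1);
	modify_bit(&p, 0, 3, 1);
	modify_bit(&p, 0, 5, 0);
	printf("reg0 = %#x\n", p.regs[0]);	/* prints 0x8 */
	return 0;
}
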
p                 418 drivers/gpio/gpio-tegra.c 	unsigned int b, p;
p                 425 drivers/gpio/gpio-tegra.c 		for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
p                 426 drivers/gpio/gpio-tegra.c 			unsigned int gpio = (b << 5) | (p << 3);
p                 428 drivers/gpio/gpio-tegra.c 			tegra_gpio_writel(tgi, bank->cnf[p],
p                 432 drivers/gpio/gpio-tegra.c 				tegra_gpio_writel(tgi, bank->dbc_cnt[p],
p                 434 drivers/gpio/gpio-tegra.c 				tegra_gpio_writel(tgi, bank->dbc_enb[p],
p                 438 drivers/gpio/gpio-tegra.c 			tegra_gpio_writel(tgi, bank->out[p],
p                 440 drivers/gpio/gpio-tegra.c 			tegra_gpio_writel(tgi, bank->oe[p],
p                 442 drivers/gpio/gpio-tegra.c 			tegra_gpio_writel(tgi, bank->int_lvl[p],
p                 444 drivers/gpio/gpio-tegra.c 			tegra_gpio_writel(tgi, bank->int_enb[p],
p                 457 drivers/gpio/gpio-tegra.c 	unsigned int b, p;
p                 463 drivers/gpio/gpio-tegra.c 		for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
p                 464 drivers/gpio/gpio-tegra.c 			unsigned int gpio = (b << 5) | (p << 3);
p                 466 drivers/gpio/gpio-tegra.c 			bank->cnf[p] = tegra_gpio_readl(tgi,
p                 468 drivers/gpio/gpio-tegra.c 			bank->out[p] = tegra_gpio_readl(tgi,
p                 470 drivers/gpio/gpio-tegra.c 			bank->oe[p] = tegra_gpio_readl(tgi,
p                 473 drivers/gpio/gpio-tegra.c 				bank->dbc_enb[p] = tegra_gpio_readl(tgi,
p                 475 drivers/gpio/gpio-tegra.c 				bank->dbc_enb[p] = (bank->dbc_enb[p] << 8) |
p                 476 drivers/gpio/gpio-tegra.c 							bank->dbc_enb[p];
p                 479 drivers/gpio/gpio-tegra.c 			bank->int_enb[p] = tegra_gpio_readl(tgi,
p                 481 drivers/gpio/gpio-tegra.c 			bank->int_lvl[p] = tegra_gpio_readl(tgi,
p                 485 drivers/gpio/gpio-tegra.c 			tegra_gpio_writel(tgi, bank->wake_enb[p],
p                 325 drivers/gpio/gpio-wcove.c 	u8 p[2];
p                 327 drivers/gpio/gpio-wcove.c 	if (regmap_bulk_read(wg->regmap, IRQ_STATUS_BASE, p, 2)) {
p                 332 drivers/gpio/gpio-wcove.c 	pending = (p[0] & GPIO_IRQ0_MASK) | ((p[1] & GPIO_IRQ1_MASK) << 7);
p                 351 drivers/gpio/gpio-wcove.c 		if (regmap_bulk_read(wg->regmap, IRQ_STATUS_BASE, p, 2)) {
p                 356 drivers/gpio/gpio-wcove.c 		pending = (p[0] & GPIO_IRQ0_MASK) | ((p[1] & GPIO_IRQ1_MASK) << 7);
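
gpio-wcove.c reads two interrupt status bytes and merges them into one pending mask, taking the low seven lines from p[0] and the remainder from p[1] shifted up by seven. A sketch of that merge; the GPIO_IRQ0_MASK/GPIO_IRQ1_MASK values used here are assumptions for illustration, not the driver's actual constants:

#include <stdio.h>

#define GPIO_IRQ0_MASK	0x7f	/* assumed: lines 0..6 in the first byte   */
#define GPIO_IRQ1_MASK	0x3f	/* assumed: remaining lines in the second  */

int main(void)
{
	unsigned char p[2] = { 0x15, 0x02 };	/* pretend IRQ_STATUS registers */
	unsigned int pending;

	/* Same merge as the driver: low bits from p[0], the rest from p[1]. */
	pending = (p[0] & GPIO_IRQ0_MASK) | ((p[1] & GPIO_IRQ1_MASK) << 7);
	printf("pending = %#x\n", pending);	/* 0x115 for the values above */
	return 0;
}
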
p                 578 drivers/gpio/gpiolib-sysfs.c 	if (!gpio_class.p) {
p                 752 drivers/gpio/gpiolib-sysfs.c 	if (!gpio_class.p)
p                 361 drivers/gpio/gpiolib.c 	unsigned long *p;
p                 363 drivers/gpio/gpiolib.c 	p = bitmap_alloc(chip->ngpio, GFP_KERNEL);
p                 364 drivers/gpio/gpiolib.c 	if (!p)
p                 368 drivers/gpio/gpiolib.c 	bitmap_fill(p, chip->ngpio);
p                 370 drivers/gpio/gpiolib.c 	return p;
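
The gpiolib.c lines just above allocate a bitmap with one bit per GPIO line and then fill it. A plain-C analog using calloc()/memset() in place of bitmap_alloc()/bitmap_fill(); ngpio is an arbitrary demo value:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BITS_PER_LONG	 (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int ngpio = 42;	/* arbitrary chip size for the demo */
	unsigned long *p;

	/* bitmap_alloc(ngpio, GFP_KERNEL) analog: one bit per GPIO line. */
	p = calloc(BITS_TO_LONGS(ngpio), sizeof(*p));
	if (!p)
		return 1;

	/* bitmap_fill(p, ngpio) analog: mark every line. */
	memset(p, 0xff, BITS_TO_LONGS(ngpio) * sizeof(*p));

	printf("allocated %zu longs for %u bits\n",
	       (size_t)BITS_TO_LONGS(ngpio), ngpio);
	free(p);
	return 0;
}
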
p                 833 drivers/gpio/gpiolib.c static irqreturn_t lineevent_irq_thread(int irq, void *p)
p                 835 drivers/gpio/gpiolib.c 	struct lineevent_state *le = p;
p                 877 drivers/gpio/gpiolib.c static irqreturn_t lineevent_irq_handler(int irq, void *p)
p                 879 drivers/gpio/gpiolib.c 	struct lineevent_state *le = p;
p                4324 drivers/gpio/gpiolib.c 	struct gpiod_lookup *p;
p                4330 drivers/gpio/gpiolib.c 	for (p = &table->table[0]; p->chip_label; p++) {
p                4334 drivers/gpio/gpiolib.c 		if (p->idx != idx)
p                4338 drivers/gpio/gpiolib.c 		if (p->con_id && (!con_id || strcmp(p->con_id, con_id)))
p                4341 drivers/gpio/gpiolib.c 		chip = find_chip_by_name(p->chip_label);
p                4352 drivers/gpio/gpiolib.c 				 p->chip_label);
p                4356 drivers/gpio/gpiolib.c 		if (chip->ngpio <= p->chip_hwnum) {
p                4359 drivers/gpio/gpiolib.c 				idx, p->chip_hwnum, chip->ngpio - 1,
p                4364 drivers/gpio/gpiolib.c 		desc = gpiochip_get_desc(chip, p->chip_hwnum);
p                4365 drivers/gpio/gpiolib.c 		*flags = p->flags;
p                4376 drivers/gpio/gpiolib.c 	struct gpiod_lookup *p;
p                4383 drivers/gpio/gpiolib.c 	for (p = &table->table[0]; p->chip_label; p++) {
p                4384 drivers/gpio/gpiolib.c 		if ((con_id && p->con_id && !strcmp(con_id, p->con_id)) ||
p                4385 drivers/gpio/gpiolib.c 		    (!con_id && !p->con_id))
p                 353 drivers/gpio/sgpio-aspeed.c 	unsigned int i, p, girq;
p                 363 drivers/gpio/sgpio-aspeed.c 		for_each_set_bit(p, &reg, 32) {
p                 364 drivers/gpio/sgpio-aspeed.c 			girq = irq_find_mapping(gc->irq.domain, i * 32 + p);
p                 489 drivers/gpu/drm/amd/amdgpu/amdgpu.h static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
p                 492 drivers/gpu/drm/amd/amdgpu/amdgpu.h 	return p->job->ibs[ib_idx].ptr[idx];
p                 495 drivers/gpu/drm/amd/amdgpu/amdgpu.h static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
p                 499 drivers/gpu/drm/amd/amdgpu/amdgpu.h 	p->job->ibs[ib_idx].ptr[idx] = value;
p                 298 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 	struct amdgpu_vm_parser *p = param;
p                 300 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
p                 585 drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c 	const struct amdgpu_px_quirk *p = amdgpu_px_quirk_list;
p                 588 drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c 	while (p && p->chip_device != 0) {
p                 589 drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c 		if (pdev->vendor == p->chip_vendor &&
p                 590 drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c 		    pdev->device == p->chip_device &&
p                 591 drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c 		    pdev->subsystem_vendor == p->subsys_vendor &&
p                 592 drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c 		    pdev->subsystem_device == p->subsys_device) {
p                 593 drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c 			amdgpu_atpx_priv.quirks |= p->px_quirk_flags;
p                 596 drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c 		++p;
p                  43 drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c #define AMD_IS_VALID_VBIOS(p) ((p)[0] == 0x55 && (p)[1] == 0xAA)
p                  44 drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c #define AMD_VBIOS_LENGTH(p) ((p)[2] << 9)
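
The amdgpu_bios.c macros check the standard PCI option-ROM signature bytes 0x55 0xAA and derive the image length from byte 2, which counts 512-byte units. A small standalone check using the same macros; the sample ROM header is made up:

#include <stdio.h>
#include <stdint.h>

#define IS_VALID_VBIOS(p)  ((p)[0] == 0x55 && (p)[1] == 0xAA)
#define VBIOS_LENGTH(p)    ((p)[2] << 9)	/* byte 2 counts 512-byte blocks */

int main(void)
{
	uint8_t rom[3] = { 0x55, 0xAA, 0x80 };	/* hypothetical ROM header */

	if (IS_VALID_VBIOS(rom))
		printf("valid ROM, %u bytes\n", VBIOS_LENGTH(rom));	/* 65536 */
	else
		printf("no 0x55AA signature\n");
	return 0;
}
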
p                  39 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
p                  48 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	gobj = drm_gem_object_lookup(p->filp, data->handle);
p                  53 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	p->uf_entry.priority = 0;
p                  54 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	p->uf_entry.tv.bo = &bo->tbo;
p                  56 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	p->uf_entry.tv.num_shared = 2;
p                  80 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
p                  90 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
p                  91 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 				  &p->bo_list);
p                 105 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
p                 107 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
p                 123 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
p                 124 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	if (!p->ctx) {
p                 129 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	mutex_lock(&p->ctx->lock);
p                 132 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	if (atomic_read(&p->ctx->guilty) == 1) {
p                 145 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	p->nchunks = cs->in.num_chunks;
p                 146 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
p                 148 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	if (!p->chunks) {
p                 153 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	for (i = 0; i < p->nchunks; i++) {
p                 165 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->chunks[i].chunk_id = user_chunk.chunk_id;
p                 166 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->chunks[i].length_dw = user_chunk.length_dw;
p                 168 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		size = p->chunks[i].length_dw;
p                 171 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
p                 172 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		if (p->chunks[i].kdata == NULL) {
p                 178 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
p                 183 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		switch (p->chunks[i].chunk_id) {
p                 190 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
p                 195 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
p                 204 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
p                 209 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
p                 229 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
p                 233 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
p                 238 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	if (p->uf_entry.tv.bo)
p                 239 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->job->uf_addr = uf_offset;
p                 248 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	i = p->nchunks - 1;
p                 251 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		kvfree(p->chunks[i].kdata);
p                 252 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	kfree(p->chunks);
p                 253 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	p->chunks = NULL;
p                 254 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	p->nchunks = 0;
p                 398 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
p                 417 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	if (p->bytes_moved < p->bytes_moved_threshold) {
p                 424 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
p                 439 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	p->bytes_moved += ctx.bytes_moved;
p                 442 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->bytes_moved_vis += ctx.bytes_moved;
p                 453 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
p                 460 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	if (!p->evictable)
p                 463 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	for (;&p->evictable->tv.head != &p->validated;
p                 464 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	     p->evictable = list_prev_entry(p->evictable, tv.head)) {
p                 466 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		struct amdgpu_bo_list_entry *candidate = p->evictable;
p                 497 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->bytes_moved += ctx.bytes_moved;
p                 499 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			p->bytes_moved_vis += ctx.bytes_moved;
p                 504 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->evictable = list_prev_entry(p->evictable, tv.head);
p                 505 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		list_move(&candidate->tv.head, &p->validated);
p                 515 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	struct amdgpu_cs_parser *p = param;
p                 519 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		r = amdgpu_cs_bo_validate(p, bo);
p                 520 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
p                 525 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		r = amdgpu_cs_bo_validate(p, bo->shadow);
p                 530 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
p                 557 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		if (p->evictable == lobj)
p                 558 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			p->evictable = NULL;
p                 560 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		r = amdgpu_cs_validate(p, bo);
p                 570 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
p                 573 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
p                 582 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	INIT_LIST_HEAD(&p->validated);
p                 586 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		if (p->bo_list)
p                 590 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 				       &p->bo_list);
p                 593 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	} else if (!p->bo_list) {
p                 595 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
p                 596 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 					  &p->bo_list);
p                 602 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_bo_list_for_each_entry(e, p->bo_list)
p                 605 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_bo_list_get_list(p->bo_list, &p->validated);
p                 606 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	if (p->bo_list->first_userptr != p->bo_list->num_entries)
p                 607 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
p                 610 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
p                 612 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
p                 613 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		list_add(&p->uf_entry.tv.head, &p->validated);
p                 619 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
p                 648 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
p                 656 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
p                 657 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 					  &p->bytes_moved_vis_threshold);
p                 658 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	p->bytes_moved = 0;
p                 659 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	p->bytes_moved_vis = 0;
p                 660 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	p->evictable = list_last_entry(&p->validated,
p                 664 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
p                 665 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 				      amdgpu_cs_validate, p);
p                 671 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	r = amdgpu_cs_list_validate(p, &duplicates);
p                 675 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	r = amdgpu_cs_list_validate(p, &p->validated);
p                 679 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
p                 680 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 				     p->bytes_moved_vis);
p                 682 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	gds = p->bo_list->gds_obj;
p                 683 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	gws = p->bo_list->gws_obj;
p                 684 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	oa = p->bo_list->oa_obj;
p                 686 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
p                 696 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
p                 697 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
p                 700 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
p                 701 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
p                 704 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
p                 705 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
p                 708 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	if (!r && p->uf_entry.tv.bo) {
p                 709 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
p                 712 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
p                 717 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
p                 722 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
p                 727 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	list_for_each_entry(e, &p->validated, tv.head) {
p                 731 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
p                 784 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
p                 786 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
p                 787 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
p                 788 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	struct amdgpu_device *adev = p->adev;
p                 799 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
p                 808 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			chunk = &p->chunks[i];
p                 809 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			ib = &p->job->ibs[j];
p                 816 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
p                 841 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 				r = amdgpu_ring_parse_cs(ring, p, j);
p                 846 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 				r = amdgpu_ring_patch_cs_in_place(ring, p, j);
p                 856 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	if (!p->job->vm)
p                 857 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		return amdgpu_cs_sync_rings(p);
p                 868 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	r = amdgpu_sync_fence(adev, &p->job->sync,
p                 883 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
p                 888 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
p                 905 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
p                 918 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
p                 922 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
p                 926 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
p                 937 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	return amdgpu_cs_sync_rings(p);
p                1014 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
p                1017 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
p                1060 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
p                1068 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
p                1075 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
p                1082 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
p                1088 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
p                1099 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
p                1109 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
p                1120 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		r = amdgpu_syncobj_lookup_and_add_to_sync(p,
p                1131 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
p                1142 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	if (p->post_deps)
p                1145 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
p                1147 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	p->num_post_deps = 0;
p                1149 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	if (!p->post_deps)
p                1154 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->post_deps[i].syncobj =
p                1155 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			drm_syncobj_find(p->filp, deps[i].handle);
p                1156 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		if (!p->post_deps[i].syncobj)
p                1158 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->post_deps[i].chain = NULL;
p                1159 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->post_deps[i].point = 0;
p                1160 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->num_post_deps++;
p                1167 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
p                1178 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	if (p->post_deps)
p                1181 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
p                1183 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	p->num_post_deps = 0;
p                1185 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	if (!p->post_deps)
p                1189 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
p                1198 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		dep->syncobj = drm_syncobj_find(p->filp,
p                1205 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->num_post_deps++;
p                1212 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 				  struct amdgpu_cs_parser *p)
p                1216 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	for (i = 0; i < p->nchunks; ++i) {
p                1219 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		chunk = &p->chunks[i];
p                1224 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			r = amdgpu_cs_process_fence_dep(p, chunk);
p                1229 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
p                1234 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
p                1239 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
p                1244 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
p                1254 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
p                1258 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	for (i = 0; i < p->num_post_deps; ++i) {
p                1259 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		if (p->post_deps[i].chain && p->post_deps[i].point) {
p                1260 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			drm_syncobj_add_point(p->post_deps[i].syncobj,
p                1261 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 					      p->post_deps[i].chain,
p                1262 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 					      p->fence, p->post_deps[i].point);
p                1263 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			p->post_deps[i].chain = NULL;
p                1265 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
p                1266 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 						  p->fence);
p                1271 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
p                1274 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
p                1275 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	struct drm_sched_entity *entity = p->entity;
p                1283 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	job = p->job;
p                1284 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	p->job = NULL;
p                1286 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	r = drm_sched_job_init(&job->base, entity, p->filp);
p                1294 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_mn_lock(p->mn);
p                1299 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
p                1309 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	job->owner = p->filp;
p                1310 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	p->fence = dma_fence_get(&job->base.s_fence->finished);
p                1312 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
p                1313 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_cs_post_dependencies(p);
p                1316 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	    !p->ctx->preamble_presented) {
p                1318 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		p->ctx->preamble_presented = true;
p                1327 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
p                1334 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
p                1336 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
p                1337 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_mn_unlock(p->mn);
p                1343 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_mn_unlock(p->mn);
p                 137 drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c 	uint32_t *p = (uint32_t *)binary;
p                 146 drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c 		*p++ = RREG32_NO_KIQ(mmMM_DATA);
p                 225 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 	unsigned p;
p                 237 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
p                 238 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 	for (i = 0; i < pages; i++, p++) {
p                 240 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 		adev->gart.pages[p] = NULL;
p                 315 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 	unsigned t,p;
p                 326 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
p                 327 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 	for (i = 0; i < pages; i++, p++)
p                 328 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 		adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
p                 147 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	const char *p;
p                 154 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	p = amdgpu_disable_cu;
p                 157 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
p                 171 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 		next = strchr(p, ',');
p                 174 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 		p = next + 1;
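
amdgpu_gfx.c parses the amdgpu_disable_cu string as comma-separated se.sh.cu triples using sscanf() and strchr(). A standalone sketch of that loop with a made-up argument string:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *p = "0.0.4,1.0.7";	/* hypothetical se.sh.cu list */
	const char *next;

	for (;;) {
		unsigned int se, sh, cu;

		if (sscanf(p, "%u.%u.%u", &se, &sh, &cu) < 3) {
			fprintf(stderr, "malformed entry: %s\n", p);
			return 1;
		}
		printf("disable CU se=%u sh=%u cu=%u\n", se, sh, cu);

		next = strchr(p, ',');
		if (!next)
			break;
		p = next + 1;	/* continue after the comma, as the driver does */
	}
	return 0;
}
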
p                 126 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
p                 127 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	int (*patch_cs_in_place)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
p                 231 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
p                 232 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h #define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib)))
p                 143 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 	    TP_PROTO(struct amdgpu_cs_parser *p, int i),
p                 144 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 	    TP_ARGS(p, i),
p                 153 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			   __entry->bo_list = p->bo_list;
p                 154 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			   __entry->ring = to_amdgpu_ring(p->entity->rq->sched)->idx;
p                 155 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			   __entry->dw = p->job->ibs[i].length_dw;
p                 157 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 				to_amdgpu_ring(p->entity->rq->sched));
p                2138 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct drm_printer p = drm_seq_file_printer(m);
p                2140 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	man->func->debug(man, &p);
p                2264 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		loff_t p = *pos / PAGE_SIZE;
p                2270 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		if (p >= adev->gart.num_cpu_pages)
p                2273 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		page = adev->gart.pages[p];
p                2279 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 			kunmap(adev->gart.pages[p]);
p                2326 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		struct page *p;
p                2341 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		p = pfn_to_page(pfn);
p                2342 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		if (p->mapping != adev->mman.bdev.dev_mapping)
p                2345 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		ptr = kmap(p);
p                2347 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		kunmap(p);
p                2381 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		struct page *p;
p                2392 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		p = pfn_to_page(pfn);
p                2393 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		if (p->mapping != adev->mman.bdev.dev_mapping)
p                2396 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		ptr = kmap(p);
p                2398 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		kunmap(p);
p                 576 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
p                 587 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
p                 588 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
p                 598 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
p                 624 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
p                 635 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
p                 636 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
p                 639 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
p                 657 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
p                 658 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));
p                 673 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
p                 680 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
p                 681 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			if (p->adev->vce.filp[i] != p->filp) {
p                 691 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
p                 692 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			p->adev->vce.filp[i] = p->filp;
p                 693 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			p->adev->vce.img_size[i] = 0;
p                 709 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
p                 711 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
p                 722 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	p->job->vm = NULL;
p                 726 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
p                 727 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
p                 737 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
p                 738 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
p                 742 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 10,
p                 747 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 12,
p                 754 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
p                 761 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
p                 762 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
p                 769 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
p                 776 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
p                 781 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 8,
p                 792 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
p                 793 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
p                 797 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
p                 798 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			session_idx = amdgpu_vce_validate_handle(p, handle,
p                 804 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			size = &p->adev->vce.img_size[session_idx];
p                 808 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
p                 809 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
p                 824 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
p                 825 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
p                 840 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			switch (p->adev->asic_type) {
p                 854 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
p                 859 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
p                 870 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
p                 877 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
p                 878 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
p                 885 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
p                 892 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3,
p                 897 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 8,
p                 934 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			atomic_set(&p->adev->vce.handles[i], 0);
p                 945 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
p                 947 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
p                 956 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
p                 957 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
p                 967 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
p                 968 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			session_idx = amdgpu_vce_validate_handle(p, handle,
p                1016 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 		amdgpu_ib_free(p->adev, ib, NULL);
p                1024 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 			atomic_set(&p->adev->vce.handles[i], 0);
p                  67 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
p                  68 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
p                 646 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 			      int (*validate)(void *p, struct amdgpu_bo *bo),
p                 221 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h 	int (*prepare)(struct amdgpu_vm_update_params *p, void * owner,
p                 223 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h 	int (*update)(struct amdgpu_vm_update_params *p,
p                 226 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h 	int (*commit)(struct amdgpu_vm_update_params *p,
p                 357 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h 			      int (*callback)(void *p, struct amdgpu_bo *bo),
p                  47 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
p                  55 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c 	r = amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
p                  82 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
p                  95 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c 		value = p->pages_addr ?
p                  96 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c 			amdgpu_vm_map_gart(p->pages_addr, addr) :
p                  98 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c 		amdgpu_gmc_set_pte_pde(p->adev, (void *)(uintptr_t)pe,
p                 113 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
p                 118 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c 	amdgpu_asic_flush_hdp(p->adev, NULL);
p                  60 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
p                  63 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	struct amdgpu_bo *root = p->vm->root.base.bo;
p                  67 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
p                  71 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
p                  75 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
p                  80 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	p->num_dw_left = ndw;
p                  93 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
p                  96 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	struct amdgpu_bo *root = p->vm->root.base.bo;
p                  97 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	struct amdgpu_ib *ib = p->job->ibs;
p                 102 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
p                 106 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	WARN_ON(ib->length_dw > p->num_dw_left);
p                 107 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	r = amdgpu_job_submit(p->job, &p->vm->entity,
p                 119 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	amdgpu_job_free(p->job);
p                 134 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
p                 138 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	struct amdgpu_ib *ib = p->job->ibs;
p                 141 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	src += p->num_dw_left * 4;
p                 146 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
p                 163 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
p                 168 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	struct amdgpu_ib *ib = p->job->ibs;
p                 173 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 		amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
p                 176 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 		amdgpu_vm_set_pte_pde(p->adev, ib, pe, addr,
p                 195 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
p                 205 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 		ndw = p->num_dw_left;
p                 206 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 		ndw -= p->job->ibs->length_dw;
p                 209 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 			r = amdgpu_vm_sdma_commit(p, NULL);
p                 215 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 			if (p->pages_addr)
p                 220 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 			r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
p                 224 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 			p->num_dw_left = ndw;
p                 227 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 		if (!p->pages_addr) {
p                 230 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 				amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr,
p                 232 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 			amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
p                 238 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 		ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
p                 247 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 		p->num_dw_left -= nptes * 2;
p                 248 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 		pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
p                 250 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 			pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
p                 255 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 			amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes);
p                 256 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 		amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
p                 211 drivers/gpu/drm/amd/amdgpu/atombios_dp.c 	u8 p = 0;
p                 225 drivers/gpu/drm/amd/amdgpu/atombios_dp.c 		if (this_p > p)
p                 226 drivers/gpu/drm/amd/amdgpu/atombios_dp.c 			p = this_p;
p                 232 drivers/gpu/drm/amd/amdgpu/atombios_dp.c 	if (p >= DP_PRE_EMPHASIS_MAX)
p                 233 drivers/gpu/drm/amd/amdgpu/atombios_dp.c 		p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
p                 237 drivers/gpu/drm/amd/amdgpu/atombios_dp.c 		  pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
p                 240 drivers/gpu/drm/amd/amdgpu/atombios_dp.c 		train_set[lane] = v | p;
p                 116 drivers/gpu/drm/amd/amdgpu/atombios_i2c.c 	struct i2c_msg *p;
p                 121 drivers/gpu/drm/amd/amdgpu/atombios_i2c.c 	p = &msgs[0];
p                 122 drivers/gpu/drm/amd/amdgpu/atombios_i2c.c 	if ((num == 1) && (p->len == 0)) {
p                 124 drivers/gpu/drm/amd/amdgpu/atombios_i2c.c 						  p->addr, HW_I2C_WRITE,
p                 133 drivers/gpu/drm/amd/amdgpu/atombios_i2c.c 		p = &msgs[i];
p                 134 drivers/gpu/drm/amd/amdgpu/atombios_i2c.c 		remaining = p->len;
p                 137 drivers/gpu/drm/amd/amdgpu/atombios_i2c.c 		if (p->flags & I2C_M_RD) {
p                 150 drivers/gpu/drm/amd/amdgpu/atombios_i2c.c 							  p->addr, flags,
p                 151 drivers/gpu/drm/amd/amdgpu/atombios_i2c.c 							  &p->buf[buffer_offset], current_count);
p                3340 drivers/gpu/drm/amd/amdgpu/si_dpm.c 			    u32 *p, u32 *u)
p                3355 drivers/gpu/drm/amd/amdgpu/si_dpm.c 	*p = i_c / (1 << (2 * (*u)));
p                1260 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
p                1263 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
p                1264 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
p                1272 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
p                1274 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
p                1275 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		reg += p->adev->reg_offset[UVD_HWIP][1][1];
p                1277 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		amdgpu_set_ib_value(p, ib_idx, i, reg);
p                 136 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
p                 252 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
p                 277 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_lock(&p->mutex);
p                 279 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	pdd = kfd_bind_process_to_device(dev, p);
p                 286 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 			p->pasid,
p                 289 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id);
p                 308 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                 325 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                 329 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
p                 337 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 				p->pasid);
p                 339 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_lock(&p->mutex);
p                 341 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	retval = pqm_destroy_queue(&p->pqm, args->queue_id);
p                 343 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                 347 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
p                 382 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 			args->queue_id, p->pasid);
p                 384 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_lock(&p->mutex);
p                 386 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);
p                 388 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                 393 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
p                 436 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_lock(&p->mutex);
p                 438 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	retval = pqm_set_cu_mask(&p->pqm, args->queue_id, &properties);
p                 440 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                 449 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 					  struct kfd_process *p, void *data)
p                 454 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_lock(&p->mutex);
p                 456 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	r = pqm_get_wave_state(&p->pqm, args->queue_id,
p                 461 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                 467 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 					struct kfd_process *p, void *data)
p                 489 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_lock(&p->mutex);
p                 491 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	pdd = kfd_bind_process_to_device(dev, p);
p                 513 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                 519 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 					struct kfd_process *p, void *data)
p                 530 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_lock(&p->mutex);
p                 532 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	pdd = kfd_bind_process_to_device(dev, p);
p                 545 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                 551 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 				struct kfd_process *p, void *data)
p                 569 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_lock(&p->mutex);
p                 576 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	pdd = kfd_bind_process_to_device(dev, p);
p                 586 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 			status = kfd_dbgmgr_register(dbgmgr_ptr, p);
p                 599 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                 605 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 				struct kfd_process *p, void *data)
p                 622 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	status = kfd_dbgmgr_unregister(dev->dbgmgr, p);
p                 643 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 					struct kfd_process *p, void *data)
p                 680 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	aw_info.process = p;
p                 743 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 					struct kfd_process *p, void *data)
p                 793 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	wac_info.process = p;
p                 827 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 				struct kfd_process *p, void *data)
p                 852 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 				struct kfd_process *p, void *data)
p                 858 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
p                 862 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_lock(&p->mutex);
p                 865 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	if (kfd_has_process_device_data(p)) {
p                 867 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		pdd = kfd_get_first_process_device_data(p);
p                 898 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 			pdd = kfd_get_next_process_device_data(p, pdd);
p                 902 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                 908 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 				struct kfd_process *p, void *data)
p                 916 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
p                 922 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		mutex_lock(&p->mutex);
p                 924 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		if (!kfd_has_process_device_data(p))
p                 928 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		pdd = kfd_get_first_process_device_data(p);
p                 931 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 			pdd = kfd_get_next_process_device_data(p, pdd);
p                 946 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_lock(&p->mutex);
p                 948 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	if (!kfd_has_process_device_data(p)) {
p                 955 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	pdd = kfd_get_first_process_device_data(p);
p                 981 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		pdd = kfd_get_next_process_device_data(p, pdd);
p                 983 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                 994 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                 998 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
p                1014 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		if (p->signal_page) {
p                1025 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		mutex_lock(&p->mutex);
p                1026 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		pdd = kfd_bind_process_to_device(kfd, p);
p                1040 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		mutex_unlock(&p->mutex);
p                1049 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		err = kfd_event_page_set(p, kern_addr, size);
p                1056 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	err = kfd_event_create(filp, p, args->event_type,
p                1065 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                1069 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
p                1074 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	return kfd_event_destroy(p, args->event_id);
p                1077 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
p                1082 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	return kfd_set_event(p, args->event_id);
p                1085 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
p                1090 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	return kfd_reset_event(p, args->event_id);
p                1093 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
p                1099 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	err = kfd_wait_on_events(p, args->num_events,
p                1107 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 					struct kfd_process *p, void *data)
p                1118 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_lock(&p->mutex);
p                1120 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	pdd = kfd_bind_process_to_device(dev, p);
p                1128 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                1138 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                1143 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		struct kfd_process *p, void *data)
p                1184 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
p                1201 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_lock(&p->mutex);
p                1203 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	pdd = kfd_get_process_device_data(dev, p);
p                1218 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                1223 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                1248 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 					struct kfd_process *p, void *data)
p                1276 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		offset = kfd_get_process_doorbells(dev, p);
p                1285 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_lock(&p->mutex);
p                1287 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	pdd = kfd_bind_process_to_device(dev, p);
p                1307 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                1325 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                1330 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 					struct kfd_process *p, void *data)
p                1342 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_lock(&p->mutex);
p                1344 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	pdd = kfd_get_process_device_data(dev, p);
p                1369 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                1374 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 					struct kfd_process *p, void *data)
p                1410 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_lock(&p->mutex);
p                1412 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	pdd = kfd_bind_process_to_device(dev, p);
p                1434 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		peer_pdd = kfd_bind_process_to_device(peer, p);
p                1449 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                1462 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		peer_pdd = kfd_get_process_device_data(peer, p);
p                1475 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                1484 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 					struct kfd_process *p, void *data)
p                1519 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_lock(&p->mutex);
p                1521 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	pdd = kfd_get_process_device_data(dev, p);
p                1541 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		peer_pdd = kfd_get_process_device_data(peer, p);
p                1557 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                1564 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                1571 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		struct kfd_process *p, void *data)
p                1626 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 				   struct kfd_process *p, void *data)
p                1645 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_lock(&p->mutex);
p                1647 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	pdd = kfd_bind_process_to_device(dev, p);
p                1666 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                1675 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	mutex_unlock(&p->mutex);
p                 760 drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
p                 788 drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c 					(dev->kgd, vmid) == p->pasid) {
p                 790 drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c 						vmid, p->pasid);
p                 797 drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c 		pr_err("Didn't find vmid for pasid %d\n", p->pasid);
p                 802 drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c 	pdd = kfd_get_process_device_data(dev, p);
p                  96 drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
p                 105 drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c 	pmgr->pasid = p->pasid;
p                 108 drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c 	pmgr->dbgdev->pqm = &p->pqm;
p                 116 drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
p                 119 drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c 	if (pmgr->pasid != p->pasid) {
p                 121 drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c 				p->pasid);
p                 287 drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p);
p                 288 drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p);
p                 880 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	struct kfd_process *p;
p                 887 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	p = kfd_lookup_process_by_mm(mm);
p                 888 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	if (!p)
p                 891 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	r = kfd_process_evict_queues(p);
p                 893 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	kfd_unref_process(p);
p                 899 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	struct kfd_process *p;
p                 906 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	p = kfd_lookup_process_by_mm(mm);
p                 907 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	if (!p)
p                 910 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	r = kfd_process_restore_queues(p);
p                 912 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	kfd_unref_process(p);
p                 927 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	struct kfd_process *p;
p                 937 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	p = kfd_lookup_process_by_mm(mm);
p                 938 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	if (!p)
p                 941 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	if (fence->seqno == p->last_eviction_seqno)
p                 944 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	p->last_eviction_seqno = fence->seqno;
p                 949 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	active_time = get_jiffies_64() - p->last_restore_timestamp;
p                 958 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	schedule_delayed_work(&p->eviction_work, delay_jiffies);
p                 960 drivers/gpu/drm/amd/amdkfd/kfd_device.c 	kfd_unref_process(p);
p                1839 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
p                1842 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 	if (!p)
p                1844 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 	pdd = kfd_get_process_device_data(dqm->dev, p);
p                1847 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 	kfd_unref_process(p);
p                  64 drivers/gpu/drm/amd/amdkfd/kfd_events.c static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
p                  85 drivers/gpu/drm/amd/amdkfd/kfd_events.c 			page, p);
p                  94 drivers/gpu/drm/amd/amdkfd/kfd_events.c static int allocate_event_notification_slot(struct kfd_process *p,
p                  99 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	if (!p->signal_page) {
p                 100 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		p->signal_page = allocate_signal_page(p);
p                 101 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		if (!p->signal_page)
p                 104 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		p->signal_mapped_size = 256*8;
p                 113 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
p                 119 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT;
p                 128 drivers/gpu/drm/amd/amdkfd/kfd_events.c static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
p                 130 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	return idr_find(&p->event_idr, id);
p                 151 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	struct kfd_process *p, uint32_t id, uint32_t bits)
p                 155 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	if (!p->signal_page || id >= KFD_SIGNAL_EVENT_LIMIT)
p                 162 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
p                 165 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		return idr_find(&p->event_idr, id);
p                 172 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
p                 175 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		ev = idr_find(&p->event_idr, id);
p                 182 drivers/gpu/drm/amd/amdkfd/kfd_events.c 				struct kfd_process *p,
p                 187 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	if (p->signal_mapped_size &&
p                 188 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	    p->signal_event_count == p->signal_mapped_size / 8) {
p                 189 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		if (!p->signal_event_limit_reached) {
p                 191 drivers/gpu/drm/amd/amdkfd/kfd_events.c 			p->signal_event_limit_reached = true;
p                 196 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	ret = allocate_event_notification_slot(p, ev);
p                 202 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	p->signal_event_count++;
p                 204 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	ev->user_signal_address = &p->signal_page->user_address[ev->event_id];
p                 206 drivers/gpu/drm/amd/amdkfd/kfd_events.c 			p->signal_event_count, ev->event_id,
p                 212 drivers/gpu/drm/amd/amdkfd/kfd_events.c static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
p                 219 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	int id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
p                 230 drivers/gpu/drm/amd/amdkfd/kfd_events.c void kfd_event_init_process(struct kfd_process *p)
p                 232 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mutex_init(&p->event_mutex);
p                 233 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	idr_init(&p->event_idr);
p                 234 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	p->signal_page = NULL;
p                 235 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	p->signal_event_count = 0;
p                 238 drivers/gpu/drm/amd/amdkfd/kfd_events.c static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
p                 249 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		p->signal_event_count--;
p                 251 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	idr_remove(&p->event_idr, ev->event_id);
p                 255 drivers/gpu/drm/amd/amdkfd/kfd_events.c static void destroy_events(struct kfd_process *p)
p                 260 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	idr_for_each_entry(&p->event_idr, ev, id)
p                 261 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		destroy_event(p, ev);
p                 262 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	idr_destroy(&p->event_idr);
p                 269 drivers/gpu/drm/amd/amdkfd/kfd_events.c static void shutdown_signal_page(struct kfd_process *p)
p                 271 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	struct kfd_signal_page *page = p->signal_page;
p                 281 drivers/gpu/drm/amd/amdkfd/kfd_events.c void kfd_event_free_process(struct kfd_process *p)
p                 283 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	destroy_events(p);
p                 284 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	shutdown_signal_page(p);
p                 298 drivers/gpu/drm/amd/amdkfd/kfd_events.c int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
p                 303 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	if (p->signal_page)
p                 316 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	p->signal_page = page;
p                 317 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	p->signal_mapped_size = size;
p                 322 drivers/gpu/drm/amd/amdkfd/kfd_events.c int kfd_event_create(struct file *devkfd, struct kfd_process *p,
p                 341 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mutex_lock(&p->event_mutex);
p                 346 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		ret = create_signal_event(devkfd, p, ev);
p                 354 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		ret = create_other_event(p, ev);
p                 365 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mutex_unlock(&p->event_mutex);
p                 371 drivers/gpu/drm/amd/amdkfd/kfd_events.c int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
p                 376 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mutex_lock(&p->event_mutex);
p                 378 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	ev = lookup_event_by_id(p, event_id);
p                 381 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		destroy_event(p, ev);
p                 385 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mutex_unlock(&p->event_mutex);
p                 407 drivers/gpu/drm/amd/amdkfd/kfd_events.c int kfd_set_event(struct kfd_process *p, uint32_t event_id)
p                 412 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mutex_lock(&p->event_mutex);
p                 414 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	ev = lookup_event_by_id(p, event_id);
p                 421 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mutex_unlock(&p->event_mutex);
p                 431 drivers/gpu/drm/amd/amdkfd/kfd_events.c int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
p                 436 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mutex_lock(&p->event_mutex);
p                 438 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	ev = lookup_event_by_id(p, event_id);
p                 445 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mutex_unlock(&p->event_mutex);
p                 450 drivers/gpu/drm/amd/amdkfd/kfd_events.c static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
p                 452 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	page_slots(p->signal_page)[ev->event_id] = UNSIGNALED_EVENT_SLOT;
p                 455 drivers/gpu/drm/amd/amdkfd/kfd_events.c static void set_event_from_interrupt(struct kfd_process *p,
p                 459 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		acknowledge_signal(p, ev);
p                 474 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
p                 476 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	if (!p)
p                 479 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mutex_lock(&p->event_mutex);
p                 482 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		ev = lookup_signaled_event_by_partial_id(p, partial_id,
p                 485 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		set_event_from_interrupt(p, ev);
p                 486 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	} else if (p->signal_page) {
p                 492 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		uint64_t *slots = page_slots(p->signal_page);
p                 499 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT / 64) {
p                 503 drivers/gpu/drm/amd/amdkfd/kfd_events.c 			idr_for_each_entry(&p->event_idr, ev, id) {
p                 508 drivers/gpu/drm/amd/amdkfd/kfd_events.c 					set_event_from_interrupt(p, ev);
p                 517 drivers/gpu/drm/amd/amdkfd/kfd_events.c 					ev = lookup_event_by_id(p, id);
p                 518 drivers/gpu/drm/amd/amdkfd/kfd_events.c 					set_event_from_interrupt(p, ev);
p                 523 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mutex_unlock(&p->event_mutex);
p                 524 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	kfd_unref_process(p);
p                 544 drivers/gpu/drm/amd/amdkfd/kfd_events.c static int init_event_waiter_get_status(struct kfd_process *p,
p                 548 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	struct kfd_event *ev = lookup_event_by_id(p, event_id);
p                 665 drivers/gpu/drm/amd/amdkfd/kfd_events.c int kfd_wait_on_events(struct kfd_process *p,
p                 684 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mutex_lock(&p->event_mutex);
p                 695 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		ret = init_event_waiter_get_status(p, &event_waiters[i],
p                 718 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mutex_unlock(&p->event_mutex);
p                 769 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mutex_lock(&p->event_mutex);
p                 772 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mutex_unlock(&p->event_mutex);
p                 782 drivers/gpu/drm/amd/amdkfd/kfd_events.c int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
p                 795 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	page = p->signal_page;
p                 822 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		p->signal_mapped_size = vma->vm_end - vma->vm_start;
p                 831 drivers/gpu/drm/amd/amdkfd/kfd_events.c static void lookup_events_by_type_and_signal(struct kfd_process *p,
p                 842 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	idr_for_each_entry_continue(&p->event_idr, ev, id)
p                 856 drivers/gpu/drm/amd/amdkfd/kfd_events.c 				p->lead_thread->pid);
p                 857 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		send_sig(SIGSEGV, p->lead_thread, 0);
p                 865 drivers/gpu/drm/amd/amdkfd/kfd_events.c 					p->lead_thread->pid);
p                 866 drivers/gpu/drm/amd/amdkfd/kfd_events.c 			send_sig(SIGTERM, p->lead_thread, 0);
p                 870 drivers/gpu/drm/amd/amdkfd/kfd_events.c 				p->lead_thread->pid);
p                 888 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
p                 891 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	if (!p)
p                 897 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mm = get_task_mm(p->lead_thread);
p                 899 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		kfd_unref_process(p);
p                 940 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		mutex_lock(&p->event_mutex);
p                 943 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
p                 946 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		mutex_unlock(&p->event_mutex);
p                 949 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	kfd_unref_process(p);
p                 960 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
p                 962 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	if (!p)
p                 965 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mutex_lock(&p->event_mutex);
p                 968 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);
p                 970 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mutex_unlock(&p->event_mutex);
p                 971 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	kfd_unref_process(p);
p                 979 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
p                 982 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	if (!p)
p                 998 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mutex_lock(&p->event_mutex);
p                1001 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	idr_for_each_entry_continue(&p->event_idr, ev, id)
p                1007 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	mutex_unlock(&p->event_mutex);
p                1008 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	kfd_unref_process(p);
p                1015 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	struct kfd_process *p;
p                1035 drivers/gpu/drm/amd/amdkfd/kfd_events.c 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
p                1036 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		mutex_lock(&p->event_mutex);
p                1038 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		idr_for_each_entry_continue(&p->event_idr, ev, id) {
p                1049 drivers/gpu/drm/amd/amdkfd/kfd_events.c 		mutex_unlock(&p->event_mutex);
p                 112 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 	struct kfd_process *p = pdd->process;
p                 123 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 	err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
p                 135 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c void kfd_iommu_unbind_process(struct kfd_process *p)
p                 139 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
p                 141 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 			amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
p                 148 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 	struct kfd_process *p;
p                 159 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 	p = kfd_lookup_process_by_pasid(pasid);
p                 160 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 	if (!p)
p                 167 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 	if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
p                 168 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 		if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
p                 176 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 	mutex_lock(&p->mutex);
p                 178 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 	pdd = kfd_get_process_device_data(dev, p);
p                 185 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 	mutex_unlock(&p->mutex);
p                 187 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 	kfd_unref_process(p);
p                 220 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 	struct kfd_process *p;
p                 226 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
p                 227 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 		mutex_lock(&p->mutex);
p                 228 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 		pdd = kfd_get_process_device_data(kfd, p);
p                 231 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 			mutex_unlock(&p->mutex);
p                 235 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 		err = amd_iommu_bind_pasid(kfd->pdev, p->pasid,
p                 236 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 				p->lead_thread);
p                 239 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 					p->pasid);
p                 240 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 			mutex_unlock(&p->mutex);
p                 245 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 		mutex_unlock(&p->mutex);
p                 261 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 	struct kfd_process *p;
p                 266 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
p                 267 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 		mutex_lock(&p->mutex);
p                 268 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 		pdd = kfd_get_process_device_data(kfd, p);
p                 271 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 			mutex_unlock(&p->mutex);
p                 277 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c 		mutex_unlock(&p->mutex);
p                  34 drivers/gpu/drm/amd/amdkfd/kfd_iommu.h void kfd_iommu_unbind_process(struct kfd_process *p);
p                  57 drivers/gpu/drm/amd/amdkfd/kfd_iommu.h static inline void kfd_iommu_unbind_process(struct kfd_process *p)
p                  78 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h 				struct queue_properties *p,
p                 166 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 		    uint32_t queue_id, struct queue_properties *p,
p                 170 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
p                 171 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 	uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1);
p                 174 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 					  (uint32_t __user *)p->write_ptr,
p                 180 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 			 struct queue_properties *p, struct mm_struct *mms)
p                 183 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c 					       (uint32_t __user *)p->write_ptr,
p                 163 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 			struct queue_properties *p, struct mm_struct *mms)
p                 167 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
p                 170 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 					  (uint32_t __user *)p->write_ptr,
p                 340 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 		struct queue_properties *p, struct mm_struct *mms)
p                 343 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 					       (uint32_t __user *)p->write_ptr,
p                 184 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 			struct queue_properties *p, struct mm_struct *mms)
p                 187 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
p                 190 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 					  (uint32_t __user *)p->write_ptr,
p                 358 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 		struct queue_properties *p, struct mm_struct *mms)
p                 361 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 					       (uint32_t __user *)p->write_ptr,
p                 158 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 			struct queue_properties *p, struct mm_struct *mms)
p                 161 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
p                 162 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 	uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1);
p                 165 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 					  (uint32_t __user *)p->write_ptr,
p                 341 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 		struct queue_properties *p, struct mm_struct *mms)
p                 344 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c 					       (uint32_t __user *)p->write_ptr,
p                 748 drivers/gpu/drm/amd/amdkfd/kfd_priv.h typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
p                 766 drivers/gpu/drm/amd/amdkfd/kfd_priv.h void kfd_unref_process(struct kfd_process *p);
p                 767 drivers/gpu/drm/amd/amdkfd/kfd_priv.h int kfd_process_evict_queues(struct kfd_process *p);
p                 768 drivers/gpu/drm/amd/amdkfd/kfd_priv.h int kfd_process_restore_queues(struct kfd_process *p);
p                 775 drivers/gpu/drm/amd/amdkfd/kfd_priv.h 						struct kfd_process *p);
p                 777 drivers/gpu/drm/amd/amdkfd/kfd_priv.h 							struct kfd_process *p);
p                 779 drivers/gpu/drm/amd/amdkfd/kfd_priv.h 							struct kfd_process *p);
p                 787 drivers/gpu/drm/amd/amdkfd/kfd_priv.h void *kfd_process_device_translate_handle(struct kfd_process_device *p,
p                 794 drivers/gpu/drm/amd/amdkfd/kfd_priv.h 							struct kfd_process *p);
p                 796 drivers/gpu/drm/amd/amdkfd/kfd_priv.h 						struct kfd_process *p,
p                 798 drivers/gpu/drm/amd/amdkfd/kfd_priv.h bool kfd_has_process_device_data(struct kfd_process *p);
p                 899 drivers/gpu/drm/amd/amdkfd/kfd_priv.h void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
p                 900 drivers/gpu/drm/amd/amdkfd/kfd_priv.h int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
p                 909 drivers/gpu/drm/amd/amdkfd/kfd_priv.h 			struct queue_properties *p);
p                 911 drivers/gpu/drm/amd/amdkfd/kfd_priv.h 			struct queue_properties *p);
p                1005 drivers/gpu/drm/amd/amdkfd/kfd_priv.h void kfd_event_init_process(struct kfd_process *p);
p                1006 drivers/gpu/drm/amd/amdkfd/kfd_priv.h void kfd_event_free_process(struct kfd_process *p);
p                1008 drivers/gpu/drm/amd/amdkfd/kfd_priv.h int kfd_wait_on_events(struct kfd_process *p,
p                1018 drivers/gpu/drm/amd/amdkfd/kfd_priv.h int kfd_set_event(struct kfd_process *p, uint32_t event_id);
p                1019 drivers/gpu/drm/amd/amdkfd/kfd_priv.h int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
p                1020 drivers/gpu/drm/amd/amdkfd/kfd_priv.h int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
p                1022 drivers/gpu/drm/amd/amdkfd/kfd_priv.h int kfd_event_create(struct file *devkfd, struct kfd_process *p,
p                1026 drivers/gpu/drm/amd/amdkfd/kfd_priv.h int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
p                1035 drivers/gpu/drm/amd/amdkfd/kfd_priv.h int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
p                  66 drivers/gpu/drm/amd/amdkfd/kfd_process.c static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);
p                  83 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		struct kfd_process *p = container_of(attr, struct kfd_process,
p                  85 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		val = p->pasid;
p                 364 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	struct kfd_process *p;
p                 368 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	p = find_process_by_mm(thread->mm);
p                 371 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	return p;
p                 374 drivers/gpu/drm/amd/amdkfd/kfd_process.c void kfd_unref_process(struct kfd_process *p)
p                 376 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	kref_put(&p->ref, kfd_process_ref_release);
p                 381 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	struct kfd_process *p = pdd->process;
p                 392 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		list_for_each_entry(peer_pdd, &p->per_device_data,
p                 405 drivers/gpu/drm/amd/amdkfd/kfd_process.c static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
p                 409 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
p                 413 drivers/gpu/drm/amd/amdkfd/kfd_process.c static void kfd_process_destroy_pdds(struct kfd_process *p)
p                 417 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
p                 420 drivers/gpu/drm/amd/amdkfd/kfd_process.c 				pdd->dev->id, p->pasid);
p                 451 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	struct kfd_process *p = container_of(work, struct kfd_process,
p                 455 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	if (p->kobj) {
p                 456 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		sysfs_remove_file(p->kobj, &p->attr_pasid);
p                 457 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		kobject_del(p->kobj);
p                 458 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		kobject_put(p->kobj);
p                 459 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		p->kobj = NULL;
p                 462 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	kfd_iommu_unbind_process(p);
p                 464 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	kfd_process_free_outstanding_kfd_bos(p);
p                 466 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	kfd_process_destroy_pdds(p);
p                 467 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	dma_fence_put(p->ef);
p                 469 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	kfd_event_free_process(p);
p                 471 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	kfd_pasid_free(p->pasid);
p                 472 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	kfd_free_process_doorbells(p);
p                 474 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	mutex_destroy(&p->mutex);
p                 476 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	put_task_struct(p->lead_thread);
p                 478 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	kfree(p);
p                 483 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	struct kfd_process *p = container_of(ref, struct kfd_process, ref);
p                 485 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	INIT_WORK(&p->release_work, kfd_process_wq_release);
p                 486 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	queue_work(kfd_process_wq, &p->release_work);
p                 497 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	struct kfd_process *p;
p                 504 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	p = container_of(mn, struct kfd_process, mmu_notifier);
p                 505 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	if (WARN_ON(p->mm != mm))
p                 509 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	hash_del_rcu(&p->kfd_processes);
p                 513 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	cancel_delayed_work_sync(&p->eviction_work);
p                 514 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	cancel_delayed_work_sync(&p->restore_work);
p                 516 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	mutex_lock(&p->mutex);
p                 522 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
p                 526 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
p                 527 drivers/gpu/drm/amd/amdkfd/kfd_process.c 			if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
p                 535 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	kfd_process_dequeue_from_all_devices(p);
p                 536 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	pqm_uninit(&p->pqm);
p                 539 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	p->mm = NULL;
p                 541 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	mutex_unlock(&p->mutex);
p                 543 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	mmu_notifier_put(&p->mmu_notifier);
p                 551 drivers/gpu/drm/amd/amdkfd/kfd_process.c static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
p                 556 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
p                 716 drivers/gpu/drm/amd/amdkfd/kfd_process.c 							struct kfd_process *p)
p                 720 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
p                 728 drivers/gpu/drm/amd/amdkfd/kfd_process.c 							struct kfd_process *p)
p                 746 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	pdd->qpd.pqm = &p->pqm;
p                 748 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	pdd->process = p;
p                 751 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	list_add(&pdd->per_device_list, &p->per_device_data);
p                 776 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	struct kfd_process *p;
p                 783 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	p = pdd->process;
p                 788 drivers/gpu/drm/amd/amdkfd/kfd_process.c 			dev->kgd, drm_file, p->pasid,
p                 789 drivers/gpu/drm/amd/amdkfd/kfd_process.c 			&pdd->vm, &p->kgd_process_info, &p->ef);
p                 791 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
p                 792 drivers/gpu/drm/amd/amdkfd/kfd_process.c 			&pdd->vm, &p->kgd_process_info, &p->ef);
p                 829 drivers/gpu/drm/amd/amdkfd/kfd_process.c 							struct kfd_process *p)
p                 834 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	pdd = kfd_get_process_device_data(dev, p);
p                 852 drivers/gpu/drm/amd/amdkfd/kfd_process.c 						struct kfd_process *p)
p                 854 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	return list_first_entry(&p->per_device_data,
p                 860 drivers/gpu/drm/amd/amdkfd/kfd_process.c 						struct kfd_process *p,
p                 863 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
p                 868 drivers/gpu/drm/amd/amdkfd/kfd_process.c bool kfd_has_process_device_data(struct kfd_process *p)
p                 870 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	return !(list_empty(&p->per_device_data));
p                 907 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	struct kfd_process *p, *ret_p = NULL;
p                 912 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
p                 913 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		if (p->pasid == pasid) {
p                 914 drivers/gpu/drm/amd/amdkfd/kfd_process.c 			kref_get(&p->ref);
p                 915 drivers/gpu/drm/amd/amdkfd/kfd_process.c 			ret_p = p;
p                 928 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	struct kfd_process *p;
p                 932 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	p = find_process_by_mm(mm);
p                 933 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	if (p)
p                 934 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		kref_get(&p->ref);
p                 938 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	return p;
p                 946 drivers/gpu/drm/amd/amdkfd/kfd_process.c int kfd_process_evict_queues(struct kfd_process *p)
p                 952 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
p                 968 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
p                 982 drivers/gpu/drm/amd/amdkfd/kfd_process.c int kfd_process_restore_queues(struct kfd_process *p)
p                 987 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
p                1003 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	struct kfd_process *p;
p                1011 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	p = container_of(dwork, struct kfd_process, eviction_work);
p                1012 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
p                1021 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	flush_delayed_work(&p->restore_work);
p                1023 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	pr_debug("Started evicting pasid %d\n", p->pasid);
p                1024 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	ret = kfd_process_evict_queues(p);
p                1026 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		dma_fence_signal(p->ef);
p                1027 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		dma_fence_put(p->ef);
p                1028 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		p->ef = NULL;
p                1029 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		queue_delayed_work(kfd_restore_wq, &p->restore_work,
p                1032 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		pr_debug("Finished evicting pasid %d\n", p->pasid);
p                1034 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		pr_err("Failed to evict queues of pasid %d\n", p->pasid);
p                1040 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	struct kfd_process *p;
p                1048 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	p = container_of(dwork, struct kfd_process, restore_work);
p                1049 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	pr_debug("Started restoring pasid %d\n", p->pasid);
p                1061 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	p->last_restore_timestamp = get_jiffies_64();
p                1062 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
p                1063 drivers/gpu/drm/amd/amdkfd/kfd_process.c 						     &p->ef);
p                1066 drivers/gpu/drm/amd/amdkfd/kfd_process.c 			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
p                1067 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
p                1073 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	ret = kfd_process_restore_queues(p);
p                1075 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		pr_debug("Finished restoring pasid %d\n", p->pasid);
p                1077 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		pr_err("Failed to restore queues of pasid %d\n", p->pasid);
p                1082 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	struct kfd_process *p;
p                1086 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
p                1087 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		cancel_delayed_work_sync(&p->eviction_work);
p                1088 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		cancel_delayed_work_sync(&p->restore_work);
p                1090 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		if (kfd_process_evict_queues(p))
p                1091 drivers/gpu/drm/amd/amdkfd/kfd_process.c 			pr_err("Failed to suspend process %d\n", p->pasid);
p                1092 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		dma_fence_signal(p->ef);
p                1093 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		dma_fence_put(p->ef);
p                1094 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		p->ef = NULL;
p                1101 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	struct kfd_process *p;
p                1105 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
p                1106 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
p                1108 drivers/gpu/drm/amd/amdkfd/kfd_process.c 			       p->pasid);
p                1167 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	struct kfd_process *p;
p                1173 drivers/gpu/drm/amd/amdkfd/kfd_process.c 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
p                1175 drivers/gpu/drm/amd/amdkfd/kfd_process.c 			   p->lead_thread->tgid, p->pasid);
p                1177 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		mutex_lock(&p->mutex);
p                1178 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		r = pqm_debugfs_mqds(m, &p->pqm);
p                1179 drivers/gpu/drm/amd/amdkfd/kfd_process.c 		mutex_unlock(&p->mutex);
p                 127 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
p                 131 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
p                 135 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
p                 143 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 	pqm->process = p;
p                 411 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 			struct queue_properties *p)
p                 422 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 	pqn->q->properties.queue_address = p->queue_address;
p                 423 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 	pqn->q->properties.queue_size = p->queue_size;
p                 424 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 	pqn->q->properties.queue_percent = p->queue_percent;
p                 425 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 	pqn->q->properties.priority = p->priority;
p                 436 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 			struct queue_properties *p)
p                 452 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 	pqn->q->properties.cu_mask_count = p->cu_mask_count;
p                 453 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 	pqn->q->properties.cu_mask = p->cu_mask;
p                 147 drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
p                 149 drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c 	return (struct i2c_payload *)p->payloads.container;
p                 152 drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
p                 154 drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c 	return p->payloads.count;
p                 157 drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p)
p                 159 drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c 	if (!p || !*p)
p                 161 drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c 	dal_vector_destruct(&(*p)->payloads);
p                 162 drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c 	kfree(*p);
p                 163 drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c 	*p = NULL;
p                 318 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c 	unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, s, ix, wx, p, l0, a, ax, l,
p                 350 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c 	p = 3 * wx - w;
p                 352 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c 	a = ix + p * l0;
p                 355 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c 	if ((ix % w) == 0 && p != 0)
p                 341 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c 	unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, s, ix, wx, p, l0, a, ax, l,
p                 373 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c 	p = 3 * wx - w;
p                 375 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c 	a = ix + p * l0;
p                 378 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c 	if ((ix % w) == 0 && p != 0)
p                 513 drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c 	unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, S, ix, wx, p, l0, a, ax, l,
p                 545 drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c 	p = 3 * wx - w;
p                 547 drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c 	a = ix + p * l0;
p                 550 drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c 	if ((ix % w) == 0 && p != 0)
p                 111 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 	int   *p = ofs;
p                 114 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  6) ? (0) : ((((bpp >=  8) && (bpp <= 12))) ? (2) : ((bpp >= 15) ? (10) : ((((bpp > 6) && (bpp < 8))) ? (0 + dsc_roundf((bpp -  6) * (2 / 2.0))) : (2 + dsc_roundf((bpp - 12) * (8 / 3.0))))));
p                 115 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  6) ? (-2) : ((((bpp >=  8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (8) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp -  6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (8 / 3.0))))));
p                 116 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  6) ? (-2) : ((((bpp >=  8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (6) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp -  6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
p                 117 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  6) ? (-4) : ((((bpp >=  8) && (bpp <= 12))) ? (-2) : ((bpp >= 15) ? (4) : ((((bpp > 6) && (bpp < 8))) ? (-4 + dsc_roundf((bpp -  6) * (2 / 2.0))) : (-2 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
p                 118 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  6) ? (-6) : ((((bpp >=  8) && (bpp <= 12))) ? (-4) : ((bpp >= 15) ? (2) : ((((bpp > 6) && (bpp < 8))) ? (-6 + dsc_roundf((bpp -  6) * (2 / 2.0))) : (-4 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
p                 119 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <= 12) ? (-6) : ((bpp >= 15) ? (0) : (-6 + dsc_roundf((bpp - 12) * (6 / 3.0))));
p                 120 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-2) : (-8 + dsc_roundf((bpp - 12) * (6 / 3.0))));
p                 121 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-4) : (-8 + dsc_roundf((bpp - 12) * (4 / 3.0))));
p                 122 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-6) : (-8 + dsc_roundf((bpp - 12) * (2 / 3.0))));
p                 123 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <= 12) ? (-10) : ((bpp >= 15) ? (-8) : (-10 + dsc_roundf((bpp - 12) * (2 / 3.0))));
p                 124 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = -10;
p                 125 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  6) ? (-12) : ((bpp >=  8) ? (-10) : (-12 + dsc_roundf((bpp -  6) * (2 / 2.0))));
p                 126 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = -12;
p                 127 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = -12;
p                 128 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = -12;
p                 130 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  8) ? (2) : ((bpp >= 10) ? (10) : (2 + dsc_roundf((bpp -  8) * (8 / 2.0))));
p                 131 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  8) ? (0) : ((bpp >= 10) ? (8) : (0 + dsc_roundf((bpp -  8) * (8 / 2.0))));
p                 132 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  8) ? (0) : ((bpp >= 10) ? (6) : (0 + dsc_roundf((bpp -  8) * (6 / 2.0))));
p                 133 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  8) ? (-2) : ((bpp >= 10) ? (4) : (-2 + dsc_roundf((bpp -  8) * (6 / 2.0))));
p                 134 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  8) ? (-4) : ((bpp >= 10) ? (2) : (-4 + dsc_roundf((bpp -  8) * (6 / 2.0))));
p                 135 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  8) ? (-6) : ((bpp >= 10) ? (0) : (-6 + dsc_roundf((bpp -  8) * (6 / 2.0))));
p                 136 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  8) ? (-8) : ((bpp >= 10) ? (-2) : (-8 + dsc_roundf((bpp -  8) * (6 / 2.0))));
p                 137 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  8) ? (-8) : ((bpp >= 10) ? (-4) : (-8 + dsc_roundf((bpp -  8) * (4 / 2.0))));
p                 138 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  8) ? (-8) : ((bpp >= 10) ? (-6) : (-8 + dsc_roundf((bpp -  8) * (2 / 2.0))));
p                 139 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  8) ? (-10) : ((bpp >= 10) ? (-8) : (-10 + dsc_roundf((bpp -  8) * (2 / 2.0))));
p                 140 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = -10;
p                 141 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  6) ? (-12) : ((bpp >= 7) ? (-10) : (-12 + dsc_roundf((bpp -  6) * (2.0 / 1))));
p                 142 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = -12;
p                 143 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = -12;
p                 144 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = -12;
p                 146 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  6) ? (2) : ((bpp >=  8) ? (10) : (2 + dsc_roundf((bpp -  6) * (8 / 2.0))));
p                 147 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  6) ? (0) : ((bpp >=  8) ? (8) : (0 + dsc_roundf((bpp -  6) * (8 / 2.0))));
p                 148 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  6) ? (0) : ((bpp >=  8) ? (6) : (0 + dsc_roundf((bpp -  6) * (6 / 2.0))));
p                 149 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  6) ? (-2) : ((bpp >=  8) ? (4) : (-2 + dsc_roundf((bpp -  6) * (6 / 2.0))));
p                 150 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  6) ? (-4) : ((bpp >=  8) ? (2) : (-4 + dsc_roundf((bpp -  6) * (6 / 2.0))));
p                 151 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  6) ? (-6) : ((bpp >=  8) ? (0) : (-6 + dsc_roundf((bpp -  6) * (6 / 2.0))));
p                 152 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  6) ? (-8) : ((bpp >=  8) ? (-2) : (-8 + dsc_roundf((bpp -  6) * (6 / 2.0))));
p                 153 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  6) ? (-8) : ((bpp >=  8) ? (-4) : (-8 + dsc_roundf((bpp -  6) * (4 / 2.0))));
p                 154 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  6) ? (-8) : ((bpp >=  8) ? (-6) : (-8 + dsc_roundf((bpp -  6) * (2 / 2.0))));
p                 155 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  6) ? (-10) : ((bpp >=  8) ? (-8) : (-10 + dsc_roundf((bpp -  6) * (2 / 2.0))));
p                 156 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = -10;
p                 157 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = (bpp <=  4) ? (-12) : ((bpp >=  5) ? (-10) : (-12 + dsc_roundf((bpp -  4) * (2 / 1.0))));
p                 158 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = -12;
p                 159 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = -12;
p                 160 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c 		*p++ = -12;
p                1351 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 	struct gamma_pixel *p = points;
p                1358 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 	p_last = p + numberof_points - 1;
p                1364 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 		p->r = value;
p                1365 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 		p->g = value;
p                1366 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 		p->b = value;
p                1368 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 		++p;
p                1372 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 	p->r = dc_fixpt_div(p_last->r, dividers.divider1);
p                1373 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 	p->g = dc_fixpt_div(p_last->g, dividers.divider1);
p                1374 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 	p->b = dc_fixpt_div(p_last->b, dividers.divider1);
p                1376 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 	++p;
p                1378 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 	p->r = dc_fixpt_div(p_last->r, dividers.divider2);
p                1379 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 	p->g = dc_fixpt_div(p_last->g, dividers.divider2);
p                1380 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 	p->b = dc_fixpt_div(p_last->b, dividers.divider2);
p                1382 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 	++p;
p                1384 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 	p->r = dc_fixpt_div(p_last->r, dividers.divider3);
p                1385 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 	p->g = dc_fixpt_div(p_last->g, dividers.divider3);
p                1386 drivers/gpu/drm/amd/display/modules/color/color_gamma.c 	p->b = dc_fixpt_div(p_last->b, dividers.divider3);
p                  92 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c 		const VCEClockInfoArray *p = (const VCEClockInfoArray *)
p                  94 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c 		table_size = sizeof(uint8_t) + p->ucNumEntries * sizeof(VCEClockInfo);
p                 191 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c 		const UVDClockInfoArray *p = (const UVDClockInfoArray *)
p                 195 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c 			     p->ucNumEntries * sizeof(UVDClockInfo);
p                 128 drivers/gpu/drm/arm/display/komeda/komeda_kms.h #define to_kplane(p)	container_of(p, struct komeda_plane, base)
p                 129 drivers/gpu/drm/arm/display/komeda/komeda_kms.h #define to_kplane_st(p)	container_of(p, struct komeda_plane_state, base)
p                 130 drivers/gpu/drm/arm/display/komeda/komeda_kms.h #define to_kconn(p)	container_of(p, struct komeda_wb_connector, base)
p                 131 drivers/gpu/drm/arm/display/komeda/komeda_kms.h #define to_kcrtc(p)	container_of(p, struct komeda_crtc, base)
p                 132 drivers/gpu/drm/arm/display/komeda/komeda_kms.h #define to_kcrtc_st(p)	container_of(p, struct komeda_crtc_state, base)
p                 133 drivers/gpu/drm/arm/display/komeda/komeda_kms.h #define to_kdev(p)	container_of(p, struct komeda_kms_dev, base)
p                 135 drivers/gpu/drm/arm/malidp_planes.c static void malidp_plane_atomic_print_state(struct drm_printer *p,
p                 140 drivers/gpu/drm/arm/malidp_planes.c 	drm_printf(p, "\trotmem_size=%u\n", ms->rotmem_size);
p                 141 drivers/gpu/drm/arm/malidp_planes.c 	drm_printf(p, "\tformat_id=%u\n", ms->format);
p                 142 drivers/gpu/drm/arm/malidp_planes.c 	drm_printf(p, "\tn_planes=%u\n", ms->n_planes);
p                 143 drivers/gpu/drm/arm/malidp_planes.c 	drm_printf(p, "\tmmu_prefetch_mode=%s\n",
p                 145 drivers/gpu/drm/arm/malidp_planes.c 	drm_printf(p, "\tmmu_prefetch_pgsize=%d\n", ms->mmu_prefetch_pgsize);
p                 549 drivers/gpu/drm/armada/armada_crtc.c 		uint32_t *p = &pix[y * stride];
p                 552 drivers/gpu/drm/armada/armada_crtc.c 		for (x = 0; x < width; x++, p++) {
p                 553 drivers/gpu/drm/armada/armada_crtc.c 			uint32_t val = *p;
p                  23 drivers/gpu/drm/armada/armada_debugfs.c 	struct drm_printer p = drm_seq_file_printer(m);
p                  26 drivers/gpu/drm/armada/armada_debugfs.c 	drm_mm_print(&priv->linear, &p);
p                  96 drivers/gpu/drm/armada/armada_gem.c 		struct page *p = alloc_pages(GFP_KERNEL, order);
p                  98 drivers/gpu/drm/armada/armada_gem.c 		if (p) {
p                  99 drivers/gpu/drm/armada/armada_gem.c 			obj->addr = page_address(p);
p                 100 drivers/gpu/drm/armada/armada_gem.c 			obj->phys_addr = page_to_phys(p);
p                 101 drivers/gpu/drm/armada/armada_gem.c 			obj->page = p;
p                  19 drivers/gpu/drm/armada/armada_plane.h #define armada_addr(state, f, p) to_armada_plane_state(state)->addrs[f][p]
p                  32 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h #define ATMEL_HLCDC_LAYER_DMA_IRQ(p)		BIT(2 + (8 * (p)))
p                  33 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h #define ATMEL_HLCDC_LAYER_DSCR_IRQ(p)		BIT(3 + (8 * (p)))
p                  34 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h #define ATMEL_HLCDC_LAYER_ADD_IRQ(p)		BIT(4 + (8 * (p)))
p                  35 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h #define ATMEL_HLCDC_LAYER_DONE_IRQ(p)		BIT(5 + (8 * (p)))
p                  36 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h #define ATMEL_HLCDC_LAYER_OVR_IRQ(p)		BIT(6 + (8 * (p)))
p                  38 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h #define ATMEL_HLCDC_LAYER_PLANE_HEAD(p)		(((p) * 0x10) + 0x1c)
p                  39 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h #define ATMEL_HLCDC_LAYER_PLANE_ADDR(p)		(((p) * 0x10) + 0x20)
p                  40 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h #define ATMEL_HLCDC_LAYER_PLANE_CTRL(p)		(((p) * 0x10) + 0x24)
p                  41 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h #define ATMEL_HLCDC_LAYER_PLANE_NEXT(p)		(((p) * 0x10) + 0x28)
p                 280 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h drm_plane_to_atmel_hlcdc_plane(struct drm_plane *p)
p                 282 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h 	return container_of(p, struct atmel_hlcdc_plane, base);
p                 594 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
p                 597 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c 	struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
p                 715 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
p                 718 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c 	struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
p                 734 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
p                 737 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c 	struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
p                 739 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c 			drm_plane_state_to_atmel_hlcdc_plane_state(p->state);
p                 742 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c 	if (!p->state->crtc || !p->state->fb)
p                 746 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c 		atmel_hlcdc_plane_atomic_disable(p, old_s);
p                 841 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c static int atmel_hlcdc_plane_alloc_dscrs(struct drm_plane *p,
p                 844 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c 	struct atmel_hlcdc_dc *dc = p->dev->dev_private;
p                 874 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c static void atmel_hlcdc_plane_reset(struct drm_plane *p)
p                 878 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c 	if (p->state) {
p                 879 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c 		state = drm_plane_state_to_atmel_hlcdc_plane_state(p->state);
p                 885 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c 		p->state = NULL;
p                 890 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c 		if (atmel_hlcdc_plane_alloc_dscrs(p, state)) {
p                 892 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c 			dev_err(p->dev->dev,
p                 896 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c 		__drm_atomic_helper_plane_reset(p, &state->base);
p                 901 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c atmel_hlcdc_plane_atomic_duplicate_state(struct drm_plane *p)
p                 904 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c 			drm_plane_state_to_atmel_hlcdc_plane_state(p->state);
p                 911 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c 	if (atmel_hlcdc_plane_alloc_dscrs(p, copy)) {
p                 922 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c static void atmel_hlcdc_plane_atomic_destroy_state(struct drm_plane *p,
p                 927 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c 	struct atmel_hlcdc_dc *dc = p->dev->dev_private;
p                 195 drivers/gpu/drm/bridge/adv7511/adv7511.h #define ADV7511_PACKET(p, x)	    ((p) * 0x20 + (x))
p                  65 drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c #define OUTVACT_LPCMD_TIME(p)		(((p) & 0xff) << 16)
p                  66 drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c #define INVACT_LPCMD_TIME(p)		((p) & 0xff)
p                  97 drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c #define VID_PKT_SIZE(p)			((p) & 0x3fff)
p                 156 drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c #define HSTX_TO_CNT(p)			(((p) & 0xffff) << 16)
p                 157 drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c #define LPRX_TO_CNT(p)			((p) & 0xffff)
p                 379 drivers/gpu/drm/drm_atomic.c static void drm_atomic_crtc_print_state(struct drm_printer *p,
p                 384 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
p                 385 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tenable=%d\n", state->enable);
p                 386 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tactive=%d\n", state->active);
p                 387 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tself_refresh_active=%d\n", state->self_refresh_active);
p                 388 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
p                 389 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
p                 390 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
p                 391 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
p                 392 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
p                 393 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
p                 394 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
p                 395 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
p                 396 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));
p                 399 drivers/gpu/drm/drm_atomic.c 		crtc->funcs->atomic_print_state(p, state);
p                 649 drivers/gpu/drm/drm_atomic.c static void drm_atomic_plane_print_state(struct drm_printer *p,
p                 656 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
p                 657 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
p                 658 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
p                 660 drivers/gpu/drm/drm_atomic.c 		drm_framebuffer_print_info(p, 2, state->fb);
p                 661 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
p                 662 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
p                 663 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\trotation=%x\n", state->rotation);
p                 664 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos);
p                 665 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tcolor-encoding=%s\n",
p                 667 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tcolor-range=%s\n",
p                 671 drivers/gpu/drm/drm_atomic.c 		plane->funcs->atomic_print_state(p, state);
p                1001 drivers/gpu/drm/drm_atomic.c static void drm_atomic_connector_print_state(struct drm_printer *p,
p                1006 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
p                1007 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
p                1008 drivers/gpu/drm/drm_atomic.c 	drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
p                1012 drivers/gpu/drm/drm_atomic.c 			drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id);
p                1015 drivers/gpu/drm/drm_atomic.c 		connector->funcs->atomic_print_state(p, state);
p                1428 drivers/gpu/drm/drm_atomic.c 	struct drm_printer p = drm_info_printer(state->dev->dev);
p                1440 drivers/gpu/drm/drm_atomic.c 		drm_atomic_plane_print_state(&p, plane_state);
p                1443 drivers/gpu/drm/drm_atomic.c 		drm_atomic_crtc_print_state(&p, crtc_state);
p                1446 drivers/gpu/drm/drm_atomic.c 		drm_atomic_connector_print_state(&p, connector_state);
p                1449 drivers/gpu/drm/drm_atomic.c static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
p                1464 drivers/gpu/drm/drm_atomic.c 		drm_atomic_plane_print_state(p, plane->state);
p                1472 drivers/gpu/drm/drm_atomic.c 		drm_atomic_crtc_print_state(p, crtc->state);
p                1481 drivers/gpu/drm/drm_atomic.c 		drm_atomic_connector_print_state(p, connector->state);
p                1502 drivers/gpu/drm/drm_atomic.c void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
p                1504 drivers/gpu/drm/drm_atomic.c 	__drm_state_dump(dev, p, false);
p                1513 drivers/gpu/drm/drm_atomic.c 	struct drm_printer p = drm_seq_file_printer(m);
p                1515 drivers/gpu/drm/drm_atomic.c 	__drm_state_dump(dev, &p, true);
p                1294 drivers/gpu/drm/drm_bufs.c 			void *data, int *p,
p                1325 drivers/gpu/drm/drm_bufs.c 	if (*p >= count) {
p                1341 drivers/gpu/drm/drm_bufs.c 	*p = count;
p                1482 drivers/gpu/drm/drm_bufs.c int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
p                1510 drivers/gpu/drm/drm_bufs.c 	if (*p >= dma->buf_count) {
p                1545 drivers/gpu/drm/drm_bufs.c 	*p = dma->buf_count;
p                1546 drivers/gpu/drm/drm_bufs.c 	DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);
p                 445 drivers/gpu/drm/drm_client.c 	struct drm_printer p = drm_seq_file_printer(m);
p                 450 drivers/gpu/drm/drm_client.c 		drm_printf(&p, "%s\n", client->name);
p                 609 drivers/gpu/drm/drm_file.c 				  struct drm_pending_event *p,
p                 617 drivers/gpu/drm/drm_file.c 	p->event = e;
p                 618 drivers/gpu/drm/drm_file.c 	list_add(&p->pending_link, &file_priv->pending_event_list);
p                 619 drivers/gpu/drm/drm_file.c 	p->file_priv = file_priv;
p                 651 drivers/gpu/drm/drm_file.c 			   struct drm_pending_event *p,
p                 658 drivers/gpu/drm/drm_file.c 	ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
p                 675 drivers/gpu/drm/drm_file.c 			   struct drm_pending_event *p)
p                 679 drivers/gpu/drm/drm_file.c 	if (p->file_priv) {
p                 680 drivers/gpu/drm/drm_file.c 		p->file_priv->event_space += p->event->length;
p                 681 drivers/gpu/drm/drm_file.c 		list_del(&p->pending_link);
p                 685 drivers/gpu/drm/drm_file.c 	if (p->fence)
p                 686 drivers/gpu/drm/drm_file.c 		dma_fence_put(p->fence);
p                 688 drivers/gpu/drm/drm_file.c 	kfree(p);
p                1038 drivers/gpu/drm/drm_framebuffer.c void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
p                1044 drivers/gpu/drm/drm_framebuffer.c 	drm_printf_indent(p, indent, "allocated by = %s\n", fb->comm);
p                1045 drivers/gpu/drm/drm_framebuffer.c 	drm_printf_indent(p, indent, "refcount=%u\n",
p                1047 drivers/gpu/drm/drm_framebuffer.c 	drm_printf_indent(p, indent, "format=%s\n",
p                1049 drivers/gpu/drm/drm_framebuffer.c 	drm_printf_indent(p, indent, "modifier=0x%llx\n", fb->modifier);
p                1050 drivers/gpu/drm/drm_framebuffer.c 	drm_printf_indent(p, indent, "size=%ux%u\n", fb->width, fb->height);
p                1051 drivers/gpu/drm/drm_framebuffer.c 	drm_printf_indent(p, indent, "layers:\n");
p                1054 drivers/gpu/drm/drm_framebuffer.c 		drm_printf_indent(p, indent + 1, "size[%u]=%dx%d\n", i,
p                1057 drivers/gpu/drm/drm_framebuffer.c 		drm_printf_indent(p, indent + 1, "pitch[%u]=%u\n", i, fb->pitches[i]);
p                1058 drivers/gpu/drm/drm_framebuffer.c 		drm_printf_indent(p, indent + 1, "offset[%u]=%u\n", i, fb->offsets[i]);
p                1059 drivers/gpu/drm/drm_framebuffer.c 		drm_printf_indent(p, indent + 1, "obj[%u]:%s\n", i,
p                1062 drivers/gpu/drm/drm_framebuffer.c 			drm_gem_print_info(p, indent + 2, fb->obj[i]);
p                1071 drivers/gpu/drm/drm_framebuffer.c 	struct drm_printer p = drm_seq_file_printer(m);
p                1076 drivers/gpu/drm/drm_framebuffer.c 		drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
p                1077 drivers/gpu/drm/drm_framebuffer.c 		drm_framebuffer_print_info(&p, 1, fb);
p                 556 drivers/gpu/drm/drm_gem.c 	struct page *p, **pages;
p                 578 drivers/gpu/drm/drm_gem.c 		p = shmem_read_mapping_page(mapping, i);
p                 579 drivers/gpu/drm/drm_gem.c 		if (IS_ERR(p))
p                 581 drivers/gpu/drm/drm_gem.c 		pages[i] = p;
p                 589 drivers/gpu/drm/drm_gem.c 				(page_to_pfn(p) >= 0x00100000UL));
p                 605 drivers/gpu/drm/drm_gem.c 	return ERR_CAST(p);
p                1204 drivers/gpu/drm/drm_gem.c void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
p                1207 drivers/gpu/drm/drm_gem.c 	drm_printf_indent(p, indent, "name=%d\n", obj->name);
p                1208 drivers/gpu/drm/drm_gem.c 	drm_printf_indent(p, indent, "refcount=%u\n",
p                1210 drivers/gpu/drm/drm_gem.c 	drm_printf_indent(p, indent, "start=%08lx\n",
p                1212 drivers/gpu/drm/drm_gem.c 	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
p                1213 drivers/gpu/drm/drm_gem.c 	drm_printf_indent(p, indent, "imported=%s\n",
p                1217 drivers/gpu/drm/drm_gem.c 		obj->funcs->print_info(p, indent, obj);
p                1219 drivers/gpu/drm/drm_gem.c 		obj->dev->driver->gem_print_info(p, indent, obj);
p                 405 drivers/gpu/drm/drm_gem_cma_helper.c void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
p                 410 drivers/gpu/drm/drm_gem_cma_helper.c 	drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr);
p                 411 drivers/gpu/drm/drm_gem_cma_helper.c 	drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
p                 565 drivers/gpu/drm/drm_gem_shmem_helper.c void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
p                 570 drivers/gpu/drm/drm_gem_shmem_helper.c 	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
p                 571 drivers/gpu/drm/drm_gem_shmem_helper.c 	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
p                 572 drivers/gpu/drm/drm_gem_shmem_helper.c 	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
p                 133 drivers/gpu/drm/drm_internal.h void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
p                 213 drivers/gpu/drm/drm_internal.h void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
p                 945 drivers/gpu/drm/drm_mm.c static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
p                 952 drivers/gpu/drm/drm_mm.c 		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
p                 963 drivers/gpu/drm/drm_mm.c void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
p                 968 drivers/gpu/drm/drm_mm.c 	total_free += drm_mm_dump_hole(p, &mm->head_node);
p                 971 drivers/gpu/drm/drm_mm.c 		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
p                 974 drivers/gpu/drm/drm_mm.c 		total_free += drm_mm_dump_hole(p, entry);
p                 978 drivers/gpu/drm/drm_mm.c 	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
p                 497 drivers/gpu/drm/drm_mode_config.c 		struct drm_printer p = drm_debug_printer("[leaked fb]");
p                 498 drivers/gpu/drm/drm_mode_config.c 		drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
p                 499 drivers/gpu/drm/drm_mode_config.c 		drm_framebuffer_print_info(&p, 1, fb);
p                 843 drivers/gpu/drm/drm_modes.c void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
p                 845 drivers/gpu/drm/drm_modes.c 	if (!p)
p                 848 drivers/gpu/drm/drm_modes.c 	p->crtc_clock = p->clock;
p                 849 drivers/gpu/drm/drm_modes.c 	p->crtc_hdisplay = p->hdisplay;
p                 850 drivers/gpu/drm/drm_modes.c 	p->crtc_hsync_start = p->hsync_start;
p                 851 drivers/gpu/drm/drm_modes.c 	p->crtc_hsync_end = p->hsync_end;
p                 852 drivers/gpu/drm/drm_modes.c 	p->crtc_htotal = p->htotal;
p                 853 drivers/gpu/drm/drm_modes.c 	p->crtc_hskew = p->hskew;
p                 854 drivers/gpu/drm/drm_modes.c 	p->crtc_vdisplay = p->vdisplay;
p                 855 drivers/gpu/drm/drm_modes.c 	p->crtc_vsync_start = p->vsync_start;
p                 856 drivers/gpu/drm/drm_modes.c 	p->crtc_vsync_end = p->vsync_end;
p                 857 drivers/gpu/drm/drm_modes.c 	p->crtc_vtotal = p->vtotal;
p                 859 drivers/gpu/drm/drm_modes.c 	if (p->flags & DRM_MODE_FLAG_INTERLACE) {
p                 861 drivers/gpu/drm/drm_modes.c 			p->crtc_vdisplay /= 2;
p                 862 drivers/gpu/drm/drm_modes.c 			p->crtc_vsync_start /= 2;
p                 863 drivers/gpu/drm/drm_modes.c 			p->crtc_vsync_end /= 2;
p                 864 drivers/gpu/drm/drm_modes.c 			p->crtc_vtotal /= 2;
p                 869 drivers/gpu/drm/drm_modes.c 		if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
p                 870 drivers/gpu/drm/drm_modes.c 			p->crtc_vdisplay *= 2;
p                 871 drivers/gpu/drm/drm_modes.c 			p->crtc_vsync_start *= 2;
p                 872 drivers/gpu/drm/drm_modes.c 			p->crtc_vsync_end *= 2;
p                 873 drivers/gpu/drm/drm_modes.c 			p->crtc_vtotal *= 2;
p                 878 drivers/gpu/drm/drm_modes.c 		if (p->vscan > 1) {
p                 879 drivers/gpu/drm/drm_modes.c 			p->crtc_vdisplay *= p->vscan;
p                 880 drivers/gpu/drm/drm_modes.c 			p->crtc_vsync_start *= p->vscan;
p                 881 drivers/gpu/drm/drm_modes.c 			p->crtc_vsync_end *= p->vscan;
p                 882 drivers/gpu/drm/drm_modes.c 			p->crtc_vtotal *= p->vscan;
p                 887 drivers/gpu/drm/drm_modes.c 		unsigned int layout = p->flags & DRM_MODE_FLAG_3D_MASK;
p                 891 drivers/gpu/drm/drm_modes.c 			p->crtc_clock *= 2;
p                 892 drivers/gpu/drm/drm_modes.c 			p->crtc_vdisplay += p->crtc_vtotal;
p                 893 drivers/gpu/drm/drm_modes.c 			p->crtc_vsync_start += p->crtc_vtotal;
p                 894 drivers/gpu/drm/drm_modes.c 			p->crtc_vsync_end += p->crtc_vtotal;
p                 895 drivers/gpu/drm/drm_modes.c 			p->crtc_vtotal += p->crtc_vtotal;
p                 900 drivers/gpu/drm/drm_modes.c 	p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
p                 901 drivers/gpu/drm/drm_modes.c 	p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
p                 902 drivers/gpu/drm/drm_modes.c 	p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
p                 903 drivers/gpu/drm/drm_modes.c 	p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
p                 139 drivers/gpu/drm/drm_pci.c static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
p                 141 drivers/gpu/drm/drm_pci.c 	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
p                 142 drivers/gpu/drm/drm_pci.c 	    (p->busnum & 0xff) != dev->pdev->bus->number ||
p                 143 drivers/gpu/drm/drm_pci.c 	    p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
p                 146 drivers/gpu/drm/drm_pci.c 	p->irq = dev->pdev->irq;
p                 148 drivers/gpu/drm/drm_pci.c 	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
p                 149 drivers/gpu/drm/drm_pci.c 		  p->irq);
p                 168 drivers/gpu/drm/drm_pci.c 	struct drm_irq_busid *p = data;
p                 180 drivers/gpu/drm/drm_pci.c 	return drm_pci_irq_by_busid(dev, p);
p                 102 drivers/gpu/drm/drm_prime.c 	struct rb_node **p, *rb;
p                 113 drivers/gpu/drm/drm_prime.c 	p = &prime_fpriv->dmabufs.rb_node;
p                 114 drivers/gpu/drm/drm_prime.c 	while (*p) {
p                 117 drivers/gpu/drm/drm_prime.c 		rb = *p;
p                 120 drivers/gpu/drm/drm_prime.c 			p = &rb->rb_right;
p                 122 drivers/gpu/drm/drm_prime.c 			p = &rb->rb_left;
p                 124 drivers/gpu/drm/drm_prime.c 	rb_link_node(&member->dmabuf_rb, rb, p);
p                 128 drivers/gpu/drm/drm_prime.c 	p = &prime_fpriv->handles.rb_node;
p                 129 drivers/gpu/drm/drm_prime.c 	while (*p) {
p                 132 drivers/gpu/drm/drm_prime.c 		rb = *p;
p                 135 drivers/gpu/drm/drm_prime.c 			p = &rb->rb_right;
p                 137 drivers/gpu/drm/drm_prime.c 			p = &rb->rb_left;
p                 139 drivers/gpu/drm/drm_prime.c 	rb_link_node(&member->handle_rb, rb, p);
p                  38 drivers/gpu/drm/drm_print.c void __drm_puts_coredump(struct drm_printer *p, const char *str)
p                  40 drivers/gpu/drm/drm_print.c 	struct drm_print_iterator *iterator = p->arg;
p                  80 drivers/gpu/drm/drm_print.c void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf)
p                  82 drivers/gpu/drm/drm_print.c 	struct drm_print_iterator *iterator = p->arg;
p                 120 drivers/gpu/drm/drm_print.c 	__drm_puts_coredump(p, (const char *) buf);
p                 126 drivers/gpu/drm/drm_print.c void __drm_puts_seq_file(struct drm_printer *p, const char *str)
p                 128 drivers/gpu/drm/drm_print.c 	seq_puts(p->arg, str);
p                 132 drivers/gpu/drm/drm_print.c void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf)
p                 134 drivers/gpu/drm/drm_print.c 	seq_printf(p->arg, "%pV", vaf);
p                 138 drivers/gpu/drm/drm_print.c void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf)
p                 140 drivers/gpu/drm/drm_print.c 	dev_info(p->arg, "[" DRM_NAME "] %pV", vaf);
p                 144 drivers/gpu/drm/drm_print.c void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf)
p                 146 drivers/gpu/drm/drm_print.c 	pr_debug("%s %pV", p->prefix, vaf);
p                 158 drivers/gpu/drm/drm_print.c void drm_puts(struct drm_printer *p, const char *str)
p                 160 drivers/gpu/drm/drm_print.c 	if (p->puts)
p                 161 drivers/gpu/drm/drm_print.c 		p->puts(p, str);
p                 163 drivers/gpu/drm/drm_print.c 		drm_printf(p, "%s", str);
p                 172 drivers/gpu/drm/drm_print.c void drm_printf(struct drm_printer *p, const char *f, ...)
p                 177 drivers/gpu/drm/drm_print.c 	drm_vprintf(p, f, &args);
p                 274 drivers/gpu/drm/drm_print.c void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset)
p                 283 drivers/gpu/drm/drm_print.c 		drm_printf(p, "%*s = 0x%08x\n",
p                 120 drivers/gpu/drm/etnaviv/etnaviv_drv.c 	struct drm_printer p = drm_seq_file_printer(m);
p                 123 drivers/gpu/drm/etnaviv/etnaviv_drv.c 	drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
p                 131 drivers/gpu/drm/etnaviv/etnaviv_drv.c 	struct drm_printer p = drm_seq_file_printer(m);
p                 151 drivers/gpu/drm/etnaviv/etnaviv_drv.c 	drm_mm_print(&mmu_context->mm, &p);
p                  61 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);
p                  63 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	if (IS_ERR(p)) {
p                  64 drivers/gpu/drm/etnaviv/etnaviv_gem.c 		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
p                  65 drivers/gpu/drm/etnaviv/etnaviv_gem.c 		return PTR_ERR(p);
p                  68 drivers/gpu/drm/etnaviv/etnaviv_gem.c 	etnaviv_obj->pages = p;
p                 247 drivers/gpu/drm/exynos/exynos_drm_drv.c 		struct device *p = NULL, *d;
p                 252 drivers/gpu/drm/exynos/exynos_drm_drv.c 		while ((d = platform_find_device_by_driver(p, &info->driver->driver))) {
p                 253 drivers/gpu/drm/exynos/exynos_drm_drv.c 			put_device(p);
p                 259 drivers/gpu/drm/exynos/exynos_drm_drv.c 			p = d;
p                 261 drivers/gpu/drm/exynos/exynos_drm_drv.c 		put_device(p);
p                 541 drivers/gpu/drm/exynos/exynos_drm_dsi.c 		unsigned long fin, unsigned long fout, u8 *p, u16 *m, u8 *s)
p                 586 drivers/gpu/drm/exynos/exynos_drm_dsi.c 		*p = best_p;
p                 600 drivers/gpu/drm/exynos/exynos_drm_dsi.c 	u8 p, s;
p                 605 drivers/gpu/drm/exynos/exynos_drm_dsi.c 	fout = exynos_dsi_pll_find_pms(dsi, fin, freq, &p, &m, &s);
p                 611 drivers/gpu/drm/exynos/exynos_drm_dsi.c 	dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s);
p                 616 drivers/gpu/drm/exynos/exynos_drm_dsi.c 	reg = DSIM_PLL_EN | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s);
p                  42 drivers/gpu/drm/gma500/cdv_intel_display.c 	 .p = {.min = 28, .max = 140},
p                  54 drivers/gpu/drm/gma500/cdv_intel_display.c 	 .p = {.min = 28, .max = 140},
p                  69 drivers/gpu/drm/gma500/cdv_intel_display.c 	 .p = {.min = 5, .max = 90},
p                  81 drivers/gpu/drm/gma500/cdv_intel_display.c 	 .p = {.min = 5, .max = 100},
p                  93 drivers/gpu/drm/gma500/cdv_intel_display.c 	 .p = {.min = 5, .max = 90},
p                 105 drivers/gpu/drm/gma500/cdv_intel_display.c 	 .p = {.min = 5, .max = 100},
p                 217 drivers/gpu/drm/gma500/cdv_intel_display.c 	u32 m, n_vco, p;
p                 307 drivers/gpu/drm/gma500/cdv_intel_display.c 	ret = cdv_sb_read(dev, SB_P(pipe), &p);
p                 310 drivers/gpu/drm/gma500/cdv_intel_display.c 	p &= ~(SB_P2_DIVIDER_MASK | SB_P1_DIVIDER_MASK);
p                 311 drivers/gpu/drm/gma500/cdv_intel_display.c 	p |= SET_FIELD(clock->p1, SB_P1_DIVIDER);
p                 314 drivers/gpu/drm/gma500/cdv_intel_display.c 		p |= SET_FIELD(SB_P2_5, SB_P2_DIVIDER);
p                 317 drivers/gpu/drm/gma500/cdv_intel_display.c 		p |= SET_FIELD(SB_P2_10, SB_P2_DIVIDER);
p                 320 drivers/gpu/drm/gma500/cdv_intel_display.c 		p |= SET_FIELD(SB_P2_14, SB_P2_DIVIDER);
p                 323 drivers/gpu/drm/gma500/cdv_intel_display.c 		p |= SET_FIELD(SB_P2_7, SB_P2_DIVIDER);
p                 329 drivers/gpu/drm/gma500/cdv_intel_display.c 	ret = cdv_sb_write(dev, SB_P(pipe), p);
p                 395 drivers/gpu/drm/gma500/cdv_intel_display.c 	clock->p = clock->p1 * clock->p2;
p                 397 drivers/gpu/drm/gma500/cdv_intel_display.c 	clock->dot = clock->vco / clock->p;
p                 833 drivers/gpu/drm/gma500/cdv_intel_display.c 	clock->p = clock->p1 * clock->p2;
p                 835 drivers/gpu/drm/gma500/cdv_intel_display.c 	clock->dot = clock->vco / clock->p;
p                 850 drivers/gpu/drm/gma500/cdv_intel_display.c 	struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
p                 861 drivers/gpu/drm/gma500/cdv_intel_display.c 		dpll = p->dpll;
p                 863 drivers/gpu/drm/gma500/cdv_intel_display.c 			fp = p->fp0;
p                 865 drivers/gpu/drm/gma500/cdv_intel_display.c 			fp = p->fp1;
p                 924 drivers/gpu/drm/gma500/cdv_intel_display.c 	struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
p                 939 drivers/gpu/drm/gma500/cdv_intel_display.c 		htot = p->htotal;
p                 940 drivers/gpu/drm/gma500/cdv_intel_display.c 		hsync = p->hsync;
p                 941 drivers/gpu/drm/gma500/cdv_intel_display.c 		vtot = p->vtotal;
p                 942 drivers/gpu/drm/gma500/cdv_intel_display.c 		vsync = p->vsync;
p                1310 drivers/gpu/drm/gma500/cdv_intel_dp.c 	uint8_t p = 0;
p                1319 drivers/gpu/drm/gma500/cdv_intel_dp.c 		if (this_p > p)
p                1320 drivers/gpu/drm/gma500/cdv_intel_dp.c 			p = this_p;
p                1326 drivers/gpu/drm/gma500/cdv_intel_dp.c 	if (p == DP_TRAIN_PRE_EMPHASIS_MASK)
p                1327 drivers/gpu/drm/gma500/cdv_intel_dp.c 		p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
p                1330 drivers/gpu/drm/gma500/cdv_intel_dp.c 		intel_dp->train_set[lane] = v | p;
p                 674 drivers/gpu/drm/gma500/gma_display.c 	if (clock->p < limit->p.min || limit->p.max < clock->p)
p                  27 drivers/gpu/drm/gma500/gma_display.h 	int p;
p                  40 drivers/gpu/drm/gma500/gma_display.h 	struct gma_range_t dot, vco, n, m, m1, m2, p, p1;
p                 137 drivers/gpu/drm/gma500/intel_bios.c get_blocksize(void *p)
p                 141 drivers/gpu/drm/gma500/intel_bios.c 	block_ptr = (u16 *)((char *)p - 2);
p                 250 drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c 	u8 *p;
p                 267 drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c 	p = data;
p                 269 drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c 		b1 = *p++;
p                 270 drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c 		b2 = *p++;
p                 271 drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c 		b3 = *p++;
p                 272 drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c 		b4 = *p++;
p                 283 drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c 			b1 = *p++;
p                 284 drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c 			b2 = *p++;
p                 285 drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c 			b3 = *p++;
p                 288 drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c 			b1 = *p++;
p                 289 drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c 			b2 = *p++;
p                 292 drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c 			b1 = *p++;
p                 136 drivers/gpu/drm/gma500/mmu.c 	PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
p                 175 drivers/gpu/drm/gma500/mmu.c 	pd->p = alloc_page(GFP_DMA32);
p                 176 drivers/gpu/drm/gma500/mmu.c 	if (!pd->p)
p                 201 drivers/gpu/drm/gma500/mmu.c 	v = kmap(pd->p);
p                 205 drivers/gpu/drm/gma500/mmu.c 	kunmap(pd->p);
p                 225 drivers/gpu/drm/gma500/mmu.c 	__free_page(pd->p);
p                 233 drivers/gpu/drm/gma500/mmu.c 	__free_page(pt->p);
p                 263 drivers/gpu/drm/gma500/mmu.c 	__free_page(pd->p);
p                 282 drivers/gpu/drm/gma500/mmu.c 	pt->p = alloc_page(GFP_DMA32);
p                 283 drivers/gpu/drm/gma500/mmu.c 	if (!pt->p) {
p                 290 drivers/gpu/drm/gma500/mmu.c 	v = kmap_atomic(pt->p);
p                 341 drivers/gpu/drm/gma500/mmu.c 		v = kmap_atomic(pd->p);
p                 343 drivers/gpu/drm/gma500/mmu.c 		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
p                 352 drivers/gpu/drm/gma500/mmu.c 	pt->v = kmap_atomic(pt->p);
p                 369 drivers/gpu/drm/gma500/mmu.c 	pt->v = kmap_atomic(pt->p);
p                 380 drivers/gpu/drm/gma500/mmu.c 		v = kmap_atomic(pd->p);
p                 425 drivers/gpu/drm/gma500/mmu.c 	return page_to_pfn(pd->p) << PAGE_SHIFT;
p                 777 drivers/gpu/drm/gma500/mmu.c 		v = kmap_atomic(pd->p);
p                  39 drivers/gpu/drm/gma500/mmu.h 	struct page *p;
p                  47 drivers/gpu/drm/gma500/mmu.h 	struct page *p;
p                 140 drivers/gpu/drm/gma500/oaktrail_crtc.c 				clock.p = clock.p1 * limit->p2.p2_slow;
p                 141 drivers/gpu/drm/gma500/oaktrail_crtc.c 				target_vco = target * clock.p;
p                 151 drivers/gpu/drm/gma500/oaktrail_crtc.c 					      (clock.n * clock.p);
p                 183 drivers/gpu/drm/gma500/oaktrail_device.c 	struct psb_pipe *p = &regs->pipe[0];
p                 198 drivers/gpu/drm/gma500/oaktrail_device.c 	p->conf = PSB_RVDC32(PIPEACONF);
p                 199 drivers/gpu/drm/gma500/oaktrail_device.c 	p->src = PSB_RVDC32(PIPEASRC);
p                 200 drivers/gpu/drm/gma500/oaktrail_device.c 	p->fp0 = PSB_RVDC32(MRST_FPA0);
p                 201 drivers/gpu/drm/gma500/oaktrail_device.c 	p->fp1 = PSB_RVDC32(MRST_FPA1);
p                 202 drivers/gpu/drm/gma500/oaktrail_device.c 	p->dpll = PSB_RVDC32(MRST_DPLL_A);
p                 203 drivers/gpu/drm/gma500/oaktrail_device.c 	p->htotal = PSB_RVDC32(HTOTAL_A);
p                 204 drivers/gpu/drm/gma500/oaktrail_device.c 	p->hblank = PSB_RVDC32(HBLANK_A);
p                 205 drivers/gpu/drm/gma500/oaktrail_device.c 	p->hsync = PSB_RVDC32(HSYNC_A);
p                 206 drivers/gpu/drm/gma500/oaktrail_device.c 	p->vtotal = PSB_RVDC32(VTOTAL_A);
p                 207 drivers/gpu/drm/gma500/oaktrail_device.c 	p->vblank = PSB_RVDC32(VBLANK_A);
p                 208 drivers/gpu/drm/gma500/oaktrail_device.c 	p->vsync = PSB_RVDC32(VSYNC_A);
p                 210 drivers/gpu/drm/gma500/oaktrail_device.c 	p->cntr = PSB_RVDC32(DSPACNTR);
p                 211 drivers/gpu/drm/gma500/oaktrail_device.c 	p->stride = PSB_RVDC32(DSPASTRIDE);
p                 212 drivers/gpu/drm/gma500/oaktrail_device.c 	p->addr = PSB_RVDC32(DSPABASE);
p                 213 drivers/gpu/drm/gma500/oaktrail_device.c 	p->surf = PSB_RVDC32(DSPASURF);
p                 214 drivers/gpu/drm/gma500/oaktrail_device.c 	p->linoff = PSB_RVDC32(DSPALINOFF);
p                 215 drivers/gpu/drm/gma500/oaktrail_device.c 	p->tileoff = PSB_RVDC32(DSPATILEOFF);
p                 224 drivers/gpu/drm/gma500/oaktrail_device.c 		p->palette[i] = PSB_RVDC32(PALETTE_A + (i << 2));
p                 297 drivers/gpu/drm/gma500/oaktrail_device.c 	struct psb_pipe *p = &regs->pipe[0];
p                 315 drivers/gpu/drm/gma500/oaktrail_device.c 	PSB_WVDC32(p->fp0, MRST_FPA0);
p                 316 drivers/gpu/drm/gma500/oaktrail_device.c 	PSB_WVDC32(p->fp1, MRST_FPA1);
p                 319 drivers/gpu/drm/gma500/oaktrail_device.c 	PSB_WVDC32(p->dpll, MRST_DPLL_A);
p                 323 drivers/gpu/drm/gma500/oaktrail_device.c 	PSB_WVDC32(p->htotal, HTOTAL_A);
p                 324 drivers/gpu/drm/gma500/oaktrail_device.c 	PSB_WVDC32(p->hblank, HBLANK_A);
p                 325 drivers/gpu/drm/gma500/oaktrail_device.c 	PSB_WVDC32(p->hsync, HSYNC_A);
p                 326 drivers/gpu/drm/gma500/oaktrail_device.c 	PSB_WVDC32(p->vtotal, VTOTAL_A);
p                 327 drivers/gpu/drm/gma500/oaktrail_device.c 	PSB_WVDC32(p->vblank, VBLANK_A);
p                 328 drivers/gpu/drm/gma500/oaktrail_device.c 	PSB_WVDC32(p->vsync, VSYNC_A);
p                 329 drivers/gpu/drm/gma500/oaktrail_device.c 	PSB_WVDC32(p->src, PIPEASRC);
p                 337 drivers/gpu/drm/gma500/oaktrail_device.c 		PSB_WVDC32(p->conf, PIPEACONF);
p                 340 drivers/gpu/drm/gma500/oaktrail_device.c 	PSB_WVDC32(p->linoff, DSPALINOFF);
p                 341 drivers/gpu/drm/gma500/oaktrail_device.c 	PSB_WVDC32(p->stride, DSPASTRIDE);
p                 342 drivers/gpu/drm/gma500/oaktrail_device.c 	PSB_WVDC32(p->tileoff, DSPATILEOFF);
p                 345 drivers/gpu/drm/gma500/oaktrail_device.c 	PSB_WVDC32(p->cntr, DSPACNTR);
p                 346 drivers/gpu/drm/gma500/oaktrail_device.c 	PSB_WVDC32(p->surf, DSPASURF);
p                 355 drivers/gpu/drm/gma500/oaktrail_device.c 		PSB_WVDC32(p->palette[i], PALETTE_A + (i << 2));
p                  32 drivers/gpu/drm/gma500/psb_intel_display.c 	 .p = {.min = 5, .max = 80},
p                  44 drivers/gpu/drm/gma500/psb_intel_display.c 	 .p = {.min = 7, .max = 98},
p                  69 drivers/gpu/drm/gma500/psb_intel_display.c 	clock->p = clock->p1 * clock->p2;
p                  71 drivers/gpu/drm/gma500/psb_intel_display.c 	clock->dot = clock->vco / clock->p;
p                 308 drivers/gpu/drm/gma500/psb_intel_display.c 	struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
p                 319 drivers/gpu/drm/gma500/psb_intel_display.c 		dpll = p->dpll;
p                 322 drivers/gpu/drm/gma500/psb_intel_display.c 			fp = p->fp0;
p                 324 drivers/gpu/drm/gma500/psb_intel_display.c 		        fp = p->fp1;
p                 384 drivers/gpu/drm/gma500/psb_intel_display.c 	struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
p                 394 drivers/gpu/drm/gma500/psb_intel_display.c 		htot = p->htotal;
p                 395 drivers/gpu/drm/gma500/psb_intel_display.c 		hsync = p->hsync;
p                 396 drivers/gpu/drm/gma500/psb_intel_display.c 		vtot = p->vtotal;
p                 397 drivers/gpu/drm/gma500/psb_intel_display.c 		vsync = p->vsync;
p                1370 drivers/gpu/drm/gma500/psb_intel_reg.h #define LANE_PLL_PIPE(p)	(((p) == 0) ? (1 << 21) : (0 << 21))
p                  73 drivers/gpu/drm/i2c/tda9950.c static int tda9950_write_range(struct i2c_client *client, u8 addr, u8 *p, int cnt)
p                  83 drivers/gpu/drm/i2c/tda9950.c 	memcpy(buf + 1, p, cnt);
p                  90 drivers/gpu/drm/i2c/tda9950.c 	dev_dbg(&client->dev, "wr 0x%02x: %*ph\n", addr, cnt, p);
p                 103 drivers/gpu/drm/i2c/tda9950.c static int tda9950_read_range(struct i2c_client *client, u8 addr, u8 *p, int cnt)
p                 115 drivers/gpu/drm/i2c/tda9950.c 	msg[1].buf = p;
p                 121 drivers/gpu/drm/i2c/tda9950.c 	dev_dbg(&client->dev, "rd 0x%02x: %*ph\n", addr, cnt, p);
p                 607 drivers/gpu/drm/i2c/tda998x_drv.c reg_write_range(struct tda998x_priv *priv, u16 reg, u8 *p, int cnt)
p                 621 drivers/gpu/drm/i2c/tda998x_drv.c 	memcpy(&buf[1], p, cnt);
p                1735 drivers/gpu/drm/i2c/tda998x_drv.c 			      const struct tda998x_encoder_params *p)
p                1737 drivers/gpu/drm/i2c/tda998x_drv.c 	priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
p                1738 drivers/gpu/drm/i2c/tda998x_drv.c 			    (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
p                1739 drivers/gpu/drm/i2c/tda998x_drv.c 			    VIP_CNTRL_0_SWAP_B(p->swap_b) |
p                1740 drivers/gpu/drm/i2c/tda998x_drv.c 			    (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
p                1741 drivers/gpu/drm/i2c/tda998x_drv.c 	priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
p                1742 drivers/gpu/drm/i2c/tda998x_drv.c 			    (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
p                1743 drivers/gpu/drm/i2c/tda998x_drv.c 			    VIP_CNTRL_1_SWAP_D(p->swap_d) |
p                1744 drivers/gpu/drm/i2c/tda998x_drv.c 			    (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
p                1745 drivers/gpu/drm/i2c/tda998x_drv.c 	priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
p                1746 drivers/gpu/drm/i2c/tda998x_drv.c 			    (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
p                1747 drivers/gpu/drm/i2c/tda998x_drv.c 			    VIP_CNTRL_2_SWAP_F(p->swap_f) |
p                1748 drivers/gpu/drm/i2c/tda998x_drv.c 			    (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
p                1750 drivers/gpu/drm/i2c/tda998x_drv.c 	if (p->audio_params.format != AFMT_UNUSED) {
p                1752 drivers/gpu/drm/i2c/tda998x_drv.c 		bool spdif = p->audio_params.format == AFMT_SPDIF;
p                1757 drivers/gpu/drm/i2c/tda998x_drv.c 		priv->audio.cea = p->audio_params.cea;
p                1758 drivers/gpu/drm/i2c/tda998x_drv.c 		priv->audio.sample_rate = p->audio_params.sample_rate;
p                1759 drivers/gpu/drm/i2c/tda998x_drv.c 		memcpy(priv->audio.status, p->audio_params.status,
p                1761 drivers/gpu/drm/i2c/tda998x_drv.c 			   sizeof(p->audio_params.status)));
p                1762 drivers/gpu/drm/i2c/tda998x_drv.c 		priv->audio.ena_ap = p->audio_params.config;
p                1765 drivers/gpu/drm/i2c/tda998x_drv.c 		ratio = spdif ? 64 : p->audio_params.sample_width * 2;
p                1263 drivers/gpu/drm/i915/display/intel_bios.c 	enum port p;
p                1268 drivers/gpu/drm/i915/display/intel_bios.c 	p = get_port_by_ddc_pin(dev_priv, info->alternate_ddc_pin);
p                1269 drivers/gpu/drm/i915/display/intel_bios.c 	if (p != PORT_NONE) {
p                1273 drivers/gpu/drm/i915/display/intel_bios.c 			      port_name(p), port_name(p));
p                1287 drivers/gpu/drm/i915/display/intel_bios.c 		info = &dev_priv->vbt.ddi_port_info[p];
p                1314 drivers/gpu/drm/i915/display/intel_bios.c 	enum port p;
p                1319 drivers/gpu/drm/i915/display/intel_bios.c 	p = get_port_by_aux_ch(dev_priv, info->alternate_aux_channel);
p                1320 drivers/gpu/drm/i915/display/intel_bios.c 	if (p != PORT_NONE) {
p                1324 drivers/gpu/drm/i915/display/intel_bios.c 			      port_name(p), port_name(p));
p                1338 drivers/gpu/drm/i915/display/intel_bios.c 		info = &dev_priv->vbt.ddi_port_info[p];
p                1232 drivers/gpu/drm/i915/display/intel_ddi.c 	int n, p, r;
p                1268 drivers/gpu/drm/i915/display/intel_ddi.c 	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
p                1272 drivers/gpu/drm/i915/display/intel_ddi.c 	return (refclk * n * 100) / (p * r);
p                1976 drivers/gpu/drm/i915/display/intel_ddi.c 	enum pipe p;
p                2015 drivers/gpu/drm/i915/display/intel_ddi.c 	for_each_pipe(dev_priv, p) {
p                2016 drivers/gpu/drm/i915/display/intel_ddi.c 		enum transcoder cpu_transcoder = (enum transcoder)p;
p                2042 drivers/gpu/drm/i915/display/intel_ddi.c 			mst_pipe_mask |= BIT(p);
p                2044 drivers/gpu/drm/i915/display/intel_ddi.c 		*pipe_mask |= BIT(p);
p                 152 drivers/gpu/drm/i915/display/intel_display.c 	} dot, vco, n, m, m1, m2, p, p1;
p                 233 drivers/gpu/drm/i915/display/intel_display.c 	.p = { .min = 4, .max = 128 },
p                 246 drivers/gpu/drm/i915/display/intel_display.c 	.p = { .min = 4, .max = 128 },
p                 259 drivers/gpu/drm/i915/display/intel_display.c 	.p = { .min = 4, .max = 128 },
p                 272 drivers/gpu/drm/i915/display/intel_display.c 	.p = { .min = 5, .max = 80 },
p                 285 drivers/gpu/drm/i915/display/intel_display.c 	.p = { .min = 7, .max = 98 },
p                 299 drivers/gpu/drm/i915/display/intel_display.c 	.p = { .min = 10, .max = 30 },
p                 314 drivers/gpu/drm/i915/display/intel_display.c 	.p = { .min = 5, .max = 80 },
p                 327 drivers/gpu/drm/i915/display/intel_display.c 	.p = { .min = 28, .max = 112 },
p                 341 drivers/gpu/drm/i915/display/intel_display.c 	.p = { .min = 14, .max = 42 },
p                 357 drivers/gpu/drm/i915/display/intel_display.c 	.p = { .min = 5, .max = 80 },
p                 370 drivers/gpu/drm/i915/display/intel_display.c 	.p = { .min = 7, .max = 112 },
p                 388 drivers/gpu/drm/i915/display/intel_display.c 	.p = { .min = 5, .max = 80 },
p                 401 drivers/gpu/drm/i915/display/intel_display.c 	.p = { .min = 28, .max = 112 },
p                 414 drivers/gpu/drm/i915/display/intel_display.c 	.p = { .min = 14, .max = 56 },
p                 428 drivers/gpu/drm/i915/display/intel_display.c 	.p = { .min = 28, .max = 112 },
p                 441 drivers/gpu/drm/i915/display/intel_display.c 	.p = { .min = 14, .max = 42 },
p                 536 drivers/gpu/drm/i915/display/intel_display.c 	clock->p = clock->p1 * clock->p2;
p                 537 drivers/gpu/drm/i915/display/intel_display.c 	if (WARN_ON(clock->n == 0 || clock->p == 0))
p                 540 drivers/gpu/drm/i915/display/intel_display.c 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
p                 553 drivers/gpu/drm/i915/display/intel_display.c 	clock->p = clock->p1 * clock->p2;
p                 554 drivers/gpu/drm/i915/display/intel_display.c 	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
p                 557 drivers/gpu/drm/i915/display/intel_display.c 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
p                 565 drivers/gpu/drm/i915/display/intel_display.c 	clock->p = clock->p1 * clock->p2;
p                 566 drivers/gpu/drm/i915/display/intel_display.c 	if (WARN_ON(clock->n == 0 || clock->p == 0))
p                 569 drivers/gpu/drm/i915/display/intel_display.c 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
p                 577 drivers/gpu/drm/i915/display/intel_display.c 	clock->p = clock->p1 * clock->p2;
p                 578 drivers/gpu/drm/i915/display/intel_display.c 	if (WARN_ON(clock->n == 0 || clock->p == 0))
p                 582 drivers/gpu/drm/i915/display/intel_display.c 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
p                 613 drivers/gpu/drm/i915/display/intel_display.c 		if (clock->p < limit->p.min || limit->p.max < clock->p)
p                 697 drivers/gpu/drm/i915/display/intel_display.c 					    clock.p != match_clock->p)
p                 753 drivers/gpu/drm/i915/display/intel_display.c 					    clock.p != match_clock->p)
p                 845 drivers/gpu/drm/i915/display/intel_display.c 		return calculated_clock->p > best_clock->p;
p                 859 drivers/gpu/drm/i915/display/intel_display.c 	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
p                 896 drivers/gpu/drm/i915/display/intel_display.c 				clock.p = clock.p1 * clock.p2;
p                 901 drivers/gpu/drm/i915/display/intel_display.c 					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
p                 963 drivers/gpu/drm/i915/display/intel_display.c 			clock.p = clock.p1 * clock.p2;
p                 965 drivers/gpu/drm/i915/display/intel_display.c 			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
p                1137 drivers/gpu/drm/i915/display/intel_display.c #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
p                1138 drivers/gpu/drm/i915/display/intel_display.c #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
p                1152 drivers/gpu/drm/i915/display/intel_display.c #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
p                1153 drivers/gpu/drm/i915/display/intel_display.c #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
p                1283 drivers/gpu/drm/i915/display/intel_display.c #define assert_plane_enabled(p) assert_plane(p, true)
p                1284 drivers/gpu/drm/i915/display/intel_display.c #define assert_plane_disabled(p) assert_plane(p, false)
p                  91 drivers/gpu/drm/i915/display/intel_display.h #define pipe_name(p) ((p) + 'A')
p                 155 drivers/gpu/drm/i915/display/intel_display.h #define plane_name(p) ((p) + 'A')
p                 156 drivers/gpu/drm/i915/display/intel_display.h #define sprite_name(p, s) ((p) * RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
p                 561 drivers/gpu/drm/i915/display/intel_display.h #define assert_pll_enabled(d, p) assert_pll(d, p, true)
p                 562 drivers/gpu/drm/i915/display/intel_display.h #define assert_pll_disabled(d, p) assert_pll(d, p, false)
p                 568 drivers/gpu/drm/i915/display/intel_display.h #define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
p                 569 drivers/gpu/drm/i915/display/intel_display.h #define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
p                 571 drivers/gpu/drm/i915/display/intel_display.h #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
p                 572 drivers/gpu/drm/i915/display/intel_display.h #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
p                 446 drivers/gpu/drm/i915/display/intel_display_types.h 	int	p;
p                3078 drivers/gpu/drm/i915/display/intel_dp.c 	enum pipe p;
p                3080 drivers/gpu/drm/i915/display/intel_dp.c 	for_each_pipe(dev_priv, p) {
p                3081 drivers/gpu/drm/i915/display/intel_dp.c 		u32 val = I915_READ(TRANS_DP_CTL(p));
p                3084 drivers/gpu/drm/i915/display/intel_dp.c 			*pipe = p;
p                  42 drivers/gpu/drm/i915/display/intel_dp_link_training.c 	u8 p = 0;
p                  53 drivers/gpu/drm/i915/display/intel_dp_link_training.c 		if (this_p > p)
p                  54 drivers/gpu/drm/i915/display/intel_dp_link_training.c 			p = this_p;
p                  62 drivers/gpu/drm/i915/display/intel_dp_link_training.c 	if (p >= preemph_max)
p                  63 drivers/gpu/drm/i915/display/intel_dp_link_training.c 		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
p                  66 drivers/gpu/drm/i915/display/intel_dp_link_training.c 		intel_dp->train_set[lane] = v | p;
p                 610 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	unsigned p, n2, r2;
p                 689 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 				 unsigned int p,
p                 695 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	if (best->p == 0) {
p                 696 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 		best->p = p;
p                 716 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	a = freq2k * budget * p * r2;
p                 717 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	b = freq2k * budget * best->p * best->r2;
p                 718 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
p                 719 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	diff_best = abs_diff(freq2k * best->p * best->r2,
p                 726 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 		if (best->p * best->r2 * diff < p * r2 * diff_best) {
p                 727 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 			best->p = p;
p                 733 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 		best->p = p;
p                 739 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 			best->p = p;
p                 752 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	unsigned p, n2, r2;
p                 801 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 			for (p = P_MIN; p <= P_MAX; p += P_INC)
p                 803 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 						     r2, n2, p, &best);
p                 808 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	*p_out = best.p;
p                 820 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	unsigned int p, n2, r2;
p                 822 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
p                 826 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	      WRPLL_DIVIDER_POST(p);
p                1119 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	unsigned int p;			/* chosen divider */
p                1150 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 			ctx->p = divider;
p                1158 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 		ctx->p = divider;
p                1162 drivers/gpu/drm/i915/display/intel_dpll_mgr.c static void skl_wrpll_get_multipliers(unsigned int p,
p                1168 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	if (p % 2 == 0) {
p                1169 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 		unsigned int half = p / 2;
p                1188 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
p                1191 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 		*p2 = p / 3;
p                1192 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	} else if (p == 5 || p == 7) {
p                1193 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 		*p0 = p;
p                1196 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	} else if (p == 15) {
p                1200 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	} else if (p == 21) {
p                1204 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	} else if (p == 35) {
p                1318 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 				unsigned int p = dividers[d].list[i];
p                1319 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 				u64 dco_freq = p * afe_clock;
p                1324 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 						      p);
p                1340 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 		if (d == 0 && ctx.p)
p                1344 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	if (!ctx.p) {
p                1354 drivers/gpu/drm/i915/display/intel_dpll_mgr.c 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
p                 358 drivers/gpu/drm/i915/display/intel_dpll_mgr.h #define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
p                 359 drivers/gpu/drm/i915/display/intel_dpll_mgr.h #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
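
The skl_wrpll_get_multipliers() hits above split one overall divider p into three stages with p0 * p1 * p2 == p; the driver does this with a fixed case table over its legal divider list. The sketch below is a hypothetical brute-force factoring over a few small stage values, shown only to illustrate the idea, not the driver's actual mapping.

/*
 * Illustrative (not the driver's) decomposition of an overall divider
 * p into three stages so that p0 * p1 * p2 == p.  skl_wrpll_get_multipliers()
 * uses a fixed case table for its legal dividers; here a small
 * brute-force search over a few candidate stage values shows the idea.
 */
#include <stdio.h>

static int split_divider(unsigned int p, unsigned int *p0,
                         unsigned int *p1, unsigned int *p2)
{
        /* a few plausible per-stage values, tried largest first */
        static const unsigned int stage[] = { 7, 5, 3, 2, 1 };
        unsigned int i, j;

        for (i = 0; i < sizeof(stage) / sizeof(stage[0]); i++) {
                for (j = 0; j < sizeof(stage) / sizeof(stage[0]); j++) {
                        unsigned int outer = stage[i] * stage[j];

                        if (p % outer == 0) {
                                *p0 = stage[i];
                                *p1 = p / outer; /* middle stage absorbs the rest */
                                *p2 = stage[j];
                                return 0;
                        }
                }
        }
        return -1;
}

int main(void)
{
        static const unsigned int dividers[] = { 14, 21, 42 };
        unsigned int i, p0, p1, p2;

        for (i = 0; i < sizeof(dividers) / sizeof(dividers[0]); i++)
                if (!split_divider(dividers[i], &p0, &p1, &p2))
                        printf("%u = %u * %u * %u\n", dividers[i], p0, p1, p2);
        return 0;
}
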
p                  61 drivers/gpu/drm/i915/display/vlv_dsi_pll.c 	unsigned int m, n, p;
p                  88 drivers/gpu/drm/i915/display/vlv_dsi_pll.c 		for (p = p_min; p <= p_max && delta; p++) {
p                  93 drivers/gpu/drm/i915/display/vlv_dsi_pll.c 			int calc_dsi_clk = (m * ref_clk) / (p * n);
p                  98 drivers/gpu/drm/i915/display/vlv_dsi_pll.c 				calc_p = p;
p                 262 drivers/gpu/drm/i915/display/vlv_dsi_pll.c 	u32 m = 0, p = 0, n;
p                 290 drivers/gpu/drm/i915/display/vlv_dsi_pll.c 		p++;
p                 292 drivers/gpu/drm/i915/display/vlv_dsi_pll.c 	p--;
p                 294 drivers/gpu/drm/i915/display/vlv_dsi_pll.c 	if (!p) {
p                 311 drivers/gpu/drm/i915/display/vlv_dsi_pll.c 	dsi_clock = (m * refclk) / (p * n);
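
The vlv_dsi_pll.c hits above search divider combinations and keep the one whose (m * ref_clk) / (p * n) lands closest to the target DSI clock. A standalone sketch of that brute-force search follows; the ranges, the fixed n, and the 25 MHz reference are hypothetical examples, not the hardware's real limits.

/*
 * Standalone sketch of the brute-force divider search indexed above:
 * try (m, n, p) combinations and keep the one whose
 * (m * ref_clk) / (p * n) lands closest to the target clock.  All
 * ranges and the reference clock are illustrative assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const int ref_clk = 25000;              /* kHz, assumed */
        const int target = 405000;              /* kHz, example target clock */
        int m, n, p;
        int best_m = 0, best_n = 0, best_p = 0;
        int delta = target;

        for (m = 62; m <= 92 && delta; m++) {
                for (n = 1; n <= 1 && delta; n++) {     /* fixed N for simplicity */
                        for (p = 2; p <= 6 && delta; p++) {
                                int calc = (m * ref_clk) / (p * n);
                                int d = abs(target - calc);

                                if (d < delta) {
                                        delta = d;
                                        best_m = m;
                                        best_n = n;
                                        best_p = p;
                                }
                        }
                }
        }

        printf("m=%d n=%d p=%d -> %d kHz (off by %d kHz)\n",
               best_m, best_n, best_p,
               (best_m * ref_clk) / (best_p * best_n), delta);
        return 0;
}
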
p                 668 drivers/gpu/drm/i915/gem/i915_gem_context.c static int context_idr_cleanup(int id, void *p, void *data)
p                 670 drivers/gpu/drm/i915/gem/i915_gem_context.c 	context_close(p);
p                 674 drivers/gpu/drm/i915/gem/i915_gem_context.c static int vm_idr_cleanup(int id, void *p, void *data)
p                 676 drivers/gpu/drm/i915/gem/i915_gem_context.c 	i915_vm_put(p);
p                 913 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static inline void *unmask_page(unsigned long p)
p                 915 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	return (void *)(uintptr_t)(p & PAGE_MASK);
p                 918 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static inline unsigned int unmask_flags(unsigned long p)
p                 920 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	return p & ~PAGE_MASK;
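
The unmask_page()/unmask_flags() hits above recover a pointer and its per-entry flags from one unsigned long by relying on page alignment of the stored pointer. A standalone sketch, with PAGE_SIZE defined locally (4 KiB assumed) purely for illustration:

/*
 * Standalone sketch of the pointer/flag packing recovered by
 * unmask_page()/unmask_flags(): because the stored pointer is page
 * aligned, its low bits are free to carry flags.  PAGE_SIZE is
 * defined locally here (4 KiB assumed).
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))

static inline void *unmask_page(unsigned long p)
{
        return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
        return p & ~PAGE_MASK;
}

int main(void)
{
        /* pretend 0x40231000 is a page-aligned allocation */
        unsigned long entry = 0x40231000UL | 0x3;       /* two flag bits */

        printf("page  = %p\n", unmask_page(entry));
        printf("flags = 0x%x\n", unmask_flags(entry));
        return 0;
}
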
p                 114 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		struct page *p;
p                 149 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
p                 150 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		cpu = kmap(p) + offset_in_page(offset);
p                 168 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		kunmap(p);
p                 350 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 				    struct drm_printer *p)
p                 359 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 	drm_printf(p, "Signals:\n");
p                 364 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 			drm_printf(p, "\t[%llx:%llx%s] @ %dms\n",
p                 375 drivers/gpu/drm/i915/gt/intel_engine.h 				    struct drm_printer *p);
p                  18 drivers/gpu/drm/i915/gt/intel_engine_user.c 	struct rb_node *p = i915->uabi_engines.rb_node;
p                  20 drivers/gpu/drm/i915/gt/intel_engine_user.c 	while (p) {
p                  22 drivers/gpu/drm/i915/gt/intel_engine_user.c 			rb_entry(p, typeof(*it), uabi_node);
p                  25 drivers/gpu/drm/i915/gt/intel_engine_user.c 			p = p->rb_left;
p                  28 drivers/gpu/drm/i915/gt/intel_engine_user.c 			p = p->rb_right;
p                  30 drivers/gpu/drm/i915/gt/intel_engine_user.c 			p = p->rb_left;
p                 198 drivers/gpu/drm/i915/gt/intel_engine_user.c 	struct rb_node **p, *prev;
p                 204 drivers/gpu/drm/i915/gt/intel_engine_user.c 	p = &i915->uabi_engines.rb_node;
p                 224 drivers/gpu/drm/i915/gt/intel_engine_user.c 		rb_link_node(&engine->uabi_node, prev, p);
p                 235 drivers/gpu/drm/i915/gt/intel_engine_user.c 		p = &prev->rb_right;
p                 303 drivers/gpu/drm/i915/gt/intel_hangcheck.c 		struct drm_printer p = drm_debug_printer("hangcheck");
p                 309 drivers/gpu/drm/i915/gt/intel_hangcheck.c 			intel_engine_dump(engine, &p, "%s\n", engine->name);
p                 305 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct i915_priolist *p;
p                 316 drivers/gpu/drm/i915/gt/intel_lrc.c 	p = to_priolist(rb);
p                 317 drivers/gpu/drm/i915/gt/intel_lrc.c 	return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
p                 953 drivers/gpu/drm/i915/gt/intel_lrc.c 		struct i915_dependency *p;
p                 958 drivers/gpu/drm/i915/gt/intel_lrc.c 		for_each_waiter(p, rq) {
p                 960 drivers/gpu/drm/i915/gt/intel_lrc.c 				container_of(p->waiter, typeof(*w), sched);
p                1269 drivers/gpu/drm/i915/gt/intel_lrc.c 		struct i915_priolist *p = to_priolist(rb);
p                1273 drivers/gpu/drm/i915/gt/intel_lrc.c 		priolist_for_each_request_consume(rq, rn, p, i) {
p                1334 drivers/gpu/drm/i915/gt/intel_lrc.c 		rb_erase_cached(&p->node, &execlists->queue);
p                1335 drivers/gpu/drm/i915/gt/intel_lrc.c 		i915_priolist_free(p);
p                2363 drivers/gpu/drm/i915/gt/intel_lrc.c 		struct drm_printer p = drm_debug_printer(__func__);
p                2365 drivers/gpu/drm/i915/gt/intel_lrc.c 		intel_engine_dump(engine, &p, NULL);
p                2600 drivers/gpu/drm/i915/gt/intel_lrc.c 		struct i915_priolist *p = to_priolist(rb);
p                2603 drivers/gpu/drm/i915/gt/intel_lrc.c 		priolist_for_each_request_consume(rq, rn, p, i) {
p                2608 drivers/gpu/drm/i915/gt/intel_lrc.c 		rb_erase_cached(&p->node, &execlists->queue);
p                2609 drivers/gpu/drm/i915/gt/intel_lrc.c 		i915_priolist_free(p);
p                3952 drivers/gpu/drm/i915/gt/intel_lrc.c 		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
p                3955 drivers/gpu/drm/i915/gt/intel_lrc.c 		priolist_for_each_request(rq, p, i) {
p                 740 drivers/gpu/drm/i915/gt/intel_reset.c 		struct drm_printer p = drm_debug_printer(__func__);
p                 743 drivers/gpu/drm/i915/gt/intel_reset.c 			intel_engine_dump(engine, &p, "%s\n", engine->name);
p                 277 drivers/gpu/drm/i915/gt/selftest_context.c 		struct drm_printer p = drm_debug_printer(__func__);
p                 279 drivers/gpu/drm/i915/gt/selftest_context.c 		intel_engine_dump(engine, &p,
p                  29 drivers/gpu/drm/i915/gt/selftest_engine_pm.c 		const typeof(*igt_atomic_phases) *p;
p                  31 drivers/gpu/drm/i915/gt/selftest_engine_pm.c 		for (p = igt_atomic_phases; p->name; p++) {
p                  49 drivers/gpu/drm/i915/gt/selftest_engine_pm.c 			p->critical_section_begin();
p                  52 drivers/gpu/drm/i915/gt/selftest_engine_pm.c 				       engine->name, p->name);
p                  56 drivers/gpu/drm/i915/gt/selftest_engine_pm.c 			p->critical_section_end();
p                 609 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 					struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
p                 613 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 					intel_engine_dump(engine, &p,
p                 871 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 					struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
p                 875 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 					intel_engine_dump(engine, &p,
p                 895 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 					struct drm_printer p =
p                 901 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 					intel_engine_dump(engine, &p,
p                 915 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				struct drm_printer p =
p                 921 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				intel_engine_dump(engine, &p,
p                1020 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	typeof(*phases) *p;
p                1023 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	for (p = phases; p->name; p++) {
p                1024 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		if (p->flags & TEST_PRIORITY) {
p                1029 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		err = __igt_reset_engines(arg, p->name, p->flags);
p                1079 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
p                1083 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
p                1268 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
p                1272 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
p                1291 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
p                1294 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
p                1465 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
p                1470 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				intel_engine_dump(engine, &p,
p                1572 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
p                1576 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
p                1611 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				     const struct igt_atomic_section *p,
p                1618 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		  engine->name, mode, p->name);
p                1621 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	p->critical_section_begin();
p                1625 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	p->critical_section_end();
p                1630 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		       engine->name, mode, p->name);
p                1636 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				   const struct igt_atomic_section *p)
p                1642 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	err = __igt_atomic_reset_engine(engine, p, "idle");
p                1660 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		err = __igt_atomic_reset_engine(engine, p, "active");
p                1687 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	const typeof(*igt_atomic_phases) *p;
p                1705 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	for (p = igt_atomic_phases; p->name; p++) {
p                1710 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			err = igt_atomic_reset_engine(engine, p);
p                 447 drivers/gpu/drm/i915/gt/selftest_lrc.c 			struct drm_printer p = drm_info_printer(i915->drm.dev);
p                 452 drivers/gpu/drm/i915/gt/selftest_lrc.c 			intel_engine_dump(engine, &p, "%s\n", engine->name);
p                1239 drivers/gpu/drm/i915/gt/selftest_lrc.c 				struct drm_printer p =
p                1244 drivers/gpu/drm/i915/gt/selftest_lrc.c 				intel_engine_dump(engine, &p,
p                1255 drivers/gpu/drm/i915/gt/selftest_lrc.c 				struct drm_printer p =
p                1260 drivers/gpu/drm/i915/gt/selftest_lrc.c 				intel_engine_dump(engine, &p,
p                2136 drivers/gpu/drm/i915/gt/selftest_lrc.c 		const struct phase *p;
p                2150 drivers/gpu/drm/i915/gt/selftest_lrc.c 		for (p = phases; p->name; p++) {
p                2153 drivers/gpu/drm/i915/gt/selftest_lrc.c 						  p->flags);
p                2156 drivers/gpu/drm/i915/gt/selftest_lrc.c 				       __func__, p->name, class, nsibling, err);
p                  64 drivers/gpu/drm/i915/gt/selftest_reset.c 	const typeof(*igt_atomic_phases) *p;
p                  76 drivers/gpu/drm/i915/gt/selftest_reset.c 	for (p = igt_atomic_phases; p->name; p++) {
p                  79 drivers/gpu/drm/i915/gt/selftest_reset.c 		GEM_TRACE("__intel_gt_reset under %s\n", p->name);
p                  82 drivers/gpu/drm/i915/gt/selftest_reset.c 		p->critical_section_begin();
p                  86 drivers/gpu/drm/i915/gt/selftest_reset.c 		p->critical_section_end();
p                  90 drivers/gpu/drm/i915/gt/selftest_reset.c 			pr_err("__intel_gt_reset failed under %s\n", p->name);
p                 108 drivers/gpu/drm/i915/gt/selftest_reset.c 	const typeof(*igt_atomic_phases) *p;
p                 132 drivers/gpu/drm/i915/gt/selftest_reset.c 		for (p = igt_atomic_phases; p->name; p++) {
p                 134 drivers/gpu/drm/i915/gt/selftest_reset.c 				  engine->name, p->name);
p                 136 drivers/gpu/drm/i915/gt/selftest_reset.c 			p->critical_section_begin();
p                 138 drivers/gpu/drm/i915/gt/selftest_reset.c 			p->critical_section_end();
p                 142 drivers/gpu/drm/i915/gt/selftest_reset.c 				       engine->name, p->name);
p                 115 drivers/gpu/drm/i915/gt/selftest_timeline.c 	}, *p;
p                 140 drivers/gpu/drm/i915/gt/selftest_timeline.c 	for (p = phases; p->name; p++) {
p                 141 drivers/gpu/drm/i915/gt/selftest_timeline.c 		pr_debug("%s(%s)\n", __func__, p->name);
p                 143 drivers/gpu/drm/i915/gt/selftest_timeline.c 			err = __mock_hwsp_timeline(&state, na, p->flags);
p                 168 drivers/gpu/drm/i915/gt/selftest_timeline.c 		      const struct __igt_sync *p,
p                 173 drivers/gpu/drm/i915/gt/selftest_timeline.c 	if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
p                 175 drivers/gpu/drm/i915/gt/selftest_timeline.c 		       name, p->name, ctx, p->seqno, yesno(p->expected));
p                 179 drivers/gpu/drm/i915/gt/selftest_timeline.c 	if (p->set) {
p                 180 drivers/gpu/drm/i915/gt/selftest_timeline.c 		ret = __intel_timeline_sync_set(tl, ctx, p->seqno);
p                 207 drivers/gpu/drm/i915/gt/selftest_timeline.c 	}, *p;
p                 213 drivers/gpu/drm/i915/gt/selftest_timeline.c 	for (p = pass; p->name; p++) {
p                 218 drivers/gpu/drm/i915/gt/selftest_timeline.c 				ret = __igt_sync(&tl, ctx, p, "1");
p                 231 drivers/gpu/drm/i915/gt/selftest_timeline.c 			for (p = pass; p->name; p++) {
p                 232 drivers/gpu/drm/i915/gt/selftest_timeline.c 				ret = __igt_sync(&tl, ctx, p, "2");
p                  30 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c 	u32 p, i;
p                  35 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c 	for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) {
p                  37 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c 			policy = &policies->policy[p][i];
p                 563 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		struct i915_priolist *p = to_priolist(rb);
p                 567 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		priolist_for_each_request_consume(rq, rn, p, i) {
p                 583 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		rb_erase_cached(&p->node, &execlists->queue);
p                 584 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		i915_priolist_free(p);
p                 717 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		struct i915_priolist *p = to_priolist(rb);
p                 720 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		priolist_for_each_request_consume(rq, rn, p, i) {
p                 727 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		rb_erase_cached(&p->node, &execlists->queue);
p                 728 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		i915_priolist_free(p);
p                  88 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 	enum intel_platform p;
p                  95 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 	.p = INTEL_##platform_, \
p                 102 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c __uc_fw_auto_select(struct intel_uc_fw *uc_fw, enum intel_platform p, u8 rev)
p                 109 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 	for (i = 0; i < ARRAY_SIZE(fw_blobs) && p <= fw_blobs[i].p; i++) {
p                 110 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 		if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
p                 123 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 			if (fw_blobs[i].p < fw_blobs[i - 1].p)
p                 126 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 			if (fw_blobs[i].p == fw_blobs[i - 1].p &&
p                 131 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 			       intel_platform_name(fw_blobs[i - 1].p),
p                 133 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 			       intel_platform_name(fw_blobs[i].p),
p                 141 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 	if (i915_modparams.enable_guc == -1 && p < INTEL_ICELAKE)
p                 605 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
p                 607 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 	drm_printf(p, "%s firmware: %s\n",
p                 609 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 	drm_printf(p, "\tstatus: %s\n",
p                 611 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 	drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
p                 614 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 	drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
p                 615 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 	drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
p                 239 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p);
p                2870 drivers/gpu/drm/i915/gvt/cmd_parser.c 		void *p;
p                2873 drivers/gpu/drm/i915/gvt/cmd_parser.c 		p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
p                2875 drivers/gpu/drm/i915/gvt/cmd_parser.c 		if (!p) {
p                2879 drivers/gpu/drm/i915/gvt/cmd_parser.c 		s->ring_scan_buffer[ring_id] = p;
p                 211 drivers/gpu/drm/i915/gvt/dmabuf.c 	struct intel_vgpu_primary_plane_format p;
p                 218 drivers/gpu/drm/i915/gvt/dmabuf.c 		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
p                 221 drivers/gpu/drm/i915/gvt/dmabuf.c 		info->start = p.base;
p                 222 drivers/gpu/drm/i915/gvt/dmabuf.c 		info->start_gpa = p.base_gpa;
p                 223 drivers/gpu/drm/i915/gvt/dmabuf.c 		info->width = p.width;
p                 224 drivers/gpu/drm/i915/gvt/dmabuf.c 		info->height = p.height;
p                 225 drivers/gpu/drm/i915/gvt/dmabuf.c 		info->stride = p.stride;
p                 226 drivers/gpu/drm/i915/gvt/dmabuf.c 		info->drm_format = p.drm_format;
p                 228 drivers/gpu/drm/i915/gvt/dmabuf.c 		switch (p.tiled) {
p                 245 drivers/gpu/drm/i915/gvt/dmabuf.c 			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
p                  84 drivers/gpu/drm/i915/gvt/firmware.c 	void *p;
p                 102 drivers/gpu/drm/i915/gvt/firmware.c 	p = firmware + h->cfg_space_offset;
p                 105 drivers/gpu/drm/i915/gvt/firmware.c 		pci_read_config_dword(pdev, i, p + i);
p                 107 drivers/gpu/drm/i915/gvt/firmware.c 	memcpy(gvt->firmware.cfg_space, p, info->cfg_space_size);
p                 109 drivers/gpu/drm/i915/gvt/firmware.c 	p = firmware + h->mmio_offset;
p                 112 drivers/gpu/drm/i915/gvt/firmware.c 	intel_gvt_for_each_tracked_mmio(gvt, mmio_snapshot_handler, p);
p                 114 drivers/gpu/drm/i915/gvt/firmware.c 	memcpy(gvt->firmware.mmio, p, info->mmio_size);
p                 147 drivers/gpu/drm/i915/gvt/gtt.c #define gtt_init_entry(e, t, p, v) do { \
p                 149 drivers/gpu/drm/i915/gvt/gtt.c 	(e)->pdev = p; \
p                 101 drivers/gpu/drm/i915/gvt/handlers.c 	struct intel_gvt_mmio_info *info, *p;
p                 119 drivers/gpu/drm/i915/gvt/handlers.c 		p = find_mmio_info(gvt, info->offset);
p                 120 drivers/gpu/drm/i915/gvt/handlers.c 		if (p) {
p                 987 drivers/gpu/drm/i915/gvt/handlers.c 				int p = addr + t;
p                 989 drivers/gpu/drm/i915/gvt/handlers.c 				dpcd->data[p] = buf[t];
p                 991 drivers/gpu/drm/i915/gvt/handlers.c 				if (p == DPCD_TRAINING_PATTERN_SET)
p                  52 drivers/gpu/drm/i915/gvt/hypercall.h 	unsigned long (*from_virt_to_mfn)(void *p);
p                 348 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct kvmgt_pgfn *p;
p                 352 drivers/gpu/drm/i915/gvt/kvmgt.c 	hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
p                 353 drivers/gpu/drm/i915/gvt/kvmgt.c 		hash_del(&p->hnode);
p                 354 drivers/gpu/drm/i915/gvt/kvmgt.c 		kfree(p);
p                 361 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct kvmgt_pgfn *p, *res = NULL;
p                 363 drivers/gpu/drm/i915/gvt/kvmgt.c 	hash_for_each_possible(info->ptable, p, hnode, gfn) {
p                 364 drivers/gpu/drm/i915/gvt/kvmgt.c 		if (gfn == p->gfn) {
p                 365 drivers/gpu/drm/i915/gvt/kvmgt.c 			res = p;
p                 376 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct kvmgt_pgfn *p;
p                 378 drivers/gpu/drm/i915/gvt/kvmgt.c 	p = __kvmgt_protect_table_find(info, gfn);
p                 379 drivers/gpu/drm/i915/gvt/kvmgt.c 	return !!p;
p                 384 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct kvmgt_pgfn *p;
p                 389 drivers/gpu/drm/i915/gvt/kvmgt.c 	p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
p                 390 drivers/gpu/drm/i915/gvt/kvmgt.c 	if (WARN(!p, "gfn: 0x%llx\n", gfn))
p                 393 drivers/gpu/drm/i915/gvt/kvmgt.c 	p->gfn = gfn;
p                 394 drivers/gpu/drm/i915/gvt/kvmgt.c 	hash_add(info->ptable, &p->hnode, gfn);
p                 400 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct kvmgt_pgfn *p;
p                 402 drivers/gpu/drm/i915/gvt/kvmgt.c 	p = __kvmgt_protect_table_find(info, gfn);
p                 403 drivers/gpu/drm/i915/gvt/kvmgt.c 	if (p) {
p                 404 drivers/gpu/drm/i915/gvt/kvmgt.c 		hash_del(&p->hnode);
p                 405 drivers/gpu/drm/i915/gvt/kvmgt.c 		kfree(p);
p                 149 drivers/gpu/drm/i915/gvt/mpt.h static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
p                 151 drivers/gpu/drm/i915/gvt/mpt.h 	return intel_gvt_host.mpt->from_virt_to_mfn(p);
p                  70 drivers/gpu/drm/i915/gvt/reg.h 	typeof(_pipe) (p) = (_pipe); \
p                  72 drivers/gpu/drm/i915/gvt/reg.h 	(((p) == PIPE_A) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50080)) : \
p                  74 drivers/gpu/drm/i915/gvt/reg.h 	(((p) == PIPE_B) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50088)) : \
p                  76 drivers/gpu/drm/i915/gvt/reg.h 	(((p) == PIPE_C) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x5008C)) : \
p                 992 drivers/gpu/drm/i915/gvt/scheduler.c 	struct workload_thread_param *p = (struct workload_thread_param *)priv;
p                 993 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_gvt *gvt = p->gvt;
p                 994 drivers/gpu/drm/i915/gvt/scheduler.c 	int ring_id = p->ring_id;
p                1003 drivers/gpu/drm/i915/gvt/scheduler.c 	kfree(p);
p                 185 drivers/gpu/drm/i915/i915_active.c 	struct rb_node **p, *parent;
p                 208 drivers/gpu/drm/i915/i915_active.c 	p = &ref->tree.rb_node;
p                 209 drivers/gpu/drm/i915/i915_active.c 	while (*p) {
p                 210 drivers/gpu/drm/i915/i915_active.c 		parent = *p;
p                 219 drivers/gpu/drm/i915/i915_active.c 			p = &parent->rb_right;
p                 221 drivers/gpu/drm/i915/i915_active.c 			p = &parent->rb_left;
p                 229 drivers/gpu/drm/i915/i915_active.c 	rb_link_node(&node->node, parent, p);
p                 502 drivers/gpu/drm/i915/i915_active.c 	struct rb_node *prev, *p;
p                 518 drivers/gpu/drm/i915/i915_active.c 		p = &ref->cache->node;
p                 523 drivers/gpu/drm/i915/i915_active.c 	p = ref->tree.rb_node;
p                 524 drivers/gpu/drm/i915/i915_active.c 	while (p) {
p                 526 drivers/gpu/drm/i915/i915_active.c 			rb_entry(p, struct active_node, node);
p                 531 drivers/gpu/drm/i915/i915_active.c 		prev = p;
p                 533 drivers/gpu/drm/i915/i915_active.c 			p = p->rb_right;
p                 535 drivers/gpu/drm/i915/i915_active.c 			p = p->rb_left;
p                 544 drivers/gpu/drm/i915/i915_active.c 	for (p = prev; p; p = rb_next(p)) {
p                 546 drivers/gpu/drm/i915/i915_active.c 			rb_entry(p, struct active_node, node);
p                 577 drivers/gpu/drm/i915/i915_active.c 	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
p                 578 drivers/gpu/drm/i915/i915_active.c 	if (p == &ref->cache->node)
p                 582 drivers/gpu/drm/i915/i915_active.c 	return rb_entry(p, struct active_node, node);
p                 673 drivers/gpu/drm/i915/i915_active.c 		struct rb_node **p, *parent;
p                 676 drivers/gpu/drm/i915/i915_active.c 		p = &ref->tree.rb_node;
p                 677 drivers/gpu/drm/i915/i915_active.c 		while (*p) {
p                 680 drivers/gpu/drm/i915/i915_active.c 			parent = *p;
p                 684 drivers/gpu/drm/i915/i915_active.c 				p = &parent->rb_right;
p                 686 drivers/gpu/drm/i915/i915_active.c 				p = &parent->rb_left;
p                 688 drivers/gpu/drm/i915/i915_active.c 		rb_link_node(&node->node, parent, p);
p                  63 drivers/gpu/drm/i915/i915_debugfs.c 	struct drm_printer p = drm_seq_file_printer(m);
p                  69 drivers/gpu/drm/i915/i915_debugfs.c 	intel_device_info_dump_flags(info, &p);
p                  70 drivers/gpu/drm/i915/i915_debugfs.c 	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
p                  71 drivers/gpu/drm/i915/i915_debugfs.c 	intel_driver_caps_print(&dev_priv->caps, &p);
p                  74 drivers/gpu/drm/i915/i915_debugfs.c 	i915_params_dump(&i915_modparams, &p);
p                 266 drivers/gpu/drm/i915/i915_debugfs.c 		struct rb_node *p = obj->vma.tree.rb_node;
p                 268 drivers/gpu/drm/i915/i915_debugfs.c 		while (p) {
p                 271 drivers/gpu/drm/i915/i915_debugfs.c 			vma = rb_entry(p, typeof(*vma), obj_node);
p                 286 drivers/gpu/drm/i915/i915_debugfs.c 				p = p->rb_right;
p                 288 drivers/gpu/drm/i915/i915_debugfs.c 				p = p->rb_left;
p                1793 drivers/gpu/drm/i915/i915_debugfs.c 	struct drm_printer p;
p                1798 drivers/gpu/drm/i915/i915_debugfs.c 	p = drm_seq_file_printer(m);
p                1799 drivers/gpu/drm/i915/i915_debugfs.c 	intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);
p                1811 drivers/gpu/drm/i915/i915_debugfs.c 	struct drm_printer p;
p                1816 drivers/gpu/drm/i915/i915_debugfs.c 	p = drm_seq_file_printer(m);
p                1817 drivers/gpu/drm/i915/i915_debugfs.c 	intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);
p                2336 drivers/gpu/drm/i915/i915_debugfs.c 		struct drm_printer p = drm_seq_file_printer(m);
p                2338 drivers/gpu/drm/i915/i915_debugfs.c 		print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
p                2785 drivers/gpu/drm/i915/i915_debugfs.c 	struct drm_printer p;
p                2795 drivers/gpu/drm/i915/i915_debugfs.c 	p = drm_seq_file_printer(m);
p                2797 drivers/gpu/drm/i915/i915_debugfs.c 		intel_engine_dump(engine, &p, "%s\n", engine->name);
p                2807 drivers/gpu/drm/i915/i915_debugfs.c 	struct drm_printer p = drm_seq_file_printer(m);
p                2809 drivers/gpu/drm/i915/i915_debugfs.c 	intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
p                1484 drivers/gpu/drm/i915/i915_drv.c 		struct drm_printer p = drm_debug_printer("i915 device info:");
p                1486 drivers/gpu/drm/i915/i915_drv.c 		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
p                1494 drivers/gpu/drm/i915/i915_drv.c 		intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
p                1495 drivers/gpu/drm/i915/i915_drv.c 		intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
p                1863 drivers/gpu/drm/i915/i915_drv.h #define IS_REVID(p, since, until) \
p                1864 drivers/gpu/drm/i915/i915_drv.h 	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
p                1868 drivers/gpu/drm/i915/i915_drv.h 		      enum intel_platform p)
p                1877 drivers/gpu/drm/i915/i915_drv.h 	return p / pbits;
p                1882 drivers/gpu/drm/i915/i915_drv.h 		    enum intel_platform p)
p                1887 drivers/gpu/drm/i915/i915_drv.h 	return p % pbits + INTEL_SUBPLATFORM_BITS;
p                1891 drivers/gpu/drm/i915/i915_drv.h intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
p                1893 drivers/gpu/drm/i915/i915_drv.h 	const unsigned int pi = __platform_mask_index(info, p);
p                1899 drivers/gpu/drm/i915/i915_drv.h IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
p                1902 drivers/gpu/drm/i915/i915_drv.h 	const unsigned int pi = __platform_mask_index(info, p);
p                1903 drivers/gpu/drm/i915/i915_drv.h 	const unsigned int pb = __platform_mask_bit(info, p);
p                1905 drivers/gpu/drm/i915/i915_drv.h 	BUILD_BUG_ON(!__builtin_constant_p(p));
p                1912 drivers/gpu/drm/i915/i915_drv.h 	       enum intel_platform p, unsigned int s)
p                1915 drivers/gpu/drm/i915/i915_drv.h 	const unsigned int pi = __platform_mask_index(info, p);
p                1916 drivers/gpu/drm/i915/i915_drv.h 	const unsigned int pb = __platform_mask_bit(info, p);
p                1920 drivers/gpu/drm/i915/i915_drv.h 	BUILD_BUG_ON(!__builtin_constant_p(p));
p                2021 drivers/gpu/drm/i915/i915_drv.h #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
p                2051 drivers/gpu/drm/i915/i915_drv.h #define IS_CNL_REVID(p, since, until) \
p                2052 drivers/gpu/drm/i915/i915_drv.h 	(IS_CANNONLAKE(p) && IS_REVID(p, since, until))
p                2060 drivers/gpu/drm/i915/i915_drv.h #define IS_ICL_REVID(p, since, until) \
p                2061 drivers/gpu/drm/i915/i915_drv.h 	(IS_ICELAKE(p) && IS_REVID(p, since, until))
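
The __platform_mask_index()/__platform_mask_bit() hits above map a platform enum value onto a word and a bit of the runtime platform_mask array, with the low INTEL_SUBPLATFORM_BITS of every word reserved for sub-platform flags. A standalone sketch of that indexing math follows; the 3-bit sub-platform field and 32-bit words are assumptions for illustration only.

/*
 * Standalone sketch of the platform_mask indexing math indexed above:
 * each 32-bit word of the mask array reserves its low SUBPLATFORM_BITS
 * for sub-platform flags, so a platform enum value p maps to word
 * p / pbits and bit (p % pbits) + SUBPLATFORM_BITS within that word.
 * SUBPLATFORM_BITS is assumed to be 3 here purely for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define SUBPLATFORM_BITS        3
#define PBITS                   (32 - SUBPLATFORM_BITS)

static unsigned int mask_index(unsigned int p)
{
        return p / PBITS;
}

static unsigned int mask_bit(unsigned int p)
{
        return p % PBITS + SUBPLATFORM_BITS;
}

int main(void)
{
        uint32_t platform_mask[2] = { 0, 0 };
        unsigned int p = 31;    /* hypothetical platform enum value */

        platform_mask[mask_index(p)] |= 1u << mask_bit(p);

        printf("platform %u -> word %u, bit %u (mask[%u]=0x%08x)\n",
               p, mask_index(p), mask_bit(p),
               mask_index(p), platform_mask[mask_index(p)]);
        return 0;
}
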
p                 570 drivers/gpu/drm/i915/i915_gem_gtt.c 			    struct i915_page_dma *p,
p                 573 drivers/gpu/drm/i915/i915_gem_gtt.c 	p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
p                 574 drivers/gpu/drm/i915/i915_gem_gtt.c 	if (unlikely(!p->page))
p                 577 drivers/gpu/drm/i915/i915_gem_gtt.c 	p->daddr = dma_map_page_attrs(vm->dma,
p                 578 drivers/gpu/drm/i915/i915_gem_gtt.c 				      p->page, 0, PAGE_SIZE,
p                 582 drivers/gpu/drm/i915/i915_gem_gtt.c 	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
p                 583 drivers/gpu/drm/i915/i915_gem_gtt.c 		vm_free_page(vm, p->page);
p                 591 drivers/gpu/drm/i915/i915_gem_gtt.c 			  struct i915_page_dma *p)
p                 593 drivers/gpu/drm/i915/i915_gem_gtt.c 	return __setup_page_dma(vm, p, __GFP_HIGHMEM);
p                 597 drivers/gpu/drm/i915/i915_gem_gtt.c 			     struct i915_page_dma *p)
p                 599 drivers/gpu/drm/i915/i915_gem_gtt.c 	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
p                 600 drivers/gpu/drm/i915/i915_gem_gtt.c 	vm_free_page(vm, p->page);
p                 606 drivers/gpu/drm/i915/i915_gem_gtt.c fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
p                 608 drivers/gpu/drm/i915/i915_gem_gtt.c 	kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
p                 681 drivers/gpu/drm/i915/i915_gem_gtt.c 	struct i915_page_dma *p = px_base(&vm->scratch[0]);
p                 684 drivers/gpu/drm/i915/i915_gem_gtt.c 	dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
p                 686 drivers/gpu/drm/i915/i915_gem_gtt.c 	__free_pages(p->page, order);
p                 162 drivers/gpu/drm/i915/i915_gpu_error.c static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
p                 164 drivers/gpu/drm/i915/i915_gpu_error.c 	i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
p                 170 drivers/gpu/drm/i915/i915_gpu_error.c 	struct drm_printer p = {
p                 174 drivers/gpu/drm/i915/i915_gpu_error.c 	return p;
p                 186 drivers/gpu/drm/i915/i915_gpu_error.c 		struct page *p;
p                 188 drivers/gpu/drm/i915/i915_gpu_error.c 		p = alloc_page(gfp);
p                 189 drivers/gpu/drm/i915/i915_gpu_error.c 		if (!p)
p                 192 drivers/gpu/drm/i915/i915_gpu_error.c 		pagevec_add(pv, p);
p                 213 drivers/gpu/drm/i915/i915_gpu_error.c 	struct page *p;
p                 215 drivers/gpu/drm/i915/i915_gpu_error.c 	p = alloc_page(gfp);
p                 216 drivers/gpu/drm/i915/i915_gpu_error.c 	if (!p && pagevec_count(pv))
p                 217 drivers/gpu/drm/i915/i915_gpu_error.c 		p = pv->pages[--pv->nr];
p                 219 drivers/gpu/drm/i915/i915_gpu_error.c 	return p ? page_address(p) : NULL;
p                 224 drivers/gpu/drm/i915/i915_gpu_error.c 	struct page *p = virt_to_page(addr);
p                 227 drivers/gpu/drm/i915/i915_gpu_error.c 		pagevec_add(pv, p);
p                 229 drivers/gpu/drm/i915/i915_gpu_error.c 		__free_page(p);
p                 597 drivers/gpu/drm/i915/i915_gpu_error.c 	struct drm_printer p = i915_error_printer(m);
p                 599 drivers/gpu/drm/i915/i915_gpu_error.c 	intel_device_info_dump_flags(info, &p);
p                 600 drivers/gpu/drm/i915/i915_gpu_error.c 	intel_driver_caps_print(caps, &p);
p                 601 drivers/gpu/drm/i915/i915_gpu_error.c 	intel_device_info_dump_topology(&runtime->sseu, &p);
p                 607 drivers/gpu/drm/i915/i915_gpu_error.c 	struct drm_printer p = i915_error_printer(m);
p                 609 drivers/gpu/drm/i915/i915_gpu_error.c 	i915_params_dump(params, &p);
p                 627 drivers/gpu/drm/i915/i915_gpu_error.c 	struct drm_printer p = i915_error_printer(m);
p                 634 drivers/gpu/drm/i915/i915_gpu_error.c 	intel_uc_fw_dump(&error_uc->guc_fw, &p);
p                 635 drivers/gpu/drm/i915/i915_gpu_error.c 	intel_uc_fw_dump(&error_uc->huc_fw, &p);
p                 181 drivers/gpu/drm/i915/i915_params.c static __always_inline void _print_param(struct drm_printer *p,
p                 187 drivers/gpu/drm/i915/i915_params.c 		drm_printf(p, "i915.%s=%s\n", name, yesno(*(const bool *)x));
p                 189 drivers/gpu/drm/i915/i915_params.c 		drm_printf(p, "i915.%s=%d\n", name, *(const int *)x);
p                 191 drivers/gpu/drm/i915/i915_params.c 		drm_printf(p, "i915.%s=%u\n", name, *(const unsigned int *)x);
p                 193 drivers/gpu/drm/i915/i915_params.c 		drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
p                 206 drivers/gpu/drm/i915/i915_params.c void i915_params_dump(const struct i915_params *params, struct drm_printer *p)
p                 208 drivers/gpu/drm/i915/i915_params.c #define PRINT(T, x, ...) _print_param(p, #x, #T, &params->x);
p                  90 drivers/gpu/drm/i915/i915_params.h void i915_params_dump(const struct i915_params *params, struct drm_printer *p);
p                 895 drivers/gpu/drm/i915/i915_pci.c 	char *s, *p, *tok;
p                 916 drivers/gpu/drm/i915/i915_pci.c 	for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
p                3675 drivers/gpu/drm/i915/i915_perf.c static int destroy_config(int id, void *p, void *data)
p                3678 drivers/gpu/drm/i915/i915_perf.c 	struct i915_oa_config *oa_config = p;
p                7349 drivers/gpu/drm/i915/i915_reg.h #define  GEN9_PIPE_PLANE_FLIP_DONE(p)	(1 << (3 + (p)))
p                  56 drivers/gpu/drm/i915/i915_scheduler.c 		const struct i915_priolist *p = to_priolist(rb);
p                  58 drivers/gpu/drm/i915/i915_scheduler.c 		GEM_BUG_ON(p->priority >= last_prio);
p                  59 drivers/gpu/drm/i915/i915_scheduler.c 		last_prio = p->priority;
p                  61 drivers/gpu/drm/i915/i915_scheduler.c 		GEM_BUG_ON(!p->used);
p                  62 drivers/gpu/drm/i915/i915_scheduler.c 		for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
p                  63 drivers/gpu/drm/i915/i915_scheduler.c 			if (list_empty(&p->requests[i]))
p                  66 drivers/gpu/drm/i915/i915_scheduler.c 			GEM_BUG_ON(!(p->used & BIT(i)));
p                  75 drivers/gpu/drm/i915/i915_scheduler.c 	struct i915_priolist *p;
p                  95 drivers/gpu/drm/i915/i915_scheduler.c 		p = to_priolist(rb);
p                  96 drivers/gpu/drm/i915/i915_scheduler.c 		if (prio > p->priority) {
p                  98 drivers/gpu/drm/i915/i915_scheduler.c 		} else if (prio < p->priority) {
p                 107 drivers/gpu/drm/i915/i915_scheduler.c 		p = &execlists->default_priolist;
p                 109 drivers/gpu/drm/i915/i915_scheduler.c 		p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
p                 111 drivers/gpu/drm/i915/i915_scheduler.c 		if (unlikely(!p)) {
p                 127 drivers/gpu/drm/i915/i915_scheduler.c 	p->priority = prio;
p                 128 drivers/gpu/drm/i915/i915_scheduler.c 	for (i = 0; i < ARRAY_SIZE(p->requests); i++)
p                 129 drivers/gpu/drm/i915/i915_scheduler.c 		INIT_LIST_HEAD(&p->requests[i]);
p                 130 drivers/gpu/drm/i915/i915_scheduler.c 	rb_link_node(&p->node, rb, parent);
p                 131 drivers/gpu/drm/i915/i915_scheduler.c 	rb_insert_color_cached(&p->node, &execlists->queue, first);
p                 132 drivers/gpu/drm/i915/i915_scheduler.c 	p->used = 0;
p                 135 drivers/gpu/drm/i915/i915_scheduler.c 	p->used |= BIT(idx);
p                 136 drivers/gpu/drm/i915/i915_scheduler.c 	return &p->requests[idx];
p                 139 drivers/gpu/drm/i915/i915_scheduler.c void __i915_priolist_free(struct i915_priolist *p)
p                 141 drivers/gpu/drm/i915/i915_scheduler.c 	kmem_cache_free(global.slab_priorities, p);
p                 234 drivers/gpu/drm/i915/i915_scheduler.c 	struct i915_dependency *dep, *p;
p                 284 drivers/gpu/drm/i915/i915_scheduler.c 		list_for_each_entry(p, &node->signalers_list, signal_link) {
p                 285 drivers/gpu/drm/i915/i915_scheduler.c 			GEM_BUG_ON(p == dep); /* no cycles! */
p                 287 drivers/gpu/drm/i915/i915_scheduler.c 			if (node_signaled(p->signaler))
p                 290 drivers/gpu/drm/i915/i915_scheduler.c 			if (prio > READ_ONCE(p->signaler->attr.priority))
p                 291 drivers/gpu/drm/i915/i915_scheduler.c 				list_move_tail(&p->dfs_link, &dfs);
p                 317 drivers/gpu/drm/i915/i915_scheduler.c 	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
p                  48 drivers/gpu/drm/i915/i915_scheduler.h void __i915_priolist_free(struct i915_priolist *p);
p                  49 drivers/gpu/drm/i915/i915_scheduler.h static inline void i915_priolist_free(struct i915_priolist *p)
p                  51 drivers/gpu/drm/i915/i915_scheduler.h 	if (p->priority != I915_PRIORITY_NORMAL)
p                  52 drivers/gpu/drm/i915/i915_scheduler.h 		__i915_priolist_free(p);
p                  99 drivers/gpu/drm/i915/i915_syncmap.c static inline u32 *__sync_seqno(struct i915_syncmap *p)
p                 101 drivers/gpu/drm/i915/i915_syncmap.c 	GEM_BUG_ON(p->height);
p                 102 drivers/gpu/drm/i915/i915_syncmap.c 	return (u32 *)(p + 1);
p                 105 drivers/gpu/drm/i915/i915_syncmap.c static inline struct i915_syncmap **__sync_child(struct i915_syncmap *p)
p                 107 drivers/gpu/drm/i915/i915_syncmap.c 	GEM_BUG_ON(!p->height);
p                 108 drivers/gpu/drm/i915/i915_syncmap.c 	return (struct i915_syncmap **)(p + 1);
p                 112 drivers/gpu/drm/i915/i915_syncmap.c __sync_branch_idx(const struct i915_syncmap *p, u64 id)
p                 114 drivers/gpu/drm/i915/i915_syncmap.c 	return (id >> p->height) & MASK;
p                 118 drivers/gpu/drm/i915/i915_syncmap.c __sync_leaf_idx(const struct i915_syncmap *p, u64 id)
p                 120 drivers/gpu/drm/i915/i915_syncmap.c 	GEM_BUG_ON(p->height);
p                 124 drivers/gpu/drm/i915/i915_syncmap.c static inline u64 __sync_branch_prefix(const struct i915_syncmap *p, u64 id)
p                 126 drivers/gpu/drm/i915/i915_syncmap.c 	return id >> p->height >> SHIFT;
p                 129 drivers/gpu/drm/i915/i915_syncmap.c static inline u64 __sync_leaf_prefix(const struct i915_syncmap *p, u64 id)
p                 131 drivers/gpu/drm/i915/i915_syncmap.c 	GEM_BUG_ON(p->height);
p                 156 drivers/gpu/drm/i915/i915_syncmap.c 	struct i915_syncmap *p;
p                 159 drivers/gpu/drm/i915/i915_syncmap.c 	p = *root;
p                 160 drivers/gpu/drm/i915/i915_syncmap.c 	if (!p)
p                 163 drivers/gpu/drm/i915/i915_syncmap.c 	if (likely(__sync_leaf_prefix(p, id) == p->prefix))
p                 168 drivers/gpu/drm/i915/i915_syncmap.c 		p = p->parent;
p                 169 drivers/gpu/drm/i915/i915_syncmap.c 		if (!p)
p                 172 drivers/gpu/drm/i915/i915_syncmap.c 		if (__sync_branch_prefix(p, id) == p->prefix)
p                 178 drivers/gpu/drm/i915/i915_syncmap.c 		if (!p->height)
p                 181 drivers/gpu/drm/i915/i915_syncmap.c 		p = __sync_child(p)[__sync_branch_idx(p, id)];
p                 182 drivers/gpu/drm/i915/i915_syncmap.c 		if (!p)
p                 185 drivers/gpu/drm/i915/i915_syncmap.c 		if (__sync_branch_prefix(p, id) != p->prefix)
p                 189 drivers/gpu/drm/i915/i915_syncmap.c 	*root = p;
p                 191 drivers/gpu/drm/i915/i915_syncmap.c 	idx = __sync_leaf_idx(p, id);
p                 192 drivers/gpu/drm/i915/i915_syncmap.c 	if (!(p->bitmap & BIT(idx)))
p                 195 drivers/gpu/drm/i915/i915_syncmap.c 	return seqno_later(__sync_seqno(p)[idx], seqno);
p                 201 drivers/gpu/drm/i915/i915_syncmap.c 	struct i915_syncmap *p;
p                 203 drivers/gpu/drm/i915/i915_syncmap.c 	p = kmalloc(sizeof(*p) + KSYNCMAP * sizeof(u32), GFP_KERNEL);
p                 204 drivers/gpu/drm/i915/i915_syncmap.c 	if (unlikely(!p))
p                 207 drivers/gpu/drm/i915/i915_syncmap.c 	p->parent = parent;
p                 208 drivers/gpu/drm/i915/i915_syncmap.c 	p->height = 0;
p                 209 drivers/gpu/drm/i915/i915_syncmap.c 	p->bitmap = 0;
p                 210 drivers/gpu/drm/i915/i915_syncmap.c 	p->prefix = __sync_leaf_prefix(p, id);
p                 211 drivers/gpu/drm/i915/i915_syncmap.c 	return p;
p                 214 drivers/gpu/drm/i915/i915_syncmap.c static inline void __sync_set_seqno(struct i915_syncmap *p, u64 id, u32 seqno)
p                 216 drivers/gpu/drm/i915/i915_syncmap.c 	unsigned int idx = __sync_leaf_idx(p, id);
p                 218 drivers/gpu/drm/i915/i915_syncmap.c 	p->bitmap |= BIT(idx);
p                 219 drivers/gpu/drm/i915/i915_syncmap.c 	__sync_seqno(p)[idx] = seqno;
p                 222 drivers/gpu/drm/i915/i915_syncmap.c static inline void __sync_set_child(struct i915_syncmap *p,
p                 226 drivers/gpu/drm/i915/i915_syncmap.c 	p->bitmap |= BIT(idx);
p                 227 drivers/gpu/drm/i915/i915_syncmap.c 	__sync_child(p)[idx] = child;
p                 232 drivers/gpu/drm/i915/i915_syncmap.c 	struct i915_syncmap *p = *root;
p                 235 drivers/gpu/drm/i915/i915_syncmap.c 	if (!p) {
p                 236 drivers/gpu/drm/i915/i915_syncmap.c 		p = __sync_alloc_leaf(NULL, id);
p                 237 drivers/gpu/drm/i915/i915_syncmap.c 		if (unlikely(!p))
p                 244 drivers/gpu/drm/i915/i915_syncmap.c 	GEM_BUG_ON(__sync_leaf_prefix(p, id) == p->prefix);
p                 248 drivers/gpu/drm/i915/i915_syncmap.c 		if (!p->parent)
p                 251 drivers/gpu/drm/i915/i915_syncmap.c 		p = p->parent;
p                 253 drivers/gpu/drm/i915/i915_syncmap.c 		if (__sync_branch_prefix(p, id) == p->prefix)
p                 281 drivers/gpu/drm/i915/i915_syncmap.c 		if (__sync_branch_prefix(p, id) != p->prefix) {
p                 291 drivers/gpu/drm/i915/i915_syncmap.c 			above = fls64(__sync_branch_prefix(p, id) ^ p->prefix);
p                 293 drivers/gpu/drm/i915/i915_syncmap.c 			next->height = above + p->height;
p                 297 drivers/gpu/drm/i915/i915_syncmap.c 			if (p->parent) {
p                 298 drivers/gpu/drm/i915/i915_syncmap.c 				idx = __sync_branch_idx(p->parent, id);
p                 299 drivers/gpu/drm/i915/i915_syncmap.c 				__sync_child(p->parent)[idx] = next;
p                 300 drivers/gpu/drm/i915/i915_syncmap.c 				GEM_BUG_ON(!(p->parent->bitmap & BIT(idx)));
p                 302 drivers/gpu/drm/i915/i915_syncmap.c 			next->parent = p->parent;
p                 305 drivers/gpu/drm/i915/i915_syncmap.c 			idx = p->prefix >> (above - SHIFT) & MASK;
p                 306 drivers/gpu/drm/i915/i915_syncmap.c 			__sync_set_child(next, idx, p);
p                 307 drivers/gpu/drm/i915/i915_syncmap.c 			p->parent = next;
p                 310 drivers/gpu/drm/i915/i915_syncmap.c 			p = next;
p                 312 drivers/gpu/drm/i915/i915_syncmap.c 			if (!p->height)
p                 317 drivers/gpu/drm/i915/i915_syncmap.c 		GEM_BUG_ON(!p->height);
p                 318 drivers/gpu/drm/i915/i915_syncmap.c 		idx = __sync_branch_idx(p, id);
p                 319 drivers/gpu/drm/i915/i915_syncmap.c 		next = __sync_child(p)[idx];
p                 321 drivers/gpu/drm/i915/i915_syncmap.c 			next = __sync_alloc_leaf(p, id);
p                 325 drivers/gpu/drm/i915/i915_syncmap.c 			__sync_set_child(p, idx, next);
p                 326 drivers/gpu/drm/i915/i915_syncmap.c 			p = next;
p                 330 drivers/gpu/drm/i915/i915_syncmap.c 		p = next;
p                 334 drivers/gpu/drm/i915/i915_syncmap.c 	GEM_BUG_ON(p->prefix != __sync_leaf_prefix(p, id));
p                 335 drivers/gpu/drm/i915/i915_syncmap.c 	__sync_set_seqno(p, id, seqno);
p                 336 drivers/gpu/drm/i915/i915_syncmap.c 	*root = p;
p                 355 drivers/gpu/drm/i915/i915_syncmap.c 	struct i915_syncmap *p = *root;
p                 361 drivers/gpu/drm/i915/i915_syncmap.c 	if (likely(p && __sync_leaf_prefix(p, id) == p->prefix)) {
p                 362 drivers/gpu/drm/i915/i915_syncmap.c 		__sync_set_seqno(p, id, seqno);
p                 369 drivers/gpu/drm/i915/i915_syncmap.c static void __sync_free(struct i915_syncmap *p)
p                 371 drivers/gpu/drm/i915/i915_syncmap.c 	if (p->height) {
p                 374 drivers/gpu/drm/i915/i915_syncmap.c 		while ((i = ffs(p->bitmap))) {
p                 375 drivers/gpu/drm/i915/i915_syncmap.c 			p->bitmap &= ~0u << i;
p                 376 drivers/gpu/drm/i915/i915_syncmap.c 			__sync_free(__sync_child(p)[i - 1]);
p                 380 drivers/gpu/drm/i915/i915_syncmap.c 	kfree(p);
p                 397 drivers/gpu/drm/i915/i915_syncmap.c 	struct i915_syncmap *p;
p                 399 drivers/gpu/drm/i915/i915_syncmap.c 	p = *root;
p                 400 drivers/gpu/drm/i915/i915_syncmap.c 	if (!p)
p                 403 drivers/gpu/drm/i915/i915_syncmap.c 	while (p->parent)
p                 404 drivers/gpu/drm/i915/i915_syncmap.c 		p = p->parent;
p                 406 drivers/gpu/drm/i915/i915_syncmap.c 	__sync_free(p);
p                 140 drivers/gpu/drm/i915/i915_utils.h #define check_struct_size(p, member, n, sz) \
p                 141 drivers/gpu/drm/i915/i915_utils.h 	likely(__check_struct_size(sizeof(*(p)), \
p                 142 drivers/gpu/drm/i915/i915_utils.h 				   sizeof(*(p)->member) + __must_be_array((p)->member), \
p                 104 drivers/gpu/drm/i915/i915_vma.c 	struct rb_node *rb, **p;
p                 180 drivers/gpu/drm/i915/i915_vma.c 	p = &obj->vma.tree.rb_node;
p                 181 drivers/gpu/drm/i915/i915_vma.c 	while (*p) {
p                 185 drivers/gpu/drm/i915/i915_vma.c 		rb = *p;
p                 201 drivers/gpu/drm/i915/i915_vma.c 			p = &rb->rb_right;
p                 203 drivers/gpu/drm/i915/i915_vma.c 			p = &rb->rb_left;
p                 205 drivers/gpu/drm/i915/i915_vma.c 	rb_link_node(&vma->obj_node, rb, p);
p                  77 drivers/gpu/drm/i915/intel_device_info.c 				  struct drm_printer *p)
p                  79 drivers/gpu/drm/i915/intel_device_info.c #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
p                  83 drivers/gpu/drm/i915/intel_device_info.c #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
p                  88 drivers/gpu/drm/i915/intel_device_info.c static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
p                  92 drivers/gpu/drm/i915/intel_device_info.c 	drm_printf(p, "slice total: %u, mask=%04x\n",
p                  94 drivers/gpu/drm/i915/intel_device_info.c 	drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
p                  96 drivers/gpu/drm/i915/intel_device_info.c 		drm_printf(p, "slice%d: %u subslices, mask=%04x\n",
p                 100 drivers/gpu/drm/i915/intel_device_info.c 	drm_printf(p, "EU total: %u\n", sseu->eu_total);
p                 101 drivers/gpu/drm/i915/intel_device_info.c 	drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
p                 102 drivers/gpu/drm/i915/intel_device_info.c 	drm_printf(p, "has slice power gating: %s\n",
p                 104 drivers/gpu/drm/i915/intel_device_info.c 	drm_printf(p, "has subslice power gating: %s\n",
p                 106 drivers/gpu/drm/i915/intel_device_info.c 	drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
p                 110 drivers/gpu/drm/i915/intel_device_info.c 				    struct drm_printer *p)
p                 112 drivers/gpu/drm/i915/intel_device_info.c 	sseu_dump(&info->sseu, p);
p                 114 drivers/gpu/drm/i915/intel_device_info.c 	drm_printf(p, "CS timestamp frequency: %u kHz\n",
p                 153 drivers/gpu/drm/i915/intel_device_info.c 				     struct drm_printer *p)
p                 158 drivers/gpu/drm/i915/intel_device_info.c 		drm_printf(p, "Unavailable\n");
p                 163 drivers/gpu/drm/i915/intel_device_info.c 		drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n",
p                 170 drivers/gpu/drm/i915/intel_device_info.c 			drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
p                 800 drivers/gpu/drm/i915/intel_device_info.c static bool find_devid(u16 id, const u16 *p, unsigned int num)
p                 802 drivers/gpu/drm/i915/intel_device_info.c 	for (; num; num--, p++) {
p                 803 drivers/gpu/drm/i915/intel_device_info.c 		if (*p == id)
p                 981 drivers/gpu/drm/i915/intel_device_info.c 			     struct drm_printer *p)
p                 983 drivers/gpu/drm/i915/intel_device_info.c 	drm_printf(p, "Has logical contexts? %s\n",
p                 985 drivers/gpu/drm/i915/intel_device_info.c 	drm_printf(p, "scheduler: %x\n", caps->scheduler);
p                 228 drivers/gpu/drm/i915/intel_device_info.h 				  struct drm_printer *p);
p                 230 drivers/gpu/drm/i915/intel_device_info.h 				    struct drm_printer *p);
p                 232 drivers/gpu/drm/i915/intel_device_info.h 				     struct drm_printer *p);
p                 237 drivers/gpu/drm/i915/intel_device_info.h 			     struct drm_printer *p);
p                 173 drivers/gpu/drm/i915/intel_runtime_pm.c __print_intel_runtime_pm_wakeref(struct drm_printer *p,
p                 185 drivers/gpu/drm/i915/intel_runtime_pm.c 		drm_printf(p, "Wakeref last acquired:\n%s", buf);
p                 190 drivers/gpu/drm/i915/intel_runtime_pm.c 		drm_printf(p, "Wakeref last released:\n%s", buf);
p                 193 drivers/gpu/drm/i915/intel_runtime_pm.c 	drm_printf(p, "Wakeref count: %lu\n", dbg->count);
p                 205 drivers/gpu/drm/i915/intel_runtime_pm.c 		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
p                 226 drivers/gpu/drm/i915/intel_runtime_pm.c 		struct drm_printer p = drm_debug_printer("i915");
p                 228 drivers/gpu/drm/i915/intel_runtime_pm.c 		__print_intel_runtime_pm_wakeref(&p, debug);
p                 265 drivers/gpu/drm/i915/intel_runtime_pm.c 				    struct drm_printer *p)
p                 295 drivers/gpu/drm/i915/intel_runtime_pm.c 	__print_intel_runtime_pm_wakeref(p, &dbg);
p                 205 drivers/gpu/drm/i915/intel_runtime_pm.h 				    struct drm_printer *p);
p                 208 drivers/gpu/drm/i915/intel_runtime_pm.h 						  struct drm_printer *p)
p                 374 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			}, *p;
p                 386 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			for (p = phases; p->name; p++) {
p                 389 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 				offset = p->offset;
p                 395 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					if (p->step < 0) {
p                 404 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 						       __func__, p->name, err, npages, prime, offset);
p                 411 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
p                 419 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					if (p->step > 0) {
p                 426 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 				offset = p->offset;
p                 432 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					if (p->step < 0) {
p                 441 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 						       __func__, p->name, vma->node.start, vma->node.size,
p                 450 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 						       __func__, p->name, vma->node.start, vma->node.size,
p                 455 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					if (p->step > 0) {
p                 462 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 				offset = p->offset;
p                 468 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					if (p->step < 0) {
p                 477 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 						       __func__, p->name, err, npages, prime, offset);
p                 484 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
p                 492 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					if (p->step > 0) {
p                 499 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 				offset = p->offset;
p                 505 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					if (p->step < 0) {
p                 514 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
p                 523 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 						       __func__, p->name, vma->node.start, vma->node.size,
p                 528 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 					if (p->step > 0) {
p                  29 drivers/gpu/drm/i915/selftests/i915_syncmap.c __sync_print(struct i915_syncmap *p,
p                  55 drivers/gpu/drm/i915/selftests/i915_syncmap.c 	len = scnprintf(buf, *sz, "0x%016llx", p->prefix << p->height << SHIFT);
p                  58 drivers/gpu/drm/i915/selftests/i915_syncmap.c 	X = (p->height + SHIFT) / 4;
p                  61 drivers/gpu/drm/i915/selftests/i915_syncmap.c 	if (!p->height) {
p                  62 drivers/gpu/drm/i915/selftests/i915_syncmap.c 		for_each_set_bit(i, (unsigned long *)&p->bitmap, KSYNCMAP) {
p                  64 drivers/gpu/drm/i915/selftests/i915_syncmap.c 					i, __sync_seqno(p)[i]);
p                  76 drivers/gpu/drm/i915/selftests/i915_syncmap.c 	if (p->height) {
p                  77 drivers/gpu/drm/i915/selftests/i915_syncmap.c 		for_each_set_bit(i, (unsigned long *)&p->bitmap, KSYNCMAP) {
p                  78 drivers/gpu/drm/i915/selftests/i915_syncmap.c 			buf = __sync_print(__sync_child(p)[i], buf, sz,
p                  80 drivers/gpu/drm/i915/selftests/i915_syncmap.c 					   last << 1 | !!(p->bitmap >> (i + 1)),
p                  89 drivers/gpu/drm/i915/selftests/i915_syncmap.c i915_syncmap_print_to_buf(struct i915_syncmap *p, char *buf, unsigned long sz)
p                  91 drivers/gpu/drm/i915/selftests/i915_syncmap.c 	if (!p)
p                  94 drivers/gpu/drm/i915/selftests/i915_syncmap.c 	while (p->parent)
p                  95 drivers/gpu/drm/i915/selftests/i915_syncmap.c 		p = p->parent;
p                  97 drivers/gpu/drm/i915/selftests/i915_syncmap.c 	__sync_print(p, buf, &sz, 0, 1, 0);
p                 707 drivers/gpu/drm/i915/selftests/i915_vma.c 	}, *p;
p                 720 drivers/gpu/drm/i915/selftests/i915_vma.c 	for (p = phases; p->name; p++) { /* exercise both create/lookup */
p                 745 drivers/gpu/drm/i915/selftests/i915_vma.c 				if (!assert_pin(vma, &view, sz*PAGE_SIZE, p->name)) {
p                 747 drivers/gpu/drm/i915/selftests/i915_vma.c 					       p->name, offset, sz);
p                 754 drivers/gpu/drm/i915/selftests/i915_vma.c 					       p->name, offset, sz);
p                 771 drivers/gpu/drm/i915/selftests/i915_vma.c 			       p->name, count, nvma);
p                 787 drivers/gpu/drm/i915/selftests/i915_vma.c 		if (!assert_pin(vma, NULL, obj->base.size, p->name)) {
p                 788 drivers/gpu/drm/i915/selftests/i915_vma.c 			pr_err("(%s) inconsistent full pin\n", p->name);
p                 799 drivers/gpu/drm/i915/selftests/i915_vma.c 			pr_err("(%s) allocated an extra full vma!\n", p->name);
p                 868 drivers/gpu/drm/i915/selftests/i915_vma.c 	}, *p;
p                 887 drivers/gpu/drm/i915/selftests/i915_vma.c 		for (p = planes; p->width; p++) {
p                 890 drivers/gpu/drm/i915/selftests/i915_vma.c 				.rotated.plane[0] = *p,
p                 918 drivers/gpu/drm/i915/selftests/i915_vma.c 			for (y = 0 ; y < p->height; y++) {
p                 919 drivers/gpu/drm/i915/selftests/i915_vma.c 				for (x = 0 ; x < p->width; x++) {
p                 924 drivers/gpu/drm/i915/selftests/i915_vma.c 						offset = (x * p->height + y) * PAGE_SIZE;
p                 926 drivers/gpu/drm/i915/selftests/i915_vma.c 						offset = (y * p->width + x) * PAGE_SIZE;
p                 949 drivers/gpu/drm/i915/selftests/i915_vma.c 			for (y = 0 ; y < p->height; y++) {
p                 950 drivers/gpu/drm/i915/selftests/i915_vma.c 				for (x = 0 ; x < p->width; x++) {
p                  27 drivers/gpu/drm/imx/ipuv3-plane.c to_ipu_plane_state(struct drm_plane_state *p)
p                  29 drivers/gpu/drm/imx/ipuv3-plane.c 	return container_of(p, struct ipu_plane_state, base);
p                  32 drivers/gpu/drm/imx/ipuv3-plane.c static inline struct ipu_plane *to_ipu_plane(struct drm_plane *p)
p                  34 drivers/gpu/drm/imx/ipuv3-plane.c 	return container_of(p, struct ipu_plane, base);
p                 476 drivers/gpu/drm/mcde/mcde_drv.c 		struct device *p = NULL, *d;
p                 478 drivers/gpu/drm/mcde/mcde_drv.c 		while ((d = platform_find_device_by_driver(p, drv))) {
p                 479 drivers/gpu/drm/mcde/mcde_drv.c 			put_device(p);
p                 481 drivers/gpu/drm/mcde/mcde_drv.c 			p = d;
p                 483 drivers/gpu/drm/mcde/mcde_drv.c 		put_device(p);
p                 107 drivers/gpu/drm/mgag200/mgag200_mode.c 	unsigned int p, m, n;
p                 115 drivers/gpu/drm/mgag200/mgag200_mode.c 		m = n = p = 0;
p                 141 drivers/gpu/drm/mgag200/mgag200_mode.c 						p = testp - 1;
p                 149 drivers/gpu/drm/mgag200/mgag200_mode.c 		m = n = p = 0;
p                 184 drivers/gpu/drm/mgag200/mgag200_mode.c 						p = testp - 1;
p                 196 drivers/gpu/drm/mgag200/mgag200_mode.c 		p |= (fvv << 4);
p                 209 drivers/gpu/drm/mgag200/mgag200_mode.c 	WREG_DAC(MGA1064_PIX_PLLC_P, p);
p                 226 drivers/gpu/drm/mgag200/mgag200_mode.c 	unsigned int p, m, n;
p                 232 drivers/gpu/drm/mgag200/mgag200_mode.c 	m = n = p = 0;
p                 263 drivers/gpu/drm/mgag200/mgag200_mode.c 							p = ((testn & 0x600) >> 3) |
p                 296 drivers/gpu/drm/mgag200/mgag200_mode.c 						p = testp - 1;
p                 345 drivers/gpu/drm/mgag200/mgag200_mode.c 		WREG_DAC(MGA1064_WB_PIX_PLLC_P, p);
p                 405 drivers/gpu/drm/mgag200/mgag200_mode.c 	unsigned int p, m, n;
p                 409 drivers/gpu/drm/mgag200/mgag200_mode.c 	m = n = p = 0;
p                 434 drivers/gpu/drm/mgag200/mgag200_mode.c 					p = testp - 1;
p                 460 drivers/gpu/drm/mgag200/mgag200_mode.c 	WREG_DAC(MGA1064_EV_PIX_PLLC_P, p);
p                 498 drivers/gpu/drm/mgag200/mgag200_mode.c 	unsigned int p, m, n;
p                 504 drivers/gpu/drm/mgag200/mgag200_mode.c 	m = n = p = 0;
p                 530 drivers/gpu/drm/mgag200/mgag200_mode.c 					p = testp;
p                 564 drivers/gpu/drm/mgag200/mgag200_mode.c 						p = testp - 1;
p                 567 drivers/gpu/drm/mgag200/mgag200_mode.c 						p |= 0x80;
p                 591 drivers/gpu/drm/mgag200/mgag200_mode.c 		WREG_DAC(MGA1064_EH_PIX_PLLC_P, p);
p                 628 drivers/gpu/drm/mgag200/mgag200_mode.c 	unsigned int p, m, n;
p                 633 drivers/gpu/drm/mgag200/mgag200_mode.c 	m = n = p = 0;
p                 665 drivers/gpu/drm/mgag200/mgag200_mode.c 						p = testr | (testr << 3);
p                 696 drivers/gpu/drm/mgag200/mgag200_mode.c 	WREG_DAC(MGA1064_ER_PIX_PLLC_P, p);
p                  14 drivers/gpu/drm/msm/adreno/a5xx_debugfs.c static int pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
p                  18 drivers/gpu/drm/msm/adreno/a5xx_debugfs.c 	drm_printf(p, "PFP state:\n");
p                  22 drivers/gpu/drm/msm/adreno/a5xx_debugfs.c 		drm_printf(p, "  %02x: %08x\n", i,
p                  29 drivers/gpu/drm/msm/adreno/a5xx_debugfs.c static int me_print(struct msm_gpu *gpu, struct drm_printer *p)
p                  33 drivers/gpu/drm/msm/adreno/a5xx_debugfs.c 	drm_printf(p, "ME state:\n");
p                  37 drivers/gpu/drm/msm/adreno/a5xx_debugfs.c 		drm_printf(p, "  %02x: %08x\n", i,
p                  44 drivers/gpu/drm/msm/adreno/a5xx_debugfs.c static int meq_print(struct msm_gpu *gpu, struct drm_printer *p)
p                  48 drivers/gpu/drm/msm/adreno/a5xx_debugfs.c 	drm_printf(p, "MEQ state:\n");
p                  52 drivers/gpu/drm/msm/adreno/a5xx_debugfs.c 		drm_printf(p, "  %02x: %08x\n", i,
p                  59 drivers/gpu/drm/msm/adreno/a5xx_debugfs.c static int roq_print(struct msm_gpu *gpu, struct drm_printer *p)
p                  63 drivers/gpu/drm/msm/adreno/a5xx_debugfs.c 	drm_printf(p, "ROQ state:\n");
p                  71 drivers/gpu/drm/msm/adreno/a5xx_debugfs.c 		drm_printf(p, "  %02x: %08x %08x %08x %08x\n", i,
p                  83 drivers/gpu/drm/msm/adreno/a5xx_debugfs.c 	struct drm_printer p = drm_seq_file_printer(m);
p                  84 drivers/gpu/drm/msm/adreno/a5xx_debugfs.c 	int (*show)(struct msm_gpu *gpu, struct drm_printer *p) =
p                  87 drivers/gpu/drm/msm/adreno/a5xx_debugfs.c 	return show(priv->gpu, &p);
p                1310 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 		struct drm_printer *p)
p                1320 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	adreno_show(gpu, state, p);
p                1326 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	drm_printf(p, "registers-hlsq:\n");
p                1343 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 			drm_printf(p, "  - { offset: 0x%04x, value: 0x%08x }\n",
p                  63 drivers/gpu/drm/msm/adreno/a6xx_gpu.h 		struct drm_printer *p);
p                 929 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		struct drm_printer *p)
p                 945 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 			drm_printf(p, "  - { offset: 0x%06x, value: 0x%08x }\n",
p                 951 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c static void print_ascii85(struct drm_printer *p, size_t len, u32 *data)
p                 964 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	drm_puts(p, "    data: !!ascii85 |\n");
p                 965 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	drm_puts(p, "      ");
p                 971 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		drm_puts(p, ascii85_encode(data[i], out));
p                 973 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	drm_puts(p, "\n");
p                 976 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c static void print_name(struct drm_printer *p, const char *fmt, const char *name)
p                 978 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	drm_puts(p, fmt);
p                 979 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	drm_puts(p, name);
p                 980 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	drm_puts(p, "\n");
p                 984 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		struct drm_printer *p)
p                 992 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	print_name(p, "  - type: ", block->name);
p                 995 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		drm_printf(p, "    - bank: %d\n", i);
p                 996 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		drm_printf(p, "      size: %d\n", block->size);
p                1001 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		print_ascii85(p, block->size << 2,
p                1007 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		struct drm_printer *p)
p                1014 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		drm_printf(p, "    - context: %d\n", ctx);
p                1025 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 				drm_printf(p, "      - { offset: 0x%06x, value: 0x%08x }\n",
p                1033 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		struct drm_printer *p)
p                1038 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		print_name(p, "  - cluster-name: ", dbgahb->name);
p                1040 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 			obj->data, p);
p                1045 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		struct drm_printer *p)
p                1050 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		print_name(p, "  - cluster-name: ", cluster->name);
p                1052 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 			obj->data, p);
p                1057 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		struct drm_printer *p)
p                1064 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	print_name(p, "  - regs-name: ", indexed->name);
p                1065 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	drm_printf(p, "    dwords: %d\n", indexed->count);
p                1067 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	print_ascii85(p, indexed->count << 2, obj->data);
p                1071 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		u32 *data, struct drm_printer *p)
p                1074 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		print_name(p, "  - debugbus-block: ", block->name);
p                1080 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		drm_printf(p, "    count: %d\n", block->count << 1);
p                1082 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		print_ascii85(p, block->count << 3, data);
p                1087 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		struct drm_printer *p)
p                1094 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		a6xx_show_debugbus_block(obj->handle, obj->data, p);
p                1100 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		drm_puts(p, "  - debugbus-block: A6XX_DBGBUS_VBIF\n");
p                1101 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		drm_printf(p, "    count: %d\n", VBIF_DEBUGBUS_BLOCK_SIZE);
p                1104 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		print_ascii85(p, VBIF_DEBUGBUS_BLOCK_SIZE << 2, obj->data);
p                1110 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		a6xx_show_debugbus_block(obj->handle, obj->data, p);
p                1115 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		struct drm_printer *p)
p                1124 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	adreno_show(gpu, state, p);
p                1126 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	drm_puts(p, "registers:\n");
p                1134 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		a6xx_show_registers(regs->registers, obj->data, regs->count, p);
p                1137 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	drm_puts(p, "registers-gmu:\n");
p                1145 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		a6xx_show_registers(regs->registers, obj->data, regs->count, p);
p                1148 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	drm_puts(p, "indexed-registers:\n");
p                1150 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		a6xx_show_indexed_regs(&a6xx_state->indexed_regs[i], p);
p                1152 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	drm_puts(p, "shader-blocks:\n");
p                1154 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		a6xx_show_shader(&a6xx_state->shaders[i], p);
p                1156 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	drm_puts(p, "clusters:\n");
p                1158 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		a6xx_show_cluster(&a6xx_state->clusters[i], p);
p                1161 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		a6xx_show_dbgahb_cluster(&a6xx_state->dbgahb_clusters[i], p);
p                1163 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	drm_puts(p, "debugbus:\n");
p                1164 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	a6xx_show_debugbus(a6xx_state, p);
p                 654 drivers/gpu/drm/msm/adreno/adreno_gpu.c static void adreno_show_object(struct drm_printer *p, void **ptr, int len,
p                 687 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	drm_puts(p, "    data: !!ascii85 |\n");
p                 688 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	drm_puts(p, "     ");
p                 690 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	drm_puts(p, *ptr);
p                 692 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	drm_puts(p, "\n");
p                 696 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		struct drm_printer *p)
p                 704 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	drm_printf(p, "revision: %d (%d.%d.%d.%d)\n",
p                 709 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
p                 711 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	drm_puts(p, "ringbuffer:\n");
p                 714 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		drm_printf(p, "  - id: %d\n", i);
p                 715 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		drm_printf(p, "    iova: 0x%016llx\n", state->ring[i].iova);
p                 716 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		drm_printf(p, "    last-fence: %d\n", state->ring[i].seqno);
p                 717 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		drm_printf(p, "    retired-fence: %d\n", state->ring[i].fence);
p                 718 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		drm_printf(p, "    rptr: %d\n", state->ring[i].rptr);
p                 719 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		drm_printf(p, "    wptr: %d\n", state->ring[i].wptr);
p                 720 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		drm_printf(p, "    size: %d\n", MSM_GPU_RINGBUFFER_SZ);
p                 722 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		adreno_show_object(p, &state->ring[i].data,
p                 727 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		drm_puts(p, "bos:\n");
p                 730 drivers/gpu/drm/msm/adreno/adreno_gpu.c 			drm_printf(p, "  - iova: 0x%016llx\n",
p                 732 drivers/gpu/drm/msm/adreno/adreno_gpu.c 			drm_printf(p, "    size: %zd\n", state->bos[i].size);
p                 734 drivers/gpu/drm/msm/adreno/adreno_gpu.c 			adreno_show_object(p, &state->bos[i].data,
p                 740 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		drm_puts(p, "registers:\n");
p                 743 drivers/gpu/drm/msm/adreno/adreno_gpu.c 			drm_printf(p, "  - { offset: 0x%04x, value: 0x%08x }\n",
p                 232 drivers/gpu/drm/msm/adreno/adreno_gpu.h 		struct drm_printer *p);
p                  35 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c #define DPU_DEBUG_PHYS(p, fmt, ...) DPU_DEBUG("enc%d intf%d pp%d " fmt,\
p                  36 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c 		(p) ? (p)->parent->base.id : -1, \
p                  37 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c 		(p) ? (p)->intf_idx - INTF_0 : -1, \
p                  38 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c 		(p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
p                  41 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c #define DPU_ERROR_PHYS(p, fmt, ...) DPU_ERROR("enc%d intf%d pp%d " fmt,\
p                  42 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c 		(p) ? (p)->parent->base.id : -1, \
p                  43 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c 		(p) ? (p)->intf_idx - INTF_0 : -1, \
p                  44 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c 		(p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
p                 303 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h 		struct dpu_enc_phys_init_params *p);
p                 311 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h 		struct dpu_enc_phys_init_params *p);
p                 764 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c 		struct dpu_enc_phys_init_params *p)
p                 771 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c 	DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0);
p                 780 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c 	phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
p                 781 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c 	phys_enc->intf_idx = p->intf_idx;
p                 784 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c 	phys_enc->parent = p->parent;
p                 785 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c 	phys_enc->parent_ops = p->parent_ops;
p                 786 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c 	phys_enc->dpu_kms = p->dpu_kms;
p                 787 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c 	phys_enc->split_role = p->split_role;
p                 789 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c 	phys_enc->enc_spinlock = p->enc_spinlock;
p                 701 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 		struct dpu_enc_phys_init_params *p)
p                 707 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 	if (!p) {
p                 718 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 	phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
p                 719 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 	phys_enc->intf_idx = p->intf_idx;
p                 724 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 	phys_enc->parent = p->parent;
p                 725 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 	phys_enc->parent_ops = p->parent_ops;
p                 726 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 	phys_enc->dpu_kms = p->dpu_kms;
p                 727 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 	phys_enc->split_role = p->split_role;
p                 729 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 	phys_enc->enc_spinlock = p->enc_spinlock;
p                 755 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 	DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->intf_idx);
p                  82 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 		const struct intf_timing_params *p,
p                  98 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 	hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
p                  99 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 	p->h_front_porch;
p                 100 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 	vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
p                 101 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 	p->v_front_porch;
p                 103 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 	display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
p                 104 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 	hsync_period) + p->hsync_skew;
p                 105 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 	display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
p                 106 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 	p->hsync_skew - 1;
p                 109 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 		display_v_start += p->hsync_pulse_width + p->h_back_porch;
p                 110 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 		display_v_end -= p->h_front_porch;
p                 113 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 	hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
p                 114 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 	hsync_end_x = hsync_period - p->h_front_porch - 1;
p                 116 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 	if (p->width != p->xres) {
p                 118 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 		active_h_end = active_h_start + p->xres - 1;
p                 124 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 	if (p->height != p->yres) {
p                 126 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 		active_v_end = active_v_start + (p->yres * hsync_period) - 1;
p                 142 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 	hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
p                 147 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 		hsync_polarity = p->yres >= 720 ? 0 : 1;
p                 148 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 		vsync_polarity = p->yres >= 720 ? 0 : 1;
p                 172 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 			p->vsync_pulse_width * hsync_period);
p                 179 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 	DPU_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
p                 180 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 	DPU_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr);
p                 181 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c 	DPU_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
p                  58 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h 			const struct intf_timing_params *p,
p                  85 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h 			struct split_pipe_cfg *p);
p                 225 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c 	void *p = dpu_hw_util_get_log_mask_ptr();
p                 228 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c 	if (!p)
p                 233 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c 	debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
p                  30 drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c 		struct drm_printer p = drm_info_printer(mdp4_kms->dev->dev);
p                  31 drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c 		drm_state_dump(mdp4_kms->dev, &p);
p                 987 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c mdp5_crtc_atomic_print_state(struct drm_printer *p,
p                 998 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c 		drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl));
p                1000 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c 	drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
p                1004 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c 		drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
p                1007 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c 	drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode);
p                  33 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *p);
p                  34 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, struct mdp5_pipeline *p,
p                  32 drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c 		struct drm_printer p = drm_info_printer(mdp5_kms->dev->dev);
p                  33 drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c 		drm_state_dump(mdp5_kms->dev, &p);
p                  35 drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c 			mdp5_smp_dump(mdp5_kms->smp, &p);
p                 249 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c 	struct drm_printer p = drm_seq_file_printer(m);
p                 252 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c 		drm_printf(&p, "no SMP pool\n");
p                 256 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c 	mdp5_smp_dump(mdp5_kms->smp, &p);
p                 156 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c mdp5_plane_atomic_print_state(struct drm_printer *p,
p                 162 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 	drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ?
p                 165 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 		drm_printf(p, "\tright-hwpipe=%s\n",
p                 168 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 	drm_printf(p, "\tpremultiplied=%u\n", pstate->premultiplied);
p                 169 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 	drm_printf(p, "\tzpos=%u\n", pstate->zpos);
p                 170 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 	drm_printf(p, "\talpha=%u\n", pstate->alpha);
p                 171 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 	drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
p                 330 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
p                 338 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c 	drm_printf(p, "name\tinuse\tplane\n");
p                 339 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c 	drm_printf(p, "----\t-----\t-----\n");
p                 359 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c 			drm_printf(p, "%s:%d\t%d\t%s\n",
p                 367 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c 	drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt);
p                 368 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c 	drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt -
p                  73 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p);
p                 495 drivers/gpu/drm/msm/edp/edp_ctrl.c 	u8 p = pattern;
p                 497 drivers/gpu/drm/msm/edp/edp_ctrl.c 	DBG("pattern=%x", p);
p                 499 drivers/gpu/drm/msm/edp/edp_ctrl.c 				DP_TRAINING_PATTERN_SET, &p, 1) < 1) {
p                 117 drivers/gpu/drm/msm/hdmi/hdmi_i2c.c 		struct i2c_msg *p = &msgs[i];
p                 118 drivers/gpu/drm/msm/hdmi/hdmi_i2c.c 		uint32_t raw_addr = p->addr << 1;
p                 120 drivers/gpu/drm/msm/hdmi/hdmi_i2c.c 		if (p->flags & I2C_M_RD)
p                 136 drivers/gpu/drm/msm/hdmi/hdmi_i2c.c 		if (p->flags & I2C_M_RD) {
p                 137 drivers/gpu/drm/msm/hdmi/hdmi_i2c.c 			index += p->len;
p                 139 drivers/gpu/drm/msm/hdmi/hdmi_i2c.c 			for (j = 0; j < p->len; j++) {
p                 140 drivers/gpu/drm/msm/hdmi/hdmi_i2c.c 				ddc_data = HDMI_DDC_DATA_DATA(p->buf[j]) |
p                 147 drivers/gpu/drm/msm/hdmi/hdmi_i2c.c 		i2c_trans = HDMI_I2C_TRANSACTION_REG_CNT(p->len) |
p                 149 drivers/gpu/drm/msm/hdmi/hdmi_i2c.c 						(p->flags & I2C_M_RD) ? DDC_READ : DDC_WRITE) |
p                 179 drivers/gpu/drm/msm/hdmi/hdmi_i2c.c 		struct i2c_msg *p = &msgs[i];
p                 181 drivers/gpu/drm/msm/hdmi/hdmi_i2c.c 		if (!(p->flags & I2C_M_RD))
p                 199 drivers/gpu/drm/msm/hdmi/hdmi_i2c.c 		for (j = 0; j < p->len; j++) {
p                 201 drivers/gpu/drm/msm/hdmi/hdmi_i2c.c 			p->buf[j] = FIELD(ddc_data, HDMI_DDC_DATA_DATA);
p                  26 drivers/gpu/drm/msm/msm_debugfs.c 	struct drm_printer p = drm_seq_file_printer(m);
p                  36 drivers/gpu/drm/msm/msm_debugfs.c 	drm_printf(&p, "%s Status:\n", gpu->name);
p                  37 drivers/gpu/drm/msm/msm_debugfs.c 	gpu->funcs->show(gpu, show_priv->state, &p);
p                 129 drivers/gpu/drm/msm/msm_debugfs.c 	struct drm_printer p = drm_seq_file_printer(m);
p                 131 drivers/gpu/drm/msm/msm_debugfs.c 	drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
p                 354 drivers/gpu/drm/msm/msm_drv.c 		void *p;
p                 367 drivers/gpu/drm/msm/msm_drv.c 		p = dma_alloc_attrs(dev->dev, size,
p                 369 drivers/gpu/drm/msm/msm_drv.c 		if (!p) {
p                 340 drivers/gpu/drm/msm/msm_drv.h 		int w, int h, int p, uint32_t format);
p                 207 drivers/gpu/drm/msm/msm_fb.c msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format)
p                 213 drivers/gpu/drm/msm/msm_fb.c 		.pitches = { p },
p                  83 drivers/gpu/drm/msm/msm_gem.c 	struct page **p;
p                  86 drivers/gpu/drm/msm/msm_gem.c 	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
p                  87 drivers/gpu/drm/msm/msm_gem.c 	if (!p)
p                  94 drivers/gpu/drm/msm/msm_gem.c 		kvfree(p);
p                 100 drivers/gpu/drm/msm/msm_gem.c 		p[i] = phys_to_page(paddr);
p                 104 drivers/gpu/drm/msm/msm_gem.c 	return p;
p                 113 drivers/gpu/drm/msm/msm_gem.c 		struct page **p;
p                 117 drivers/gpu/drm/msm/msm_gem.c 			p = drm_gem_get_pages(obj);
p                 119 drivers/gpu/drm/msm/msm_gem.c 			p = get_pages_vram(obj, npages);
p                 121 drivers/gpu/drm/msm/msm_gem.c 		if (IS_ERR(p)) {
p                 123 drivers/gpu/drm/msm/msm_gem.c 					PTR_ERR(p));
p                 124 drivers/gpu/drm/msm/msm_gem.c 			return p;
p                 127 drivers/gpu/drm/msm/msm_gem.c 		msm_obj->pages = p;
p                 129 drivers/gpu/drm/msm/msm_gem.c 		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
p                 189 drivers/gpu/drm/msm/msm_gem.c 	struct page **p;
p                 198 drivers/gpu/drm/msm/msm_gem.c 	p = get_pages(obj);
p                 200 drivers/gpu/drm/msm/msm_gem.c 	return p;
p                 267 drivers/gpu/drm/msm/msm_gpu.c 	struct drm_printer p;
p                 279 drivers/gpu/drm/msm/msm_gpu.c 	p = drm_coredump_printer(&iter);
p                 281 drivers/gpu/drm/msm/msm_gpu.c 	drm_printf(&p, "---\n");
p                 282 drivers/gpu/drm/msm/msm_gpu.c 	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
p                 283 drivers/gpu/drm/msm/msm_gpu.c 	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
p                 284 drivers/gpu/drm/msm/msm_gpu.c 	drm_printf(&p, "time: %lld.%09ld\n",
p                 287 drivers/gpu/drm/msm/msm_gpu.c 		drm_printf(&p, "comm: %s\n", state->comm);
p                 289 drivers/gpu/drm/msm/msm_gpu.c 		drm_printf(&p, "cmdline: %s\n", state->cmd);
p                 291 drivers/gpu/drm/msm/msm_gpu.c 	gpu->funcs->show(gpu, state, &p);
p                  58 drivers/gpu/drm/msm/msm_gpu.h 			struct drm_printer *p);
p                 335 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c 			struct filter_params *p = &fparams[k][j];
p                 338 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c 				int64_t c = (p->k1 + p->ki*i + p->ki2*i*i +
p                 339 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c 					     p->ki3*i*i*i)
p                 340 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c 					+ (p->kr + p->kir*i + p->ki2r*i*i +
p                 341 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c 					   p->ki3r*i*i*i) * rs[k]
p                 342 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c 					+ (p->kf + p->kif*i + p->ki2f*i*i +
p                 343 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c 					   p->ki3f*i*i*i) * flicker
p                 344 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c 					+ (p->krf + p->kirf*i + p->ki2rf*i*i +
p                 345 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c 					   p->ki3rf*i*i*i) * flicker * rs[k];
p                   3 drivers/gpu/drm/nouveau/dispnv50/atom.h #define nv50_atom(p) container_of((p), struct nv50_atom, state)
p                  14 drivers/gpu/drm/nouveau/dispnv50/atom.h #define nv50_head_atom(p) container_of((p), struct nv50_head_atom, state)
p                 152 drivers/gpu/drm/nouveau/dispnv50/atom.h #define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state)
p                 655 drivers/gpu/drm/nouveau/dispnv50/disp.c #define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
p                 656 drivers/gpu/drm/nouveau/dispnv50/disp.c #define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
p                 657 drivers/gpu/drm/nouveau/dispnv50/disp.c #define nv50_msto(p) container_of((p), struct nv50_msto, encoder)
p                  79 drivers/gpu/drm/nouveau/dispnv50/disp.h #define evo_mthd(p, m, s) do {						\
p                  83 drivers/gpu/drm/nouveau/dispnv50/disp.h 	*((p)++) = ((_s << 18) | _m);					\
p                  86 drivers/gpu/drm/nouveau/dispnv50/disp.h #define evo_data(p, d) do {						\
p                  90 drivers/gpu/drm/nouveau/dispnv50/disp.h 	*((p)++) = _d;							\
p                   3 drivers/gpu/drm/nouveau/dispnv50/wndw.h #define nv50_wndw(p) container_of((p), struct nv50_wndw, plane)
p                   4 drivers/gpu/drm/nouveau/include/nvkm/core/client.h #define nvkm_client(p) container_of((p), struct nvkm_client, object)
p                  37 drivers/gpu/drm/nouveau/include/nvkm/core/client.h #define nvif_printk(o,l,p,f,a...) do {                                         \
p                  41 drivers/gpu/drm/nouveau/include/nvkm/core/client.h 		printk(KERN_##p "nouveau: %s:%08x:%08x: "f, _client->name,     \
p                 283 drivers/gpu/drm/nouveau/include/nvkm/core/device.h #define nvdev_printk_(d,l,p,f,a...) do {                                       \
p                 286 drivers/gpu/drm/nouveau/include/nvkm/core/device.h 		dev_##p(_device->dev, f, ##a);                                 \
p                 288 drivers/gpu/drm/nouveau/include/nvkm/core/device.h #define nvdev_printk(d,l,p,f,a...) nvdev_printk_((d), NV_DBG_##l, p, f, ##a)
p                   4 drivers/gpu/drm/nouveau/include/nvkm/core/engine.h #define nvkm_engine(p) container_of((p), struct nvkm_engine, subdev)
p                  58 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h #define nvkm_memory_target(p) (p)->func->target(p)
p                  59 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h #define nvkm_memory_page(p) (p)->func->page(p)
p                  60 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h #define nvkm_memory_bar2(p) (p)->func->bar2(p)
p                  61 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h #define nvkm_memory_addr(p) (p)->func->addr(p)
p                  62 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h #define nvkm_memory_size(p) (p)->func->size(p)
p                  63 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h #define nvkm_memory_boot(p,v) (p)->func->boot((p),(v))
p                  64 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h #define nvkm_memory_map(p,o,vm,va,av,ac)                                       \
p                  65 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h 	(p)->func->map((p),(o),(vm),(va),(av),(ac))
p                   4 drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h #define nvkm_oproxy(p) container_of((p), struct nvkm_oproxy, base)
p                  18 drivers/gpu/drm/nouveau/include/nvkm/core/os.h #define iowrite64_native(v,p) do {                                             \
p                  19 drivers/gpu/drm/nouveau/include/nvkm/core/os.h 	u32 __iomem *_p = (u32 __iomem *)(p);				       \
p                  37 drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h #define nvkm_printk_(s,l,p,f,a...) do {                                        \
p                  40 drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h 		dev_##p(_subdev->device->dev, "%s: "f,                         \
p                  44 drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h #define nvkm_printk(s,l,p,f,a...) nvkm_printk_((s), NV_DBG_##l, p, f, ##a)
p                   4 drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h #define nvkm_disp(p) container_of((p), struct nvkm_disp, engine)
p                   4 drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h #define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
p                   4 drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h #define nvkm_nvdec(p) container_of((p), struct nvkm_nvdec, engine)
p                   4 drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h #define nvkm_xtensa(p) container_of((p), struct nvkm_xtensa, engine)
p                  12 drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h 		    struct nvbios_ramcfg *p);
p                  22 drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h 		    struct nvbios_ramcfg *p);
p                   3 drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h #define nvkm_gsp(p) container_of((p), struct nvkm_gsp, subdev)
p                  55 drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h #define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev)
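
Most of the one-line entries in this listing are container_of() downcast helpers of the same form: given a pointer to an embedded base member (object, subdev, engine, base, ...), recover the enclosing derived structure. A standalone sketch of the pattern, with container_of() rebuilt from offsetof() so it compiles outside the kernel and with illustrative struct names:

#include <stddef.h>
#include <stdio.h>

/* Minimal container_of() equivalent for this sketch. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct engine { const char *name; };			/* embedded base */
struct falcon { int unit; struct engine engine; };	/* derived object */

/* Same shape as nvkm_falcon(p) above: downcast from the embedded base. */
#define to_falcon(p) container_of((p), struct falcon, engine)

int main(void)
{
	struct falcon f = { .unit = 3, .engine = { .name = "falcon" } };
	struct engine *base = &f.engine;

	printf("unit %d\n", to_falcon(base)->unit);	/* recovers &f */
	return 0;
}
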
p                  48 drivers/gpu/drm/nouveau/nouveau_connector.h #define nouveau_conn_atom(p)                                                   \
p                  49 drivers/gpu/drm/nouveau/nouveau_connector.h 	container_of((p), struct nouveau_conn_atom, state)
p                 328 drivers/gpu/drm/nouveau/nouveau_display.c #define PROP_ENUM(p,gen,n,list) do {                                           \
p                 337 drivers/gpu/drm/nouveau/nouveau_display.c 		p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c);        \
p                 339 drivers/gpu/drm/nouveau/nouveau_display.c 		while (p && l->gen_mask) {                                     \
p                 341 drivers/gpu/drm/nouveau/nouveau_display.c 				drm_property_add_enum(p, l->type, l->name);    \
p                 536 drivers/gpu/drm/nouveau/nouveau_svm.c 			struct nvif_vmm_pfnmap_v0 p;
p                 593 drivers/gpu/drm/nouveau/nouveau_svm.c 	args.i.p.version = 0;
p                 641 drivers/gpu/drm/nouveau/nouveau_svm.c 		args.i.p.page = PAGE_SHIFT;
p                 642 drivers/gpu/drm/nouveau/nouveau_svm.c 		args.i.p.addr = start;
p                 655 drivers/gpu/drm/nouveau/nouveau_svm.c 			args.i.p.size = pi << PAGE_SHIFT;
p                 685 drivers/gpu/drm/nouveau/nouveau_svm.c 			 args.i.p.addr,
p                 686 drivers/gpu/drm/nouveau/nouveau_svm.c 			 args.i.p.addr + args.i.p.size, fn - fi);
p                 689 drivers/gpu/drm/nouveau/nouveau_svm.c 		range.start = args.i.p.addr;
p                 690 drivers/gpu/drm/nouveau/nouveau_svm.c 		range.end = args.i.p.addr + args.i.p.size;
p                  27 drivers/gpu/drm/nouveau/nouveau_svm.h static inline int nouveau_svmm_init(struct drm_device *device, void *p,
p                  42 drivers/gpu/drm/nouveau/nouveau_svm.h static inline int nouveau_svmm_bind(struct drm_device *device, void *p,
p                  50 drivers/gpu/drm/nouveau/nouveau_usif.c 	struct usif_notify_p *p;
p                  91 drivers/gpu/drm/nouveau/nouveau_usif.c 	if (WARN_ON(!ntfy->p || ntfy->reply != (length + size)))
p                  93 drivers/gpu/drm/nouveau/nouveau_usif.c 	filp = ntfy->p->base.file_priv;
p                  96 drivers/gpu/drm/nouveau/nouveau_usif.c 	memcpy(&ntfy->p->e.data[0], header, length);
p                  97 drivers/gpu/drm/nouveau/nouveau_usif.c 	memcpy(&ntfy->p->e.data[length], data, size);
p                 100 drivers/gpu/drm/nouveau/nouveau_usif.c 		struct nvif_notify_rep_v0 *rep = (void *)ntfy->p->e.data;
p                 111 drivers/gpu/drm/nouveau/nouveau_usif.c 	if (!WARN_ON(filp->event_space < ntfy->p->e.base.length)) {
p                 112 drivers/gpu/drm/nouveau/nouveau_usif.c 		list_add_tail(&ntfy->p->base.link, &filp->event_list);
p                 113 drivers/gpu/drm/nouveau/nouveau_usif.c 		filp->event_space -= ntfy->p->e.base.length;
p                 209 drivers/gpu/drm/nouveau/nouveau_usif.c 	ntfy->p = kmalloc(sizeof(*ntfy->p) + ntfy->reply, GFP_KERNEL);
p                 210 drivers/gpu/drm/nouveau/nouveau_usif.c 	if (ret = -ENOMEM, !ntfy->p)
p                 212 drivers/gpu/drm/nouveau/nouveau_usif.c 	ntfy->p->base.event = &ntfy->p->e.base;
p                 213 drivers/gpu/drm/nouveau/nouveau_usif.c 	ntfy->p->base.file_priv = f;
p                 214 drivers/gpu/drm/nouveau/nouveau_usif.c 	ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF;
p                 215 drivers/gpu/drm/nouveau/nouveau_usif.c 	ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply;
p                 221 drivers/gpu/drm/nouveau/nouveau_usif.c 		kfree(ntfy->p);
p                 245 drivers/gpu/drm/nouveau/nouveau_usif.c 		kfree(ntfy->p);
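
The nouveau_usif.c entries above allocate the notify wrapper and its variable-size reply in one kmalloc() (sizeof(*ntfy->p) + ntfy->reply) and later memcpy() the header and data into the trailing bytes. A user-space sketch of that single-allocation, variable-tail idiom (struct layout and names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Fixed header plus a variable-length payload carved from one allocation. */
struct event {
	size_t length;		/* header + payload size */
	unsigned char data[];	/* flexible array member for the reply */
};

static struct event *event_new(const void *payload, size_t size)
{
	struct event *ev = malloc(sizeof(*ev) + size);

	if (!ev)
		return NULL;
	ev->length = sizeof(*ev) + size;
	memcpy(ev->data, payload, size);	/* reply lands in the tail */
	return ev;
}

int main(void)
{
	const char reply[] = "reply-bytes";
	struct event *ev = event_new(reply, sizeof(reply));

	if (ev) {
		printf("%zu: %s\n", ev->length, (const char *)ev->data);
		free(ev);
	}
	return 0;
}
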
p                2640 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c #define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break
p                2683 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c #define _(n,p,m) case NVKM_ENGINE_##n: if (p) return (m); break
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h #define nvkm_control(p) container_of((p), struct nvkm_control, object)
p                  24 drivers/gpu/drm/nouveau/nvkm/engine/device/user.c #define nvkm_udevice(p) container_of((p), struct nvkm_udevice, object)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h #define nv50_disp_chan(p) container_of((p), struct nv50_disp_chan, object)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h #define nvkm_dp(p) container_of((p), struct nvkm_dp, outp)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h #define nv50_disp(p) container_of((p), struct nv50_disp, base)
p                  24 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c #define nv04_disp_root(p) container_of((p), struct nv04_disp_root, object)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h #define nv50_disp_root(p) container_of((p), struct nv50_disp_root, object)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h #define nvkm_dma(p) container_of((p), struct nvkm_dma, engine)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h #define nvkm_dmaobj(p) container_of((p), struct nvkm_dmaobj, object)
p                  24 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c #define gf100_dmaobj(p) container_of((p), struct gf100_dmaobj, base)
p                  24 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c #define gf119_dmaobj(p) container_of((p), struct gf119_dmaobj, base)
p                  22 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergv100.c #define gv100_dmaobj(p) container_of((p), struct gv100_dmaobj, base)
p                  24 drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c #define nv04_dmaobj(p) container_of((p), struct nv04_dmaobj, base)
p                  24 drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c #define nv50_dmaobj(p) container_of((p), struct nv50_dmaobj, base)
p                 121 drivers/gpu/drm/nouveau/nvkm/engine/falcon.c 	void *p = vmalloc(len);
p                 123 drivers/gpu/drm/nouveau/nvkm/engine/falcon.c 	if (p)
p                 124 drivers/gpu/drm/nouveau/nvkm/engine/falcon.c 		memcpy(p, src, len);
p                 125 drivers/gpu/drm/nouveau/nvkm/engine/falcon.c 	return p;
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h #define nvkm_fifo_chan(p) container_of((p), struct nvkm_fifo_chan, object)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h #define gf100_fifo_chan(p) container_of((p), struct gf100_fifo_chan, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h #define gk104_fifo_chan(p) container_of((p), struct gk104_fifo_chan, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h #define nv04_fifo_chan(p) container_of((p), struct nv04_fifo_chan, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h #define nv50_fifo_chan(p) container_of((p), struct nv50_fifo_chan, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h #define gf100_fifo(p) container_of((p), struct gf100_fifo, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h #define gk104_fifo(p) container_of((p), struct gk104_fifo, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h #define nv04_fifo(p) container_of((p), struct nv04_fifo, base)
p                  34 drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c 	int i, p;
p                  40 drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c 	for (i = 0, p = 0; i < fifo->base.nr; i++) {
p                  42 drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c 			nvkm_wo32(cur, p++ * 4, i);
p                  47 drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c 	nvkm_wr32(device, 0x0032ec, p);
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h #define nv50_fifo(p) container_of((p), struct nv50_fifo, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h #define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine)
p                  75 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c 			const u32 p = GPC_UNIT(gpc, 0xc44 + (ppc * 4));
p                  79 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c 			mmio_wr32(info, p, bs);
p                 153 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c #define gf100_gr_object(p) container_of((p), struct gf100_gr_object, object)
p                1020 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c gf100_gr_mmio(struct gf100_gr *gr, const struct gf100_gr_pack *p)
p                1026 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c 	pack_for_each_init(init, pack, p) {
p                1037 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c gf100_gr_icmd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
p                1046 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c 	pack_for_each_init(init, pack, p) {
p                1050 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c 		if ((pack == p && init == p->init) || data != init->data) {
p                1075 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c gf100_gr_mthd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
p                1082 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c 	pack_for_each_init(init, pack, p) {
p                1087 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c 		if ((pack == p && init == p->init) || data != init->data) {
p                  26 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h #define gf100_gr(p) container_of((p), struct gf100_gr, base)
p                 252 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h #define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
p                 349 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c #define nv04_gr(p) container_of((p), struct nv04_gr, base)
p                 357 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c #define nv04_gr_chan(p) container_of((p), struct nv04_gr_chan, object)
p                 389 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c #define nv10_gr(p) container_of((p), struct nv10_gr, base)
p                 397 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c #define nv10_gr_chan(p) container_of((p), struct nv10_gr_chan, object)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h #define nv20_gr(p) container_of((p), struct nv20_gr, base)
p                  22 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h #define nv20_gr_chan(p) container_of((p), struct nv20_gr_chan, object)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h #define nv40_gr(p) container_of((p), struct nv40_gr, base)
p                  19 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h #define nv40_gr_chan(p) container_of((p), struct nv40_gr_chan, object)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h #define nv50_gr(p) container_of((p), struct nv50_gr, base)
p                  22 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h #define nv50_gr_chan(p) container_of((p), struct nv50_gr_chan, object)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h #define nvkm_gr(p) container_of((p), struct nvkm_gr, engine)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h #define nv31_mpeg(p) container_of((p), struct nv31_mpeg, engine)
p                  21 drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h #define nv31_mpeg_chan(p) container_of((p), struct nv31_mpeg_chan, object)
p                  24 drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c #define nv44_mpeg(p) container_of((p), struct nv44_mpeg, engine)
p                  41 drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c #define nv44_mpeg_chan(p) container_of((p), struct nv44_mpeg_chan, object)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h #define nv40_pm(p) container_of((p), struct nv40_pm, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h #define nvkm_pm(p) container_of((p), struct nvkm_pm, engine)
p                  70 drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h #define nvkm_perfdom(p) container_of((p), struct nvkm_perfdom, object)
p                  99 drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h #define nvkm_perfmon(p) container_of((p), struct nvkm_perfmon, object)
p                   6 drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h #define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h #define nvkm_sw_chan(p) container_of((p), struct nvkm_sw_chan, object)
p                  24 drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c #define nv04_sw_chan(p) container_of((p), struct nv04_sw_chan, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h #define nv50_sw_chan(p) container_of((p), struct nv50_sw_chan, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h #define nvkm_nvsw(p) container_of((p), struct nvkm_nvsw, object)
p                   4 drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h #define nvkm_sw(p) container_of((p), struct nvkm_sw, engine)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h #define gf100_bar(p) container_of((p), struct gf100_bar, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h #define nv50_bar(p) container_of((p), struct nv50_bar, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h #define nvkm_bar(p) container_of((p), struct nvkm_bar, subdev)
p                  34 drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c 	u32 p = *addr;
p                  42 drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c 		nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr);
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h #define nvkm_bios(p) container_of((p), struct nvkm_bios, subdev)
p                  79 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		struct nvbios_ramcfg *p)
p                  81 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	memset(p, 0x00, sizeof(*p));
p                  83 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->rammap_00_16_20 = (nvbios_rd08(bios, data + 0x16) & 0x20) >> 5;
p                  84 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->rammap_00_16_40 = (nvbios_rd08(bios, data + 0x16) & 0x40) >> 6;
p                  85 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->rammap_00_17_02 = (nvbios_rd08(bios, data + 0x17) & 0x02) >> 1;
p                  92 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *p)
p                  95 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	memset(p, 0x00, sizeof(*p));
p                  96 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->rammap_ver = *ver;
p                  97 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->rammap_hdr = *hdr;
p                 100 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_min      =  nvbios_rd16(bios, data + 0x00);
p                 101 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_max      =  nvbios_rd16(bios, data + 0x02);
p                 102 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_10_04_02 = (nvbios_rd08(bios, data + 0x04) & 0x02) >> 1;
p                 103 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_10_04_08 = (nvbios_rd08(bios, data + 0x04) & 0x08) >> 3;
p                 106 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_min      =  nvbios_rd16(bios, data + 0x00);
p                 107 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_max      =  nvbios_rd16(bios, data + 0x02);
p                 108 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_11_08_01 = (nvbios_rd08(bios, data + 0x08) & 0x01) >> 0;
p                 109 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_11_08_0c = (nvbios_rd08(bios, data + 0x08) & 0x0c) >> 2;
p                 110 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_11_08_10 = (nvbios_rd08(bios, data + 0x08) & 0x10) >> 4;
p                 112 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_11_09_01ff = (temp & 0x000001ff) >> 0;
p                 113 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_11_0a_03fe = (temp & 0x0003fe00) >> 9;
p                 114 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_11_0a_0400 = (temp & 0x00040000) >> 18;
p                 115 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_11_0a_0800 = (temp & 0x00080000) >> 19;
p                 116 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_11_0b_01f0 = (temp & 0x01f00000) >> 20;
p                 117 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_11_0b_0200 = (temp & 0x02000000) >> 25;
p                 118 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_11_0b_0400 = (temp & 0x04000000) >> 26;
p                 119 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_11_0b_0800 = (temp & 0x08000000) >> 27;
p                 120 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_11_0d    =  nvbios_rd08(bios, data + 0x0d);
p                 121 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_11_0e    =  nvbios_rd08(bios, data + 0x0e);
p                 122 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_11_0f    =  nvbios_rd08(bios, data + 0x0f);
p                 123 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->rammap_11_11_0c = (nvbios_rd08(bios, data + 0x11) & 0x0c) >> 2;
p                 160 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		struct nvbios_ramcfg *p)
p                 167 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_ver = 0;
p                 168 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_timing   =  nvbios_rd08(bios, data + 0x01);
p                 169 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_00_03_01 = (nvbios_rd08(bios, data + 0x03) & 0x01) >> 0;
p                 170 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_00_03_02 = (nvbios_rd08(bios, data + 0x03) & 0x02) >> 1;
p                 171 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_DLLoff   = (nvbios_rd08(bios, data + 0x03) & 0x04) >> 2;
p                 172 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_00_03_08 = (nvbios_rd08(bios, data + 0x03) & 0x08) >> 3;
p                 173 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_RON      = (nvbios_rd08(bios, data + 0x03) & 0x10) >> 3;
p                 174 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_FBVDDQ   = (nvbios_rd08(bios, data + 0x03) & 0x80) >> 7;
p                 175 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_00_04_02 = (nvbios_rd08(bios, data + 0x04) & 0x02) >> 1;
p                 176 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_00_04_04 = (nvbios_rd08(bios, data + 0x04) & 0x04) >> 2;
p                 177 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_00_04_20 = (nvbios_rd08(bios, data + 0x04) & 0x20) >> 5;
p                 178 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_00_05    = (nvbios_rd08(bios, data + 0x05) & 0xff) >> 0;
p                 179 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_00_06    = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
p                 180 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_00_07    = (nvbios_rd08(bios, data + 0x07) & 0xff) >> 0;
p                 181 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_00_08    = (nvbios_rd08(bios, data + 0x08) & 0xff) >> 0;
p                 182 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_00_09    = (nvbios_rd08(bios, data + 0x09) & 0xff) >> 0;
p                 183 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_00_0a_0f = (nvbios_rd08(bios, data + 0x0a) & 0x0f) >> 0;
p                 184 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_00_0a_f0 = (nvbios_rd08(bios, data + 0x0a) & 0xf0) >> 4;
p                 192 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		u8 *ver, u8 *hdr, struct nvbios_ramcfg *p)
p                 195 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_ver = *ver;
p                 196 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 	p->ramcfg_hdr = *hdr;
p                 199 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_timing   =  nvbios_rd08(bios, data + 0x01);
p                 200 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_10_02_01 = (nvbios_rd08(bios, data + 0x02) & 0x01) >> 0;
p                 201 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_10_02_02 = (nvbios_rd08(bios, data + 0x02) & 0x02) >> 1;
p                 202 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_10_02_04 = (nvbios_rd08(bios, data + 0x02) & 0x04) >> 2;
p                 203 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_10_02_08 = (nvbios_rd08(bios, data + 0x02) & 0x08) >> 3;
p                 204 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_10_02_10 = (nvbios_rd08(bios, data + 0x02) & 0x10) >> 4;
p                 205 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_10_02_20 = (nvbios_rd08(bios, data + 0x02) & 0x20) >> 5;
p                 206 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_DLLoff   = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
p                 207 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_10_03_0f = (nvbios_rd08(bios, data + 0x03) & 0x0f) >> 0;
p                 208 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_10_04_01 = (nvbios_rd08(bios, data + 0x04) & 0x01) >> 0;
p                 209 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_FBVDDQ   = (nvbios_rd08(bios, data + 0x04) & 0x08) >> 3;
p                 210 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_10_05    = (nvbios_rd08(bios, data + 0x05) & 0xff) >> 0;
p                 211 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_10_06    = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
p                 212 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_10_07    = (nvbios_rd08(bios, data + 0x07) & 0xff) >> 0;
p                 213 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_10_08    = (nvbios_rd08(bios, data + 0x08) & 0xff) >> 0;
p                 214 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_10_09_0f = (nvbios_rd08(bios, data + 0x09) & 0x0f) >> 0;
p                 215 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_10_09_f0 = (nvbios_rd08(bios, data + 0x09) & 0xf0) >> 4;
p                 218 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_timing   =  nvbios_rd08(bios, data + 0x00);
p                 219 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_01_01 = (nvbios_rd08(bios, data + 0x01) & 0x01) >> 0;
p                 220 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_01_02 = (nvbios_rd08(bios, data + 0x01) & 0x02) >> 1;
p                 221 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_01_04 = (nvbios_rd08(bios, data + 0x01) & 0x04) >> 2;
p                 222 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_01_08 = (nvbios_rd08(bios, data + 0x01) & 0x08) >> 3;
p                 223 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_01_10 = (nvbios_rd08(bios, data + 0x01) & 0x10) >> 4;
p                 224 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_DLLoff =   (nvbios_rd08(bios, data + 0x01) & 0x20) >> 5;
p                 225 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_01_40 = (nvbios_rd08(bios, data + 0x01) & 0x40) >> 6;
p                 226 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_01_80 = (nvbios_rd08(bios, data + 0x01) & 0x80) >> 7;
p                 227 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_02_03 = (nvbios_rd08(bios, data + 0x02) & 0x03) >> 0;
p                 228 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_02_04 = (nvbios_rd08(bios, data + 0x02) & 0x04) >> 2;
p                 229 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_02_08 = (nvbios_rd08(bios, data + 0x02) & 0x08) >> 3;
p                 230 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_02_10 = (nvbios_rd08(bios, data + 0x02) & 0x10) >> 4;
p                 231 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_02_40 = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
p                 232 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_02_80 = (nvbios_rd08(bios, data + 0x02) & 0x80) >> 7;
p                 233 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_03_0f = (nvbios_rd08(bios, data + 0x03) & 0x0f) >> 0;
p                 234 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_03_30 = (nvbios_rd08(bios, data + 0x03) & 0x30) >> 4;
p                 235 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_03_c0 = (nvbios_rd08(bios, data + 0x03) & 0xc0) >> 6;
p                 236 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_03_f0 = (nvbios_rd08(bios, data + 0x03) & 0xf0) >> 4;
p                 237 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_04    = (nvbios_rd08(bios, data + 0x04) & 0xff) >> 0;
p                 238 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_06    = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
p                 239 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_07_02 = (nvbios_rd08(bios, data + 0x07) & 0x02) >> 1;
p                 240 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_07_04 = (nvbios_rd08(bios, data + 0x07) & 0x04) >> 2;
p                 241 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_07_08 = (nvbios_rd08(bios, data + 0x07) & 0x08) >> 3;
p                 242 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_07_10 = (nvbios_rd08(bios, data + 0x07) & 0x10) >> 4;
p                 243 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_07_40 = (nvbios_rd08(bios, data + 0x07) & 0x40) >> 6;
p                 244 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_07_80 = (nvbios_rd08(bios, data + 0x07) & 0x80) >> 7;
p                 245 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_08_01 = (nvbios_rd08(bios, data + 0x08) & 0x01) >> 0;
p                 246 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_08_02 = (nvbios_rd08(bios, data + 0x08) & 0x02) >> 1;
p                 247 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_08_04 = (nvbios_rd08(bios, data + 0x08) & 0x04) >> 2;
p                 248 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_08_08 = (nvbios_rd08(bios, data + 0x08) & 0x08) >> 3;
p                 249 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_08_10 = (nvbios_rd08(bios, data + 0x08) & 0x10) >> 4;
p                 250 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_08_20 = (nvbios_rd08(bios, data + 0x08) & 0x20) >> 5;
p                 251 drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c 		p->ramcfg_11_09    = (nvbios_rd08(bios, data + 0x09) & 0xff) >> 0;
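
Every field in the nvbios_rammap parsers above comes from the same three-step formula: read one byte of the table with nvbios_rd08(), mask the bits of interest, and shift them down to bit 0 (the field names encode byte offset and mask, e.g. ramcfg_11_08_10 is byte 0x08, mask 0x10; the timing.c parser below works the same way). A standalone sketch of that extract-and-shift step, using a made-up table entry and field positions:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for nvbios_rd08(): read one byte of a table image. */
static uint8_t rd08(const uint8_t *image, unsigned int off)
{
	return image[off];
}

/* Mask a field out of a byte and shift it down to bit 0, mirroring
 * lines such as:
 *   p->ramcfg_11_08_10 = (nvbios_rd08(bios, data + 0x08) & 0x10) >> 4;
 */
static unsigned int field(const uint8_t *image, unsigned int off,
			  uint8_t mask, unsigned int shift)
{
	return (rd08(image, off) & mask) >> shift;
}

int main(void)
{
	const uint8_t entry[16] = { [0x08] = 0x5a };	/* hypothetical entry */

	printf("bit 4    = %u\n", field(entry, 0x08, 0x10, 4));	/* -> 1 */
	printf("bits 3:2 = %u\n", field(entry, 0x08, 0x0c, 2));	/* -> 2 */
	return 0;
}
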
p                  86 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *p)
p                  89 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 	p->timing_ver = *ver;
p                  90 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 	p->timing_hdr = *hdr;
p                  93 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_10_WR    = nvbios_rd08(bios, data + 0x00);
p                  94 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_10_WTR   = nvbios_rd08(bios, data + 0x01);
p                  95 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_10_CL    = nvbios_rd08(bios, data + 0x02);
p                  96 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_10_RC    = nvbios_rd08(bios, data + 0x03);
p                  97 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_10_RFC   = nvbios_rd08(bios, data + 0x05);
p                  98 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_10_RAS   = nvbios_rd08(bios, data + 0x07);
p                  99 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_10_RP    = nvbios_rd08(bios, data + 0x09);
p                 100 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_10_RCDRD = nvbios_rd08(bios, data + 0x0a);
p                 101 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_10_RCDWR = nvbios_rd08(bios, data + 0x0b);
p                 102 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_10_RRD   = nvbios_rd08(bios, data + 0x0c);
p                 103 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_10_13    = nvbios_rd08(bios, data + 0x0d);
p                 104 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_10_ODT   = nvbios_rd08(bios, data + 0x0e) & 0x07;
p                 105 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		if (p->ramcfg_ver >= 0x10)
p                 106 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 			p->ramcfg_RON = nvbios_rd08(bios, data + 0x0e) & 0x07;
p                 108 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_10_24  = 0xff;
p                 109 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_10_21  = 0;
p                 110 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_10_20  = 0;
p                 111 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_10_CWL = 0;
p                 112 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_10_18  = 0;
p                 113 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_10_16  = 0;
p                 117 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 			p->timing_10_24  = nvbios_rd08(bios, data + 0x18);
p                 122 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 			p->timing_10_21  = nvbios_rd08(bios, data + 0x15);
p                 125 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 			p->timing_10_20  = nvbios_rd08(bios, data + 0x14);
p                 128 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 			p->timing_10_CWL = nvbios_rd08(bios, data + 0x13);
p                 131 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 			p->timing_10_18  = nvbios_rd08(bios, data + 0x12);
p                 135 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 			p->timing_10_16  = nvbios_rd08(bios, data + 0x10);
p                 140 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing[0] = nvbios_rd32(bios, data + 0x00);
p                 141 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing[1] = nvbios_rd32(bios, data + 0x04);
p                 142 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing[2] = nvbios_rd32(bios, data + 0x08);
p                 143 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing[3] = nvbios_rd32(bios, data + 0x0c);
p                 144 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing[4] = nvbios_rd32(bios, data + 0x10);
p                 145 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing[5] = nvbios_rd32(bios, data + 0x14);
p                 146 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing[6] = nvbios_rd32(bios, data + 0x18);
p                 147 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing[7] = nvbios_rd32(bios, data + 0x1c);
p                 148 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing[8] = nvbios_rd32(bios, data + 0x20);
p                 149 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing[9] = nvbios_rd32(bios, data + 0x24);
p                 150 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing[10] = nvbios_rd32(bios, data + 0x28);
p                 151 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_20_2e_03 = (nvbios_rd08(bios, data + 0x2e) & 0x03) >> 0;
p                 152 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_20_2e_30 = (nvbios_rd08(bios, data + 0x2e) & 0x30) >> 4;
p                 153 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_20_2e_c0 = (nvbios_rd08(bios, data + 0x2e) & 0xc0) >> 6;
p                 154 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_20_2f_03 = (nvbios_rd08(bios, data + 0x2f) & 0x03) >> 0;
p                 156 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_20_2c_003f = (temp & 0x003f) >> 0;
p                 157 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_20_2c_1fc0 = (temp & 0x1fc0) >> 6;
p                 158 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_20_30_07 = (nvbios_rd08(bios, data + 0x30) & 0x07) >> 0;
p                 159 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_20_30_f8 = (nvbios_rd08(bios, data + 0x30) & 0xf8) >> 3;
p                 161 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_20_31_0007 = (temp & 0x0007) >> 0;
p                 162 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_20_31_0078 = (temp & 0x0078) >> 3;
p                 163 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_20_31_0780 = (temp & 0x0780) >> 7;
p                 164 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_20_31_0800 = (temp & 0x0800) >> 11;
p                 165 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_20_31_7000 = (temp & 0x7000) >> 12;
p                 166 drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c 		p->timing_20_31_8000 = (temp & 0x8000) >> 15;
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h #define nvkm_bus(p) container_of((p), struct nvkm_bus, subdev)
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c #define gf100_clk(p) container_of((p), struct gf100_clk, base)
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c #define gk104_clk(p) container_of((p), struct gk104_clk, base)
p                 125 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h #define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
p                 139 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c #define gm20b_clk(p) container_of((gk20a_clk(p)), struct gm20b_clk, base)
p                 193 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c 	const struct gm20b_clk_dvfs_params *p = clk->dvfs_params;
p                 199 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c 	coeff = DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) + p->coeff_offs;
p                 228 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c 	const struct gk20a_clk_pllg_params *p = clk->base.params;
p                 247 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c 	if (n >> DFS_DET_RANGE > p->max_n) {
p                 249 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c 		n = p->max_n << DFS_DET_RANGE;
p                  25 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c #define gt215_clk(p) container_of((p), struct gt215_clk, base)
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c #define mcp77_clk(p) container_of((p), struct mcp77_clk, base)
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c #define nv40_clk(p) container_of((p), struct nv40_clk, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h #define nv50_clk(p) container_of((p), struct nv50_clk, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h #define nvkm_clk(p) container_of((p), struct nvkm_clk, subdev)
p                   6 drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h #define clk_init(s,p)       hwsq_init(&(s)->base, (p))
p                  63 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h 	u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
p                  64 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h 	u32 val = ioread32(p + (off & ~PAGE_MASK));
p                  65 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h 	io_mapping_unmap_atomic(p);
p                  72 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h 	u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
p                  73 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h 	iowrite32(val, p + (off & ~PAGE_MASK));
p                  75 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h 	io_mapping_unmap_atomic(p);
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h #define nv04_devinit(p) container_of((p), struct nv04_devinit, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h #define nv50_devinit(p) container_of((p), struct nv50_devinit, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h #define nvkm_devinit(p) container_of((p), struct nvkm_devinit, subdev)
p                   3 drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h #define nvkm_fault_buffer(p) container_of((p), struct nvkm_fault_buffer, object)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h #define nvkm_fault(p) container_of((p), struct nvkm_fault, subdev)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h #define gf100_fb(p) container_of((p), struct gf100_fb, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h #define nv50_fb(p) container_of((p), struct nv50_fb, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h #define nvkm_fb(p) container_of((p), struct nvkm_fb, subdev)
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c #define nvkm_vram(p) container_of((p), struct nvkm_vram, memory)
p                 164 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h #define ram_init(s,p)        ramfuc_init(&(s)->base, (p))
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c #define gf100_ram(p) container_of((p), struct gf100_ram, base)
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c #define gk104_ram(p) container_of((p), struct gk104_ram, base)
p                1444 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c 	struct nvbios_ramcfg *p, *n;
p                1451 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c 	p = &list_last_entry(&ram->cfg, typeof(*cfg), head)->bios;
p                1484 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c 	d->rammap_11_0a_03fe |= p->rammap_11_0a_03fe != n->rammap_11_0a_03fe;
p                1485 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c 	d->rammap_11_09_01ff |= p->rammap_11_09_01ff != n->rammap_11_09_01ff;
p                1486 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c 	d->rammap_11_0a_0400 |= p->rammap_11_0a_0400 != n->rammap_11_0a_0400;
p                1487 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c 	d->rammap_11_0a_0800 |= p->rammap_11_0a_0800 != n->rammap_11_0a_0800;
p                1488 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c 	d->rammap_11_0b_01f0 |= p->rammap_11_0b_01f0 != n->rammap_11_0b_01f0;
p                1489 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c 	d->rammap_11_0b_0200 |= p->rammap_11_0b_0200 != n->rammap_11_0b_0200;
p                1490 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c 	d->rammap_11_0d |= p->rammap_11_0d != n->rammap_11_0d;
p                1491 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c 	d->rammap_11_0f |= p->rammap_11_0f != n->rammap_11_0f;
p                1492 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c 	d->rammap_11_0e |= p->rammap_11_0e != n->rammap_11_0e;
p                1493 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c 	d->rammap_11_0b_0800 |= p->rammap_11_0b_0800 != n->rammap_11_0b_0800;
p                1494 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c 	d->rammap_11_0b_0400 |= p->rammap_11_0b_0400 != n->rammap_11_0b_0400;
p                1495 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c 	d->ramcfg_11_01_01 |= p->ramcfg_11_01_01 != n->ramcfg_11_01_01;
p                1496 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c 	d->ramcfg_11_01_02 |= p->ramcfg_11_01_02 != n->ramcfg_11_01_02;
p                1497 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c 	d->ramcfg_11_01_10 |= p->ramcfg_11_01_10 != n->ramcfg_11_01_10;
p                1498 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c 	d->ramcfg_11_02_03 |= p->ramcfg_11_02_03 != n->ramcfg_11_02_03;
p                1499 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c 	d->ramcfg_11_08_20 |= p->ramcfg_11_08_20 != n->ramcfg_11_08_20;
p                1500 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c 	d->timing_20_30_07 |= p->timing_20_30_07 != n->timing_20_30_07;
p                  25 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c #define gt215_ram(p) container_of((p), struct gt215_ram, base)
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c #define mcp77_ram(p) container_of((p), struct mcp77_ram, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h #define nv40_ram(p) container_of((p), struct nv40_ram, base)
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c #define nv50_ram(p) container_of((p), struct nv50_ram, base)
p                   6 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h #define ram_init(s,p)       hwsq_init(&(s)->base, (p))
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h #define nvkm_fuse(p) container_of((p), struct nvkm_fuse, subdev)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h #define nvkm_gpio(p) container_of((p), struct nvkm_gpio, subdev)
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c #define anx9805_pad(p) container_of((p), struct anx9805_pad, base)
p                  25 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c #define anx9805_bus(p) container_of((p), struct anx9805_bus, base)
p                  26 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c #define anx9805_aux(p) container_of((p), struct anx9805_aux, base)
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c #define g94_i2c_aux(p) container_of((p), struct g94_i2c_aux, base)
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c #define gm200_i2c_aux(p) container_of((p), struct gm200_i2c_aux, base)
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c #define gf119_i2c_bus(p) container_of((p), struct gf119_i2c_bus, base)
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c #define nv04_i2c_bus(p) container_of((p), struct nv04_i2c_bus, base)
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c #define nv4e_i2c_bus(p) container_of((p), struct nv4e_i2c_bus, base)
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c #define nv50_i2c_bus(p) container_of((p), struct nv50_i2c_bus, base)
p                  61 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h #define PAD_MSG(p,l,f,a...) do {                                               \
p                  62 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h 	struct nvkm_i2c_pad *_pad = (p);                                       \
p                  65 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h #define PAD_ERR(p,f,a...) PAD_MSG((p), error, f, ##a)
p                  66 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h #define PAD_DBG(p,f,a...) PAD_MSG((p), debug, f, ##a)
p                  67 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h #define PAD_TRACE(p,f,a...) PAD_MSG((p), trace, f, ##a)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h #define nvkm_i2c(p) container_of((p), struct nvkm_i2c, subdev)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h #define nvkm_iccsense(p) container_of((p), struct nvkm_iccsense, subdev)
p                  59 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c #define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
p                  70 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c #define gk20a_instobj_dma(p) \
p                  71 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c 	container_of(gk20a_instobj(p), struct gk20a_instobj_dma, base)
p                  89 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c #define gk20a_instobj_iommu(p) \
p                  90 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c 	container_of(gk20a_instobj(p), struct gk20a_instobj_iommu, base)
p                 113 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c #define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
p                 446 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c 		struct page *p = alloc_page(GFP_KERNEL);
p                 449 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c 		if (p == NULL) {
p                 453 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c 		node->pages[i] = p;
p                 454 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c 		dma_adr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c #define nv04_instmem(p) container_of((p), struct nv04_instmem, base)
p                  37 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c #define nv04_instobj(p) container_of((p), struct nv04_instobj, base.memory)
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c #define nv40_instmem(p) container_of((p), struct nv40_instmem, base)
p                  39 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c #define nv40_instobj(p) container_of((p), struct nv40_instobj, base.memory)
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c #define nv50_instmem(p) container_of((p), struct nv50_instmem, base)
p                  43 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c #define nv50_instobj(p) container_of((p), struct nv50_instobj, base.memory)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h #define nvkm_instmem(p) container_of((p), struct nvkm_instmem, subdev)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h #define nvkm_ltc(p) container_of((p), struct nvkm_ltc, subdev)
p                  24 drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c #define gp100_mc(p) container_of((p), struct gp100_mc, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h #define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev)
p                  22 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c #define nvkm_mem(p) container_of((p), struct nvkm_mem, memory)
p                 205 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 		struct page *p = alloc_page(gfp);
p                 206 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 		if (!p)
p                 210 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 						    p, 0, PAGE_SIZE,
p                 213 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 			__free_page(p);
p                 217 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 		mem->mem[mem->pages] = p;
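
The gk20a.c and mem.c entries above build backing store one page at a time and DMA-map each page before recording it, releasing the page again if the mapping fails. A hedged kernel-style sketch of that loop (the helper, its arguments and the handling of earlier pages are hypothetical; only the core alloc_page()/dma_map_page()/dma_mapping_error() sequence is taken from the listing):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Allocate and DMA-map 'count' pages; on a mapping failure the failing
 * page is freed and the caller is expected to unwind the earlier ones. */
static int map_pages(struct device *dev, struct page **pages,
		     dma_addr_t *addrs, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct page *p = alloc_page(GFP_KERNEL);

		if (!p)
			return -ENOMEM;

		addrs[i] = dma_map_page(dev, p, 0, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, addrs[i])) {
			__free_page(p);
			return -ENOMEM;
		}
		pages[i] = p;
	}
	return 0;
}
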
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h #define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev)
p                   3 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h #define nvkm_umem(p) container_of((p), struct nvkm_umem, object)
p                   3 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.h #define nvkm_ummu(p) container_of((p), struct nvkm_ummu, object)
p                   3 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.h #define nvkm_uvmm(p) container_of((p), struct nvkm_uvmm, object)
p                1708 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		const int p = page - vmm->func->page;
p                1711 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (vmm->func->page_block && prev && prev->page != p)
p                1716 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (vmm->func->page_block && next && next->page != p)
p                 277 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h #define VMM_PRINT(l,v,p,f,a...) do {                                           \
p                 280 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h 		nvkm_printk_(&_vmm->mmu->subdev, 0, p, "%s: "f"\n",            \
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h #define nvkm_mxm(p) container_of((p), struct nvkm_mxm, subdev)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h #define nvkm_pci(p) container_of((p), struct nvkm_pci, subdev)
p                  22 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c #define gk20a_pmu(p) container_of((p), struct gk20a_pmu, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h #define nvkm_pmu(p) container_of((p), struct nvkm_pmu, subdev)
p                 398 drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c 			const struct nvkm_therm_clkgate_pack *p)
p                 403 drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c 	therm->func->clkgate_init(therm, p);
p                  33 drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.c 		   const struct nvkm_therm_clkgate_pack *p)
p                  40 drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.c 	pack_for_each_init(init, pack, p) {
p                  27 drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h #define gk104_therm(p) (container_of((p), struct gk104_therm, base))
p                   3 drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h #define nvkm_therm(p) container_of((p), struct nvkm_therm, subdev)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h #define nvkm_timer(p) container_of((p), struct nvkm_timer, subdev)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h #define nvkm_top(p) container_of((p), struct nvkm_top, subdev)
p                  32 drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c #define gk104_volt(p) container_of((p), struct gk104_volt, base)
p                  22 drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c #define gk20a_volt(p) container_of((p), struct gk20a_volt, base)
p                   4 drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h #define nvkm_volt(p) container_of((p), struct nvkm_volt, subdev)
p                  92 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
p                3451 drivers/gpu/drm/omapdrm/dss/dispc.c static int dispc_dump_regs(struct seq_file *s, void *p)
p                1382 drivers/gpu/drm/omapdrm/dss/dsi.c static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
p                1443 drivers/gpu/drm/omapdrm/dss/dsi.c static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
p                1534 drivers/gpu/drm/omapdrm/dss/dsi.c static int dsi_dump_dsi_regs(struct seq_file *s, void *p)
p                1832 drivers/gpu/drm/omapdrm/dss/dsi.c 		unsigned int p = dsi->lanes[i].polarity;
p                1835 drivers/gpu/drm/omapdrm/dss/dsi.c 			l |= 1 << (i * 2 + (p ? 0 : 1));
p                1838 drivers/gpu/drm/omapdrm/dss/dsi.c 			l |= 1 << (i * 2 + (p ? 1 : 0));
p                2623 drivers/gpu/drm/omapdrm/dss/dsi.c 	u8 *p;
p                2640 drivers/gpu/drm/omapdrm/dss/dsi.c 	p = data;
p                2645 drivers/gpu/drm/omapdrm/dss/dsi.c 		b1 = *p++;
p                2646 drivers/gpu/drm/omapdrm/dss/dsi.c 		b2 = *p++;
p                2647 drivers/gpu/drm/omapdrm/dss/dsi.c 		b3 = *p++;
p                2648 drivers/gpu/drm/omapdrm/dss/dsi.c 		b4 = *p++;
p                2662 drivers/gpu/drm/omapdrm/dss/dsi.c 			b1 = *p++;
p                2663 drivers/gpu/drm/omapdrm/dss/dsi.c 			b2 = *p++;
p                2664 drivers/gpu/drm/omapdrm/dss/dsi.c 			b3 = *p++;
p                2667 drivers/gpu/drm/omapdrm/dss/dsi.c 			b1 = *p++;
p                2668 drivers/gpu/drm/omapdrm/dss/dsi.c 			b2 = *p++;
p                2671 drivers/gpu/drm/omapdrm/dss/dsi.c 			b1 = *p++;
p                 355 drivers/gpu/drm/omapdrm/dss/dss.c static int dss_dump_regs(struct seq_file *s, void *p)
p                 380 drivers/gpu/drm/omapdrm/dss/dss.c static int dss_debug_dump_clocks(struct seq_file *s, void *p)
p                 254 drivers/gpu/drm/omapdrm/dss/hdmi4.c static int hdmi_dump_regs(struct seq_file *s, void *p)
p                 253 drivers/gpu/drm/omapdrm/dss/hdmi5.c static int hdmi_dump_regs(struct seq_file *s, void *p)
p                  34 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c 	const char *p = prop->value;
p                  38 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c 	for (i = 0; total < prop->length; total += l, p += l, i++)
p                  39 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c 		l = strlen(p) + 1;
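
The omapdss-boot-init.c loop above walks a device-tree string property, which is a run of NUL-terminated strings packed back to back: each step advances by strlen(p) + 1 until the property length is consumed. A standalone sketch of the same walk over a hypothetical property value:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Hypothetical property value: three strings, embedded NULs included. */
	const char value[] = "sony,acx565akm\0panel-dpi\0hdmi-connector";
	size_t length = sizeof(value);
	const char *p = value;
	size_t total = 0;
	int i = 0;

	while (total < length) {
		size_t l = strlen(p) + 1;	/* string plus its NUL */

		printf("string %d: %s\n", i++, p);
		total += l;
		p += l;
	}
	return 0;
}
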
p                 599 drivers/gpu/drm/omapdrm/dss/venc.c static int venc_dump_regs(struct seq_file *s, void *p)
p                  37 drivers/gpu/drm/omapdrm/omap_debugfs.c 	struct drm_printer p = drm_seq_file_printer(m);
p                  39 drivers/gpu/drm/omapdrm/omap_debugfs.c 	drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
p                 688 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	struct tcm_pt *p = &block->area.p0;
p                 692 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 			(p->x * geom[block->fmt].slot_w) + x,
p                 693 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 			(p->y * geom[block->fmt].slot_h) + y);
p                1020 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
p                1023 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	map[p->y / ydiv][p->x / xdiv] = c;
p                1026 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
p                1028 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	return map[p->y / ydiv][p->x / xdiv];
p                1038 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	char *p = map[yd] + (x0 / xdiv);
p                1041 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 		p += w;
p                1043 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 			*p++ = *nice++;
p                1080 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	struct tcm_area a, p;
p                1134 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 					tcm_for_each_slice(a, block->area, p)
p                 261 drivers/gpu/drm/omapdrm/tcm.h static inline bool __tcm_is_in(struct tcm_pt *p, struct tcm_area *a)
p                 266 drivers/gpu/drm/omapdrm/tcm.h 		return p->x >= a->p0.x && p->x <= a->p1.x &&
p                 267 drivers/gpu/drm/omapdrm/tcm.h 		       p->y >= a->p0.y && p->y <= a->p1.y;
p                 269 drivers/gpu/drm/omapdrm/tcm.h 		i = p->x + p->y * a->tcm->width;
p                  30 drivers/gpu/drm/panel/panel-lg-lb035q02.c #define to_lb035q02_device(p) container_of(p, struct lb035q02_device, panel)
p                  30 drivers/gpu/drm/panel/panel-nec-nl8048hl11.c #define to_nl8048_device(p) container_of(p, struct nl8048_panel, panel)
p                 126 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	struct seiko_panel *p = to_seiko_panel(panel);
p                 128 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	if (!p->enabled)
p                 131 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	if (p->backlight) {
p                 132 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 		p->backlight->props.power = FB_BLANK_POWERDOWN;
p                 133 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 		p->backlight->props.state |= BL_CORE_FBBLANK;
p                 134 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 		backlight_update_status(p->backlight);
p                 137 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	p->enabled = false;
p                 144 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	struct seiko_panel *p = to_seiko_panel(panel);
p                 146 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	if (!p->prepared)
p                 149 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	regulator_disable(p->avdd);
p                 154 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	regulator_disable(p->dvdd);
p                 156 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	p->prepared = false;
p                 163 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	struct seiko_panel *p = to_seiko_panel(panel);
p                 166 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	if (p->prepared)
p                 169 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	err = regulator_enable(p->dvdd);
p                 178 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	err = regulator_enable(p->avdd);
p                 184 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	p->prepared = true;
p                 189 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	regulator_disable(p->dvdd);
p                 195 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	struct seiko_panel *p = to_seiko_panel(panel);
p                 197 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	if (p->enabled)
p                 200 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	if (p->backlight) {
p                 201 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 		p->backlight->props.state &= ~BL_CORE_FBBLANK;
p                 202 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 		p->backlight->props.power = FB_BLANK_UNBLANK;
p                 203 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 		backlight_update_status(p->backlight);
p                 206 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	p->enabled = true;
p                 213 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	struct seiko_panel *p = to_seiko_panel(panel);
p                 216 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	return seiko_panel_get_fixed_modes(p);
p                 223 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	struct seiko_panel *p = to_seiko_panel(panel);
p                 226 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	if (p->desc->num_timings < num_timings)
p                 227 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 		num_timings = p->desc->num_timings;
p                 231 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 			timings[i] = p->desc->timings[i];
p                 233 drivers/gpu/drm/panel/panel-seiko-43wvf1g.c 	return p->desc->num_timings;
p                  36 drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c #define to_ls037v7dw01_device(p) \
p                  37 drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c 	container_of(p, struct ls037v7dw01_panel, panel)
p                 233 drivers/gpu/drm/panel/panel-simple.c 	struct panel_simple *p = to_panel_simple(panel);
p                 235 drivers/gpu/drm/panel/panel-simple.c 	if (!p->enabled)
p                 238 drivers/gpu/drm/panel/panel-simple.c 	if (p->backlight) {
p                 239 drivers/gpu/drm/panel/panel-simple.c 		p->backlight->props.power = FB_BLANK_POWERDOWN;
p                 240 drivers/gpu/drm/panel/panel-simple.c 		p->backlight->props.state |= BL_CORE_FBBLANK;
p                 241 drivers/gpu/drm/panel/panel-simple.c 		backlight_update_status(p->backlight);
p                 244 drivers/gpu/drm/panel/panel-simple.c 	if (p->desc->delay.disable)
p                 245 drivers/gpu/drm/panel/panel-simple.c 		msleep(p->desc->delay.disable);
p                 247 drivers/gpu/drm/panel/panel-simple.c 	p->enabled = false;
p                 254 drivers/gpu/drm/panel/panel-simple.c 	struct panel_simple *p = to_panel_simple(panel);
p                 256 drivers/gpu/drm/panel/panel-simple.c 	if (!p->prepared)
p                 259 drivers/gpu/drm/panel/panel-simple.c 	gpiod_set_value_cansleep(p->enable_gpio, 0);
p                 261 drivers/gpu/drm/panel/panel-simple.c 	regulator_disable(p->supply);
p                 263 drivers/gpu/drm/panel/panel-simple.c 	if (p->desc->delay.unprepare)
p                 264 drivers/gpu/drm/panel/panel-simple.c 		msleep(p->desc->delay.unprepare);
p                 266 drivers/gpu/drm/panel/panel-simple.c 	p->prepared = false;
p                 273 drivers/gpu/drm/panel/panel-simple.c 	struct panel_simple *p = to_panel_simple(panel);
p                 277 drivers/gpu/drm/panel/panel-simple.c 	if (p->prepared)
p                 280 drivers/gpu/drm/panel/panel-simple.c 	err = regulator_enable(p->supply);
p                 286 drivers/gpu/drm/panel/panel-simple.c 	gpiod_set_value_cansleep(p->enable_gpio, 1);
p                 288 drivers/gpu/drm/panel/panel-simple.c 	delay = p->desc->delay.prepare;
p                 289 drivers/gpu/drm/panel/panel-simple.c 	if (p->no_hpd)
p                 290 drivers/gpu/drm/panel/panel-simple.c 		delay += p->desc->delay.hpd_absent_delay;
p                 294 drivers/gpu/drm/panel/panel-simple.c 	p->prepared = true;
p                 301 drivers/gpu/drm/panel/panel-simple.c 	struct panel_simple *p = to_panel_simple(panel);
p                 303 drivers/gpu/drm/panel/panel-simple.c 	if (p->enabled)
p                 306 drivers/gpu/drm/panel/panel-simple.c 	if (p->desc->delay.enable)
p                 307 drivers/gpu/drm/panel/panel-simple.c 		msleep(p->desc->delay.enable);
p                 309 drivers/gpu/drm/panel/panel-simple.c 	if (p->backlight) {
p                 310 drivers/gpu/drm/panel/panel-simple.c 		p->backlight->props.state &= ~BL_CORE_FBBLANK;
p                 311 drivers/gpu/drm/panel/panel-simple.c 		p->backlight->props.power = FB_BLANK_UNBLANK;
p                 312 drivers/gpu/drm/panel/panel-simple.c 		backlight_update_status(p->backlight);
p                 315 drivers/gpu/drm/panel/panel-simple.c 	p->enabled = true;
p                 322 drivers/gpu/drm/panel/panel-simple.c 	struct panel_simple *p = to_panel_simple(panel);
p                 326 drivers/gpu/drm/panel/panel-simple.c 	if (p->ddc) {
p                 327 drivers/gpu/drm/panel/panel-simple.c 		struct edid *edid = drm_get_edid(panel->connector, p->ddc);
p                 336 drivers/gpu/drm/panel/panel-simple.c 	num += panel_simple_get_non_edid_modes(p);
p                 345 drivers/gpu/drm/panel/panel-simple.c 	struct panel_simple *p = to_panel_simple(panel);
p                 348 drivers/gpu/drm/panel/panel-simple.c 	if (p->desc->num_timings < num_timings)
p                 349 drivers/gpu/drm/panel/panel-simple.c 		num_timings = p->desc->num_timings;
p                 353 drivers/gpu/drm/panel/panel-simple.c 			timings[i] = p->desc->timings[i];
p                 355 drivers/gpu/drm/panel/panel-simple.c 	return p->desc->num_timings;
p                  73 drivers/gpu/drm/panel/panel-sony-acx565akm.c #define to_acx565akm_device(p) container_of(p, struct acx565akm_panel, panel)
p                  89 drivers/gpu/drm/panel/panel-tpo-td028ttec1.c #define to_td028ttec1_device(p) container_of(p, struct td028ttec1_panel, panel)
p                  68 drivers/gpu/drm/panel/panel-tpo-td043mtea1.c #define to_td043mtea1_device(p) container_of(p, struct td043mtea1_panel, panel)
p                 166 drivers/gpu/drm/panfrost/panfrost_gpu.c 	.issues = hw_issues_##name##_r##_rev##p##_p##stat,	\
p                 168 drivers/gpu/drm/panfrost/panfrost_gpu.c #define GPU_REV(name, r, p) GPU_REV_EXT(name, r, p, 0, )
p                 374 drivers/gpu/drm/qxl/qxl_ttm.c 	struct drm_printer p = drm_seq_file_printer(m);
p                 377 drivers/gpu/drm/qxl/qxl_ttm.c 	drm_mm_print(mm, &p);
p                 261 drivers/gpu/drm/radeon/atombios_dp.c 	u8 p = 0;
p                 275 drivers/gpu/drm/radeon/atombios_dp.c 		if (this_p > p)
p                 276 drivers/gpu/drm/radeon/atombios_dp.c 			p = this_p;
p                 282 drivers/gpu/drm/radeon/atombios_dp.c 	if (p >= DP_PRE_EMPHASIS_MAX)
p                 283 drivers/gpu/drm/radeon/atombios_dp.c 		p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
p                 287 drivers/gpu/drm/radeon/atombios_dp.c 		  pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
p                 290 drivers/gpu/drm/radeon/atombios_dp.c 		train_set[lane] = v | p;
p                 109 drivers/gpu/drm/radeon/atombios_i2c.c 	struct i2c_msg *p;
p                 114 drivers/gpu/drm/radeon/atombios_i2c.c 	p = &msgs[0];
p                 115 drivers/gpu/drm/radeon/atombios_i2c.c 	if ((num == 1) && (p->len == 0)) {
p                 117 drivers/gpu/drm/radeon/atombios_i2c.c 					    p->addr, HW_I2C_WRITE,
p                 126 drivers/gpu/drm/radeon/atombios_i2c.c 		p = &msgs[i];
p                 127 drivers/gpu/drm/radeon/atombios_i2c.c 		remaining = p->len;
p                 130 drivers/gpu/drm/radeon/atombios_i2c.c 		if (p->flags & I2C_M_RD) {
p                 143 drivers/gpu/drm/radeon/atombios_i2c.c 						    p->addr, flags,
p                 144 drivers/gpu/drm/radeon/atombios_i2c.c 						    &p->buf[buffer_offset], current_count);
p                1458 drivers/gpu/drm/radeon/btc_dpm.c 	const u32 *p = NULL;
p                1461 drivers/gpu/drm/radeon/btc_dpm.c 		p = (const u32 *)&barts_cgcg_cgls_default;
p                1464 drivers/gpu/drm/radeon/btc_dpm.c 		p = (const u32 *)&turks_cgcg_cgls_default;
p                1467 drivers/gpu/drm/radeon/btc_dpm.c 		p = (const u32 *)&caicos_cgcg_cgls_default;
p                1472 drivers/gpu/drm/radeon/btc_dpm.c 	btc_program_mgcg_hw_sequence(rdev, p, count);
p                1479 drivers/gpu/drm/radeon/btc_dpm.c 	const u32 *p = NULL;
p                1483 drivers/gpu/drm/radeon/btc_dpm.c 			p = (const u32 *)&barts_cgcg_cgls_enable;
p                1486 drivers/gpu/drm/radeon/btc_dpm.c 			p = (const u32 *)&turks_cgcg_cgls_enable;
p                1489 drivers/gpu/drm/radeon/btc_dpm.c 			p = (const u32 *)&caicos_cgcg_cgls_enable;
p                1495 drivers/gpu/drm/radeon/btc_dpm.c 			p = (const u32 *)&barts_cgcg_cgls_disable;
p                1498 drivers/gpu/drm/radeon/btc_dpm.c 			p = (const u32 *)&turks_cgcg_cgls_disable;
p                1501 drivers/gpu/drm/radeon/btc_dpm.c 			p = (const u32 *)&caicos_cgcg_cgls_disable;
p                1507 drivers/gpu/drm/radeon/btc_dpm.c 	btc_program_mgcg_hw_sequence(rdev, p, count);
p                1513 drivers/gpu/drm/radeon/btc_dpm.c 	const u32 *p = NULL;
p                1516 drivers/gpu/drm/radeon/btc_dpm.c 		p = (const u32 *)&barts_mgcg_default;
p                1519 drivers/gpu/drm/radeon/btc_dpm.c 		p = (const u32 *)&turks_mgcg_default;
p                1522 drivers/gpu/drm/radeon/btc_dpm.c 		p = (const u32 *)&caicos_mgcg_default;
p                1527 drivers/gpu/drm/radeon/btc_dpm.c 	btc_program_mgcg_hw_sequence(rdev, p, count);
p                1534 drivers/gpu/drm/radeon/btc_dpm.c 	const u32 *p = NULL;
p                1538 drivers/gpu/drm/radeon/btc_dpm.c 			p = (const u32 *)&barts_mgcg_enable;
p                1541 drivers/gpu/drm/radeon/btc_dpm.c 			p = (const u32 *)&turks_mgcg_enable;
p                1544 drivers/gpu/drm/radeon/btc_dpm.c 			p = (const u32 *)&caicos_mgcg_enable;
p                1550 drivers/gpu/drm/radeon/btc_dpm.c 			p = (const u32 *)&barts_mgcg_disable[0];
p                1553 drivers/gpu/drm/radeon/btc_dpm.c 			p = (const u32 *)&turks_mgcg_disable[0];
p                1556 drivers/gpu/drm/radeon/btc_dpm.c 			p = (const u32 *)&caicos_mgcg_disable[0];
p                1562 drivers/gpu/drm/radeon/btc_dpm.c 	btc_program_mgcg_hw_sequence(rdev, p, count);
p                1568 drivers/gpu/drm/radeon/btc_dpm.c 	const u32 *p = NULL;
p                1571 drivers/gpu/drm/radeon/btc_dpm.c 		p = (const u32 *)&barts_sysls_default;
p                1574 drivers/gpu/drm/radeon/btc_dpm.c 		p = (const u32 *)&turks_sysls_default;
p                1577 drivers/gpu/drm/radeon/btc_dpm.c 		p = (const u32 *)&caicos_sysls_default;
p                1582 drivers/gpu/drm/radeon/btc_dpm.c 	btc_program_mgcg_hw_sequence(rdev, p, count);
p                1589 drivers/gpu/drm/radeon/btc_dpm.c 	const u32 *p = NULL;
p                1593 drivers/gpu/drm/radeon/btc_dpm.c 			p = (const u32 *)&barts_sysls_enable;
p                1596 drivers/gpu/drm/radeon/btc_dpm.c 			p = (const u32 *)&turks_sysls_enable;
p                1599 drivers/gpu/drm/radeon/btc_dpm.c 			p = (const u32 *)&caicos_sysls_enable;
p                1605 drivers/gpu/drm/radeon/btc_dpm.c 			p = (const u32 *)&barts_sysls_disable;
p                1608 drivers/gpu/drm/radeon/btc_dpm.c 			p = (const u32 *)&turks_sysls_disable;
p                1611 drivers/gpu/drm/radeon/btc_dpm.c 			p = (const u32 *)&caicos_sysls_disable;
p                1617 drivers/gpu/drm/radeon/btc_dpm.c 	btc_program_mgcg_hw_sequence(rdev, p, count);
p                  40 drivers/gpu/drm/radeon/evergreen_cs.c int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
p                 190 drivers/gpu/drm/radeon/evergreen_cs.c static int evergreen_surface_check_linear(struct radeon_cs_parser *p,
p                 201 drivers/gpu/drm/radeon/evergreen_cs.c static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
p                 205 drivers/gpu/drm/radeon/evergreen_cs.c 	struct evergreen_cs_track *track = p->track;
p                 215 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
p                 223 drivers/gpu/drm/radeon/evergreen_cs.c static int evergreen_surface_check_1d(struct radeon_cs_parser *p,
p                 227 drivers/gpu/drm/radeon/evergreen_cs.c 	struct evergreen_cs_track *track = p->track;
p                 238 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d (%d %d %d)\n",
p                 246 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with 8\n",
p                 254 drivers/gpu/drm/radeon/evergreen_cs.c static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
p                 258 drivers/gpu/drm/radeon/evergreen_cs.c 	struct evergreen_cs_track *track = p->track;
p                 281 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
p                 288 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with %d\n",
p                 297 drivers/gpu/drm/radeon/evergreen_cs.c static int evergreen_surface_check(struct radeon_cs_parser *p,
p                 306 drivers/gpu/drm/radeon/evergreen_cs.c 		return evergreen_surface_check_linear(p, surf, prefix);
p                 308 drivers/gpu/drm/radeon/evergreen_cs.c 		return evergreen_surface_check_linear_aligned(p, surf, prefix);
p                 310 drivers/gpu/drm/radeon/evergreen_cs.c 		return evergreen_surface_check_1d(p, surf, prefix);
p                 312 drivers/gpu/drm/radeon/evergreen_cs.c 		return evergreen_surface_check_2d(p, surf, prefix);
p                 314 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
p                 321 drivers/gpu/drm/radeon/evergreen_cs.c static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p,
p                 333 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
p                 344 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n",
p                 354 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d %s invalid bankw %d\n",
p                 364 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d %s invalid bankh %d\n",
p                 374 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n",
p                 387 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d %s invalid tile split %d\n",
p                 394 drivers/gpu/drm/radeon/evergreen_cs.c static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id)
p                 396 drivers/gpu/drm/radeon/evergreen_cs.c 	struct evergreen_cs_track *track = p->track;
p                 417 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n",
p                 423 drivers/gpu/drm/radeon/evergreen_cs.c 	r = evergreen_surface_value_conv_check(p, &surf, "cb");
p                 428 drivers/gpu/drm/radeon/evergreen_cs.c 	r = evergreen_surface_check(p, &surf, "cb");
p                 430 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
p                 439 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
p                 451 drivers/gpu/drm/radeon/evergreen_cs.c 			uint32_t *ib = p->ib.ptr;
p                 469 drivers/gpu/drm/radeon/evergreen_cs.c 				if (!evergreen_surface_check(p, &surf, "cb")) {
p                 479 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
p                 484 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
p                 496 drivers/gpu/drm/radeon/evergreen_cs.c static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
p                 499 drivers/gpu/drm/radeon/evergreen_cs.c 	struct evergreen_cs_track *track = p->track;
p                 503 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
p                 540 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
p                 553 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
p                 561 drivers/gpu/drm/radeon/evergreen_cs.c static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
p                 563 drivers/gpu/drm/radeon/evergreen_cs.c 	struct evergreen_cs_track *track = p->track;
p                 584 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d stencil invalid format %d\n",
p                 591 drivers/gpu/drm/radeon/evergreen_cs.c 	r = evergreen_surface_value_conv_check(p, &surf, "stencil");
p                 596 drivers/gpu/drm/radeon/evergreen_cs.c 	r = evergreen_surface_check(p, &surf, NULL);
p                 603 drivers/gpu/drm/radeon/evergreen_cs.c 		r = evergreen_surface_check(p, &surf, "stencil");
p                 605 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
p                 614 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
p                 620 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
p                 625 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
p                 633 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
p                 639 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
p                 649 drivers/gpu/drm/radeon/evergreen_cs.c 		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
p                 658 drivers/gpu/drm/radeon/evergreen_cs.c static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
p                 660 drivers/gpu/drm/radeon/evergreen_cs.c 	struct evergreen_cs_track *track = p->track;
p                 689 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d depth invalid format %d\n",
p                 694 drivers/gpu/drm/radeon/evergreen_cs.c 	r = evergreen_surface_value_conv_check(p, &surf, "depth");
p                 696 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
p                 702 drivers/gpu/drm/radeon/evergreen_cs.c 	r = evergreen_surface_check(p, &surf, "depth");
p                 704 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
p                 712 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
p                 718 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
p                 728 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
p                 734 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
p                 744 drivers/gpu/drm/radeon/evergreen_cs.c 		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
p                 753 drivers/gpu/drm/radeon/evergreen_cs.c static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
p                 764 drivers/gpu/drm/radeon/evergreen_cs.c 	texdw[0] = radeon_get_ib_value(p, idx + 0);
p                 765 drivers/gpu/drm/radeon/evergreen_cs.c 	texdw[1] = radeon_get_ib_value(p, idx + 1);
p                 766 drivers/gpu/drm/radeon/evergreen_cs.c 	texdw[2] = radeon_get_ib_value(p, idx + 2);
p                 767 drivers/gpu/drm/radeon/evergreen_cs.c 	texdw[3] = radeon_get_ib_value(p, idx + 3);
p                 768 drivers/gpu/drm/radeon/evergreen_cs.c 	texdw[4] = radeon_get_ib_value(p, idx + 4);
p                 769 drivers/gpu/drm/radeon/evergreen_cs.c 	texdw[5] = radeon_get_ib_value(p, idx + 5);
p                 770 drivers/gpu/drm/radeon/evergreen_cs.c 	texdw[6] = radeon_get_ib_value(p, idx + 6);
p                 771 drivers/gpu/drm/radeon/evergreen_cs.c 	texdw[7] = radeon_get_ib_value(p, idx + 7);
p                 792 drivers/gpu/drm/radeon/evergreen_cs.c 	if (!r600_fmt_is_valid_texture(surf.format, p->family)) {
p                 793 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
p                 814 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d texture invalid dimension %d\n",
p                 819 drivers/gpu/drm/radeon/evergreen_cs.c 	r = evergreen_surface_value_conv_check(p, &surf, "texture");
p                 825 drivers/gpu/drm/radeon/evergreen_cs.c 	evergreen_surface_check(p, &surf, NULL);
p                 828 drivers/gpu/drm/radeon/evergreen_cs.c 	r = evergreen_surface_check(p, &surf, "texture");
p                 830 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
p                 838 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n",
p                 843 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
p                 853 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, "
p                 864 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%i got NULL MIP_ADDRESS relocation\n",
p                 888 drivers/gpu/drm/radeon/evergreen_cs.c 			evergreen_surface_check(p, &surf, NULL);
p                 895 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d invalid array mode %d\n",
p                 902 drivers/gpu/drm/radeon/evergreen_cs.c 		r = evergreen_surface_check(p, &surf, "mipmap");
p                 913 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, "
p                 920 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
p                 932 drivers/gpu/drm/radeon/evergreen_cs.c static int evergreen_cs_track_check(struct radeon_cs_parser *p)
p                 934 drivers/gpu/drm/radeon/evergreen_cs.c 	struct evergreen_cs_track *track = p->track;
p                 959 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "No buffer for streamout %d\n", i);
p                 981 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
p                 986 drivers/gpu/drm/radeon/evergreen_cs.c 				r = evergreen_cs_track_validate_cb(p, i);
p                 999 drivers/gpu/drm/radeon/evergreen_cs.c 			r = evergreen_cs_track_validate_stencil(p);
p                1006 drivers/gpu/drm/radeon/evergreen_cs.c 			r = evergreen_cs_track_validate_depth(p);
p                1025 drivers/gpu/drm/radeon/evergreen_cs.c static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
p                1045 drivers/gpu/drm/radeon/evergreen_cs.c 	return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
p                1048 drivers/gpu/drm/radeon/evergreen_cs.c static int evergreen_packet0_check(struct radeon_cs_parser *p,
p                1056 drivers/gpu/drm/radeon/evergreen_cs.c 		r = evergreen_cs_packet_parse_vline(p);
p                1070 drivers/gpu/drm/radeon/evergreen_cs.c static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
p                1080 drivers/gpu/drm/radeon/evergreen_cs.c 		r = evergreen_packet0_check(p, pkt, idx, reg);
p                1094 drivers/gpu/drm/radeon/evergreen_cs.c static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
p                1096 drivers/gpu/drm/radeon/evergreen_cs.c 	struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
p                1101 drivers/gpu/drm/radeon/evergreen_cs.c 	ib = p->ib.ptr;
p                1143 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1145 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1152 drivers/gpu/drm/radeon/evergreen_cs.c 		track->db_depth_control = radeon_get_ib_value(p, idx);
p                1156 drivers/gpu/drm/radeon/evergreen_cs.c 		if (p->rdev->family < CHIP_CAYMAN) {
p                1157 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1163 drivers/gpu/drm/radeon/evergreen_cs.c 		if (p->rdev->family < CHIP_CAYMAN) {
p                1164 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1170 drivers/gpu/drm/radeon/evergreen_cs.c 		track->db_z_info = radeon_get_ib_value(p, idx);
p                1171 drivers/gpu/drm/radeon/evergreen_cs.c 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
p                1172 drivers/gpu/drm/radeon/evergreen_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1174 drivers/gpu/drm/radeon/evergreen_cs.c 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1198 drivers/gpu/drm/radeon/evergreen_cs.c 		track->db_s_info = radeon_get_ib_value(p, idx);
p                1202 drivers/gpu/drm/radeon/evergreen_cs.c 		track->db_depth_view = radeon_get_ib_value(p, idx);
p                1206 drivers/gpu/drm/radeon/evergreen_cs.c 		track->db_depth_size = radeon_get_ib_value(p, idx);
p                1210 drivers/gpu/drm/radeon/evergreen_cs.c 		track->db_depth_slice = radeon_get_ib_value(p, idx);
p                1214 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1216 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1220 drivers/gpu/drm/radeon/evergreen_cs.c 		track->db_z_read_offset = radeon_get_ib_value(p, idx);
p                1226 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1228 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1232 drivers/gpu/drm/radeon/evergreen_cs.c 		track->db_z_write_offset = radeon_get_ib_value(p, idx);
p                1238 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1240 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1244 drivers/gpu/drm/radeon/evergreen_cs.c 		track->db_s_read_offset = radeon_get_ib_value(p, idx);
p                1250 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1252 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1256 drivers/gpu/drm/radeon/evergreen_cs.c 		track->db_s_write_offset = radeon_get_ib_value(p, idx);
p                1262 drivers/gpu/drm/radeon/evergreen_cs.c 		track->vgt_strmout_config = radeon_get_ib_value(p, idx);
p                1266 drivers/gpu/drm/radeon/evergreen_cs.c 		track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
p                1273 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1275 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1280 drivers/gpu/drm/radeon/evergreen_cs.c 		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
p                1291 drivers/gpu/drm/radeon/evergreen_cs.c 		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
p                1295 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1297 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
p                1304 drivers/gpu/drm/radeon/evergreen_cs.c 		track->cb_target_mask = radeon_get_ib_value(p, idx);
p                1308 drivers/gpu/drm/radeon/evergreen_cs.c 		track->cb_shader_mask = radeon_get_ib_value(p, idx);
p                1312 drivers/gpu/drm/radeon/evergreen_cs.c 		if (p->rdev->family >= CHIP_CAYMAN) {
p                1313 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1317 drivers/gpu/drm/radeon/evergreen_cs.c 		tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
p                1321 drivers/gpu/drm/radeon/evergreen_cs.c 		if (p->rdev->family < CHIP_CAYMAN) {
p                1322 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1326 drivers/gpu/drm/radeon/evergreen_cs.c 		tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
p                1338 drivers/gpu/drm/radeon/evergreen_cs.c 		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
p                1346 drivers/gpu/drm/radeon/evergreen_cs.c 		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
p                1358 drivers/gpu/drm/radeon/evergreen_cs.c 		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
p                1359 drivers/gpu/drm/radeon/evergreen_cs.c 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
p                1360 drivers/gpu/drm/radeon/evergreen_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1362 drivers/gpu/drm/radeon/evergreen_cs.c 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1376 drivers/gpu/drm/radeon/evergreen_cs.c 		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
p                1377 drivers/gpu/drm/radeon/evergreen_cs.c 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
p                1378 drivers/gpu/drm/radeon/evergreen_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1380 drivers/gpu/drm/radeon/evergreen_cs.c 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1398 drivers/gpu/drm/radeon/evergreen_cs.c 		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
p                1406 drivers/gpu/drm/radeon/evergreen_cs.c 		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
p                1418 drivers/gpu/drm/radeon/evergreen_cs.c 		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
p                1427 drivers/gpu/drm/radeon/evergreen_cs.c 		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
p                1439 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1441 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1445 drivers/gpu/drm/radeon/evergreen_cs.c 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
p                1467 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1469 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1473 drivers/gpu/drm/radeon/evergreen_cs.c 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
p                1500 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1502 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
p                1517 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1519 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
p                1534 drivers/gpu/drm/radeon/evergreen_cs.c 		track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
p                1545 drivers/gpu/drm/radeon/evergreen_cs.c 		track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
p                1555 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1557 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1562 drivers/gpu/drm/radeon/evergreen_cs.c 		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
p                1571 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1573 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1578 drivers/gpu/drm/radeon/evergreen_cs.c 		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
p                1584 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1586 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1590 drivers/gpu/drm/radeon/evergreen_cs.c 		track->htile_offset = radeon_get_ib_value(p, idx);
p                1597 drivers/gpu/drm/radeon/evergreen_cs.c 		track->htile_surface = radeon_get_ib_value(p, idx);
p                1702 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1704 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1711 drivers/gpu/drm/radeon/evergreen_cs.c 		if (p->rdev->family >= CHIP_CAYMAN) {
p                1712 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONFIG_REG "
p                1716 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1718 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONFIG_REG "
p                1725 drivers/gpu/drm/radeon/evergreen_cs.c 		if (p->rdev->family < CHIP_CAYMAN) {
p                1726 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1730 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1732 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1739 drivers/gpu/drm/radeon/evergreen_cs.c 		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
p                1742 drivers/gpu/drm/radeon/evergreen_cs.c 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
p                1756 drivers/gpu/drm/radeon/evergreen_cs.c static inline bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg)
p                1758 drivers/gpu/drm/radeon/evergreen_cs.c 	struct evergreen_cs_track *track = p->track;
p                1772 drivers/gpu/drm/radeon/evergreen_cs.c static int evergreen_packet3_check(struct radeon_cs_parser *p,
p                1784 drivers/gpu/drm/radeon/evergreen_cs.c 	track = (struct evergreen_cs_track *)p->track;
p                1785 drivers/gpu/drm/radeon/evergreen_cs.c 	ib = p->ib.ptr;
p                1787 drivers/gpu/drm/radeon/evergreen_cs.c 	idx_value = radeon_get_ib_value(p, idx);
p                1801 drivers/gpu/drm/radeon/evergreen_cs.c 		tmp = radeon_get_ib_value(p, idx + 1);
p                1813 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1842 drivers/gpu/drm/radeon/evergreen_cs.c 		if (p->rdev->family < CHIP_CAYMAN) {
p                1859 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1867 drivers/gpu/drm/radeon/evergreen_cs.c 			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
p                1872 drivers/gpu/drm/radeon/evergreen_cs.c 		r = evergreen_cs_track_check(p);
p                1874 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
p                1894 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1902 drivers/gpu/drm/radeon/evergreen_cs.c 			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
p                1907 drivers/gpu/drm/radeon/evergreen_cs.c 		r = evergreen_cs_track_check(p);
p                1909 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
p                1922 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1929 drivers/gpu/drm/radeon/evergreen_cs.c 			 radeon_get_ib_value(p, idx+1) +
p                1930 drivers/gpu/drm/radeon/evergreen_cs.c 			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
p                1935 drivers/gpu/drm/radeon/evergreen_cs.c 		r = evergreen_cs_track_check(p);
p                1937 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
p                1947 drivers/gpu/drm/radeon/evergreen_cs.c 		r = evergreen_cs_track_check(p);
p                1949 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
p                1958 drivers/gpu/drm/radeon/evergreen_cs.c 		r = evergreen_cs_track_check(p);
p                1960 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
p                1969 drivers/gpu/drm/radeon/evergreen_cs.c 		r = evergreen_cs_track_check(p);
p                1971 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
p                1980 drivers/gpu/drm/radeon/evergreen_cs.c 		r = evergreen_cs_track_check(p);
p                1982 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
p                1991 drivers/gpu/drm/radeon/evergreen_cs.c 		r = evergreen_cs_track_check(p);
p                1993 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
p                2017 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                2046 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "DRAW_INDIRECT buffer too small %u + %llu > %lu\n",
p                2051 drivers/gpu/drm/radeon/evergreen_cs.c 		r = evergreen_cs_track_check(p);
p                2053 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
p                2063 drivers/gpu/drm/radeon/evergreen_cs.c 		r = evergreen_cs_track_check(p);
p                2065 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
p                2074 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                2080 drivers/gpu/drm/radeon/evergreen_cs.c 		r = evergreen_cs_track_check(p);
p                2082 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
p                2095 drivers/gpu/drm/radeon/evergreen_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                2102 drivers/gpu/drm/radeon/evergreen_cs.c 				 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
p                2103 drivers/gpu/drm/radeon/evergreen_cs.c 				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
p                2120 drivers/gpu/drm/radeon/evergreen_cs.c 		command = radeon_get_ib_value(p, idx+4);
p                2122 drivers/gpu/drm/radeon/evergreen_cs.c 		info = radeon_get_ib_value(p, idx+1);
p                2149 drivers/gpu/drm/radeon/evergreen_cs.c 				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                2155 drivers/gpu/drm/radeon/evergreen_cs.c 				tmp = radeon_get_ib_value(p, idx) +
p                2156 drivers/gpu/drm/radeon/evergreen_cs.c 					((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
p                2161 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
p                2187 drivers/gpu/drm/radeon/evergreen_cs.c 				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                2193 drivers/gpu/drm/radeon/evergreen_cs.c 				tmp = radeon_get_ib_value(p, idx+2) +
p                2194 drivers/gpu/drm/radeon/evergreen_cs.c 					((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
p                2199 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
p                2225 drivers/gpu/drm/radeon/evergreen_cs.c 		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
p                2226 drivers/gpu/drm/radeon/evergreen_cs.c 		    radeon_get_ib_value(p, idx + 2) != 0) {
p                2227 drivers/gpu/drm/radeon/evergreen_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                2243 drivers/gpu/drm/radeon/evergreen_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                2249 drivers/gpu/drm/radeon/evergreen_cs.c 				 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
p                2250 drivers/gpu/drm/radeon/evergreen_cs.c 				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
p                2264 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                2271 drivers/gpu/drm/radeon/evergreen_cs.c 			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
p                2272 drivers/gpu/drm/radeon/evergreen_cs.c 			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
p                2286 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                2293 drivers/gpu/drm/radeon/evergreen_cs.c 			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
p                2294 drivers/gpu/drm/radeon/evergreen_cs.c 			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
p                2310 drivers/gpu/drm/radeon/evergreen_cs.c 			if (evergreen_is_safe_reg(p, reg))
p                2312 drivers/gpu/drm/radeon/evergreen_cs.c 			r = evergreen_cs_handle_reg(p, reg, idx);
p                2327 drivers/gpu/drm/radeon/evergreen_cs.c 			if (evergreen_is_safe_reg(p, reg))
p                2329 drivers/gpu/drm/radeon/evergreen_cs.c 			r = evergreen_cs_handle_reg(p, reg, idx);
p                2352 drivers/gpu/drm/radeon/evergreen_cs.c 			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
p                2355 drivers/gpu/drm/radeon/evergreen_cs.c 				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                2360 drivers/gpu/drm/radeon/evergreen_cs.c 				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
p                2386 drivers/gpu/drm/radeon/evergreen_cs.c 				    !radeon_cs_packet_next_is_pkt3_nop(p)) {
p                2392 drivers/gpu/drm/radeon/evergreen_cs.c 					r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                2401 drivers/gpu/drm/radeon/evergreen_cs.c 				r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
p                2411 drivers/gpu/drm/radeon/evergreen_cs.c 				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                2416 drivers/gpu/drm/radeon/evergreen_cs.c 				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
p                2417 drivers/gpu/drm/radeon/evergreen_cs.c 				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
p                2418 drivers/gpu/drm/radeon/evergreen_cs.c 				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
p                2420 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn_ratelimited(p->dev, "vbo resource seems too big for the bo\n");
p                2493 drivers/gpu/drm/radeon/evergreen_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                2498 drivers/gpu/drm/radeon/evergreen_cs.c 			offset = radeon_get_ib_value(p, idx+1);
p                2499 drivers/gpu/drm/radeon/evergreen_cs.c 			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
p                2512 drivers/gpu/drm/radeon/evergreen_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                2517 drivers/gpu/drm/radeon/evergreen_cs.c 			offset = radeon_get_ib_value(p, idx+3);
p                2518 drivers/gpu/drm/radeon/evergreen_cs.c 			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
p                2537 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                2542 drivers/gpu/drm/radeon/evergreen_cs.c 		offset = radeon_get_ib_value(p, idx+0);
p                2543 drivers/gpu/drm/radeon/evergreen_cs.c 		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
p                2566 drivers/gpu/drm/radeon/evergreen_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                2571 drivers/gpu/drm/radeon/evergreen_cs.c 			offset = radeon_get_ib_value(p, idx+1);
p                2572 drivers/gpu/drm/radeon/evergreen_cs.c 			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
p                2583 drivers/gpu/drm/radeon/evergreen_cs.c 			reg = radeon_get_ib_value(p, idx+1) << 2;
p                2584 drivers/gpu/drm/radeon/evergreen_cs.c 			if (!evergreen_is_safe_reg(p, reg)) {
p                2585 drivers/gpu/drm/radeon/evergreen_cs.c 				dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
p                2593 drivers/gpu/drm/radeon/evergreen_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                2598 drivers/gpu/drm/radeon/evergreen_cs.c 			offset = radeon_get_ib_value(p, idx+3);
p                2599 drivers/gpu/drm/radeon/evergreen_cs.c 			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
p                2610 drivers/gpu/drm/radeon/evergreen_cs.c 			reg = radeon_get_ib_value(p, idx+3) << 2;
p                2611 drivers/gpu/drm/radeon/evergreen_cs.c 			if (!evergreen_is_safe_reg(p, reg)) {
p                2612 drivers/gpu/drm/radeon/evergreen_cs.c 				dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
p                2634 drivers/gpu/drm/radeon/evergreen_cs.c 			dev_warn(p->dev, "forbidden register for append cnt 0x%08x at %d\n",
p                2643 drivers/gpu/drm/radeon/evergreen_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                2648 drivers/gpu/drm/radeon/evergreen_cs.c 			offset = radeon_get_ib_value(p, idx + 1);
p                2652 drivers/gpu/drm/radeon/evergreen_cs.c 			offset += ((u64)(radeon_get_ib_value(p, idx + 2) & 0xff)) << 32;
p                2672 drivers/gpu/drm/radeon/evergreen_cs.c int evergreen_cs_parse(struct radeon_cs_parser *p)
p                2679 drivers/gpu/drm/radeon/evergreen_cs.c 	if (p->track == NULL) {
p                2685 drivers/gpu/drm/radeon/evergreen_cs.c 		if (p->rdev->family >= CHIP_CAYMAN) {
p                2686 drivers/gpu/drm/radeon/evergreen_cs.c 			tmp = p->rdev->config.cayman.tile_config;
p                2689 drivers/gpu/drm/radeon/evergreen_cs.c 			tmp = p->rdev->config.evergreen.tile_config;
p                2746 drivers/gpu/drm/radeon/evergreen_cs.c 		p->track = track;
p                2749 drivers/gpu/drm/radeon/evergreen_cs.c 		r = radeon_cs_packet_parse(p, &pkt, p->idx);
p                2751 drivers/gpu/drm/radeon/evergreen_cs.c 			kfree(p->track);
p                2752 drivers/gpu/drm/radeon/evergreen_cs.c 			p->track = NULL;
p                2755 drivers/gpu/drm/radeon/evergreen_cs.c 		p->idx += pkt.count + 2;
p                2758 drivers/gpu/drm/radeon/evergreen_cs.c 			r = evergreen_cs_parse_packet0(p, &pkt);
p                2763 drivers/gpu/drm/radeon/evergreen_cs.c 			r = evergreen_packet3_check(p, &pkt);
p                2767 drivers/gpu/drm/radeon/evergreen_cs.c 			kfree(p->track);
p                2768 drivers/gpu/drm/radeon/evergreen_cs.c 			p->track = NULL;
p                2772 drivers/gpu/drm/radeon/evergreen_cs.c 			kfree(p->track);
p                2773 drivers/gpu/drm/radeon/evergreen_cs.c 			p->track = NULL;
p                2776 drivers/gpu/drm/radeon/evergreen_cs.c 	} while (p->idx < p->chunk_ib->length_dw);
p                2778 drivers/gpu/drm/radeon/evergreen_cs.c 	for (r = 0; r < p->ib.length_dw; r++) {
p                2779 drivers/gpu/drm/radeon/evergreen_cs.c 		pr_info("%05d  0x%08X\n", r, p->ib.ptr[r]);
p                2783 drivers/gpu/drm/radeon/evergreen_cs.c 	kfree(p->track);
p                2784 drivers/gpu/drm/radeon/evergreen_cs.c 	p->track = NULL;
p                2797 drivers/gpu/drm/radeon/evergreen_cs.c int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
p                2799 drivers/gpu/drm/radeon/evergreen_cs.c 	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
p                2802 drivers/gpu/drm/radeon/evergreen_cs.c 	uint32_t *ib = p->ib.ptr;
p                2808 drivers/gpu/drm/radeon/evergreen_cs.c 		if (p->idx >= ib_chunk->length_dw) {
p                2810 drivers/gpu/drm/radeon/evergreen_cs.c 				  p->idx, ib_chunk->length_dw);
p                2813 drivers/gpu/drm/radeon/evergreen_cs.c 		idx = p->idx;
p                2814 drivers/gpu/drm/radeon/evergreen_cs.c 		header = radeon_get_ib_value(p, idx);
p                2821 drivers/gpu/drm/radeon/evergreen_cs.c 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
p                2829 drivers/gpu/drm/radeon/evergreen_cs.c 				dst_offset = radeon_get_ib_value(p, idx+1);
p                2833 drivers/gpu/drm/radeon/evergreen_cs.c 				p->idx += count + 7;
p                2837 drivers/gpu/drm/radeon/evergreen_cs.c 				dst_offset = radeon_get_ib_value(p, idx+1);
p                2838 drivers/gpu/drm/radeon/evergreen_cs.c 				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
p                2842 drivers/gpu/drm/radeon/evergreen_cs.c 				p->idx += count + 3;
p                2849 drivers/gpu/drm/radeon/evergreen_cs.c 				dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
p                2855 drivers/gpu/drm/radeon/evergreen_cs.c 			r = r600_dma_cs_next_reloc(p, &src_reloc);
p                2860 drivers/gpu/drm/radeon/evergreen_cs.c 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
p                2869 drivers/gpu/drm/radeon/evergreen_cs.c 				src_offset = radeon_get_ib_value(p, idx+2);
p                2870 drivers/gpu/drm/radeon/evergreen_cs.c 				src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
p                2871 drivers/gpu/drm/radeon/evergreen_cs.c 				dst_offset = radeon_get_ib_value(p, idx+1);
p                2872 drivers/gpu/drm/radeon/evergreen_cs.c 				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
p                2874 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
p                2879 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
p                2887 drivers/gpu/drm/radeon/evergreen_cs.c 				p->idx += 5;
p                2892 drivers/gpu/drm/radeon/evergreen_cs.c 				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
p                2894 drivers/gpu/drm/radeon/evergreen_cs.c 					src_offset = radeon_get_ib_value(p, idx+1);
p                2898 drivers/gpu/drm/radeon/evergreen_cs.c 					dst_offset = radeon_get_ib_value(p, idx + 7);
p                2899 drivers/gpu/drm/radeon/evergreen_cs.c 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
p                2904 drivers/gpu/drm/radeon/evergreen_cs.c 					src_offset = radeon_get_ib_value(p, idx+7);
p                2905 drivers/gpu/drm/radeon/evergreen_cs.c 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
p                2909 drivers/gpu/drm/radeon/evergreen_cs.c 					dst_offset = radeon_get_ib_value(p, idx+1);
p                2914 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
p                2919 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
p                2923 drivers/gpu/drm/radeon/evergreen_cs.c 				p->idx += 9;
p                2928 drivers/gpu/drm/radeon/evergreen_cs.c 				src_offset = radeon_get_ib_value(p, idx+2);
p                2929 drivers/gpu/drm/radeon/evergreen_cs.c 				src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
p                2930 drivers/gpu/drm/radeon/evergreen_cs.c 				dst_offset = radeon_get_ib_value(p, idx+1);
p                2931 drivers/gpu/drm/radeon/evergreen_cs.c 				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
p                2933 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
p                2938 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
p                2946 drivers/gpu/drm/radeon/evergreen_cs.c 				p->idx += 5;
p                2951 drivers/gpu/drm/radeon/evergreen_cs.c 				if (p->family < CHIP_CAYMAN) {
p                2960 drivers/gpu/drm/radeon/evergreen_cs.c 				p->idx += 9;
p                2965 drivers/gpu/drm/radeon/evergreen_cs.c 				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
p                2970 drivers/gpu/drm/radeon/evergreen_cs.c 				dst_offset = radeon_get_ib_value(p, idx+1);
p                2971 drivers/gpu/drm/radeon/evergreen_cs.c 				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
p                2972 drivers/gpu/drm/radeon/evergreen_cs.c 				dst2_offset = radeon_get_ib_value(p, idx+2);
p                2973 drivers/gpu/drm/radeon/evergreen_cs.c 				dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
p                2974 drivers/gpu/drm/radeon/evergreen_cs.c 				src_offset = radeon_get_ib_value(p, idx+3);
p                2975 drivers/gpu/drm/radeon/evergreen_cs.c 				src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
p                2977 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
p                2982 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
p                2987 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
p                2997 drivers/gpu/drm/radeon/evergreen_cs.c 				p->idx += 7;
p                3001 drivers/gpu/drm/radeon/evergreen_cs.c 				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
p                3005 drivers/gpu/drm/radeon/evergreen_cs.c 				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
p                3010 drivers/gpu/drm/radeon/evergreen_cs.c 				dst_offset = radeon_get_ib_value(p, idx+1);
p                3012 drivers/gpu/drm/radeon/evergreen_cs.c 				dst2_offset = radeon_get_ib_value(p, idx+2);
p                3014 drivers/gpu/drm/radeon/evergreen_cs.c 				src_offset = radeon_get_ib_value(p, idx+8);
p                3015 drivers/gpu/drm/radeon/evergreen_cs.c 				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
p                3017 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
p                3022 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
p                3027 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
p                3035 drivers/gpu/drm/radeon/evergreen_cs.c 				p->idx += 10;
p                3040 drivers/gpu/drm/radeon/evergreen_cs.c 				if (p->family < CHIP_CAYMAN) {
p                3045 drivers/gpu/drm/radeon/evergreen_cs.c 				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
p                3058 drivers/gpu/drm/radeon/evergreen_cs.c 				p->idx += 12;
p                3063 drivers/gpu/drm/radeon/evergreen_cs.c 				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
p                3067 drivers/gpu/drm/radeon/evergreen_cs.c 				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
p                3072 drivers/gpu/drm/radeon/evergreen_cs.c 				dst_offset = radeon_get_ib_value(p, idx+1);
p                3074 drivers/gpu/drm/radeon/evergreen_cs.c 				dst2_offset = radeon_get_ib_value(p, idx+2);
p                3076 drivers/gpu/drm/radeon/evergreen_cs.c 				src_offset = radeon_get_ib_value(p, idx+8);
p                3077 drivers/gpu/drm/radeon/evergreen_cs.c 				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
p                3079 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
p                3084 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
p                3089 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
p                3097 drivers/gpu/drm/radeon/evergreen_cs.c 				p->idx += 10;
p                3103 drivers/gpu/drm/radeon/evergreen_cs.c 				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
p                3105 drivers/gpu/drm/radeon/evergreen_cs.c 					src_offset = radeon_get_ib_value(p, idx+1);
p                3109 drivers/gpu/drm/radeon/evergreen_cs.c 					dst_offset = radeon_get_ib_value(p, idx+7);
p                3110 drivers/gpu/drm/radeon/evergreen_cs.c 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
p                3115 drivers/gpu/drm/radeon/evergreen_cs.c 					src_offset = radeon_get_ib_value(p, idx+7);
p                3116 drivers/gpu/drm/radeon/evergreen_cs.c 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
p                3120 drivers/gpu/drm/radeon/evergreen_cs.c 					dst_offset = radeon_get_ib_value(p, idx+1);
p                3125 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
p                3130 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
p                3134 drivers/gpu/drm/radeon/evergreen_cs.c 				p->idx += 9;
p                3139 drivers/gpu/drm/radeon/evergreen_cs.c 				if (p->family < CHIP_CAYMAN) {
p                3145 drivers/gpu/drm/radeon/evergreen_cs.c 				p->idx += 13;
p                3150 drivers/gpu/drm/radeon/evergreen_cs.c 				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
p                3154 drivers/gpu/drm/radeon/evergreen_cs.c 				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
p                3159 drivers/gpu/drm/radeon/evergreen_cs.c 				dst_offset = radeon_get_ib_value(p, idx+1);
p                3161 drivers/gpu/drm/radeon/evergreen_cs.c 				dst2_offset = radeon_get_ib_value(p, idx+2);
p                3163 drivers/gpu/drm/radeon/evergreen_cs.c 				src_offset = radeon_get_ib_value(p, idx+8);
p                3164 drivers/gpu/drm/radeon/evergreen_cs.c 				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
p                3166 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
p                3171 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
p                3176 drivers/gpu/drm/radeon/evergreen_cs.c 					dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
p                3184 drivers/gpu/drm/radeon/evergreen_cs.c 				p->idx += 10;
p                3192 drivers/gpu/drm/radeon/evergreen_cs.c 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
p                3197 drivers/gpu/drm/radeon/evergreen_cs.c 			dst_offset = radeon_get_ib_value(p, idx+1);
p                3198 drivers/gpu/drm/radeon/evergreen_cs.c 			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
p                3200 drivers/gpu/drm/radeon/evergreen_cs.c 				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
p                3206 drivers/gpu/drm/radeon/evergreen_cs.c 			p->idx += 4;
p                3209 drivers/gpu/drm/radeon/evergreen_cs.c 			p->idx += 1;
p                3215 drivers/gpu/drm/radeon/evergreen_cs.c 	} while (p->idx < p->chunk_ib->length_dw);
p                3217 drivers/gpu/drm/radeon/evergreen_cs.c 	for (r = 0; r < p->ib->length_dw; r++) {
p                3218 drivers/gpu/drm/radeon/evergreen_cs.c 		pr_info("%05d  0x%08X\n", r, p->ib.ptr[r]);
p                1259 drivers/gpu/drm/radeon/r100.c int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
p                1270 drivers/gpu/drm/radeon/r100.c 	r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1274 drivers/gpu/drm/radeon/r100.c 		radeon_cs_dump_packet(p, pkt);
p                1278 drivers/gpu/drm/radeon/r100.c 	value = radeon_get_ib_value(p, idx);
p                1282 drivers/gpu/drm/radeon/r100.c 	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
p                1288 drivers/gpu/drm/radeon/r100.c 				radeon_cs_dump_packet(p, pkt);
p                1295 drivers/gpu/drm/radeon/r100.c 		p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
p                1297 drivers/gpu/drm/radeon/r100.c 		p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
p                1301 drivers/gpu/drm/radeon/r100.c int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
p                1312 drivers/gpu/drm/radeon/r100.c 	ib = p->ib.ptr;
p                1313 drivers/gpu/drm/radeon/r100.c 	track = (struct r100_cs_track *)p->track;
p                1314 drivers/gpu/drm/radeon/r100.c 	c = radeon_get_ib_value(p, idx++) & 0x1F;
p                1318 drivers/gpu/drm/radeon/r100.c 	    radeon_cs_dump_packet(p, pkt);
p                1323 drivers/gpu/drm/radeon/r100.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1327 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
p                1330 drivers/gpu/drm/radeon/r100.c 		idx_value = radeon_get_ib_value(p, idx);
p                1331 drivers/gpu/drm/radeon/r100.c 		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
p                1336 drivers/gpu/drm/radeon/r100.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1340 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
p                1343 drivers/gpu/drm/radeon/r100.c 		ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset);
p                1349 drivers/gpu/drm/radeon/r100.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1353 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
p                1356 drivers/gpu/drm/radeon/r100.c 		idx_value = radeon_get_ib_value(p, idx);
p                1357 drivers/gpu/drm/radeon/r100.c 		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
p                1365 drivers/gpu/drm/radeon/r100.c int r100_cs_parse_packet0(struct radeon_cs_parser *p,
p                1394 drivers/gpu/drm/radeon/r100.c 			r = check(p, pkt, idx, reg);
p                1424 drivers/gpu/drm/radeon/r100.c int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
p                1434 drivers/gpu/drm/radeon/r100.c 	ib = p->ib.ptr;
p                1437 drivers/gpu/drm/radeon/r100.c 	r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
p                1448 drivers/gpu/drm/radeon/r100.c 	if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
p                1454 drivers/gpu/drm/radeon/r100.c 	r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
p                1458 drivers/gpu/drm/radeon/r100.c 	h_idx = p->idx - 2;
p                1459 drivers/gpu/drm/radeon/r100.c 	p->idx += waitreloc.count + 2;
p                1460 drivers/gpu/drm/radeon/r100.c 	p->idx += p3reloc.count + 2;
p                1462 drivers/gpu/drm/radeon/r100.c 	header = radeon_get_ib_value(p, h_idx);
p                1463 drivers/gpu/drm/radeon/r100.c 	crtc_id = radeon_get_ib_value(p, h_idx + 5);
p                1465 drivers/gpu/drm/radeon/r100.c 	crtc = drm_crtc_find(p->rdev->ddev, p->filp, crtc_id);
p                1551 drivers/gpu/drm/radeon/r100.c static int r100_packet0_check(struct radeon_cs_parser *p,
p                1564 drivers/gpu/drm/radeon/r100.c 	ib = p->ib.ptr;
p                1565 drivers/gpu/drm/radeon/r100.c 	track = (struct r100_cs_track *)p->track;
p                1567 drivers/gpu/drm/radeon/r100.c 	idx_value = radeon_get_ib_value(p, idx);
p                1571 drivers/gpu/drm/radeon/r100.c 		r = r100_cs_packet_parse_vline(p);
p                1575 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
p                1583 drivers/gpu/drm/radeon/r100.c 		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
p                1588 drivers/gpu/drm/radeon/r100.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1592 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
p                1601 drivers/gpu/drm/radeon/r100.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1605 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
p                1617 drivers/gpu/drm/radeon/r100.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1621 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
p                1624 drivers/gpu/drm/radeon/r100.c 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
p                1644 drivers/gpu/drm/radeon/r100.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1648 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
p                1662 drivers/gpu/drm/radeon/r100.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1666 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
p                1680 drivers/gpu/drm/radeon/r100.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1684 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
p                1698 drivers/gpu/drm/radeon/r100.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1702 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
p                1705 drivers/gpu/drm/radeon/r100.c 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
p                1769 drivers/gpu/drm/radeon/r100.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1773 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
p                1892 drivers/gpu/drm/radeon/r100.c int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
p                1899 drivers/gpu/drm/radeon/r100.c 	value = radeon_get_ib_value(p, idx + 2);
p                1910 drivers/gpu/drm/radeon/r100.c static int r100_packet3_check(struct radeon_cs_parser *p,
p                1919 drivers/gpu/drm/radeon/r100.c 	ib = p->ib.ptr;
p                1921 drivers/gpu/drm/radeon/r100.c 	track = (struct r100_cs_track *)p->track;
p                1924 drivers/gpu/drm/radeon/r100.c 		r = r100_packet3_load_vbpntr(p, pkt, idx);
p                1929 drivers/gpu/drm/radeon/r100.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1932 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
p                1935 drivers/gpu/drm/radeon/r100.c 		ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset);
p                1936 drivers/gpu/drm/radeon/r100.c 		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
p                1943 drivers/gpu/drm/radeon/r100.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1946 drivers/gpu/drm/radeon/r100.c 			radeon_cs_dump_packet(p, pkt);
p                1949 drivers/gpu/drm/radeon/r100.c 		ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset);
p                1951 drivers/gpu/drm/radeon/r100.c 		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
p                1956 drivers/gpu/drm/radeon/r100.c 		track->max_indx = radeon_get_ib_value(p, idx+1);
p                1958 drivers/gpu/drm/radeon/r100.c 		track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
p                1960 drivers/gpu/drm/radeon/r100.c 		r = r100_cs_track_check(p->rdev, track);
p                1965 drivers/gpu/drm/radeon/r100.c 		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
p                1969 drivers/gpu/drm/radeon/r100.c 		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
p                1970 drivers/gpu/drm/radeon/r100.c 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
p                1972 drivers/gpu/drm/radeon/r100.c 		r = r100_cs_track_check(p->rdev, track);
p                1978 drivers/gpu/drm/radeon/r100.c 		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
p                1982 drivers/gpu/drm/radeon/r100.c 		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
p                1984 drivers/gpu/drm/radeon/r100.c 		r = r100_cs_track_check(p->rdev, track);
p                1990 drivers/gpu/drm/radeon/r100.c 		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
p                1991 drivers/gpu/drm/radeon/r100.c 		r = r100_cs_track_check(p->rdev, track);
p                1997 drivers/gpu/drm/radeon/r100.c 		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
p                1998 drivers/gpu/drm/radeon/r100.c 		r = r100_cs_track_check(p->rdev, track);
p                2004 drivers/gpu/drm/radeon/r100.c 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
p                2005 drivers/gpu/drm/radeon/r100.c 		r = r100_cs_track_check(p->rdev, track);
p                2011 drivers/gpu/drm/radeon/r100.c 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
p                2012 drivers/gpu/drm/radeon/r100.c 		r = r100_cs_track_check(p->rdev, track);
p                2019 drivers/gpu/drm/radeon/r100.c 		if (p->rdev->hyperz_filp != p->filp)
p                2031 drivers/gpu/drm/radeon/r100.c int r100_cs_parse(struct radeon_cs_parser *p)
p                2040 drivers/gpu/drm/radeon/r100.c 	r100_cs_track_clear(p->rdev, track);
p                2041 drivers/gpu/drm/radeon/r100.c 	p->track = track;
p                2043 drivers/gpu/drm/radeon/r100.c 		r = radeon_cs_packet_parse(p, &pkt, p->idx);
p                2047 drivers/gpu/drm/radeon/r100.c 		p->idx += pkt.count + 2;
p                2050 drivers/gpu/drm/radeon/r100.c 			if (p->rdev->family >= CHIP_R200)
p                2051 drivers/gpu/drm/radeon/r100.c 				r = r100_cs_parse_packet0(p, &pkt,
p                2052 drivers/gpu/drm/radeon/r100.c 					p->rdev->config.r100.reg_safe_bm,
p                2053 drivers/gpu/drm/radeon/r100.c 					p->rdev->config.r100.reg_safe_bm_size,
p                2056 drivers/gpu/drm/radeon/r100.c 				r = r100_cs_parse_packet0(p, &pkt,
p                2057 drivers/gpu/drm/radeon/r100.c 					p->rdev->config.r100.reg_safe_bm,
p                2058 drivers/gpu/drm/radeon/r100.c 					p->rdev->config.r100.reg_safe_bm_size,
p                2064 drivers/gpu/drm/radeon/r100.c 			r = r100_packet3_check(p, &pkt);
p                2073 drivers/gpu/drm/radeon/r100.c 	} while (p->idx < p->chunk_ib->length_dw);
p                  88 drivers/gpu/drm/radeon/r100_track.h int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
p                  90 drivers/gpu/drm/radeon/r100_track.h int r200_packet0_check(struct radeon_cs_parser *p,
p                  94 drivers/gpu/drm/radeon/r100_track.h int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
p                  98 drivers/gpu/drm/radeon/r100_track.h int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
p                 145 drivers/gpu/drm/radeon/r200.c int r200_packet0_check(struct radeon_cs_parser *p,
p                 159 drivers/gpu/drm/radeon/r200.c 	ib = p->ib.ptr;
p                 160 drivers/gpu/drm/radeon/r200.c 	track = (struct r100_cs_track *)p->track;
p                 161 drivers/gpu/drm/radeon/r200.c 	idx_value = radeon_get_ib_value(p, idx);
p                 164 drivers/gpu/drm/radeon/r200.c 		r = r100_cs_packet_parse_vline(p);
p                 168 drivers/gpu/drm/radeon/r200.c 			radeon_cs_dump_packet(p, pkt);
p                 176 drivers/gpu/drm/radeon/r200.c 		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
p                 181 drivers/gpu/drm/radeon/r200.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                 185 drivers/gpu/drm/radeon/r200.c 			radeon_cs_dump_packet(p, pkt);
p                 194 drivers/gpu/drm/radeon/r200.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                 198 drivers/gpu/drm/radeon/r200.c 			radeon_cs_dump_packet(p, pkt);
p                 213 drivers/gpu/drm/radeon/r200.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                 217 drivers/gpu/drm/radeon/r200.c 			radeon_cs_dump_packet(p, pkt);
p                 220 drivers/gpu/drm/radeon/r200.c 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
p                 266 drivers/gpu/drm/radeon/r200.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                 270 drivers/gpu/drm/radeon/r200.c 			radeon_cs_dump_packet(p, pkt);
p                 284 drivers/gpu/drm/radeon/r200.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                 288 drivers/gpu/drm/radeon/r200.c 			radeon_cs_dump_packet(p, pkt);
p                 292 drivers/gpu/drm/radeon/r200.c 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
p                 361 drivers/gpu/drm/radeon/r200.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                 365 drivers/gpu/drm/radeon/r200.c 			radeon_cs_dump_packet(p, pkt);
p                 631 drivers/gpu/drm/radeon/r300.c static int r300_packet0_check(struct radeon_cs_parser *p,
p                 643 drivers/gpu/drm/radeon/r300.c 	ib = p->ib.ptr;
p                 644 drivers/gpu/drm/radeon/r300.c 	track = (struct r100_cs_track *)p->track;
p                 645 drivers/gpu/drm/radeon/r300.c 	idx_value = radeon_get_ib_value(p, idx);
p                 650 drivers/gpu/drm/radeon/r300.c 		r = r100_cs_packet_parse_vline(p);
p                 654 drivers/gpu/drm/radeon/r300.c 			radeon_cs_dump_packet(p, pkt);
p                 660 drivers/gpu/drm/radeon/r300.c 		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
p                 669 drivers/gpu/drm/radeon/r300.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                 673 drivers/gpu/drm/radeon/r300.c 			radeon_cs_dump_packet(p, pkt);
p                 682 drivers/gpu/drm/radeon/r300.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                 686 drivers/gpu/drm/radeon/r300.c 			radeon_cs_dump_packet(p, pkt);
p                 711 drivers/gpu/drm/radeon/r300.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                 715 drivers/gpu/drm/radeon/r300.c 			radeon_cs_dump_packet(p, pkt);
p                 719 drivers/gpu/drm/radeon/r300.c 		if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
p                 752 drivers/gpu/drm/radeon/r300.c 		if (p->rdev->family < CHIP_RV515)
p                 759 drivers/gpu/drm/radeon/r300.c 		if (p->rdev->family < CHIP_RV515) {
p                 768 drivers/gpu/drm/radeon/r300.c 		    p->rdev->cmask_filp != p->filp) {
p                 783 drivers/gpu/drm/radeon/r300.c 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
p                 784 drivers/gpu/drm/radeon/r300.c 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                 788 drivers/gpu/drm/radeon/r300.c 				radeon_cs_dump_packet(p, pkt);
p                 818 drivers/gpu/drm/radeon/r300.c 			if (p->rdev->family < CHIP_RV515) {
p                 868 drivers/gpu/drm/radeon/r300.c 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
p                 869 drivers/gpu/drm/radeon/r300.c 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                 873 drivers/gpu/drm/radeon/r300.c 				radeon_cs_dump_packet(p, pkt);
p                 968 drivers/gpu/drm/radeon/r300.c 			if (p->rdev->family < CHIP_R420) {
p                1035 drivers/gpu/drm/radeon/r300.c 		if (p->rdev->family >= CHIP_RV515) {
p                1084 drivers/gpu/drm/radeon/r300.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1088 drivers/gpu/drm/radeon/r300.c 			radeon_cs_dump_packet(p, pkt);
p                1102 drivers/gpu/drm/radeon/r300.c 		if (p->rdev->hyperz_filp != p->filp) {
p                1112 drivers/gpu/drm/radeon/r300.c 		if (p->rdev->hyperz_filp != p->filp) {
p                1126 drivers/gpu/drm/radeon/r300.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1130 drivers/gpu/drm/radeon/r300.c 			radeon_cs_dump_packet(p, pkt);
p                1150 drivers/gpu/drm/radeon/r300.c 		if (idx_value && (p->rdev->hyperz_filp != p->filp))
p                1154 drivers/gpu/drm/radeon/r300.c 		if (idx_value && (p->rdev->hyperz_filp != p->filp))
p                1157 drivers/gpu/drm/radeon/r300.c 		if (p->rdev->family >= CHIP_RV350)
p                1163 drivers/gpu/drm/radeon/r300.c 		if (p->rdev->family == CHIP_RV530)
p                1176 drivers/gpu/drm/radeon/r300.c static int r300_packet3_check(struct radeon_cs_parser *p,
p                1185 drivers/gpu/drm/radeon/r300.c 	ib = p->ib.ptr;
p                1187 drivers/gpu/drm/radeon/r300.c 	track = (struct r100_cs_track *)p->track;
p                1190 drivers/gpu/drm/radeon/r300.c 		r = r100_packet3_load_vbpntr(p, pkt, idx);
p                1195 drivers/gpu/drm/radeon/r300.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1198 drivers/gpu/drm/radeon/r300.c 			radeon_cs_dump_packet(p, pkt);
p                1201 drivers/gpu/drm/radeon/r300.c 		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
p                1202 drivers/gpu/drm/radeon/r300.c 		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
p                1212 drivers/gpu/drm/radeon/r300.c 		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
p                1216 drivers/gpu/drm/radeon/r300.c 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
p                1218 drivers/gpu/drm/radeon/r300.c 		r = r100_cs_track_check(p->rdev, track);
p                1227 drivers/gpu/drm/radeon/r300.c 		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
p                1231 drivers/gpu/drm/radeon/r300.c 		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
p                1233 drivers/gpu/drm/radeon/r300.c 		r = r100_cs_track_check(p->rdev, track);
p                1239 drivers/gpu/drm/radeon/r300.c 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
p                1240 drivers/gpu/drm/radeon/r300.c 		r = r100_cs_track_check(p->rdev, track);
p                1246 drivers/gpu/drm/radeon/r300.c 		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
p                1247 drivers/gpu/drm/radeon/r300.c 		r = r100_cs_track_check(p->rdev, track);
p                1253 drivers/gpu/drm/radeon/r300.c 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
p                1254 drivers/gpu/drm/radeon/r300.c 		r = r100_cs_track_check(p->rdev, track);
p                1260 drivers/gpu/drm/radeon/r300.c 		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
p                1261 drivers/gpu/drm/radeon/r300.c 		r = r100_cs_track_check(p->rdev, track);
p                1268 drivers/gpu/drm/radeon/r300.c 		if (p->rdev->hyperz_filp != p->filp)
p                1272 drivers/gpu/drm/radeon/r300.c 		if (p->rdev->cmask_filp != p->filp)
p                1284 drivers/gpu/drm/radeon/r300.c int r300_cs_parse(struct radeon_cs_parser *p)
p                1293 drivers/gpu/drm/radeon/r300.c 	r100_cs_track_clear(p->rdev, track);
p                1294 drivers/gpu/drm/radeon/r300.c 	p->track = track;
p                1296 drivers/gpu/drm/radeon/r300.c 		r = radeon_cs_packet_parse(p, &pkt, p->idx);
p                1300 drivers/gpu/drm/radeon/r300.c 		p->idx += pkt.count + 2;
p                1303 drivers/gpu/drm/radeon/r300.c 			r = r100_cs_parse_packet0(p, &pkt,
p                1304 drivers/gpu/drm/radeon/r300.c 						  p->rdev->config.r300.reg_safe_bm,
p                1305 drivers/gpu/drm/radeon/r300.c 						  p->rdev->config.r300.reg_safe_bm_size,
p                1311 drivers/gpu/drm/radeon/r300.c 			r = r300_packet3_check(p, &pkt);
p                1320 drivers/gpu/drm/radeon/r300.c 	} while (p->idx < p->chunk_ib->length_dw);
p                 350 drivers/gpu/drm/radeon/r600_cs.c static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
p                 352 drivers/gpu/drm/radeon/r600_cs.c 	struct r600_cs_track *track = p->track;
p                 357 drivers/gpu/drm/radeon/r600_cs.c 	volatile u32 *ib = p->ib.ptr;
p                 366 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
p                 389 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
p                 407 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
p                 414 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
p                 419 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
p                 424 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
p                 452 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
p                 483 drivers/gpu/drm/radeon/r600_cs.c 				dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
p                 501 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
p                 511 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "%s invalid tile mode\n", __func__);
p                 517 drivers/gpu/drm/radeon/r600_cs.c static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
p                 519 drivers/gpu/drm/radeon/r600_cs.c 	struct r600_cs_track *track = p->track;
p                 527 drivers/gpu/drm/radeon/r600_cs.c 	volatile u32 *ib = p->ib.ptr;
p                 531 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "z/stencil with no depth buffer\n");
p                 549 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
p                 554 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "z/stencil buffer size not set\n");
p                 560 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
p                 585 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
p                 598 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
p                 605 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
p                 610 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
p                 615 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
p                 624 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
p                 638 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
p                 643 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
p                 682 drivers/gpu/drm/radeon/r600_cs.c 				dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
p                 695 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
p                 706 drivers/gpu/drm/radeon/r600_cs.c static int r600_cs_track_check(struct radeon_cs_parser *p)
p                 708 drivers/gpu/drm/radeon/r600_cs.c 	struct r600_cs_track *track = p->track;
p                 713 drivers/gpu/drm/radeon/r600_cs.c 	if (p->rdev == NULL)
p                 730 drivers/gpu/drm/radeon/r600_cs.c 					dev_warn(p->dev, "No buffer for streamout %d\n", i);
p                 759 drivers/gpu/drm/radeon/r600_cs.c 					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
p                 764 drivers/gpu/drm/radeon/r600_cs.c 				r = r600_cs_track_validate_cb(p, i);
p                 777 drivers/gpu/drm/radeon/r600_cs.c 		r = r600_cs_track_validate_db(p);
p                 794 drivers/gpu/drm/radeon/r600_cs.c static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
p                 801 drivers/gpu/drm/radeon/r600_cs.c 	return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
p                 825 drivers/gpu/drm/radeon/r600_cs.c int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
p                 837 drivers/gpu/drm/radeon/r600_cs.c 	ib = p->ib.ptr;
p                 840 drivers/gpu/drm/radeon/r600_cs.c 	r = radeon_cs_packet_parse(p, &wait_reg_mem, p->idx);
p                 851 drivers/gpu/drm/radeon/r600_cs.c 	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
p                 867 drivers/gpu/drm/radeon/r600_cs.c 	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
p                 872 drivers/gpu/drm/radeon/r600_cs.c 	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
p                 878 drivers/gpu/drm/radeon/r600_cs.c 	r = radeon_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
p                 882 drivers/gpu/drm/radeon/r600_cs.c 	h_idx = p->idx - 2;
p                 883 drivers/gpu/drm/radeon/r600_cs.c 	p->idx += wait_reg_mem.count + 2;
p                 884 drivers/gpu/drm/radeon/r600_cs.c 	p->idx += p3reloc.count + 2;
p                 886 drivers/gpu/drm/radeon/r600_cs.c 	header = radeon_get_ib_value(p, h_idx);
p                 887 drivers/gpu/drm/radeon/r600_cs.c 	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
p                 890 drivers/gpu/drm/radeon/r600_cs.c 	crtc = drm_crtc_find(p->rdev->ddev, p->filp, crtc_id);
p                 919 drivers/gpu/drm/radeon/r600_cs.c static int r600_packet0_check(struct radeon_cs_parser *p,
p                 927 drivers/gpu/drm/radeon/r600_cs.c 		r = r600_cs_packet_parse_vline(p);
p                 941 drivers/gpu/drm/radeon/r600_cs.c static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
p                 951 drivers/gpu/drm/radeon/r600_cs.c 		r = r600_packet0_check(p, pkt, idx, reg);
p                 969 drivers/gpu/drm/radeon/r600_cs.c static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
p                 971 drivers/gpu/drm/radeon/r600_cs.c 	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
p                 978 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
p                 984 drivers/gpu/drm/radeon/r600_cs.c 	ib = p->ib.ptr;
p                1017 drivers/gpu/drm/radeon/r600_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
p                1019 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1026 drivers/gpu/drm/radeon/r600_cs.c 		track->sq_config = radeon_get_ib_value(p, idx);
p                1029 drivers/gpu/drm/radeon/r600_cs.c 		track->db_depth_control = radeon_get_ib_value(p, idx);
p                1033 drivers/gpu/drm/radeon/r600_cs.c 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
p                1034 drivers/gpu/drm/radeon/r600_cs.c 		    radeon_cs_packet_next_is_pkt3_nop(p)) {
p                1035 drivers/gpu/drm/radeon/r600_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1037 drivers/gpu/drm/radeon/r600_cs.c 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1041 drivers/gpu/drm/radeon/r600_cs.c 			track->db_depth_info = radeon_get_ib_value(p, idx);
p                1052 drivers/gpu/drm/radeon/r600_cs.c 			track->db_depth_info = radeon_get_ib_value(p, idx);
p                1057 drivers/gpu/drm/radeon/r600_cs.c 		track->db_depth_view = radeon_get_ib_value(p, idx);
p                1061 drivers/gpu/drm/radeon/r600_cs.c 		track->db_depth_size = radeon_get_ib_value(p, idx);
p                1066 drivers/gpu/drm/radeon/r600_cs.c 		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
p                1070 drivers/gpu/drm/radeon/r600_cs.c 		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
p                1077 drivers/gpu/drm/radeon/r600_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1079 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1084 drivers/gpu/drm/radeon/r600_cs.c 		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
p                1096 drivers/gpu/drm/radeon/r600_cs.c 		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
p                1100 drivers/gpu/drm/radeon/r600_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1102 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
p                1109 drivers/gpu/drm/radeon/r600_cs.c 		track->cb_target_mask = radeon_get_ib_value(p, idx);
p                1113 drivers/gpu/drm/radeon/r600_cs.c 		track->cb_shader_mask = radeon_get_ib_value(p, idx);
p                1116 drivers/gpu/drm/radeon/r600_cs.c 		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
p                1122 drivers/gpu/drm/radeon/r600_cs.c 		tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
p                1134 drivers/gpu/drm/radeon/r600_cs.c 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
p                1135 drivers/gpu/drm/radeon/r600_cs.c 		     radeon_cs_packet_next_is_pkt3_nop(p)) {
p                1136 drivers/gpu/drm/radeon/r600_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1138 drivers/gpu/drm/radeon/r600_cs.c 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
p                1142 drivers/gpu/drm/radeon/r600_cs.c 			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
p                1152 drivers/gpu/drm/radeon/r600_cs.c 			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
p                1165 drivers/gpu/drm/radeon/r600_cs.c 		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
p                1177 drivers/gpu/drm/radeon/r600_cs.c 		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
p                1199 drivers/gpu/drm/radeon/r600_cs.c 		if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
p                1201 drivers/gpu/drm/radeon/r600_cs.c 				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
p                1208 drivers/gpu/drm/radeon/r600_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1210 drivers/gpu/drm/radeon/r600_cs.c 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
p                1230 drivers/gpu/drm/radeon/r600_cs.c 		if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
p                1232 drivers/gpu/drm/radeon/r600_cs.c 				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
p                1239 drivers/gpu/drm/radeon/r600_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1241 drivers/gpu/drm/radeon/r600_cs.c 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
p                1261 drivers/gpu/drm/radeon/r600_cs.c 		track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
p                1274 drivers/gpu/drm/radeon/r600_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1276 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1281 drivers/gpu/drm/radeon/r600_cs.c 		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
p                1289 drivers/gpu/drm/radeon/r600_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1291 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1295 drivers/gpu/drm/radeon/r600_cs.c 		track->db_offset = radeon_get_ib_value(p, idx) << 8;
p                1302 drivers/gpu/drm/radeon/r600_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1304 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1308 drivers/gpu/drm/radeon/r600_cs.c 		track->htile_offset = radeon_get_ib_value(p, idx) << 8;
p                1314 drivers/gpu/drm/radeon/r600_cs.c 		track->htile_surface = radeon_get_ib_value(p, idx);
p                1372 drivers/gpu/drm/radeon/r600_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1374 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
p                1381 drivers/gpu/drm/radeon/r600_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1383 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "bad SET_CONFIG_REG "
p                1390 drivers/gpu/drm/radeon/r600_cs.c 		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
p                1393 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
p                1469 drivers/gpu/drm/radeon/r600_cs.c static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
p                1476 drivers/gpu/drm/radeon/r600_cs.c 	struct r600_cs_track *track = p->track;
p                1487 drivers/gpu/drm/radeon/r600_cs.c 	if (p->rdev == NULL)
p                1494 drivers/gpu/drm/radeon/r600_cs.c 	word0 = radeon_get_ib_value(p, idx + 0);
p                1495 drivers/gpu/drm/radeon/r600_cs.c 	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
p                1501 drivers/gpu/drm/radeon/r600_cs.c 	word1 = radeon_get_ib_value(p, idx + 1);
p                1502 drivers/gpu/drm/radeon/r600_cs.c 	word2 = radeon_get_ib_value(p, idx + 2) << 8;
p                1503 drivers/gpu/drm/radeon/r600_cs.c 	word3 = radeon_get_ib_value(p, idx + 3) << 8;
p                1504 drivers/gpu/drm/radeon/r600_cs.c 	word4 = radeon_get_ib_value(p, idx + 4);
p                1505 drivers/gpu/drm/radeon/r600_cs.c 	word5 = radeon_get_ib_value(p, idx + 5);
p                1529 drivers/gpu/drm/radeon/r600_cs.c 		if (p->family >= CHIP_RV770)
p                1546 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
p                1549 drivers/gpu/drm/radeon/r600_cs.c 	if (!r600_fmt_is_valid_texture(format, p->family)) {
p                1550 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
p                1557 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
p                1565 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
p                1570 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
p                1575 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
p                1581 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
p                1595 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
p                1599 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
p                1610 drivers/gpu/drm/radeon/r600_cs.c static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
p                1616 drivers/gpu/drm/radeon/r600_cs.c 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
p                1622 drivers/gpu/drm/radeon/r600_cs.c 	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
p                1626 drivers/gpu/drm/radeon/r600_cs.c static int r600_packet3_check(struct radeon_cs_parser *p,
p                1638 drivers/gpu/drm/radeon/r600_cs.c 	track = (struct r600_cs_track *)p->track;
p                1639 drivers/gpu/drm/radeon/r600_cs.c 	ib = p->ib.ptr;
p                1641 drivers/gpu/drm/radeon/r600_cs.c 	idx_value = radeon_get_ib_value(p, idx);
p                1655 drivers/gpu/drm/radeon/r600_cs.c 		tmp = radeon_get_ib_value(p, idx + 1);
p                1667 drivers/gpu/drm/radeon/r600_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1683 drivers/gpu/drm/radeon/r600_cs.c 		if (p->family >= CHIP_RV770 || pkt->count) {
p                1708 drivers/gpu/drm/radeon/r600_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1716 drivers/gpu/drm/radeon/r600_cs.c 			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
p                1721 drivers/gpu/drm/radeon/r600_cs.c 		r = r600_cs_track_check(p);
p                1723 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
p                1733 drivers/gpu/drm/radeon/r600_cs.c 		r = r600_cs_track_check(p);
p                1735 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
p                1745 drivers/gpu/drm/radeon/r600_cs.c 		r = r600_cs_track_check(p);
p                1747 drivers/gpu/drm/radeon/r600_cs.c 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
p                1760 drivers/gpu/drm/radeon/r600_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1767 drivers/gpu/drm/radeon/r600_cs.c 				 (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
p                1768 drivers/gpu/drm/radeon/r600_cs.c 				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
p                1785 drivers/gpu/drm/radeon/r600_cs.c 		command = radeon_get_ib_value(p, idx+4);
p                1797 drivers/gpu/drm/radeon/r600_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1803 drivers/gpu/drm/radeon/r600_cs.c 			tmp = radeon_get_ib_value(p, idx) +
p                1804 drivers/gpu/drm/radeon/r600_cs.c 				((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
p                1809 drivers/gpu/drm/radeon/r600_cs.c 				dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
p                1827 drivers/gpu/drm/radeon/r600_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1833 drivers/gpu/drm/radeon/r600_cs.c 			tmp = radeon_get_ib_value(p, idx+2) +
p                1834 drivers/gpu/drm/radeon/r600_cs.c 				((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
p                1839 drivers/gpu/drm/radeon/r600_cs.c 				dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
p                1855 drivers/gpu/drm/radeon/r600_cs.c 		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
p                1856 drivers/gpu/drm/radeon/r600_cs.c 		    radeon_get_ib_value(p, idx + 2) != 0) {
p                1857 drivers/gpu/drm/radeon/r600_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1873 drivers/gpu/drm/radeon/r600_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1879 drivers/gpu/drm/radeon/r600_cs.c 				 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
p                1880 drivers/gpu/drm/radeon/r600_cs.c 				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
p                1894 drivers/gpu/drm/radeon/r600_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1901 drivers/gpu/drm/radeon/r600_cs.c 			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
p                1902 drivers/gpu/drm/radeon/r600_cs.c 			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
p                1919 drivers/gpu/drm/radeon/r600_cs.c 			r = r600_cs_check_reg(p, reg, idx+1+i);
p                1935 drivers/gpu/drm/radeon/r600_cs.c 			r = r600_cs_check_reg(p, reg, idx+1+i);
p                1957 drivers/gpu/drm/radeon/r600_cs.c 			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
p                1960 drivers/gpu/drm/radeon/r600_cs.c 				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1966 drivers/gpu/drm/radeon/r600_cs.c 				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
p                1974 drivers/gpu/drm/radeon/r600_cs.c 				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                1981 drivers/gpu/drm/radeon/r600_cs.c 				r = r600_check_texture_resource(p,  idx+(i*7)+1,
p                1983 drivers/gpu/drm/radeon/r600_cs.c 								base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
p                1984 drivers/gpu/drm/radeon/r600_cs.c 								mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
p                1995 drivers/gpu/drm/radeon/r600_cs.c 				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                2000 drivers/gpu/drm/radeon/r600_cs.c 				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
p                2001 drivers/gpu/drm/radeon/r600_cs.c 				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
p                2002 drivers/gpu/drm/radeon/r600_cs.c 				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
p                2004 drivers/gpu/drm/radeon/r600_cs.c 					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
p                2081 drivers/gpu/drm/radeon/r600_cs.c 		if (p->family < CHIP_RS780) {
p                2096 drivers/gpu/drm/radeon/r600_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                2107 drivers/gpu/drm/radeon/r600_cs.c 			offset = radeon_get_ib_value(p, idx+1) << 8;
p                2123 drivers/gpu/drm/radeon/r600_cs.c 		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
p                2140 drivers/gpu/drm/radeon/r600_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                2145 drivers/gpu/drm/radeon/r600_cs.c 			offset = radeon_get_ib_value(p, idx+1);
p                2146 drivers/gpu/drm/radeon/r600_cs.c 			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
p                2159 drivers/gpu/drm/radeon/r600_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                2164 drivers/gpu/drm/radeon/r600_cs.c 			offset = radeon_get_ib_value(p, idx+3);
p                2165 drivers/gpu/drm/radeon/r600_cs.c 			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
p                2184 drivers/gpu/drm/radeon/r600_cs.c 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                2189 drivers/gpu/drm/radeon/r600_cs.c 		offset = radeon_get_ib_value(p, idx+0);
p                2190 drivers/gpu/drm/radeon/r600_cs.c 		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
p                2213 drivers/gpu/drm/radeon/r600_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                2218 drivers/gpu/drm/radeon/r600_cs.c 			offset = radeon_get_ib_value(p, idx+1);
p                2219 drivers/gpu/drm/radeon/r600_cs.c 			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
p                2230 drivers/gpu/drm/radeon/r600_cs.c 			reg = radeon_get_ib_value(p, idx+1) << 2;
p                2231 drivers/gpu/drm/radeon/r600_cs.c 			if (!r600_is_safe_reg(p, reg, idx+1))
p                2237 drivers/gpu/drm/radeon/r600_cs.c 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
p                2242 drivers/gpu/drm/radeon/r600_cs.c 			offset = radeon_get_ib_value(p, idx+3);
p                2243 drivers/gpu/drm/radeon/r600_cs.c 			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
p                2254 drivers/gpu/drm/radeon/r600_cs.c 			reg = radeon_get_ib_value(p, idx+3) << 2;
p                2255 drivers/gpu/drm/radeon/r600_cs.c 			if (!r600_is_safe_reg(p, reg, idx+3))
p                2268 drivers/gpu/drm/radeon/r600_cs.c int r600_cs_parse(struct radeon_cs_parser *p)
p                2274 drivers/gpu/drm/radeon/r600_cs.c 	if (p->track == NULL) {
p                2280 drivers/gpu/drm/radeon/r600_cs.c 		if (p->rdev->family < CHIP_RV770) {
p                2281 drivers/gpu/drm/radeon/r600_cs.c 			track->npipes = p->rdev->config.r600.tiling_npipes;
p                2282 drivers/gpu/drm/radeon/r600_cs.c 			track->nbanks = p->rdev->config.r600.tiling_nbanks;
p                2283 drivers/gpu/drm/radeon/r600_cs.c 			track->group_size = p->rdev->config.r600.tiling_group_size;
p                2284 drivers/gpu/drm/radeon/r600_cs.c 		} else if (p->rdev->family <= CHIP_RV740) {
p                2285 drivers/gpu/drm/radeon/r600_cs.c 			track->npipes = p->rdev->config.rv770.tiling_npipes;
p                2286 drivers/gpu/drm/radeon/r600_cs.c 			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
p                2287 drivers/gpu/drm/radeon/r600_cs.c 			track->group_size = p->rdev->config.rv770.tiling_group_size;
p                2289 drivers/gpu/drm/radeon/r600_cs.c 		p->track = track;
p                2292 drivers/gpu/drm/radeon/r600_cs.c 		r = radeon_cs_packet_parse(p, &pkt, p->idx);
p                2294 drivers/gpu/drm/radeon/r600_cs.c 			kfree(p->track);
p                2295 drivers/gpu/drm/radeon/r600_cs.c 			p->track = NULL;
p                2298 drivers/gpu/drm/radeon/r600_cs.c 		p->idx += pkt.count + 2;
p                2301 drivers/gpu/drm/radeon/r600_cs.c 			r = r600_cs_parse_packet0(p, &pkt);
p                2306 drivers/gpu/drm/radeon/r600_cs.c 			r = r600_packet3_check(p, &pkt);
p                2310 drivers/gpu/drm/radeon/r600_cs.c 			kfree(p->track);
p                2311 drivers/gpu/drm/radeon/r600_cs.c 			p->track = NULL;
p                2315 drivers/gpu/drm/radeon/r600_cs.c 			kfree(p->track);
p                2316 drivers/gpu/drm/radeon/r600_cs.c 			p->track = NULL;
p                2319 drivers/gpu/drm/radeon/r600_cs.c 	} while (p->idx < p->chunk_ib->length_dw);
p                2321 drivers/gpu/drm/radeon/r600_cs.c 	for (r = 0; r < p->ib.length_dw; r++) {
p                2322 drivers/gpu/drm/radeon/r600_cs.c 		pr_info("%05d  0x%08X\n", r, p->ib.ptr[r]);
p                2326 drivers/gpu/drm/radeon/r600_cs.c 	kfree(p->track);
p                2327 drivers/gpu/drm/radeon/r600_cs.c 	p->track = NULL;
p                2342 drivers/gpu/drm/radeon/r600_cs.c int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
p                2349 drivers/gpu/drm/radeon/r600_cs.c 	if (p->chunk_relocs == NULL) {
p                2353 drivers/gpu/drm/radeon/r600_cs.c 	relocs_chunk = p->chunk_relocs;
p                2354 drivers/gpu/drm/radeon/r600_cs.c 	idx = p->dma_reloc_idx;
p                2355 drivers/gpu/drm/radeon/r600_cs.c 	if (idx >= p->nrelocs) {
p                2357 drivers/gpu/drm/radeon/r600_cs.c 			  idx, p->nrelocs);
p                2360 drivers/gpu/drm/radeon/r600_cs.c 	*cs_reloc = &p->relocs[idx];
p                2361 drivers/gpu/drm/radeon/r600_cs.c 	p->dma_reloc_idx++;
p                2378 drivers/gpu/drm/radeon/r600_cs.c int r600_dma_cs_parse(struct radeon_cs_parser *p)
p                2380 drivers/gpu/drm/radeon/r600_cs.c 	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
p                2383 drivers/gpu/drm/radeon/r600_cs.c 	volatile u32 *ib = p->ib.ptr;
p                2389 drivers/gpu/drm/radeon/r600_cs.c 		if (p->idx >= ib_chunk->length_dw) {
p                2391 drivers/gpu/drm/radeon/r600_cs.c 				  p->idx, ib_chunk->length_dw);
p                2394 drivers/gpu/drm/radeon/r600_cs.c 		idx = p->idx;
p                2395 drivers/gpu/drm/radeon/r600_cs.c 		header = radeon_get_ib_value(p, idx);
p                2402 drivers/gpu/drm/radeon/r600_cs.c 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
p                2408 drivers/gpu/drm/radeon/r600_cs.c 				dst_offset = radeon_get_ib_value(p, idx+1);
p                2412 drivers/gpu/drm/radeon/r600_cs.c 				p->idx += count + 5;
p                2414 drivers/gpu/drm/radeon/r600_cs.c 				dst_offset = radeon_get_ib_value(p, idx+1);
p                2415 drivers/gpu/drm/radeon/r600_cs.c 				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
p                2419 drivers/gpu/drm/radeon/r600_cs.c 				p->idx += count + 3;
p                2422 drivers/gpu/drm/radeon/r600_cs.c 				dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
p                2428 drivers/gpu/drm/radeon/r600_cs.c 			r = r600_dma_cs_next_reloc(p, &src_reloc);
p                2433 drivers/gpu/drm/radeon/r600_cs.c 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
p                2439 drivers/gpu/drm/radeon/r600_cs.c 				idx_value = radeon_get_ib_value(p, idx + 2);
p                2443 drivers/gpu/drm/radeon/r600_cs.c 					src_offset = radeon_get_ib_value(p, idx+1);
p                2447 drivers/gpu/drm/radeon/r600_cs.c 					dst_offset = radeon_get_ib_value(p, idx+5);
p                2448 drivers/gpu/drm/radeon/r600_cs.c 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
p                2453 drivers/gpu/drm/radeon/r600_cs.c 					src_offset = radeon_get_ib_value(p, idx+5);
p                2454 drivers/gpu/drm/radeon/r600_cs.c 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
p                2458 drivers/gpu/drm/radeon/r600_cs.c 					dst_offset = radeon_get_ib_value(p, idx+1);
p                2462 drivers/gpu/drm/radeon/r600_cs.c 				p->idx += 7;
p                2464 drivers/gpu/drm/radeon/r600_cs.c 				if (p->family >= CHIP_RV770) {
p                2465 drivers/gpu/drm/radeon/r600_cs.c 					src_offset = radeon_get_ib_value(p, idx+2);
p                2466 drivers/gpu/drm/radeon/r600_cs.c 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
p                2467 drivers/gpu/drm/radeon/r600_cs.c 					dst_offset = radeon_get_ib_value(p, idx+1);
p                2468 drivers/gpu/drm/radeon/r600_cs.c 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
p                2474 drivers/gpu/drm/radeon/r600_cs.c 					p->idx += 5;
p                2476 drivers/gpu/drm/radeon/r600_cs.c 					src_offset = radeon_get_ib_value(p, idx+2);
p                2477 drivers/gpu/drm/radeon/r600_cs.c 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
p                2478 drivers/gpu/drm/radeon/r600_cs.c 					dst_offset = radeon_get_ib_value(p, idx+1);
p                2479 drivers/gpu/drm/radeon/r600_cs.c 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
p                2485 drivers/gpu/drm/radeon/r600_cs.c 					p->idx += 4;
p                2489 drivers/gpu/drm/radeon/r600_cs.c 				dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
p                2494 drivers/gpu/drm/radeon/r600_cs.c 				dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
p                2500 drivers/gpu/drm/radeon/r600_cs.c 			if (p->family < CHIP_RV770) {
p                2504 drivers/gpu/drm/radeon/r600_cs.c 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
p                2509 drivers/gpu/drm/radeon/r600_cs.c 			dst_offset = radeon_get_ib_value(p, idx+1);
p                2510 drivers/gpu/drm/radeon/r600_cs.c 			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
p                2512 drivers/gpu/drm/radeon/r600_cs.c 				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
p                2518 drivers/gpu/drm/radeon/r600_cs.c 			p->idx += 4;
p                2521 drivers/gpu/drm/radeon/r600_cs.c 			p->idx += 1;
p                2527 drivers/gpu/drm/radeon/r600_cs.c 	} while (p->idx < p->chunk_ib->length_dw);
p                2529 drivers/gpu/drm/radeon/r600_cs.c 	for (r = 0; r < p->ib->length_dw; r++) {
p                2530 drivers/gpu/drm/radeon/r600_cs.c 		pr_info("%05d  0x%08X\n", r, p->ib.ptr[r]);
p                 203 drivers/gpu/drm/radeon/r600_dpm.c 			    u32 *p, u32 *u)
p                 218 drivers/gpu/drm/radeon/r600_dpm.c 	*p = i_c / (1 << (2 * (*u)));
p                 338 drivers/gpu/drm/radeon/r600_dpm.c void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
p                 340 drivers/gpu/drm/radeon/r600_dpm.c 	WREG32(CG_BSP, BSP(p) | BSU(u));
p                 410 drivers/gpu/drm/radeon/r600_dpm.c void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
p                 412 drivers/gpu/drm/radeon/r600_dpm.c 	WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);
p                 425 drivers/gpu/drm/radeon/r600_dpm.c void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
p                 427 drivers/gpu/drm/radeon/r600_dpm.c 	WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);
p                 141 drivers/gpu/drm/radeon/r600_dpm.h 			    u32 *p, u32 *u);
p                 153 drivers/gpu/drm/radeon/r600_dpm.h void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p);
p                 167 drivers/gpu/drm/radeon/r600_dpm.h void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p);
p                 170 drivers/gpu/drm/radeon/r600_dpm.h void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p);
p                1089 drivers/gpu/drm/radeon/radeon.h static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
p                1091 drivers/gpu/drm/radeon/radeon.h 	struct radeon_cs_chunk *ibc = p->chunk_ib;
p                1095 drivers/gpu/drm/radeon/radeon.h 	return p->ib.ptr[idx];
p                1108 drivers/gpu/drm/radeon/radeon.h typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
p                1111 drivers/gpu/drm/radeon/radeon.h typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
p                1733 drivers/gpu/drm/radeon/radeon.h int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
p                1734 drivers/gpu/drm/radeon/radeon.h int radeon_vce_cs_parse(struct radeon_cs_parser *p);
p                1818 drivers/gpu/drm/radeon/radeon.h 	int (*cs_parse)(struct radeon_cs_parser *p);
p                2703 drivers/gpu/drm/radeon/radeon.h #define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)]->cs_parse((p))
p                2750 drivers/gpu/drm/radeon/radeon.h #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
p                2808 drivers/gpu/drm/radeon/radeon.h extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
p                2941 drivers/gpu/drm/radeon/radeon.h int radeon_cs_packet_parse(struct radeon_cs_parser *p,
p                2944 drivers/gpu/drm/radeon/radeon.h bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
p                2945 drivers/gpu/drm/radeon/radeon.h void radeon_cs_dump_packet(struct radeon_cs_parser *p,
p                2947 drivers/gpu/drm/radeon/radeon.h int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
p                2950 drivers/gpu/drm/radeon/radeon.h int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
p                 134 drivers/gpu/drm/radeon/radeon_agp.c 	struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list;
p                 187 drivers/gpu/drm/radeon/radeon_agp.c 	while (p && p->chip_device != 0) {
p                 188 drivers/gpu/drm/radeon/radeon_agp.c 		if (info.id_vendor == p->hostbridge_vendor &&
p                 189 drivers/gpu/drm/radeon/radeon_agp.c 		    info.id_device == p->hostbridge_device &&
p                 190 drivers/gpu/drm/radeon/radeon_agp.c 		    rdev->pdev->vendor == p->chip_vendor &&
p                 191 drivers/gpu/drm/radeon/radeon_agp.c 		    rdev->pdev->device == p->chip_device &&
p                 192 drivers/gpu/drm/radeon/radeon_agp.c 		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
p                 193 drivers/gpu/drm/radeon/radeon_agp.c 		    rdev->pdev->subsystem_device == p->subsys_device) {
p                 194 drivers/gpu/drm/radeon/radeon_agp.c 			default_mode = p->default_mode;
p                 196 drivers/gpu/drm/radeon/radeon_agp.c 		++p;
p                  82 drivers/gpu/drm/radeon/radeon_asic.h int r100_cs_parse(struct radeon_cs_parser *p);
p                 121 drivers/gpu/drm/radeon/radeon_asic.h int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
p                 124 drivers/gpu/drm/radeon/radeon_asic.h int r100_cs_parse_packet0(struct radeon_cs_parser *p,
p                 128 drivers/gpu/drm/radeon/radeon_asic.h int r100_cs_packet_parse(struct radeon_cs_parser *p,
p                 174 drivers/gpu/drm/radeon/radeon_asic.h extern int r300_cs_parse(struct radeon_cs_parser *p);
p                 320 drivers/gpu/drm/radeon/radeon_asic.h int r600_cs_parse(struct radeon_cs_parser *p);
p                 321 drivers/gpu/drm/radeon/radeon_asic.h int r600_dma_cs_parse(struct radeon_cs_parser *p);
p                 528 drivers/gpu/drm/radeon/radeon_asic.h extern int evergreen_cs_parse(struct radeon_cs_parser *p);
p                 529 drivers/gpu/drm/radeon/radeon_asic.h extern int evergreen_dma_cs_parse(struct radeon_cs_parser *p);
p                  81 drivers/gpu/drm/radeon/radeon_cs.c static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p                  89 drivers/gpu/drm/radeon/radeon_cs.c 	if (p->chunk_relocs == NULL) {
p                  92 drivers/gpu/drm/radeon/radeon_cs.c 	chunk = p->chunk_relocs;
p                  93 drivers/gpu/drm/radeon/radeon_cs.c 	p->dma_reloc_idx = 0;
p                  95 drivers/gpu/drm/radeon/radeon_cs.c 	p->nrelocs = chunk->length_dw / 4;
p                  96 drivers/gpu/drm/radeon/radeon_cs.c 	p->relocs = kvmalloc_array(p->nrelocs, sizeof(struct radeon_bo_list),
p                  98 drivers/gpu/drm/radeon/radeon_cs.c 	if (p->relocs == NULL) {
p                 104 drivers/gpu/drm/radeon/radeon_cs.c 	for (i = 0; i < p->nrelocs; i++) {
p                 110 drivers/gpu/drm/radeon/radeon_cs.c 		gobj = drm_gem_object_lookup(p->filp, r->handle);
p                 116 drivers/gpu/drm/radeon/radeon_cs.c 		p->relocs[i].robj = gem_to_radeon_bo(gobj);
p                 132 drivers/gpu/drm/radeon/radeon_cs.c 		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
p                 133 drivers/gpu/drm/radeon/radeon_cs.c 		    (i <= 0 || pci_find_capability(p->rdev->ddev->pdev,
p                 135 drivers/gpu/drm/radeon/radeon_cs.c 		     p->rdev->family == CHIP_RS780 ||
p                 136 drivers/gpu/drm/radeon/radeon_cs.c 		     p->rdev->family == CHIP_RS880)) {
p                 139 drivers/gpu/drm/radeon/radeon_cs.c 			p->relocs[i].preferred_domains =
p                 142 drivers/gpu/drm/radeon/radeon_cs.c 			p->relocs[i].allowed_domains =
p                 157 drivers/gpu/drm/radeon/radeon_cs.c 			p->relocs[i].preferred_domains = domain;
p                 160 drivers/gpu/drm/radeon/radeon_cs.c 			p->relocs[i].allowed_domains = domain;
p                 163 drivers/gpu/drm/radeon/radeon_cs.c 		if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
p                 164 drivers/gpu/drm/radeon/radeon_cs.c 			uint32_t domain = p->relocs[i].preferred_domains;
p                 172 drivers/gpu/drm/radeon/radeon_cs.c 			p->relocs[i].preferred_domains = domain;
p                 173 drivers/gpu/drm/radeon/radeon_cs.c 			p->relocs[i].allowed_domains = domain;
p                 177 drivers/gpu/drm/radeon/radeon_cs.c 		if (p->relocs[i].robj->prime_shared_count) {
p                 178 drivers/gpu/drm/radeon/radeon_cs.c 			p->relocs[i].allowed_domains &= ~RADEON_GEM_DOMAIN_VRAM;
p                 179 drivers/gpu/drm/radeon/radeon_cs.c 			if (!p->relocs[i].allowed_domains) {
p                 186 drivers/gpu/drm/radeon/radeon_cs.c 		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
p                 187 drivers/gpu/drm/radeon/radeon_cs.c 		p->relocs[i].tv.num_shared = !r->write_domain;
p                 189 drivers/gpu/drm/radeon/radeon_cs.c 		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
p                 193 drivers/gpu/drm/radeon/radeon_cs.c 	radeon_cs_buckets_get_list(&buckets, &p->validated);
p                 195 drivers/gpu/drm/radeon/radeon_cs.c 	if (p->cs_flags & RADEON_CS_USE_VM)
p                 196 drivers/gpu/drm/radeon/radeon_cs.c 		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
p                 197 drivers/gpu/drm/radeon/radeon_cs.c 					      &p->validated);
p                 201 drivers/gpu/drm/radeon/radeon_cs.c 	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
p                 209 drivers/gpu/drm/radeon/radeon_cs.c static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
p                 211 drivers/gpu/drm/radeon/radeon_cs.c 	p->priority = priority;
p                 218 drivers/gpu/drm/radeon/radeon_cs.c 		p->ring = RADEON_RING_TYPE_GFX_INDEX;
p                 221 drivers/gpu/drm/radeon/radeon_cs.c 		if (p->rdev->family >= CHIP_TAHITI) {
p                 222 drivers/gpu/drm/radeon/radeon_cs.c 			if (p->priority > 0)
p                 223 drivers/gpu/drm/radeon/radeon_cs.c 				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
p                 225 drivers/gpu/drm/radeon/radeon_cs.c 				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
p                 227 drivers/gpu/drm/radeon/radeon_cs.c 			p->ring = RADEON_RING_TYPE_GFX_INDEX;
p                 230 drivers/gpu/drm/radeon/radeon_cs.c 		if (p->rdev->family >= CHIP_CAYMAN) {
p                 231 drivers/gpu/drm/radeon/radeon_cs.c 			if (p->priority > 0)
p                 232 drivers/gpu/drm/radeon/radeon_cs.c 				p->ring = R600_RING_TYPE_DMA_INDEX;
p                 234 drivers/gpu/drm/radeon/radeon_cs.c 				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
p                 235 drivers/gpu/drm/radeon/radeon_cs.c 		} else if (p->rdev->family >= CHIP_RV770) {
p                 236 drivers/gpu/drm/radeon/radeon_cs.c 			p->ring = R600_RING_TYPE_DMA_INDEX;
p                 242 drivers/gpu/drm/radeon/radeon_cs.c 		p->ring = R600_RING_TYPE_UVD_INDEX;
p                 246 drivers/gpu/drm/radeon/radeon_cs.c 		p->ring = TN_RING_TYPE_VCE1_INDEX;
p                 252 drivers/gpu/drm/radeon/radeon_cs.c static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
p                 257 drivers/gpu/drm/radeon/radeon_cs.c 	list_for_each_entry(reloc, &p->validated, tv.head) {
p                 261 drivers/gpu/drm/radeon/radeon_cs.c 		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
p                 270 drivers/gpu/drm/radeon/radeon_cs.c int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p                 278 drivers/gpu/drm/radeon/radeon_cs.c 	INIT_LIST_HEAD(&p->validated);
p                 285 drivers/gpu/drm/radeon/radeon_cs.c 	p->idx = 0;
p                 286 drivers/gpu/drm/radeon/radeon_cs.c 	p->ib.sa_bo = NULL;
p                 287 drivers/gpu/drm/radeon/radeon_cs.c 	p->const_ib.sa_bo = NULL;
p                 288 drivers/gpu/drm/radeon/radeon_cs.c 	p->chunk_ib = NULL;
p                 289 drivers/gpu/drm/radeon/radeon_cs.c 	p->chunk_relocs = NULL;
p                 290 drivers/gpu/drm/radeon/radeon_cs.c 	p->chunk_flags = NULL;
p                 291 drivers/gpu/drm/radeon/radeon_cs.c 	p->chunk_const_ib = NULL;
p                 292 drivers/gpu/drm/radeon/radeon_cs.c 	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
p                 293 drivers/gpu/drm/radeon/radeon_cs.c 	if (p->chunks_array == NULL) {
p                 297 drivers/gpu/drm/radeon/radeon_cs.c 	if (copy_from_user(p->chunks_array, chunk_array_ptr,
p                 301 drivers/gpu/drm/radeon/radeon_cs.c 	p->cs_flags = 0;
p                 302 drivers/gpu/drm/radeon/radeon_cs.c 	p->nchunks = cs->num_chunks;
p                 303 drivers/gpu/drm/radeon/radeon_cs.c 	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
p                 304 drivers/gpu/drm/radeon/radeon_cs.c 	if (p->chunks == NULL) {
p                 307 drivers/gpu/drm/radeon/radeon_cs.c 	for (i = 0; i < p->nchunks; i++) {
p                 312 drivers/gpu/drm/radeon/radeon_cs.c 		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
p                 317 drivers/gpu/drm/radeon/radeon_cs.c 		p->chunks[i].length_dw = user_chunk.length_dw;
p                 319 drivers/gpu/drm/radeon/radeon_cs.c 			p->chunk_relocs = &p->chunks[i];
p                 322 drivers/gpu/drm/radeon/radeon_cs.c 			p->chunk_ib = &p->chunks[i];
p                 324 drivers/gpu/drm/radeon/radeon_cs.c 			if (p->chunks[i].length_dw == 0)
p                 328 drivers/gpu/drm/radeon/radeon_cs.c 			p->chunk_const_ib = &p->chunks[i];
p                 330 drivers/gpu/drm/radeon/radeon_cs.c 			if (p->chunks[i].length_dw == 0)
p                 334 drivers/gpu/drm/radeon/radeon_cs.c 			p->chunk_flags = &p->chunks[i];
p                 336 drivers/gpu/drm/radeon/radeon_cs.c 			if (p->chunks[i].length_dw == 0)
p                 340 drivers/gpu/drm/radeon/radeon_cs.c 		size = p->chunks[i].length_dw;
p                 342 drivers/gpu/drm/radeon/radeon_cs.c 		p->chunks[i].user_ptr = cdata;
p                 347 drivers/gpu/drm/radeon/radeon_cs.c 			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
p                 351 drivers/gpu/drm/radeon/radeon_cs.c 		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
p                 353 drivers/gpu/drm/radeon/radeon_cs.c 		if (p->chunks[i].kdata == NULL) {
p                 356 drivers/gpu/drm/radeon/radeon_cs.c 		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
p                 360 drivers/gpu/drm/radeon/radeon_cs.c 			p->cs_flags = p->chunks[i].kdata[0];
p                 361 drivers/gpu/drm/radeon/radeon_cs.c 			if (p->chunks[i].length_dw > 1)
p                 362 drivers/gpu/drm/radeon/radeon_cs.c 				ring = p->chunks[i].kdata[1];
p                 363 drivers/gpu/drm/radeon/radeon_cs.c 			if (p->chunks[i].length_dw > 2)
p                 364 drivers/gpu/drm/radeon/radeon_cs.c 				priority = (s32)p->chunks[i].kdata[2];
p                 369 drivers/gpu/drm/radeon/radeon_cs.c 	if (p->rdev) {
p                 370 drivers/gpu/drm/radeon/radeon_cs.c 		if ((p->cs_flags & RADEON_CS_USE_VM) &&
p                 371 drivers/gpu/drm/radeon/radeon_cs.c 		    !p->rdev->vm_manager.enabled) {
p                 376 drivers/gpu/drm/radeon/radeon_cs.c 		if (radeon_cs_get_ring(p, ring, priority))
p                 380 drivers/gpu/drm/radeon/radeon_cs.c 		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
p                 381 drivers/gpu/drm/radeon/radeon_cs.c 			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
p                 382 drivers/gpu/drm/radeon/radeon_cs.c 				DRM_ERROR("Ring %d requires VM!\n", p->ring);
p                 386 drivers/gpu/drm/radeon/radeon_cs.c 			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
p                 388 drivers/gpu/drm/radeon/radeon_cs.c 					  p->ring);
p                 497 drivers/gpu/drm/radeon/radeon_cs.c static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
p                 500 drivers/gpu/drm/radeon/radeon_cs.c 	struct radeon_device *rdev = p->rdev;
p                 522 drivers/gpu/drm/radeon/radeon_cs.c 	for (i = 0; i < p->nrelocs; i++) {
p                 525 drivers/gpu/drm/radeon/radeon_cs.c 		bo = p->relocs[i].robj;
p                 536 drivers/gpu/drm/radeon/radeon_cs.c 		radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
p                 732 drivers/gpu/drm/radeon/radeon_cs.c int radeon_cs_packet_parse(struct radeon_cs_parser *p,
p                 736 drivers/gpu/drm/radeon/radeon_cs.c 	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
p                 737 drivers/gpu/drm/radeon/radeon_cs.c 	struct radeon_device *rdev = p->rdev;
p                 746 drivers/gpu/drm/radeon/radeon_cs.c 	header = radeon_get_ib_value(p, idx);
p                 782 drivers/gpu/drm/radeon/radeon_cs.c 			printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
p                 784 drivers/gpu/drm/radeon/radeon_cs.c 			printk("\t0x%08x\n", radeon_get_ib_value(p, i));
p                 795 drivers/gpu/drm/radeon/radeon_cs.c bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
p                 800 drivers/gpu/drm/radeon/radeon_cs.c 	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
p                 817 drivers/gpu/drm/radeon/radeon_cs.c void radeon_cs_dump_packet(struct radeon_cs_parser *p,
p                 824 drivers/gpu/drm/radeon/radeon_cs.c 	ib = p->ib.ptr;
p                 841 drivers/gpu/drm/radeon/radeon_cs.c int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
p                 850 drivers/gpu/drm/radeon/radeon_cs.c 	if (p->chunk_relocs == NULL) {
p                 855 drivers/gpu/drm/radeon/radeon_cs.c 	relocs_chunk = p->chunk_relocs;
p                 856 drivers/gpu/drm/radeon/radeon_cs.c 	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
p                 859 drivers/gpu/drm/radeon/radeon_cs.c 	p->idx += p3reloc.count + 2;
p                 864 drivers/gpu/drm/radeon/radeon_cs.c 		radeon_cs_dump_packet(p, &p3reloc);
p                 867 drivers/gpu/drm/radeon/radeon_cs.c 	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
p                 871 drivers/gpu/drm/radeon/radeon_cs.c 		radeon_cs_dump_packet(p, &p3reloc);
p                 876 drivers/gpu/drm/radeon/radeon_cs.c 		*cs_reloc = p->relocs;
p                 881 drivers/gpu/drm/radeon/radeon_cs.c 		*cs_reloc = &p->relocs[(idx / 4)];
p                 168 drivers/gpu/drm/radeon/radeon_device.c 	struct radeon_px_quirk *p = radeon_px_quirk_list;
p                 171 drivers/gpu/drm/radeon/radeon_device.c 	while (p && p->chip_device != 0) {
p                 172 drivers/gpu/drm/radeon/radeon_device.c 		if (rdev->pdev->vendor == p->chip_vendor &&
p                 173 drivers/gpu/drm/radeon/radeon_device.c 		    rdev->pdev->device == p->chip_device &&
p                 174 drivers/gpu/drm/radeon/radeon_device.c 		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
p                 175 drivers/gpu/drm/radeon/radeon_device.c 		    rdev->pdev->subsystem_device == p->subsys_device) {
p                 176 drivers/gpu/drm/radeon/radeon_device.c 			rdev->px_quirk_flags = p->px_quirk_flags;
p                 179 drivers/gpu/drm/radeon/radeon_device.c 		++p;
p                 248 drivers/gpu/drm/radeon/radeon_gart.c 	unsigned p;
p                 256 drivers/gpu/drm/radeon/radeon_gart.c 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
p                 257 drivers/gpu/drm/radeon/radeon_gart.c 	for (i = 0; i < pages; i++, p++) {
p                 258 drivers/gpu/drm/radeon/radeon_gart.c 		if (rdev->gart.pages[p]) {
p                 259 drivers/gpu/drm/radeon/radeon_gart.c 			rdev->gart.pages[p] = NULL;
p                 294 drivers/gpu/drm/radeon/radeon_gart.c 	unsigned p;
p                 303 drivers/gpu/drm/radeon/radeon_gart.c 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
p                 305 drivers/gpu/drm/radeon/radeon_gart.c 	for (i = 0; i < pages; i++, p++) {
p                 306 drivers/gpu/drm/radeon/radeon_gart.c 		rdev->gart.pages[p] = pagelist[i];
p                 335 drivers/gpu/drm/radeon/radeon_i2c.c 	struct i2c_msg *p;
p                 467 drivers/gpu/drm/radeon/radeon_i2c.c 	p = &msgs[0];
p                 468 drivers/gpu/drm/radeon/radeon_i2c.c 	if ((num == 1) && (p->len == 0)) {
p                 473 drivers/gpu/drm/radeon/radeon_i2c.c 		WREG32(i2c_data, (p->addr << 1) & 0xff);
p                 499 drivers/gpu/drm/radeon/radeon_i2c.c 		p = &msgs[i];
p                 500 drivers/gpu/drm/radeon/radeon_i2c.c 		for (j = 0; j < p->len; j++) {
p                 501 drivers/gpu/drm/radeon/radeon_i2c.c 			if (p->flags & I2C_M_RD) {
p                 506 drivers/gpu/drm/radeon/radeon_i2c.c 				WREG32(i2c_data, ((p->addr << 1) & 0xff) | 0x1);
p                 527 drivers/gpu/drm/radeon/radeon_i2c.c 				p->buf[j] = RREG32(i2c_data) & 0xff;
p                 533 drivers/gpu/drm/radeon/radeon_i2c.c 				WREG32(i2c_data, (p->addr << 1) & 0xff);
p                 534 drivers/gpu/drm/radeon/radeon_i2c.c 				WREG32(i2c_data, p->buf[j]);
p                 588 drivers/gpu/drm/radeon/radeon_i2c.c 	struct i2c_msg *p;
p                 670 drivers/gpu/drm/radeon/radeon_i2c.c 	p = &msgs[0];
p                 671 drivers/gpu/drm/radeon/radeon_i2c.c 	if ((num == 1) && (p->len == 0)) {
p                 679 drivers/gpu/drm/radeon/radeon_i2c.c 		WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
p                 707 drivers/gpu/drm/radeon/radeon_i2c.c 		p = &msgs[i];
p                 708 drivers/gpu/drm/radeon/radeon_i2c.c 		remaining = p->len;
p                 710 drivers/gpu/drm/radeon/radeon_i2c.c 		if (p->flags & I2C_M_RD) {
p                 723 drivers/gpu/drm/radeon/radeon_i2c.c 				WREG32(AVIVO_DC_I2C_DATA, ((p->addr << 1) & 0xff) | 0x1);
p                 746 drivers/gpu/drm/radeon/radeon_i2c.c 					p->buf[buffer_offset + j] = RREG32(AVIVO_DC_I2C_DATA) & 0xff;
p                 763 drivers/gpu/drm/radeon/radeon_i2c.c 				WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
p                 765 drivers/gpu/drm/radeon/radeon_i2c.c 					WREG32(AVIVO_DC_I2C_DATA, p->buf[buffer_offset + j]);
p                 542 drivers/gpu/drm/radeon/radeon_legacy_tv.c 	uint32_t m, n, p;
p                 688 drivers/gpu/drm/radeon/radeon_legacy_tv.c 			p = NTSC_TV_PLL_P_27;
p                 692 drivers/gpu/drm/radeon/radeon_legacy_tv.c 			p = NTSC_TV_PLL_P_14;
p                 698 drivers/gpu/drm/radeon/radeon_legacy_tv.c 			p = PAL_TV_PLL_P_27;
p                 702 drivers/gpu/drm/radeon/radeon_legacy_tv.c 			p = PAL_TV_PLL_P_14;
p                 710 drivers/gpu/drm/radeon/radeon_legacy_tv.c 		((p & RADEON_TV_P_MASK) << RADEON_TV_P_SHIFT);
p                1450 drivers/gpu/drm/radeon/radeon_pm.c 	struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
p                1454 drivers/gpu/drm/radeon/radeon_pm.c 	while (p && p->chip_device != 0) {
p                1455 drivers/gpu/drm/radeon/radeon_pm.c 		if (rdev->pdev->vendor == p->chip_vendor &&
p                1456 drivers/gpu/drm/radeon/radeon_pm.c 		    rdev->pdev->device == p->chip_device &&
p                1457 drivers/gpu/drm/radeon/radeon_pm.c 		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
p                1458 drivers/gpu/drm/radeon/radeon_pm.c 		    rdev->pdev->subsystem_device == p->subsys_device) {
p                1462 drivers/gpu/drm/radeon/radeon_pm.c 		++p;
p                  31 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_PROTO(struct radeon_cs_parser *p),
p                  32 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_ARGS(p),
p                  40 drivers/gpu/drm/radeon/radeon_trace.h 			   __entry->ring = p->ring;
p                  41 drivers/gpu/drm/radeon/radeon_trace.h 			   __entry->dw = p->chunk_ib->length_dw;
p                  43 drivers/gpu/drm/radeon/radeon_trace.h 				p->rdev, p->ring);
p                 934 drivers/gpu/drm/radeon/radeon_ttm.c 	struct drm_printer p = drm_seq_file_printer(m);
p                 936 drivers/gpu/drm/radeon/radeon_ttm.c 	man->func->debug(man, &p);
p                1021 drivers/gpu/drm/radeon/radeon_ttm.c 		loff_t p = *pos / PAGE_SIZE;
p                1027 drivers/gpu/drm/radeon/radeon_ttm.c 		if (p >= rdev->gart.num_cpu_pages)
p                1030 drivers/gpu/drm/radeon/radeon_ttm.c 		page = rdev->gart.pages[p];
p                1036 drivers/gpu/drm/radeon/radeon_ttm.c 			kunmap(rdev->gart.pages[p]);
p                 442 drivers/gpu/drm/radeon/radeon_uvd.c static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
p                 454 drivers/gpu/drm/radeon/radeon_uvd.c 		if (p->rdev->family >= CHIP_PALM)
p                 465 drivers/gpu/drm/radeon/radeon_uvd.c static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
p                 510 drivers/gpu/drm/radeon/radeon_uvd.c 		r = radeon_uvd_validate_codec(p, msg[4]);
p                 516 drivers/gpu/drm/radeon/radeon_uvd.c 		for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
p                 517 drivers/gpu/drm/radeon/radeon_uvd.c 			if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
p                 522 drivers/gpu/drm/radeon/radeon_uvd.c 			if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
p                 523 drivers/gpu/drm/radeon/radeon_uvd.c 				p->rdev->uvd.filp[i] = p->filp;
p                 524 drivers/gpu/drm/radeon/radeon_uvd.c 				p->rdev->uvd.img_size[i] = img_size;
p                 534 drivers/gpu/drm/radeon/radeon_uvd.c 		r = radeon_uvd_validate_codec(p, msg[4]);
p                 542 drivers/gpu/drm/radeon/radeon_uvd.c 		for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
p                 543 drivers/gpu/drm/radeon/radeon_uvd.c 			if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
p                 544 drivers/gpu/drm/radeon/radeon_uvd.c 				if (p->rdev->uvd.filp[i] != p->filp) {
p                 557 drivers/gpu/drm/radeon/radeon_uvd.c 		for (i = 0; i < p->rdev->uvd.max_handles; ++i)
p                 558 drivers/gpu/drm/radeon/radeon_uvd.c 			atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
p                 572 drivers/gpu/drm/radeon/radeon_uvd.c static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
p                 582 drivers/gpu/drm/radeon/radeon_uvd.c 	relocs_chunk = p->chunk_relocs;
p                 583 drivers/gpu/drm/radeon/radeon_uvd.c 	offset = radeon_get_ib_value(p, data0);
p                 584 drivers/gpu/drm/radeon/radeon_uvd.c 	idx = radeon_get_ib_value(p, data1);
p                 591 drivers/gpu/drm/radeon/radeon_uvd.c 	reloc = &p->relocs[(idx / 4)];
p                 596 drivers/gpu/drm/radeon/radeon_uvd.c 	p->ib.ptr[data0] = start & 0xFFFFFFFF;
p                 597 drivers/gpu/drm/radeon/radeon_uvd.c 	p->ib.ptr[data1] = start >> 32;
p                 599 drivers/gpu/drm/radeon/radeon_uvd.c 	cmd = radeon_get_ib_value(p, p->idx) >> 1;
p                 625 drivers/gpu/drm/radeon/radeon_uvd.c 	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
p                 637 drivers/gpu/drm/radeon/radeon_uvd.c 		r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
p                 648 drivers/gpu/drm/radeon/radeon_uvd.c static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
p                 656 drivers/gpu/drm/radeon/radeon_uvd.c 	p->idx++;
p                 660 drivers/gpu/drm/radeon/radeon_uvd.c 			*data0 = p->idx;
p                 663 drivers/gpu/drm/radeon/radeon_uvd.c 			*data1 = p->idx;
p                 666 drivers/gpu/drm/radeon/radeon_uvd.c 			r = radeon_uvd_cs_reloc(p, *data0, *data1,
p                 679 drivers/gpu/drm/radeon/radeon_uvd.c 		p->idx++;
p                 684 drivers/gpu/drm/radeon/radeon_uvd.c int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
p                 700 drivers/gpu/drm/radeon/radeon_uvd.c 	if (p->chunk_ib->length_dw % 16) {
p                 702 drivers/gpu/drm/radeon/radeon_uvd.c 			  p->chunk_ib->length_dw);
p                 706 drivers/gpu/drm/radeon/radeon_uvd.c 	if (p->chunk_relocs == NULL) {
p                 713 drivers/gpu/drm/radeon/radeon_uvd.c 		r = radeon_cs_packet_parse(p, &pkt, p->idx);
p                 718 drivers/gpu/drm/radeon/radeon_uvd.c 			r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
p                 724 drivers/gpu/drm/radeon/radeon_uvd.c 			p->idx += pkt.count + 2;
p                 730 drivers/gpu/drm/radeon/radeon_uvd.c 	} while (p->idx < p->chunk_ib->length_dw);
p                 470 drivers/gpu/drm/radeon/radeon_vce.c int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
p                 478 drivers/gpu/drm/radeon/radeon_vce.c 	relocs_chunk = p->chunk_relocs;
p                 479 drivers/gpu/drm/radeon/radeon_vce.c 	offset = radeon_get_ib_value(p, lo);
p                 480 drivers/gpu/drm/radeon/radeon_vce.c 	idx = radeon_get_ib_value(p, hi);
p                 488 drivers/gpu/drm/radeon/radeon_vce.c 	reloc = &p->relocs[(idx / 4)];
p                 493 drivers/gpu/drm/radeon/radeon_vce.c 	p->ib.ptr[lo] = start & 0xFFFFFFFF;
p                 494 drivers/gpu/drm/radeon/radeon_vce.c 	p->ib.ptr[hi] = start >> 32;
p                 519 drivers/gpu/drm/radeon/radeon_vce.c static int radeon_vce_validate_handle(struct radeon_cs_parser *p,
p                 528 drivers/gpu/drm/radeon/radeon_vce.c 		if (atomic_read(&p->rdev->vce.handles[i]) == handle) {
p                 529 drivers/gpu/drm/radeon/radeon_vce.c 			if (p->rdev->vce.filp[i] != p->filp) {
p                 539 drivers/gpu/drm/radeon/radeon_vce.c 		if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
p                 540 drivers/gpu/drm/radeon/radeon_vce.c 			p->rdev->vce.filp[i] = p->filp;
p                 541 drivers/gpu/drm/radeon/radeon_vce.c 			p->rdev->vce.img_size[i] = 0;
p                 557 drivers/gpu/drm/radeon/radeon_vce.c int radeon_vce_cs_parse(struct radeon_cs_parser *p)
p                 565 drivers/gpu/drm/radeon/radeon_vce.c 	while (p->idx < p->chunk_ib->length_dw) {
p                 566 drivers/gpu/drm/radeon/radeon_vce.c 		uint32_t len = radeon_get_ib_value(p, p->idx);
p                 567 drivers/gpu/drm/radeon/radeon_vce.c 		uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);
p                 583 drivers/gpu/drm/radeon/radeon_vce.c 			handle = radeon_get_ib_value(p, p->idx + 2);
p                 584 drivers/gpu/drm/radeon/radeon_vce.c 			session_idx = radeon_vce_validate_handle(p, handle,
p                 588 drivers/gpu/drm/radeon/radeon_vce.c 			size = &p->rdev->vce.img_size[session_idx];
p                 602 drivers/gpu/drm/radeon/radeon_vce.c 			*size = radeon_get_ib_value(p, p->idx + 8) *
p                 603 drivers/gpu/drm/radeon/radeon_vce.c 				radeon_get_ib_value(p, p->idx + 10) *
p                 616 drivers/gpu/drm/radeon/radeon_vce.c 			r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
p                 621 drivers/gpu/drm/radeon/radeon_vce.c 			r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
p                 632 drivers/gpu/drm/radeon/radeon_vce.c 			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
p                 639 drivers/gpu/drm/radeon/radeon_vce.c 			tmp = radeon_get_ib_value(p, p->idx + 4);
p                 640 drivers/gpu/drm/radeon/radeon_vce.c 			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
p                 647 drivers/gpu/drm/radeon/radeon_vce.c 			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
p                 665 drivers/gpu/drm/radeon/radeon_vce.c 		p->idx += len / 4;
p                 680 drivers/gpu/drm/radeon/radeon_vce.c 			atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
p                 122 drivers/gpu/drm/radeon/sumo_dpm.c 	u32 p, u;
p                 126 drivers/gpu/drm/radeon/sumo_dpm.c 			       xclk, 16, &p, &u);
p                 128 drivers/gpu/drm/radeon/sumo_dpm.c 	WREG32_P(CG_GIT, CG_GICST(p), ~CG_GICST_MASK);
p                 133 drivers/gpu/drm/radeon/sumo_dpm.c 	u32 p, u;
p                 137 drivers/gpu/drm/radeon/sumo_dpm.c 	r600_calculate_u_and_p(1, xclk, 14, &p, &u);
p                 139 drivers/gpu/drm/radeon/sumo_dpm.c 	WREG32(CG_GCOOR, PHC(grs) | SDC(p) | SU(u));
p                 151 drivers/gpu/drm/radeon/sumo_dpm.c 	u32 p, u;
p                 173 drivers/gpu/drm/radeon/sumo_dpm.c 			       xclk, 16, &p, &u);
p                 175 drivers/gpu/drm/radeon/sumo_dpm.c 	WREG32_P(CG_PWR_GATING_CNTL, PGP(p) | PGU(u),
p                 179 drivers/gpu/drm/radeon/sumo_dpm.c 			       xclk, 16, &p, &u);
p                 181 drivers/gpu/drm/radeon/sumo_dpm.c 	WREG32_P(CG_CG_VOLTAGE_CNTL, PGP(p) | PGU(u),
p                 464 drivers/gpu/drm/radeon/sumo_dpm.c 	u32 p, u;
p                 468 drivers/gpu/drm/radeon/sumo_dpm.c 			       xclk, 16, &p, &u);
p                 470 drivers/gpu/drm/radeon/sumo_dpm.c 	WREG32(CG_SSP, SSTU(u) | SST(p));
p                 928 drivers/gpu/drm/radeon/sumo_dpm.c 	u32 p, u;
p                 932 drivers/gpu/drm/radeon/sumo_dpm.c 			       xclk, 16, &p, &u);
p                 935 drivers/gpu/drm/radeon/sumo_dpm.c 	cg_sclk_dpm_ctrl_5 |= TT_TP(p) | TT_TU(u);
p                 972 drivers/gpu/drm/radeon/sumo_dpm.c 	u32 p, u;
p                 976 drivers/gpu/drm/radeon/sumo_dpm.c 			       xclk, 14, &p, &u);
p                 979 drivers/gpu/drm/radeon/sumo_dpm.c 	cg_sclk_dpm_ctrl_4 |= DC_HDC(p) | DC_HU(u);
p                 366 drivers/gpu/drm/radeon/trinity_dpm.c 	u32 p, u;
p                 386 drivers/gpu/drm/radeon/trinity_dpm.c 	r600_calculate_u_and_p(500, xclk, 16, &p, &u);
p                 388 drivers/gpu/drm/radeon/trinity_dpm.c 	WREG32(CG_PG_CTRL, SP(p) | SU(u));
p                 390 drivers/gpu/drm/radeon/trinity_dpm.c 	WREG32_P(CG_GIPOTS, CG_GIPOT(p), ~CG_GIPOT_MASK);
p                 885 drivers/gpu/drm/radeon/trinity_dpm.c 	u32 p, u;
p                 890 drivers/gpu/drm/radeon/trinity_dpm.c 	r600_calculate_u_and_p(interval, xclk, 16, &p, &u);
p                 892 drivers/gpu/drm/radeon/trinity_dpm.c 	val = (p + tp - 1) / tp;
p                1028 drivers/gpu/drm/radeon/trinity_dpm.c 	u32 p, u;
p                1034 drivers/gpu/drm/radeon/trinity_dpm.c 	r600_calculate_u_and_p(400, xclk, 16, &p, &u);
p                1036 drivers/gpu/drm/radeon/trinity_dpm.c 	ni = (p + tp - 1) / tp;
p                 105 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DPPR_DPS(n, p)		(((p)-1) << DPPR_DPS_SHIFT(n))
p                  34 drivers/gpu/drm/rcar-du/rcar_du_vsp.h static inline struct rcar_du_vsp_plane *to_rcar_vsp_plane(struct drm_plane *p)
p                  36 drivers/gpu/drm/rcar-du/rcar_du_vsp.h 	return container_of(p, struct rcar_du_vsp_plane, plane);
p                 329 drivers/gpu/drm/rockchip/rockchip_drm_drv.c 		struct device *p = NULL, *d;
p                 332 drivers/gpu/drm/rockchip/rockchip_drm_drv.c 			d = platform_find_device_by_driver(p, &drv->driver);
p                 333 drivers/gpu/drm/rockchip/rockchip_drm_drv.c 			put_device(p);
p                 334 drivers/gpu/drm/rockchip/rockchip_drm_drv.c 			p = d;
p                 223 drivers/gpu/drm/rockchip/rockchip_lvds.c 			pinctrl_select_state(lvds->pins->p,
p                 524 drivers/gpu/drm/rockchip/rockchip_lvds.c 	lvds->pins->p = devm_pinctrl_get(lvds->dev);
p                 525 drivers/gpu/drm/rockchip/rockchip_lvds.c 	if (IS_ERR(lvds->pins->p)) {
p                 531 drivers/gpu/drm/rockchip/rockchip_lvds.c 			pinctrl_lookup_state(lvds->pins->p, "lcdc");
p                  37 drivers/gpu/drm/shmobile/shmob_drm_plane.c #define to_shmob_plane(p)	container_of(p, struct shmob_drm_plane, plane)
p                 141 drivers/gpu/drm/sti/sti_crtc.c 	struct drm_plane *p;
p                 148 drivers/gpu/drm/sti/sti_crtc.c 	list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
p                 149 drivers/gpu/drm/sti/sti_crtc.c 		struct sti_plane *plane = to_sti_plane(p);
p                 154 drivers/gpu/drm/sti/sti_crtc.c 			if (p->state->crtc != crtc)
p                 175 drivers/gpu/drm/sti/sti_crtc.c 				sti_vid_commit(compo->vid[0], p->state);
p                 264 drivers/gpu/drm/sti/sti_crtc.c 		struct drm_plane *p;
p                 268 drivers/gpu/drm/sti/sti_crtc.c 		list_for_each_entry(p, &crtc->dev->mode_config.plane_list,
p                 270 drivers/gpu/drm/sti/sti_crtc.c 			struct sti_plane *plane = to_sti_plane(p);
p                  40 drivers/gpu/drm/sti/sti_drv.c 	struct drm_plane *p;
p                  44 drivers/gpu/drm/sti/sti_drv.c 	list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
p                  45 drivers/gpu/drm/sti/sti_drv.c 		struct sti_plane *plane = to_sti_plane(p);
p                  57 drivers/gpu/drm/sti/sti_drv.c 	struct drm_plane *p;
p                  60 drivers/gpu/drm/sti/sti_drv.c 	list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
p                  61 drivers/gpu/drm/sti/sti_drv.c 		struct sti_plane *plane = to_sti_plane(p);
p                  79 drivers/gpu/drm/sti/sti_drv.c 	struct drm_plane *p;
p                  81 drivers/gpu/drm/sti/sti_drv.c 	list_for_each_entry(p, &dev->mode_config.plane_list, head) {
p                  82 drivers/gpu/drm/sti/sti_drv.c 		struct sti_plane *plane = to_sti_plane(p);
p                 885 drivers/gpu/drm/stm/ltdc.c static void ltdc_plane_atomic_print_state(struct drm_printer *p,
p                 897 drivers/gpu/drm/stm/ltdc.c 	drm_printf(p, "\tuser_updates=%dfps\n",
p                 163 drivers/gpu/drm/sun4i/sun4i_backend.h #define SUN4I_BACKEND_PIPE_OFF(p)		(0x5000 + (0x400 * (p)))
p                  74 drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c 	int i, j, p;
p                  82 drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c 	for (p = 0; p < clk_hw_get_num_parents(hw); p++) {
p                  83 drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c 		parent = clk_hw_get_parent_by_index(hw, p);
p                  28 drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c 	int i, p;
p                  30 drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c 	for (p = 0; p < clk_hw_get_num_parents(hw); p++) {
p                  31 drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c 		parent = clk_hw_get_parent_by_index(hw, p);
p                 671 drivers/gpu/drm/tegra/dc.c 	struct tegra_plane *p = to_tegra_plane(plane);
p                 678 drivers/gpu/drm/tegra/dc.c 	value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
p                 680 drivers/gpu/drm/tegra/dc.c 	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
p                 688 drivers/gpu/drm/tegra/dc.c 	struct tegra_plane *p = to_tegra_plane(plane);
p                 731 drivers/gpu/drm/tegra/dc.c 	tegra_dc_setup_window(p, &window);
p                 555 drivers/gpu/drm/tegra/dc.h #define DC_WIN_H_FILTER_P(p)			(0x601 + (p))
p                 556 drivers/gpu/drm/tegra/dc.h #define DC_WIN_V_FILTER_P(p)			(0x619 + (p))
p                 941 drivers/gpu/drm/tegra/drm.c static int tegra_drm_context_cleanup(int id, void *p, void *data)
p                 943 drivers/gpu/drm/tegra/drm.c 	struct tegra_drm_context *context = p;
p                 990 drivers/gpu/drm/tegra/drm.c 	struct drm_printer p = drm_seq_file_printer(s);
p                 994 drivers/gpu/drm/tegra/drm.c 		drm_mm_print(&tegra->mm, &p);
p                 379 drivers/gpu/drm/tegra/hub.c 	struct tegra_plane *p = to_tegra_plane(plane);
p                 394 drivers/gpu/drm/tegra/hub.c 	if (WARN_ON(p->dc == NULL))
p                 395 drivers/gpu/drm/tegra/hub.c 		p->dc = dc;
p                 399 drivers/gpu/drm/tegra/hub.c 	value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
p                 401 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
p                 403 drivers/gpu/drm/tegra/hub.c 	tegra_dc_remove_shared_plane(dc, p);
p                 415 drivers/gpu/drm/tegra/hub.c 	struct tegra_plane *p = to_tegra_plane(plane);
p                 431 drivers/gpu/drm/tegra/hub.c 	tegra_dc_assign_shared_plane(dc, p);
p                 433 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);
p                 439 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);
p                 444 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);
p                 447 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);
p                 451 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);
p                 454 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);
p                 457 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
p                 462 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, state->format, DC_WIN_COLOR_DEPTH);
p                 463 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
p                 467 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, value, DC_WIN_POSITION);
p                 470 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, value, DC_WIN_SIZE);
p                 473 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
p                 476 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);
p                 478 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
p                 479 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);
p                 482 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);
p                 485 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);
p                 489 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);
p                 512 drivers/gpu/drm/tegra/hub.c 		tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
p                 516 drivers/gpu/drm/tegra/hub.c 	value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
p                 518 drivers/gpu/drm/tegra/hub.c 	tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);
p                 542 drivers/gpu/drm/tegra/hub.c 	struct drm_plane *p;
p                 556 drivers/gpu/drm/tegra/hub.c 	p = &plane->base.base;
p                 562 drivers/gpu/drm/tegra/hub.c 	err = drm_universal_plane_init(drm, p, possible_crtcs,
p                 570 drivers/gpu/drm/tegra/hub.c 	drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
p                 571 drivers/gpu/drm/tegra/hub.c 	drm_plane_create_zpos_property(p, 0, 0, 255);
p                 573 drivers/gpu/drm/tegra/hub.c 	return p;
p                  16 drivers/gpu/drm/tegra/plane.c 	struct tegra_plane *p = to_tegra_plane(plane);
p                  19 drivers/gpu/drm/tegra/plane.c 	kfree(p);
p                  24 drivers/gpu/drm/tegra/plane.c 	struct tegra_plane *p = to_tegra_plane(plane);
p                  37 drivers/gpu/drm/tegra/plane.c 		plane->state->zpos = p->index;
p                  38 drivers/gpu/drm/tegra/plane.c 		plane->state->normalized_zpos = p->index;
p                 351 drivers/gpu/drm/tegra/plane.c 		struct tegra_plane *p = to_tegra_plane(plane);
p                 354 drivers/gpu/drm/tegra/plane.c 		if (p == tegra || p->dc != tegra->dc)
p                 394 drivers/gpu/drm/tegra/plane.c 		struct tegra_plane *p = to_tegra_plane(plane);
p                 398 drivers/gpu/drm/tegra/plane.c 		if (p == tegra || p->dc != tegra->dc)
p                 401 drivers/gpu/drm/tegra/plane.c 		index = tegra_plane_get_overlap_index(tegra, p);
p                 446 drivers/gpu/drm/tegra/plane.c 		struct tegra_plane *p = to_tegra_plane(plane);
p                 449 drivers/gpu/drm/tegra/plane.c 		if (p->dc != tegra->dc)
p                 460 drivers/gpu/drm/tegra/plane.c 			tegra_plane_update_transparency(p, tegra_state);
p                 478 drivers/gpu/drm/tilcdc/tilcdc_drv.c 	struct drm_printer p = drm_seq_file_printer(m);
p                 479 drivers/gpu/drm/tilcdc/tilcdc_drv.c 	drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
p                 343 drivers/gpu/drm/tiny/repaper.c 	u8 *p = epd->line_buffer;
p                 349 drivers/gpu/drm/tiny/repaper.c 		*p++ = 0x00;
p                 353 drivers/gpu/drm/tiny/repaper.c 		repaper_odd_pixels(epd, &p, data, fixed_value, mask, stage);
p                 358 drivers/gpu/drm/tiny/repaper.c 				*p++ = 0x03 << (2 * (line & 0x03));
p                 360 drivers/gpu/drm/tiny/repaper.c 				*p++ = 0x00;
p                 364 drivers/gpu/drm/tiny/repaper.c 		repaper_even_pixels(epd, &p, data, fixed_value, mask, stage);
p                 372 drivers/gpu/drm/tiny/repaper.c 				*p++ = 0xc0 >> (line & 0x06);
p                 374 drivers/gpu/drm/tiny/repaper.c 				*p++ = 0x00;
p                 378 drivers/gpu/drm/tiny/repaper.c 		repaper_all_pixels(epd, &p, data, fixed_value, mask, stage);
p                 386 drivers/gpu/drm/tiny/repaper.c 				*p++ = 0x03 << (line & 0x06);
p                 388 drivers/gpu/drm/tiny/repaper.c 				*p++ = 0x00;
p                 397 drivers/gpu/drm/tiny/repaper.c 		*p++ = 0x00;
p                 405 drivers/gpu/drm/tiny/repaper.c 			*p++ = 0x00;
p                 408 drivers/gpu/drm/tiny/repaper.c 			*p++ = 0xaa;
p                 415 drivers/gpu/drm/tiny/repaper.c 			  p - epd->line_buffer);
p                  79 drivers/gpu/drm/ttm/ttm_bo.c static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p,
p                  84 drivers/gpu/drm/ttm/ttm_bo.c 	drm_printf(p, "    has_type: %d\n", man->has_type);
p                  85 drivers/gpu/drm/ttm/ttm_bo.c 	drm_printf(p, "    use_type: %d\n", man->use_type);
p                  86 drivers/gpu/drm/ttm/ttm_bo.c 	drm_printf(p, "    flags: 0x%08X\n", man->flags);
p                  87 drivers/gpu/drm/ttm/ttm_bo.c 	drm_printf(p, "    gpu_offset: 0x%08llX\n", man->gpu_offset);
p                  88 drivers/gpu/drm/ttm/ttm_bo.c 	drm_printf(p, "    size: %llu\n", man->size);
p                  89 drivers/gpu/drm/ttm/ttm_bo.c 	drm_printf(p, "    available_caching: 0x%08X\n", man->available_caching);
p                  90 drivers/gpu/drm/ttm/ttm_bo.c 	drm_printf(p, "    default_caching: 0x%08X\n", man->default_caching);
p                  92 drivers/gpu/drm/ttm/ttm_bo.c 		(*man->func->debug)(man, p);
p                  98 drivers/gpu/drm/ttm/ttm_bo.c 	struct drm_printer p = drm_debug_printer(TTM_PFX);
p                 101 drivers/gpu/drm/ttm/ttm_bo.c 	drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
p                 109 drivers/gpu/drm/ttm/ttm_bo.c 		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
p                 111 drivers/gpu/drm/ttm/ttm_bo.c 		ttm_mem_type_debug(bo->bdev, &p, mem_type);
p                 288 drivers/gpu/drm/ttm/ttm_page_alloc.c 	struct page *p;
p                 310 drivers/gpu/drm/ttm/ttm_page_alloc.c 	list_for_each_entry_reverse(p, &pool->list, lru) {
p                 314 drivers/gpu/drm/ttm/ttm_page_alloc.c 		pages_to_free[freed_pages++] = p;
p                 318 drivers/gpu/drm/ttm/ttm_page_alloc.c 			__list_del(p->lru.prev, &pool->list);
p                 353 drivers/gpu/drm/ttm/ttm_page_alloc.c 		__list_del(&p->lru, &pool->list);
p                 492 drivers/gpu/drm/ttm/ttm_page_alloc.c 	struct page *p;
p                 508 drivers/gpu/drm/ttm/ttm_page_alloc.c 		p = alloc_pages(gfp_flags, order);
p                 510 drivers/gpu/drm/ttm/ttm_page_alloc.c 		if (!p) {
p                 527 drivers/gpu/drm/ttm/ttm_page_alloc.c 		list_add(&p->lru, pages);
p                 533 drivers/gpu/drm/ttm/ttm_page_alloc.c 		if (PageHighMem(p))
p                 538 drivers/gpu/drm/ttm/ttm_page_alloc.c 			caching_array[cpages++] = p++;
p                 575 drivers/gpu/drm/ttm/ttm_page_alloc.c 	struct page *p;
p                 613 drivers/gpu/drm/ttm/ttm_page_alloc.c 			list_for_each_entry(p, &new_pages, lru) {
p                 636 drivers/gpu/drm/ttm/ttm_page_alloc.c 	struct list_head *p;
p                 656 drivers/gpu/drm/ttm/ttm_page_alloc.c 		list_for_each(p, &pool->list) {
p                 662 drivers/gpu/drm/ttm/ttm_page_alloc.c 		list_for_each_prev(p, &pool->list) {
p                 668 drivers/gpu/drm/ttm/ttm_page_alloc.c 	list_cut_position(pages, &pool->list, p);
p                 723 drivers/gpu/drm/ttm/ttm_page_alloc.c 			struct page *p = pages[i];
p                 736 drivers/gpu/drm/ttm/ttm_page_alloc.c 					if (++p != pages[i + j])
p                 764 drivers/gpu/drm/ttm/ttm_page_alloc.c 			struct page *p = pages[i];
p                 767 drivers/gpu/drm/ttm/ttm_page_alloc.c 			if (!p)
p                 771 drivers/gpu/drm/ttm/ttm_page_alloc.c 				if (++p != pages[i + j])
p                 834 drivers/gpu/drm/ttm/ttm_page_alloc.c 	struct page *p = NULL;
p                 868 drivers/gpu/drm/ttm/ttm_page_alloc.c 				p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
p                 869 drivers/gpu/drm/ttm/ttm_page_alloc.c 				if (!p)
p                 873 drivers/gpu/drm/ttm/ttm_page_alloc.c 					pages[i++] = p++;
p                 882 drivers/gpu/drm/ttm/ttm_page_alloc.c 			p = alloc_page(gfp_flags);
p                 883 drivers/gpu/drm/ttm/ttm_page_alloc.c 			if (!p) {
p                 889 drivers/gpu/drm/ttm/ttm_page_alloc.c 			if (i > first && pages[i - 1] == p - 1)
p                 890 drivers/gpu/drm/ttm/ttm_page_alloc.c 				swap(p, pages[i - 1]);
p                 892 drivers/gpu/drm/ttm/ttm_page_alloc.c 			pages[i++] = p;
p                 907 drivers/gpu/drm/ttm/ttm_page_alloc.c 		list_for_each_entry(p, &plist, lru) {
p                 911 drivers/gpu/drm/ttm/ttm_page_alloc.c 				pages[count++] = &p[j];
p                 921 drivers/gpu/drm/ttm/ttm_page_alloc.c 	list_for_each_entry(p, &plist, lru) {
p                 922 drivers/gpu/drm/ttm/ttm_page_alloc.c 		struct page *tmp = p;
p                1108 drivers/gpu/drm/ttm/ttm_page_alloc.c 		struct page *p = tt->ttm.pages[i];
p                1112 drivers/gpu/drm/ttm/ttm_page_alloc.c 			if (++p != tt->ttm.pages[j])
p                1145 drivers/gpu/drm/ttm/ttm_page_alloc.c 		struct page *p = tt->ttm.pages[i];
p                1154 drivers/gpu/drm/ttm/ttm_page_alloc.c 			if (++p != tt->ttm.pages[j])
p                1171 drivers/gpu/drm/ttm/ttm_page_alloc.c 	struct ttm_page_pool *p;
p                1181 drivers/gpu/drm/ttm/ttm_page_alloc.c 		p = &_manager->pools[i];
p                1184 drivers/gpu/drm/ttm/ttm_page_alloc.c 				p->name, p->nrefills,
p                1185 drivers/gpu/drm/ttm/ttm_page_alloc.c 				p->nfrees, p->npages);
p                 125 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct page *p;
p                 316 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			d_page->p = vmalloc_to_page(vaddr);
p                 318 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			d_page->p = virt_to_page(vaddr);
p                 355 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct page *page = d_page->p;
p                 443 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		pages_to_free[freed_pages++] = dma_p->p;
p                 500 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct device_pools *p;
p                 507 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	list_for_each_entry_reverse(p, &_manager->pools, pools) {
p                 508 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		if (p->dev != dev)
p                 510 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		pool = p->pool;
p                 514 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		list_del(&p->pools);
p                 515 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		kfree(p);
p                 563 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	char *p;
p                 605 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	p = pool->name;
p                 608 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
p                 612 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	*p = 0;
p                 672 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct page *p;
p                 675 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	p = failed_pages[0];
p                 676 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	if (!p)
p                 680 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		if (d_page->p != p)
p                 686 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			p = failed_pages[i];
p                 705 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct page *p;
p                 744 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		p = dma_p->p;
p                 751 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		if (PageHighMem(p))
p                 757 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			caching_array[cpages++] = p + j;
p                 847 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		ttm->pages[index] = d_page->p;
p                1016 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 				ttm_mem_global_free_page(mem_glob, d_page->p,
p                1041 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		ttm->pages[count] = d_page->p;
p                1045 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			ttm_mem_global_free_page(mem_glob, d_page->p,
p                1099 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct device_pools *p;
p                1110 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	list_for_each_entry(p, &_manager->pools, pools) {
p                1113 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		if (!p->dev)
p                1122 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
p                1126 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			 p->pool->dev_name, p->pool->name, current->pid,
p                1137 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct device_pools *p;
p                1142 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	list_for_each_entry(p, &_manager->pools, pools)
p                1143 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		count += p->pool->npages_free;
p                1199 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct device_pools *p, *t;
p                1204 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
p                1205 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
p                1207 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
p                1208 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			ttm_dma_pool_match, p->pool));
p                1209 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		ttm_dma_free_pool(p->dev, p->pool->type);
p                1217 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct device_pools *p;
p                1226 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	list_for_each_entry(p, &_manager->pools, pools) {
p                1227 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		struct device *dev = p->dev;
p                1230 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		pool = p->pool;
p                 116 drivers/gpu/drm/ttm/ttm_tt.c static int ttm_tt_set_page_caching(struct page *p,
p                 122 drivers/gpu/drm/ttm/ttm_tt.c 	if (PageHighMem(p))
p                 129 drivers/gpu/drm/ttm/ttm_tt.c 		ret = ttm_set_pages_wb(p, 1);
p                 135 drivers/gpu/drm/ttm/ttm_tt.c 		ret = ttm_set_pages_wc(p, 1);
p                 137 drivers/gpu/drm/ttm/ttm_tt.c 		ret = ttm_set_pages_uc(p, 1);
p                  19 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	struct hgsmi_buffer_location *p;
p                  21 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_HGSMI,
p                  23 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	if (!p)
p                  26 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p->buf_location = location;
p                  27 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p->buf_len = sizeof(struct hgsmi_host_flags);
p                  29 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	hgsmi_buffer_submit(ctx, p);
p                  30 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	hgsmi_buffer_free(ctx, p);
p                  43 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	struct vbva_caps *p;
p                  45 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_INFO_CAPS);
p                  46 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	if (!p)
p                  49 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p->rc = VERR_NOT_IMPLEMENTED;
p                  50 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p->caps = caps;
p                  52 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	hgsmi_buffer_submit(ctx, p);
p                  54 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	WARN_ON_ONCE(p->rc < 0);
p                  56 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	hgsmi_buffer_free(ctx, p);
p                  82 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	struct vbva_conf32 *p;
p                  84 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
p                  86 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	if (!p)
p                  89 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p->index = index;
p                  90 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p->value = U32_MAX;
p                  92 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	hgsmi_buffer_submit(ctx, p);
p                  94 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	*value_ret = p->value;
p                  96 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	hgsmi_buffer_free(ctx, p);
p                 117 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	struct vbva_mouse_pointer_shape *p;
p                 138 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA,
p                 140 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	if (!p)
p                 143 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p->result = VINF_SUCCESS;
p                 144 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p->flags = flags;
p                 145 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p->hot_X = hot_x;
p                 146 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p->hot_y = hot_y;
p                 147 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p->width = width;
p                 148 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p->height = height;
p                 150 drivers/gpu/drm/vboxvideo/hgsmi_base.c 		memcpy(p->data, pixels, pixel_len);
p                 152 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	hgsmi_buffer_submit(ctx, p);
p                 154 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	switch (p->result) {
p                 168 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	hgsmi_buffer_free(ctx, p);
p                 188 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	struct vbva_cursor_position *p;
p                 190 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
p                 192 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	if (!p)
p                 195 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p->report_position = report_position;
p                 196 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p->x = x;
p                 197 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	p->y = y;
p                 199 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	hgsmi_buffer_submit(ctx, p);
p                 201 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	*x_host = p->x;
p                 202 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	*y_host = p->y;
p                 204 drivers/gpu/drm/vboxvideo/hgsmi_base.c 	hgsmi_buffer_free(ctx, p);
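The hgsmi_base.c entries above all follow one guest-to-host command shape: allocate a typed buffer from the HGSMI guest heap, fill in the command fields, submit, then free. Below is a minimal sketch of that shape, assuming the hgsmi_buffer_alloc/submit/free calls shown above and a struct gen_pool * guest pool as passed in vbox_mode.c; the command struct vbva_example and EXAMPLE_CMD code are hypothetical placeholders, not real vboxvideo definitions.

/* Sketch only: assumes the driver-local vboxvideo_guest.h declarations
 * and linux/vbox_err.h for VERR_NOT_SUPPORTED. */
struct vbva_example {		/* hypothetical command layout */
	s32 rc;
	u32 value;
};

static int hgsmi_example_cmd(struct gen_pool *ctx, u32 value)
{
	struct vbva_example *p;

	/* Allocate the command buffer from the HGSMI guest heap. */
	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, EXAMPLE_CMD);
	if (!p)
		return -ENOMEM;

	/* Fill in the guest -> host parameters. */
	p->rc = VERR_NOT_SUPPORTED;
	p->value = value;

	/* The host processes the buffer during submit; p->rc can be read
	 * back afterwards, as the caps/mode-hints entries above do. */
	hgsmi_buffer_submit(ctx, p);
	hgsmi_buffer_free(ctx, p);

	return 0;
}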
p                  32 drivers/gpu/drm/vboxvideo/modesetting.c 	struct vbva_infoscreen *p;
p                  34 drivers/gpu/drm/vboxvideo/modesetting.c 	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
p                  36 drivers/gpu/drm/vboxvideo/modesetting.c 	if (!p)
p                  39 drivers/gpu/drm/vboxvideo/modesetting.c 	p->view_index = display;
p                  40 drivers/gpu/drm/vboxvideo/modesetting.c 	p->origin_x = origin_x;
p                  41 drivers/gpu/drm/vboxvideo/modesetting.c 	p->origin_y = origin_y;
p                  42 drivers/gpu/drm/vboxvideo/modesetting.c 	p->start_offset = start_offset;
p                  43 drivers/gpu/drm/vboxvideo/modesetting.c 	p->line_size = pitch;
p                  44 drivers/gpu/drm/vboxvideo/modesetting.c 	p->width = width;
p                  45 drivers/gpu/drm/vboxvideo/modesetting.c 	p->height = height;
p                  46 drivers/gpu/drm/vboxvideo/modesetting.c 	p->bits_per_pixel = bpp;
p                  47 drivers/gpu/drm/vboxvideo/modesetting.c 	p->flags = flags;
p                  49 drivers/gpu/drm/vboxvideo/modesetting.c 	hgsmi_buffer_submit(ctx, p);
p                  50 drivers/gpu/drm/vboxvideo/modesetting.c 	hgsmi_buffer_free(ctx, p);
p                  68 drivers/gpu/drm/vboxvideo/modesetting.c 	struct vbva_report_input_mapping *p;
p                  70 drivers/gpu/drm/vboxvideo/modesetting.c 	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
p                  72 drivers/gpu/drm/vboxvideo/modesetting.c 	if (!p)
p                  75 drivers/gpu/drm/vboxvideo/modesetting.c 	p->x = origin_x;
p                  76 drivers/gpu/drm/vboxvideo/modesetting.c 	p->y = origin_y;
p                  77 drivers/gpu/drm/vboxvideo/modesetting.c 	p->cx = width;
p                  78 drivers/gpu/drm/vboxvideo/modesetting.c 	p->cy = height;
p                  80 drivers/gpu/drm/vboxvideo/modesetting.c 	hgsmi_buffer_submit(ctx, p);
p                  81 drivers/gpu/drm/vboxvideo/modesetting.c 	hgsmi_buffer_free(ctx, p);
p                  96 drivers/gpu/drm/vboxvideo/modesetting.c 	struct vbva_query_mode_hints *p;
p                 103 drivers/gpu/drm/vboxvideo/modesetting.c 	p = hgsmi_buffer_alloc(ctx, sizeof(*p) + size, HGSMI_CH_VBVA,
p                 105 drivers/gpu/drm/vboxvideo/modesetting.c 	if (!p)
p                 108 drivers/gpu/drm/vboxvideo/modesetting.c 	p->hints_queried_count = screens;
p                 109 drivers/gpu/drm/vboxvideo/modesetting.c 	p->hint_structure_guest_size = sizeof(struct vbva_modehint);
p                 110 drivers/gpu/drm/vboxvideo/modesetting.c 	p->rc = VERR_NOT_SUPPORTED;
p                 112 drivers/gpu/drm/vboxvideo/modesetting.c 	hgsmi_buffer_submit(ctx, p);
p                 114 drivers/gpu/drm/vboxvideo/modesetting.c 	if (p->rc < 0) {
p                 115 drivers/gpu/drm/vboxvideo/modesetting.c 		hgsmi_buffer_free(ctx, p);
p                 119 drivers/gpu/drm/vboxvideo/modesetting.c 	memcpy(hints, ((u8 *)p) + sizeof(struct vbva_query_mode_hints), size);
p                 120 drivers/gpu/drm/vboxvideo/modesetting.c 	hgsmi_buffer_free(ctx, p);
p                  80 drivers/gpu/drm/vboxvideo/vbox_mode.c 	struct vbva_infoview *p;
p                  93 drivers/gpu/drm/vboxvideo/vbox_mode.c 	p = hgsmi_buffer_alloc(vbox->guest_pool, sizeof(*p),
p                  95 drivers/gpu/drm/vboxvideo/vbox_mode.c 	if (!p)
p                  98 drivers/gpu/drm/vboxvideo/vbox_mode.c 	p->view_index = vbox_crtc->crtc_id;
p                  99 drivers/gpu/drm/vboxvideo/vbox_mode.c 	p->view_offset = vbox_crtc->fb_offset;
p                 100 drivers/gpu/drm/vboxvideo/vbox_mode.c 	p->view_size = vbox->available_vram_size - vbox_crtc->fb_offset +
p                 102 drivers/gpu/drm/vboxvideo/vbox_mode.c 	p->max_screen_size = vbox->available_vram_size - vbox_crtc->fb_offset;
p                 104 drivers/gpu/drm/vboxvideo/vbox_mode.c 	hgsmi_buffer_submit(vbox->guest_pool, p);
p                 105 drivers/gpu/drm/vboxvideo/vbox_mode.c 	hgsmi_buffer_free(vbox->guest_pool, p);
p                  48 drivers/gpu/drm/vboxvideo/vboxvideo_guest.h 		const void *p, u32 len);
p                  30 drivers/gpu/drm/vboxvideo/vbva_base.c 				      const void *p, u32 len, u32 offset)
p                  39 drivers/gpu/drm/vboxvideo/vbva_base.c 		memcpy(dst, p, len);
p                  42 drivers/gpu/drm/vboxvideo/vbva_base.c 		memcpy(dst, p, bytes_till_boundary);
p                  43 drivers/gpu/drm/vboxvideo/vbva_base.c 		memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff);
p                  49 drivers/gpu/drm/vboxvideo/vbva_base.c 	struct vbva_flush *p;
p                  51 drivers/gpu/drm/vboxvideo/vbva_base.c 	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH);
p                  52 drivers/gpu/drm/vboxvideo/vbva_base.c 	if (!p)
p                  55 drivers/gpu/drm/vboxvideo/vbva_base.c 	p->reserved = 0;
p                  57 drivers/gpu/drm/vboxvideo/vbva_base.c 	hgsmi_buffer_submit(ctx, p);
p                  58 drivers/gpu/drm/vboxvideo/vbva_base.c 	hgsmi_buffer_free(ctx, p);
p                  62 drivers/gpu/drm/vboxvideo/vbva_base.c 		const void *p, u32 len)
p                  93 drivers/gpu/drm/vboxvideo/vbva_base.c 		vbva_buffer_place_data_at(vbva_ctx, p, chunk,
p                 101 drivers/gpu/drm/vboxvideo/vbva_base.c 		p += chunk;
p                 110 drivers/gpu/drm/vboxvideo/vbva_base.c 	struct vbva_enable_ex *p;
p                 113 drivers/gpu/drm/vboxvideo/vbva_base.c 	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE);
p                 114 drivers/gpu/drm/vboxvideo/vbva_base.c 	if (!p)
p                 117 drivers/gpu/drm/vboxvideo/vbva_base.c 	p->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
p                 118 drivers/gpu/drm/vboxvideo/vbva_base.c 	p->base.offset = vbva_ctx->buffer_offset;
p                 119 drivers/gpu/drm/vboxvideo/vbva_base.c 	p->base.result = VERR_NOT_SUPPORTED;
p                 121 drivers/gpu/drm/vboxvideo/vbva_base.c 		p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
p                 122 drivers/gpu/drm/vboxvideo/vbva_base.c 		p->screen_id = screen;
p                 125 drivers/gpu/drm/vboxvideo/vbva_base.c 	hgsmi_buffer_submit(ctx, p);
p                 128 drivers/gpu/drm/vboxvideo/vbva_base.c 		ret = p->base.result >= 0;
p                 132 drivers/gpu/drm/vboxvideo/vbva_base.c 	hgsmi_buffer_free(ctx, p);
p                  40 drivers/gpu/drm/vc4/vc4_bo.c static void vc4_bo_stats_print(struct drm_printer *p, struct vc4_dev *vc4)
p                  48 drivers/gpu/drm/vc4/vc4_bo.c 		drm_printf(p, "%30s: %6dkb BOs (%d)\n",
p                  56 drivers/gpu/drm/vc4/vc4_bo.c 		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
p                  60 drivers/gpu/drm/vc4/vc4_bo.c 		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
p                  71 drivers/gpu/drm/vc4/vc4_bo.c 	struct drm_printer p = drm_seq_file_printer(m);
p                  73 drivers/gpu/drm/vc4/vc4_bo.c 	vc4_bo_stats_print(&p, vc4);
p                 452 drivers/gpu/drm/vc4/vc4_bo.c 		struct drm_printer p = drm_info_printer(vc4->dev->dev);
p                 454 drivers/gpu/drm/vc4/vc4_bo.c 		vc4_bo_stats_print(&p, vc4);
p                 393 drivers/gpu/drm/vc4/vc4_crtc.c 		struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
p                 396 drivers/gpu/drm/vc4/vc4_crtc.c 		drm_print_regset32(&p, &vc4_crtc->regset);
p                 437 drivers/gpu/drm/vc4/vc4_crtc.c 		struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
p                 440 drivers/gpu/drm/vc4/vc4_crtc.c 		drm_print_regset32(&p, &vc4_crtc->regset);
p                  47 drivers/gpu/drm/vc4/vc4_debugfs.c 	struct drm_printer p = drm_seq_file_printer(m);
p                  49 drivers/gpu/drm/vc4/vc4_debugfs.c 	drm_print_regset32(&p, regset);
p                 241 drivers/gpu/drm/vc4/vc4_drv.c 		struct device *p = NULL, *d;
p                 243 drivers/gpu/drm/vc4/vc4_drv.c 		while ((d = platform_find_device_by_driver(p, drv))) {
p                 244 drivers/gpu/drm/vc4/vc4_drv.c 			put_device(p);
p                 246 drivers/gpu/drm/vc4/vc4_drv.c 			p = d;
p                 248 drivers/gpu/drm/vc4/vc4_drv.c 		put_device(p);
p                 841 drivers/gpu/drm/vc4/vc4_dsi.c 		struct drm_printer p = drm_info_printer(&dsi->pdev->dev);
p                 843 drivers/gpu/drm/vc4/vc4_dsi.c 		drm_print_regset32(&p, &dsi->regset);
p                1077 drivers/gpu/drm/vc4/vc4_dsi.c 		struct drm_printer p = drm_info_printer(&dsi->pdev->dev);
p                1079 drivers/gpu/drm/vc4/vc4_dsi.c 		drm_print_regset32(&p, &dsi->regset);
p                 188 drivers/gpu/drm/vc4/vc4_hdmi.c 	struct drm_printer p = drm_seq_file_printer(m);
p                 190 drivers/gpu/drm/vc4/vc4_hdmi.c 	drm_print_regset32(&p, &hdmi->hdmi_regset);
p                 191 drivers/gpu/drm/vc4/vc4_hdmi.c 	drm_print_regset32(&p, &hdmi->hd_regset);
p                 531 drivers/gpu/drm/vc4/vc4_hdmi.c 		struct drm_printer p = drm_info_printer(&hdmi->pdev->dev);
p                 534 drivers/gpu/drm/vc4/vc4_hdmi.c 		drm_print_regset32(&p, &hdmi->hdmi_regset);
p                 535 drivers/gpu/drm/vc4/vc4_hdmi.c 		drm_print_regset32(&p, &hdmi->hd_regset);
p                 610 drivers/gpu/drm/vc4/vc4_hdmi.c 		struct drm_printer p = drm_info_printer(&hdmi->pdev->dev);
p                 613 drivers/gpu/drm/vc4/vc4_hdmi.c 		drm_print_regset32(&p, &hdmi->hdmi_regset);
p                 614 drivers/gpu/drm/vc4/vc4_hdmi.c 		drm_print_regset32(&p, &hdmi->hd_regset);
p                  67 drivers/gpu/drm/vc4/vc4_hvs.c 	struct drm_printer p = drm_info_printer(&vc4->hvs->pdev->dev);
p                  70 drivers/gpu/drm/vc4/vc4_hvs.c 	drm_print_regset32(&p, &vc4->hvs->regset);
p                  88 drivers/gpu/drm/vc4/vc4_hvs.c 	struct drm_printer p = drm_seq_file_printer(m);
p                  90 drivers/gpu/drm/vc4/vc4_hvs.c 	drm_printf(&p, "%d\n", atomic_read(&vc4->underrun));
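The vc4 entries above show the drm_printer indirection: a single formatting routine takes a struct drm_printer, and the caller chooses the backend, drm_seq_file_printer() for debugfs output or drm_info_printer() for dev_info() output. A minimal sketch of that split, assuming only the helpers in drm_print.h; my_stats_print(), my_debugfs_show() and my_dump_on_error() are illustrative names, not vc4 functions.

#include <drm/drm_print.h>
#include <linux/device.h>
#include <linux/seq_file.h>

/* One formatting routine, backend-agnostic. */
static void my_stats_print(struct drm_printer *p, int count)
{
	drm_printf(p, "%30s: %d\n", "tracked objects", count);
}

/* debugfs path: route the same output into a seq_file. */
static int my_debugfs_show(struct seq_file *m, void *unused)
{
	struct drm_printer p = drm_seq_file_printer(m);

	my_stats_print(&p, 42);
	return 0;
}

/* error path: route the same output to dev_info() on a device. */
static void my_dump_on_error(struct device *dev)
{
	struct drm_printer p = drm_info_printer(dev);

	my_stats_print(&p, 42);
}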
p                 241 drivers/gpu/drm/vgem/vgem_fence.c static int __vgem_fence_idr_fini(int id, void *p, void *data)
p                 243 drivers/gpu/drm/vgem/vgem_fence.c 	dma_fence_signal(p);
p                 244 drivers/gpu/drm/vgem/vgem_fence.c 	dma_fence_put(p);
p                 313 drivers/gpu/host1x/bus.c 	struct property *p;
p                 328 drivers/gpu/host1x/bus.c 	of_property_for_each_string(np, "compatible", p, compat) {
p                 140 drivers/gpu/host1x/cdma.c 	u32 *p = (u32 *)((void *)pb->mapped + pb->pos);
p                 143 drivers/gpu/host1x/cdma.c 	*(p++) = op1;
p                 144 drivers/gpu/host1x/cdma.c 	*(p++) = op2;
p                 230 drivers/gpu/ipu-v3/ipu-cpmem.c 	struct ipu_ch_param __iomem *p = ipu_get_cpmem(ch);
p                 231 drivers/gpu/ipu-v3/ipu-cpmem.c 	void __iomem *base = p;
p                 234 drivers/gpu/ipu-v3/ipu-cpmem.c 	for (i = 0; i < sizeof(*p) / sizeof(u32); i++)
p                 891 drivers/gpu/ipu-v3/ipu-cpmem.c 	struct ipu_ch_param __iomem *p = ipu_get_cpmem(ch);
p                 896 drivers/gpu/ipu-v3/ipu-cpmem.c 		readl(&p->word[0].data[0]),
p                 897 drivers/gpu/ipu-v3/ipu-cpmem.c 		readl(&p->word[0].data[1]),
p                 898 drivers/gpu/ipu-v3/ipu-cpmem.c 		readl(&p->word[0].data[2]),
p                 899 drivers/gpu/ipu-v3/ipu-cpmem.c 		readl(&p->word[0].data[3]),
p                 900 drivers/gpu/ipu-v3/ipu-cpmem.c 		readl(&p->word[0].data[4]));
p                 902 drivers/gpu/ipu-v3/ipu-cpmem.c 		readl(&p->word[1].data[0]),
p                 903 drivers/gpu/ipu-v3/ipu-cpmem.c 		readl(&p->word[1].data[1]),
p                 904 drivers/gpu/ipu-v3/ipu-cpmem.c 		readl(&p->word[1].data[2]),
p                 905 drivers/gpu/ipu-v3/ipu-cpmem.c 		readl(&p->word[1].data[3]),
p                 906 drivers/gpu/ipu-v3/ipu-cpmem.c 		readl(&p->word[1].data[4]));
p                 445 drivers/hid/hid-debug.c 	const struct hid_usage_entry *p;
p                 454 drivers/hid/hid-debug.c 	for (p = hid_usage_table; p->description; p++)
p                 455 drivers/hid/hid-debug.c 		if (p->page == page) {
p                 458 drivers/hid/hid-debug.c 						p->description);
p                 462 drivers/hid/hid-debug.c 				seq_printf(f, "%s", p->description);
p                 474 drivers/hid/hid-debug.c 	const struct hid_usage_entry *p;
p                 493 drivers/hid/hid-debug.c 	for (p = hid_usage_table; p->description; p++)
p                 494 drivers/hid/hid-debug.c 		if (p->page == (usage >> 16)) {
p                 495 drivers/hid/hid-debug.c 			for(++p; p->description && p->usage != 0; p++)
p                 496 drivers/hid/hid-debug.c 				if (p->usage == (usage & 0xffff)) {
p                 500 drivers/hid/hid-debug.c 							"%s", p->description);
p                 504 drivers/hid/hid-debug.c 							p->description);
p                1033 drivers/hid/hid-debug.c static int hid_debug_rdesc_show(struct seq_file *f, void *p)
p                 214 drivers/hid/hid-elan.c 	int x, y, p;
p                 224 drivers/hid/hid-elan.c 		p = data[4];
p                 228 drivers/hid/hid-elan.c 		input_report_abs(input, ABS_MT_PRESSURE, p);
p                  98 drivers/hid/hid-multitouch.c 	__s32 *x, *y, *cx, *cy, *p, *w, *h, *a;
p                 221 drivers/hid/hid-multitouch.c #define MT_USB_DEVICE(v, p)	HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH, v, p)
p                 222 drivers/hid/hid-multitouch.c #define MT_BT_DEVICE(v, p)	HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH, v, p)
p                 518 drivers/hid/hid-multitouch.c 	usage->p = DEFAULT_ZERO;
p                 817 drivers/hid/hid-multitouch.c 			MT_STORE_FIELD(p);
p                1094 drivers/hid/hid-multitouch.c 		input_event(input, EV_ABS, ABS_MT_PRESSURE, *slot->p);
p                  23 drivers/hid/hid-picolcd_debugfs.c static int picolcd_debug_reset_show(struct seq_file *f, void *p)
p                 396 drivers/hid/hid-picolcd_fb.c 			u8 p = 0;
p                 398 drivers/hid/hid-picolcd_fb.c 				p <<= 1;
p                 399 drivers/hid/hid-picolcd_fb.c 				p |= o_fb[i*8+b] ? 0x01 : 0x00;
p                 401 drivers/hid/hid-picolcd_fb.c 			tmp_fb[i] = p;
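The hid-picolcd_fb.c entries above fold eight one-byte-per-pixel framebuffer samples into a single bit-per-pixel byte, most significant bit first. A self-contained restatement of that fold in plain C; the function name is illustrative.

#include <stdint.h>

/* Pack 8 byte-per-pixel samples into one 1bpp byte, MSB first. */
static uint8_t pack_8_pixels(const uint8_t *px)
{
	uint8_t p = 0;
	int b;

	for (b = 0; b < 8; b++) {
		p <<= 1;			/* make room for the next pixel */
		p |= px[b] ? 0x01 : 0x00;	/* any non-zero sample lights the pixel */
	}
	return p;
}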
p                 243 drivers/hid/hid-uclogic-params.c static s32 uclogic_params_get_le24(const void *p)
p                 245 drivers/hid/hid-uclogic-params.c 	const __u8 *b = p;
p                 564 drivers/hid/hid-uclogic-params.c 		__u8 *p;
p                 569 drivers/hid/hid-uclogic-params.c 		p = desc;
p                 572 drivers/hid/hid-uclogic-params.c 			memcpy(p, params->desc_ptr,
p                 574 drivers/hid/hid-uclogic-params.c 			p += params->desc_size;
p                 577 drivers/hid/hid-uclogic-params.c 			memcpy(p, params->pen.desc_ptr,
p                 579 drivers/hid/hid-uclogic-params.c 			p += params->pen.desc_size;
p                 582 drivers/hid/hid-uclogic-params.c 			memcpy(p, params->frame.desc_ptr,
p                 584 drivers/hid/hid-uclogic-params.c 			p += params->frame.desc_size;
p                 587 drivers/hid/hid-uclogic-params.c 		WARN_ON(p != desc + size);
p                 713 drivers/hid/hid-uclogic-params.c 	struct uclogic_params p = {0, };
p                 727 drivers/hid/hid-uclogic-params.c 		uclogic_params_init_with_pen_unused(&p);
p                 752 drivers/hid/hid-uclogic-params.c 		rc = uclogic_params_pen_init_v2(&p.pen, &found, hdev);
p                 761 drivers/hid/hid-uclogic-params.c 					&p.frame,
p                 772 drivers/hid/hid-uclogic-params.c 			p.pen_frame_flag = 0x20;
p                 779 drivers/hid/hid-uclogic-params.c 	rc = uclogic_params_pen_init_v1(&p.pen, &found, hdev);
p                 788 drivers/hid/hid-uclogic-params.c 						&p.frame,
p                 798 drivers/hid/hid-uclogic-params.c 			p.pen_frame_flag = 0x20;
p                 804 drivers/hid/hid-uclogic-params.c 	uclogic_params_init_invalid(&p);
p                 808 drivers/hid/hid-uclogic-params.c 	memcpy(params, &p, sizeof(*params));
p                 809 drivers/hid/hid-uclogic-params.c 	memset(&p, 0, sizeof(p));
p                 813 drivers/hid/hid-uclogic-params.c 	uclogic_params_cleanup(&p);
p                 841 drivers/hid/hid-uclogic-params.c 	struct uclogic_params p = {0, };
p                 856 drivers/hid/hid-uclogic-params.c 		&p, hdev,                                   \
p                 901 drivers/hid/hid-uclogic-params.c 				rc = uclogic_params_pen_init_v1(&p.pen,
p                 914 drivers/hid/hid-uclogic-params.c 				uclogic_params_init_invalid(&p);
p                 994 drivers/hid/hid-uclogic-params.c 		rc = uclogic_params_huion_init(&p, hdev);
p                1009 drivers/hid/hid-uclogic-params.c 			rc = uclogic_params_pen_init_v1(&p.pen, &found, hdev);
p                1016 drivers/hid/hid-uclogic-params.c 				uclogic_params_init_invalid(&p);
p                1020 drivers/hid/hid-uclogic-params.c 			uclogic_params_init_with_pen_unused(&p);
p                1028 drivers/hid/hid-uclogic-params.c 			rc = uclogic_params_pen_init_v1(&p.pen, &found, hdev);
p                1035 drivers/hid/hid-uclogic-params.c 				&p.frame,
p                1043 drivers/hid/hid-uclogic-params.c 			uclogic_params_init_with_pen_unused(&p);
p                1050 drivers/hid/hid-uclogic-params.c 			uclogic_params_init_invalid(&p);
p                1054 drivers/hid/hid-uclogic-params.c 		rc = uclogic_params_pen_init_v1(&p.pen, &found, hdev);
p                1060 drivers/hid/hid-uclogic-params.c 				&p.frame,
p                1070 drivers/hid/hid-uclogic-params.c 			p.frame.re_lsb =
p                1072 drivers/hid/hid-uclogic-params.c 			p.frame.dev_id_byte =
p                1076 drivers/hid/hid-uclogic-params.c 			uclogic_params_init_invalid(&p);
p                1084 drivers/hid/hid-uclogic-params.c 			uclogic_params_init_invalid(&p);
p                1088 drivers/hid/hid-uclogic-params.c 		rc = uclogic_params_pen_init_v1(&p.pen, &found, hdev);
p                1094 drivers/hid/hid-uclogic-params.c 				&p.frame,
p                1106 drivers/hid/hid-uclogic-params.c 			uclogic_params_init_invalid(&p);
p                1116 drivers/hid/hid-uclogic-params.c 	memcpy(params, &p, sizeof(*params));
p                1117 drivers/hid/hid-uclogic-params.c 	memset(&p, 0, sizeof(p));
p                1120 drivers/hid/hid-uclogic-params.c 	uclogic_params_cleanup(&p);
p                 843 drivers/hid/hid-uclogic-rdesc.c 	__u8 *p;
p                 850 drivers/hid/hid-uclogic-rdesc.c 	for (p = rdesc_ptr; p + sizeof(head) < rdesc_ptr + template_size;) {
p                 851 drivers/hid/hid-uclogic-rdesc.c 		if (memcmp(p, head, sizeof(head)) == 0 &&
p                 852 drivers/hid/hid-uclogic-rdesc.c 		    p[sizeof(head)] < param_num) {
p                 853 drivers/hid/hid-uclogic-rdesc.c 			v = param_list[p[sizeof(head)]];
p                 854 drivers/hid/hid-uclogic-rdesc.c 			put_unaligned(cpu_to_le32(v), (s32 *)p);
p                 855 drivers/hid/hid-uclogic-rdesc.c 			p += sizeof(head) + 1;
p                 857 drivers/hid/hid-uclogic-rdesc.c 			p++;
p                  98 drivers/hid/hid-wiimote-debug.c static int wiidebug_drm_show(struct seq_file *f, void *p)
p                1649 drivers/hid/wacom_wac.c 		int p = data[1] & (1 << i);
p                1650 drivers/hid/wacom_wac.c 		bool touch = p && report_touch_events(wacom);
p                2972 drivers/hid/wacom_wac.c 	int x = 0, y = 0, p = 0, d = 0;
p                2988 drivers/hid/wacom_wac.c 		p = le16_to_cpup((__le16 *)&data[6]);
p                3028 drivers/hid/wacom_wac.c 		input_report_abs(input, ABS_PRESSURE, p);
p                1278 drivers/hsi/clients/cmt_speech.c 	unsigned long p;
p                1290 drivers/hsi/clients/cmt_speech.c 	p = get_zeroed_page(GFP_KERNEL);
p                1291 drivers/hsi/clients/cmt_speech.c 	if (!p) {
p                1296 drivers/hsi/clients/cmt_speech.c 	ret = cs_hsi_start(&cs_char_data.hi, cs_char_data.cl, p, CS_MMAP_SIZE);
p                1303 drivers/hsi/clients/cmt_speech.c 	cs_char_data.mmap_base = p;
p                1311 drivers/hsi/clients/cmt_speech.c 	free_page(p);
p                  38 drivers/hsi/controllers/omap_ssi_core.c static int ssi_regs_show(struct seq_file *m, void *p __maybe_unused)
p                  53 drivers/hsi/controllers/omap_ssi_core.c static int ssi_gdd_regs_show(struct seq_file *m, void *p __maybe_unused)
p                  47 drivers/hsi/controllers/omap_ssi_port.c static int ssi_port_regs_show(struct seq_file *m, void *p __maybe_unused)
p                 120 drivers/hsi/hsi_core.c 	struct hsi_port	*p;
p                 124 drivers/hsi/hsi_core.c 			p = hsi_find_port_num(hsi, cl_info->info.port);
p                 125 drivers/hsi/hsi_core.c 			if (!p)
p                 127 drivers/hsi/hsi_core.c 			hsi_new_client(p, &cl_info->info);
p                 490 drivers/hv/hv_util.c static int hv_ptp_settime(struct ptp_clock_info *p, const struct timespec64 *ts)
p                 253 drivers/hwmon/adt7470.c static int adt7470_update_thread(void *p)
p                 255 drivers/hwmon/adt7470.c 	struct i2c_client *client = p;
p                 657 drivers/hwmon/asus_atk0110.c static int atk_debugfs_gitm_get(void *p, u64 *val)
p                 659 drivers/hwmon/asus_atk0110.c 	struct atk_data *data = p;
p                 169 drivers/hwmon/g762.c static inline unsigned int rpm_from_cnt(u8 cnt, u32 clk_freq, u16 p,
p                 175 drivers/hwmon/g762.c 	return (clk_freq * 30 * gear_mult) / ((cnt ? cnt : 1) * p * clk_div);
p                 182 drivers/hwmon/g762.c static inline unsigned char cnt_from_rpm(unsigned long rpm, u32 clk_freq, u16 p,
p                 186 drivers/hwmon/g762.c 	unsigned long f2 = p * clk_div;
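The g762.c entries above convert a tachometer count to RPM with rpm = clk_freq * 30 * gear_mult / (cnt * p * clk_div), clamping a zero count to 1. A self-contained sketch exercising that formula; the numeric values in main() are chosen only to illustrate the arithmetic and do not assert anything about real g762 configurations.

#include <stdint.h>
#include <stdio.h>

/* Forward conversion, mirroring the rpm_from_cnt() entry above. */
static unsigned int rpm_from_cnt(uint8_t cnt, uint32_t clk_freq, uint16_t p,
				 uint8_t clk_div, uint8_t gear_mult)
{
	return (clk_freq * 30 * gear_mult) / ((cnt ? cnt : 1) * p * clk_div);
}

int main(void)
{
	/* Illustrative values only: 32768 Hz clock, p = 2, clk_div = 2,
	 * gear_mult = 1, count 121 -> 983040 / 484, about 2031 rpm. */
	printf("count 121 -> %u rpm\n", rpm_from_cnt(121, 32768, 2, 2, 1));
	return 0;
}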
p                 416 drivers/hwmon/gpio-fan.c 	const __be32 *p;
p                 466 drivers/hwmon/gpio-fan.c 	p = NULL;
p                 468 drivers/hwmon/gpio-fan.c 		p = of_prop_next_u32(prop, p, &u);
p                 469 drivers/hwmon/gpio-fan.c 		if (!p)
p                 472 drivers/hwmon/gpio-fan.c 		p = of_prop_next_u32(prop, p, &u);
p                 473 drivers/hwmon/gpio-fan.c 		if (!p)
p                 250 drivers/hwmon/ibmpex.c 	struct ibmpex_bmc_data *p, *next;
p                 252 drivers/hwmon/ibmpex.c 	list_for_each_entry_safe(p, next, &driver_data.bmc_data, list)
p                 253 drivers/hwmon/ibmpex.c 		if (p->interface == iface)
p                 254 drivers/hwmon/ibmpex.c 			return p;
p                 582 drivers/hwmon/ibmpex.c 	struct ibmpex_bmc_data *p, *next;
p                 585 drivers/hwmon/ibmpex.c 	list_for_each_entry_safe(p, next, &driver_data.bmc_data, list)
p                 586 drivers/hwmon/ibmpex.c 		ibmpex_bmc_delete(p);
p                 505 drivers/hwmon/max6697.c 	struct max6697_platform_data p;
p                 535 drivers/hwmon/max6697.c 		memset(&p, 0, sizeof(p));
p                 536 drivers/hwmon/max6697.c 		max6697_get_config_of(client->dev.of_node, &p);
p                 537 drivers/hwmon/max6697.c 		pdata = &p;
p                 340 drivers/hwmon/nct6775.c #define NCT6775_AUTO_TEMP(data, nr, p)	((data)->REG_AUTO_TEMP[nr] + (p))
p                 341 drivers/hwmon/nct6775.c #define NCT6775_AUTO_PWM(data, nr, p)	((data)->REG_AUTO_PWM[nr] + (p))
p                2219 drivers/hwmon/nct6775.c 	int p = data->fan_pulses[sattr->index];
p                2221 drivers/hwmon/nct6775.c 	return sprintf(buf, "%d\n", p ? : 4);
p                  59 drivers/hwmon/pmbus/inspur-ipsps.c 	char *p;
p                  70 drivers/hwmon/pmbus/inspur-ipsps.c 	p = memscan(data, '#', rc);
p                  71 drivers/hwmon/pmbus/inspur-ipsps.c 	*p = '\0';
p                1265 drivers/hwmon/pmbus/pmbus_core.c 	int p;
p                1279 drivers/hwmon/pmbus/pmbus_core.c 	for (p = 1; p < info->pages; p++) {
p                1280 drivers/hwmon/pmbus/pmbus_core.c 		if (info->func[p] & attr->func)
p                  68 drivers/hwmon/ultra45_env.c static u8 env_read(struct env *p, u8 ireg)
p                  72 drivers/hwmon/ultra45_env.c 	spin_lock(&p->lock);
p                  73 drivers/hwmon/ultra45_env.c 	writeb(ireg, p->regs + REG_ADDR);
p                  74 drivers/hwmon/ultra45_env.c 	ret = readb(p->regs + REG_DATA);
p                  75 drivers/hwmon/ultra45_env.c 	spin_unlock(&p->lock);
p                  80 drivers/hwmon/ultra45_env.c static void env_write(struct env *p, u8 ireg, u8 val)
p                  82 drivers/hwmon/ultra45_env.c 	spin_lock(&p->lock);
p                  83 drivers/hwmon/ultra45_env.c 	writeb(ireg, p->regs + REG_ADDR);
p                  84 drivers/hwmon/ultra45_env.c 	writeb(val, p->regs + REG_DATA);
p                  85 drivers/hwmon/ultra45_env.c 	spin_unlock(&p->lock);
p                 101 drivers/hwmon/ultra45_env.c 	struct env *p = dev_get_drvdata(dev);
p                 105 drivers/hwmon/ultra45_env.c 	val = env_read(p, IREG_FAN0 + fan_nr);
p                 120 drivers/hwmon/ultra45_env.c 	struct env *p = dev_get_drvdata(dev);
p                 134 drivers/hwmon/ultra45_env.c 	env_write(p, IREG_FAN0 + fan_nr, val);
p                 143 drivers/hwmon/ultra45_env.c 	struct env *p = dev_get_drvdata(dev);
p                 144 drivers/hwmon/ultra45_env.c 	u8 val = env_read(p, IREG_FAN_STAT);
p                 166 drivers/hwmon/ultra45_env.c 	struct env *p = dev_get_drvdata(dev);
p                 169 drivers/hwmon/ultra45_env.c 	val = env_read(p, IREG_LCL_TEMP + temp_nr);
p                 187 drivers/hwmon/ultra45_env.c 	struct env *p = dev_get_drvdata(dev);
p                 190 drivers/hwmon/ultra45_env.c 	val = readb(p->regs + REG_STAT);
p                 203 drivers/hwmon/ultra45_env.c 	struct env *p = dev_get_drvdata(dev);
p                 206 drivers/hwmon/ultra45_env.c 	val = readb(p->regs + REG_STAT);
p                 256 drivers/hwmon/ultra45_env.c 	struct env *p = devm_kzalloc(&op->dev, sizeof(*p), GFP_KERNEL);
p                 259 drivers/hwmon/ultra45_env.c 	if (!p)
p                 262 drivers/hwmon/ultra45_env.c 	spin_lock_init(&p->lock);
p                 264 drivers/hwmon/ultra45_env.c 	p->regs = of_ioremap(&op->resource[0], 0, REG_SIZE, "pic16f747");
p                 265 drivers/hwmon/ultra45_env.c 	if (!p->regs)
p                 272 drivers/hwmon/ultra45_env.c 	p->hwmon_dev = hwmon_device_register(&op->dev);
p                 273 drivers/hwmon/ultra45_env.c 	if (IS_ERR(p->hwmon_dev)) {
p                 274 drivers/hwmon/ultra45_env.c 		err = PTR_ERR(p->hwmon_dev);
p                 278 drivers/hwmon/ultra45_env.c 	platform_set_drvdata(op, p);
p                 288 drivers/hwmon/ultra45_env.c 	of_iounmap(&op->resource[0], p->regs, REG_SIZE);
p                 295 drivers/hwmon/ultra45_env.c 	struct env *p = platform_get_drvdata(op);
p                 297 drivers/hwmon/ultra45_env.c 	if (p) {
p                 299 drivers/hwmon/ultra45_env.c 		hwmon_device_unregister(p->hwmon_dev);
p                 300 drivers/hwmon/ultra45_env.c 		of_iounmap(&op->resource[0], p->regs, REG_SIZE);
p                 256 drivers/hwmon/via-cputemp.c 	struct pdev_entry *p;
p                 259 drivers/hwmon/via-cputemp.c 	list_for_each_entry(p, &pdev_list, list) {
p                 260 drivers/hwmon/via-cputemp.c 		if (p->cpu == cpu) {
p                 261 drivers/hwmon/via-cputemp.c 			platform_device_unregister(p->pdev);
p                 262 drivers/hwmon/via-cputemp.c 			list_del(&p->list);
p                 264 drivers/hwmon/via-cputemp.c 			kfree(p);
p                1337 drivers/hwmon/w83781d.c 	int i, p;
p                1358 drivers/hwmon/w83781d.c 		p = w83781d_read_value(data, W83781D_REG_PWMCLK12);
p                1369 drivers/hwmon/w83781d.c 		w83781d_write_value(data, W83781D_REG_PWMCLK12, p);
p                 377 drivers/hwtracing/coresight/coresight-cpu-debug.c 			       unsigned long v, void *p)
p                1954 drivers/hwtracing/intel_th/msu.c 	const char *p = buf;
p                1971 drivers/hwtracing/intel_th/msu.c 		end = memchr(p, ',', len);
p                1972 drivers/hwtracing/intel_th/msu.c 		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
p                2003 drivers/hwtracing/intel_th/msu.c 		len -= end - p + 1;
p                2004 drivers/hwtracing/intel_th/msu.c 		p = end + 1;
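The intel_th/msu.c entries above walk a comma-separated buffer: memchr() finds the next separator, the token in front of it is duplicated with kstrndup(), and len and p are then advanced past the ','. A self-contained restatement of that loop in plain C, printing each token instead of duplicating it; the function name is illustrative.

#include <stdio.h>
#include <string.h>

static void for_each_token(const char *buf, size_t len)
{
	const char *p = buf;

	while (len) {
		const char *end = memchr(p, ',', len);
		size_t n = end ? (size_t)(end - p) : len;

		printf("token: %.*s\n", (int)n, p);	/* handle one token */

		if (!end)
			break;		/* last token, no separator left */
		len -= n + 1;		/* consume the token plus the ',' */
		p = end + 1;
	}
}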
p                 108 drivers/hwtracing/stm/policy.c 	char *p = (char *)page;
p                 111 drivers/hwtracing/stm/policy.c 	if (sscanf(p, "%u %u", &first, &last) != 2)
p                 155 drivers/hwtracing/stm/policy.c 	char *p = (char *)page;
p                 158 drivers/hwtracing/stm/policy.c 	if (sscanf(p, "%u %u", &first, &last) != 2)
p                 392 drivers/hwtracing/stm/policy.c 	char *devname, *proto, *p;
p                 408 drivers/hwtracing/stm/policy.c 	p = strrchr(devname, '.');
p                 409 drivers/hwtracing/stm/policy.c 	if (!p) {
p                 414 drivers/hwtracing/stm/policy.c 	*p = '\0';
p                 216 drivers/i2c/busses/i2c-au1550.c 	struct i2c_msg *p;
p                 222 drivers/i2c/busses/i2c-au1550.c 		p = &msgs[i];
p                 223 drivers/i2c/busses/i2c-au1550.c 		err = do_address(adap, p->addr, p->flags & I2C_M_RD,
p                 224 drivers/i2c/busses/i2c-au1550.c 				 (p->len == 0));
p                 225 drivers/i2c/busses/i2c-au1550.c 		if (err || !p->len)
p                 227 drivers/i2c/busses/i2c-au1550.c 		if (p->flags & I2C_M_RD)
p                 228 drivers/i2c/busses/i2c-au1550.c 			err = i2c_read(adap, p->buf, p->len);
p                 230 drivers/i2c/busses/i2c-au1550.c 			err = i2c_write(adap, p->buf, p->len);
p                 208 drivers/i2c/busses/i2c-eg20t.c 	void __iomem *p = adap->pch_base_address;
p                 214 drivers/i2c/busses/i2c-eg20t.c 	iowrite32(0x01, p + PCH_I2CSRST);
p                 216 drivers/i2c/busses/i2c-eg20t.c 	iowrite32(0x0, p + PCH_I2CSRST);
p                 219 drivers/i2c/busses/i2c-eg20t.c 	iowrite32(0x21, p + PCH_I2CNF);
p                 237 drivers/i2c/busses/i2c-eg20t.c 	iowrite32(pch_i2cbc, p + PCH_I2CBC);
p                 240 drivers/i2c/busses/i2c-eg20t.c 	iowrite32(pch_i2ctmr, p + PCH_I2CTMR);
p                 243 drivers/i2c/busses/i2c-eg20t.c 	iowrite32(reg_value, p + PCH_I2CCTL);
p                 247 drivers/i2c/busses/i2c-eg20t.c 		ioread32(p + PCH_I2CCTL), pch_i2cbc, pch_i2ctmr);
p                 260 drivers/i2c/busses/i2c-eg20t.c 	void __iomem *p = adap->pch_base_address;
p                 264 drivers/i2c/busses/i2c-eg20t.c 	while (ioread32(p + PCH_I2CSR) & I2CMBB_BIT) {
p                 266 drivers/i2c/busses/i2c-eg20t.c 			pch_dbg(adap, "I2CSR = %x\n", ioread32(p + PCH_I2CSR));
p                 295 drivers/i2c/busses/i2c-eg20t.c 	void __iomem *p = adap->pch_base_address;
p                 296 drivers/i2c/busses/i2c-eg20t.c 	pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
p                 306 drivers/i2c/busses/i2c-eg20t.c 	void __iomem *p = adap->pch_base_address;
p                 307 drivers/i2c/busses/i2c-eg20t.c 	pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
p                 315 drivers/i2c/busses/i2c-eg20t.c 	void __iomem *p = adap->pch_base_address;
p                 338 drivers/i2c/busses/i2c-eg20t.c 	if (ioread32(p + PCH_I2CSR) & PCH_GETACK) {
p                 352 drivers/i2c/busses/i2c-eg20t.c 	void __iomem *p = adap->pch_base_address;
p                 353 drivers/i2c/busses/i2c-eg20t.c 	pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
p                 377 drivers/i2c/busses/i2c-eg20t.c 	void __iomem *p = adap->pch_base_address;
p                 386 drivers/i2c/busses/i2c-eg20t.c 	pch_dbg(adap, "I2CCTL = %x msgs->len = %d\n", ioread32(p + PCH_I2CCTL),
p                 396 drivers/i2c/busses/i2c-eg20t.c 		iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
p                 405 drivers/i2c/busses/i2c-eg20t.c 		iowrite32(addr_8_lsb, p + PCH_I2CDR);
p                 408 drivers/i2c/busses/i2c-eg20t.c 		iowrite32(i2c_8bit_addr_from_msg(msgs), p + PCH_I2CDR);
p                 419 drivers/i2c/busses/i2c-eg20t.c 		iowrite32(buf[wrcount], p + PCH_I2CDR);
p                 447 drivers/i2c/busses/i2c-eg20t.c 	void __iomem *p = adap->pch_base_address;
p                 448 drivers/i2c/busses/i2c-eg20t.c 	pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
p                 458 drivers/i2c/busses/i2c-eg20t.c 	void __iomem *p = adap->pch_base_address;
p                 459 drivers/i2c/busses/i2c-eg20t.c 	pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
p                 471 drivers/i2c/busses/i2c-eg20t.c 	void __iomem *p = adap->pch_base_address;
p                 472 drivers/i2c/busses/i2c-eg20t.c 	pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
p                 494 drivers/i2c/busses/i2c-eg20t.c 	void __iomem *p = adap->pch_base_address;
p                 511 drivers/i2c/busses/i2c-eg20t.c 		iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
p                 520 drivers/i2c/busses/i2c-eg20t.c 		iowrite32(addr_8_lsb, p + PCH_I2CDR);
p                 529 drivers/i2c/busses/i2c-eg20t.c 		iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
p                 532 drivers/i2c/busses/i2c-eg20t.c 		iowrite32(i2c_8bit_addr_from_msg(msgs), p + PCH_I2CDR);
p                 545 drivers/i2c/busses/i2c-eg20t.c 		ioread32(p + PCH_I2CDR); /* Dummy read needs */
p                 555 drivers/i2c/busses/i2c-eg20t.c 			buf[read_index] = ioread32(p + PCH_I2CDR);
p                 567 drivers/i2c/busses/i2c-eg20t.c 		buf[read_index] = ioread32(p + PCH_I2CDR); /* Read final - 1 */
p                 581 drivers/i2c/busses/i2c-eg20t.c 		buf[read_index++] = ioread32(p + PCH_I2CDR); /* Read Final */
p                 595 drivers/i2c/busses/i2c-eg20t.c 	void __iomem *p = adap->pch_base_address;
p                 597 drivers/i2c/busses/i2c-eg20t.c 	sts = ioread32(p + PCH_I2CSR);
p                 608 drivers/i2c/busses/i2c-eg20t.c 	pch_dbg(adap, "PCH_I2CSR = %x\n", ioread32(p + PCH_I2CSR));
p                 624 drivers/i2c/busses/i2c-eg20t.c 	void __iomem *p;
p                 628 drivers/i2c/busses/i2c-eg20t.c 		p = adap_info->pch_data[i].pch_base_address;
p                 629 drivers/i2c/busses/i2c-eg20t.c 		mode = ioread32(p + PCH_I2CMOD);
p                 636 drivers/i2c/busses/i2c-eg20t.c 		reg_val = ioread32(p + PCH_I2CSR);
p                 719 drivers/i2c/busses/i2c-eg20t.c 	void __iomem *p = adap->pch_base_address;
p                 723 drivers/i2c/busses/i2c-eg20t.c 	iowrite32(EEPROM_RST_INTR_DISBL, p + PCH_I2CESRMSK);
p                 725 drivers/i2c/busses/i2c-eg20t.c 	iowrite32(BUFFER_MODE_INTR_DISBL, p + PCH_I2CBUFMSK);
p                 854 drivers/i2c/busses/i2c-eg20t.c 	void __iomem *p = adap_info->pch_data[0].pch_base_address;
p                 871 drivers/i2c/busses/i2c-eg20t.c 		ioread32(p + PCH_I2CSR), ioread32(p + PCH_I2CBUFSTA),
p                 872 drivers/i2c/busses/i2c-eg20t.c 		ioread32(p + PCH_I2CESRSTA));
p                 110 drivers/i2c/busses/i2c-i801.c #define SMBHSTSTS(p)	(0 + (p)->smba)
p                 111 drivers/i2c/busses/i2c-i801.c #define SMBHSTCNT(p)	(2 + (p)->smba)
p                 112 drivers/i2c/busses/i2c-i801.c #define SMBHSTCMD(p)	(3 + (p)->smba)
p                 113 drivers/i2c/busses/i2c-i801.c #define SMBHSTADD(p)	(4 + (p)->smba)
p                 114 drivers/i2c/busses/i2c-i801.c #define SMBHSTDAT0(p)	(5 + (p)->smba)
p                 115 drivers/i2c/busses/i2c-i801.c #define SMBHSTDAT1(p)	(6 + (p)->smba)
p                 116 drivers/i2c/busses/i2c-i801.c #define SMBBLKDAT(p)	(7 + (p)->smba)
p                 117 drivers/i2c/busses/i2c-i801.c #define SMBPEC(p)	(8 + (p)->smba)		/* ICH3 and later */
p                 118 drivers/i2c/busses/i2c-i801.c #define SMBAUXSTS(p)	(12 + (p)->smba)	/* ICH4 and later */
p                 119 drivers/i2c/busses/i2c-i801.c #define SMBAUXCTL(p)	(13 + (p)->smba)	/* ICH4 and later */
p                 120 drivers/i2c/busses/i2c-i801.c #define SMBSLVSTS(p)	(16 + (p)->smba)	/* ICH3 and later */
p                 121 drivers/i2c/busses/i2c-i801.c #define SMBSLVCMD(p)	(17 + (p)->smba)	/* ICH3 and later */
p                 122 drivers/i2c/busses/i2c-i801.c #define SMBNTFDADD(p)	(20 + (p)->smba)	/* ICH3 and later */
p                1094 drivers/i2c/busses/i2c-i801.c 	const void __iomem *p;
p                1097 drivers/i2c/busses/i2c-i801.c 	p = bios_signature(bios);
p                1098 drivers/i2c/busses/i2c-i801.c 	if (p) {
p                1100 drivers/i2c/busses/i2c-i801.c 		apanel_addr = readb(p + 8 + 3) >> 1;
p                 238 drivers/i2c/busses/i2c-ibm_iic.c static int iic_smbus_quick(struct ibm_iic_private* dev, const struct i2c_msg* p)
p                 246 drivers/i2c/busses/i2c-ibm_iic.c 	if (unlikely(p->flags & I2C_M_TEN)){
p                 252 drivers/i2c/busses/i2c-ibm_iic.c 	DBG("%d: smbus_quick(0x%02x)\n", dev->idx, p->addr);
p                 269 drivers/i2c/busses/i2c-ibm_iic.c 	v = i2c_8bit_addr_from_msg(p);
p                 535 drivers/i2c/busses/i2c-ibm_iic.c static inline int iic_invalid_address(const struct i2c_msg* p)
p                 537 drivers/i2c/busses/i2c-ibm_iic.c 	return (p->addr > 0x3ff) || (!(p->flags & I2C_M_TEN) && (p->addr > 0x7f));
p                  42 drivers/i2c/busses/i2c-parport-light.c static inline void port_write(unsigned char p, unsigned char d)
p                  44 drivers/i2c/busses/i2c-parport-light.c 	outb(d, base+p);
p                  47 drivers/i2c/busses/i2c-parport-light.c static inline unsigned char port_read(unsigned char p)
p                  49 drivers/i2c/busses/i2c-parport-light.c 	return inb(base+p);
p                  49 drivers/i2c/busses/i2c-parport.c static void port_write_data(struct parport *p, unsigned char d)
p                  51 drivers/i2c/busses/i2c-parport.c 	parport_write_data(p, d);
p                  54 drivers/i2c/busses/i2c-parport.c static void port_write_control(struct parport *p, unsigned char d)
p                  56 drivers/i2c/busses/i2c-parport.c 	parport_write_control(p, d);
p                  59 drivers/i2c/busses/i2c-parport.c static unsigned char port_read_data(struct parport *p)
p                  61 drivers/i2c/busses/i2c-parport.c 	return parport_read_data(p);
p                  64 drivers/i2c/busses/i2c-parport.c static unsigned char port_read_status(struct parport *p)
p                  66 drivers/i2c/busses/i2c-parport.c 	return parport_read_status(p);
p                  69 drivers/i2c/busses/i2c-parport.c static unsigned char port_read_control(struct parport *p)
p                  71 drivers/i2c/busses/i2c-parport.c 	return parport_read_control(p);
p                 206 drivers/i2c/busses/i2c-qcom-geni.c 	int j, p;
p                 253 drivers/i2c/busses/i2c-qcom-geni.c 			p = 0;
p                 255 drivers/i2c/busses/i2c-qcom-geni.c 			while (gi2c->cur_rd < cur->len && p < sizeof(val)) {
p                 258 drivers/i2c/busses/i2c-qcom-geni.c 				p++;
p                 269 drivers/i2c/busses/i2c-qcom-geni.c 			p = 0;
p                 270 drivers/i2c/busses/i2c-qcom-geni.c 			while (gi2c->cur_wr < cur->len && p < sizeof(val)) {
p                 272 drivers/i2c/busses/i2c-qcom-geni.c 				val |= temp << (p * 8);
p                 273 drivers/i2c/busses/i2c-qcom-geni.c 				p++;
p                 145 drivers/i2c/busses/i2c-rcar.c #define rcar_i2c_priv_to_dev(p)		((p)->adap.dev.parent)
p                 146 drivers/i2c/busses/i2c-rcar.c #define rcar_i2c_is_recv(p)		((p)->msg->flags & I2C_M_RD)
p                 394 drivers/i2c/busses/i2c-stm32f7.c 	u16 p, l, a, h;
p                 452 drivers/i2c/busses/i2c-stm32f7.c 	for (p = 0; p < STM32F7_PRESC_MAX; p++) {
p                 454 drivers/i2c/busses/i2c-stm32f7.c 			u32 scldel = (l + 1) * (p + 1) * i2cclk;
p                 460 drivers/i2c/busses/i2c-stm32f7.c 				u32 sdadel = (a * (p + 1) + 1) * i2cclk;
p                 464 drivers/i2c/busses/i2c-stm32f7.c 				    (p != p_prev)) {
p                 471 drivers/i2c/busses/i2c-stm32f7.c 					v->presc = p;
p                 474 drivers/i2c/busses/i2c-stm32f7.c 					p_prev = p;
p                 482 drivers/i2c/busses/i2c-stm32f7.c 			if (p_prev == p)
p                  64 drivers/i2c/busses/i2c-taos-evm.c 	char *p;
p                  68 drivers/i2c/busses/i2c-taos-evm.c 	p = taos->buffer;
p                  73 drivers/i2c/busses/i2c-taos-evm.c 		p += sprintf(p, "@%02X", addr);
p                  78 drivers/i2c/busses/i2c-taos-evm.c 			sprintf(p, "$#%02X", command);
p                  80 drivers/i2c/busses/i2c-taos-evm.c 			sprintf(p, "$");
p                  84 drivers/i2c/busses/i2c-taos-evm.c 			sprintf(p, "$%02X#%02X", command, data->byte);
p                  86 drivers/i2c/busses/i2c-taos-evm.c 			sprintf(p, "$%02X", command);
p                  95 drivers/i2c/busses/i2c-taos-evm.c 	for (p = taos->buffer; *p; p++)
p                  96 drivers/i2c/busses/i2c-taos-evm.c 		serio_write(serio, *p);
p                 115 drivers/i2c/busses/i2c-taos-evm.c 	p = taos->buffer + 1;
p                 116 drivers/i2c/busses/i2c-taos-evm.c 	p[3] = '\0';
p                 117 drivers/i2c/busses/i2c-taos-evm.c 	if (!strcmp(p, "NAK"))
p                 121 drivers/i2c/busses/i2c-taos-evm.c 		if (!strcmp(p, "ACK"))
p                 124 drivers/i2c/busses/i2c-taos-evm.c 		if (p[0] == 'x') {
p                 130 drivers/i2c/busses/i2c-taos-evm.c 			if (kstrtou8(p + 1, 16, &data->byte))
p                  41 drivers/i2c/i2c-core-smbus.c static u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count)
p                  46 drivers/i2c/i2c-core-smbus.c 		crc = crc8((crc ^ p[i]) << 8);
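The i2c-core-smbus.c entries above accumulate the SMBus Packet Error Code over a byte buffer. SMBus PEC is CRC-8 with polynomial x^8 + x^2 + x + 1 (0x07) and initial value 0; the sketch below is a self-contained byte-at-a-time equivalent of that accumulation (the kernel helper referenced above works through a 16-bit intermediate instead).

#include <stdint.h>
#include <stddef.h>

/* SMBus PEC: CRC-8, polynomial 0x07, init 0, accumulated over p[0..count). */
static uint8_t smbus_pec(uint8_t crc, const uint8_t *p, size_t count)
{
	size_t i;
	int bit;

	for (i = 0; i < count; i++) {
		crc ^= p[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x80) ? (uint8_t)((crc << 1) ^ 0x07)
					   : (uint8_t)(crc << 1);
	}
	return crc;
}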
p                 524 drivers/i2c/i2c-dev.c 		struct i2c_msg32 *p;
p                 541 drivers/i2c/i2c-dev.c 		p = compat_ptr(rdwr_arg.msgs);
p                 544 drivers/i2c/i2c-dev.c 			if (copy_from_user(&umsg, p + i, sizeof(umsg))) {
p                  57 drivers/i2c/muxes/i2c-demux-pinctrl.c 	struct pinctrl *p;
p                  75 drivers/i2c/muxes/i2c-demux-pinctrl.c 	p = devm_pinctrl_get(adap->dev.parent);
p                  76 drivers/i2c/muxes/i2c-demux-pinctrl.c 	if (IS_ERR(p)) {
p                  77 drivers/i2c/muxes/i2c-demux-pinctrl.c 		ret = PTR_ERR(p);
p                  83 drivers/i2c/muxes/i2c-demux-pinctrl.c 		struct pinctrl_state *s = pinctrl_lookup_state(p, priv->bus_name);
p                  89 drivers/i2c/muxes/i2c-demux-pinctrl.c 		ret = pinctrl_select_state(p, s);
p                 250 drivers/i3c/master/dw-i3c-master.c static u8 even_parity(u8 p)
p                 252 drivers/i3c/master/dw-i3c-master.c 	p ^= p >> 4;
p                 253 drivers/i3c/master/dw-i3c-master.c 	p &= 0xf;
p                 255 drivers/i3c/master/dw-i3c-master.c 	return (0x9669 >> p) & 1;
p                 767 drivers/i3c/master/dw-i3c-master.c 	u8 p, last_addr = 0;
p                 782 drivers/i3c/master/dw-i3c-master.c 		p = even_parity(ret);
p                 784 drivers/i3c/master/dw-i3c-master.c 		ret |= (p << 7);
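The dw-i3c-master.c entries above compute the parity bit that is OR-ed into bit 7 of an assigned address: the byte is folded down to 4 bits and the parity looked up in the constant 0x9669, whose bit n is set exactly when n has an even number of one bits. A self-contained restatement with a tiny check; the example address is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Returns 1 when p has an even number of set bits, 0 otherwise. */
static uint8_t even_parity(uint8_t p)
{
	p ^= p >> 4;		/* fold the high nibble onto the low nibble */
	p &= 0xf;		/* 4-bit index into the lookup constant */
	return (0x9669 >> p) & 1;
}

int main(void)
{
	uint8_t addr = 0x09;	/* 0b00001001: two bits set -> even parity */

	printf("parity(0x%02x) = %u\n", addr, even_parity(addr));
	printf("address with parity in bit 7: 0x%02x\n",
	       (unsigned int)(addr | (even_parity(addr) << 7)));
	return 0;
}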
p                 184 drivers/ide/ali14xx.c 	const RegInitializer *p;
p                 190 drivers/ide/ali14xx.c 	for (p = initData; p->reg != 0; ++p)
p                 191 drivers/ide/ali14xx.c 		outReg(p->data, p->reg);
p                 111 drivers/ide/alim15x3.c 		struct ide_timing p;
p                 113 drivers/ide/alim15x3.c 		ide_timing_compute(pair, pair->pio_mode, &p, T, 1);
p                 114 drivers/ide/alim15x3.c 		ide_timing_merge(&p, &t, &t,
p                 117 drivers/ide/alim15x3.c 			ide_timing_compute(pair, pair->dma_mode, &p, T, 1);
p                 118 drivers/ide/alim15x3.c 			ide_timing_merge(&p, &t, &t,
p                 178 drivers/ide/alim15x3.c 			struct ide_timing p;
p                 180 drivers/ide/alim15x3.c 			ide_timing_compute(pair, pair->pio_mode, &p, T, 1);
p                 181 drivers/ide/alim15x3.c 			ide_timing_merge(&p, &t, &t,
p                 185 drivers/ide/alim15x3.c 						&p, T, 1);
p                 186 drivers/ide/alim15x3.c 				ide_timing_merge(&p, &t, &t,
p                  82 drivers/ide/amd74xx.c 	struct ide_timing t, p;
p                  93 drivers/ide/amd74xx.c 		ide_timing_compute(peer, peer->pio_mode, &p, T, UT);
p                  94 drivers/ide/amd74xx.c 		ide_timing_merge(&p, &t, &t, IDE_TIMING_8BIT);
p                  46 drivers/ide/ide-generic.c 	struct pci_dev *p = NULL;
p                  49 drivers/ide/ide-generic.c 	for_each_pci_dev(p) {
p                  50 drivers/ide/ide-generic.c 		if (pci_resource_start(p, 0) == 0x1f0)
p                  52 drivers/ide/ide-generic.c 		if (pci_resource_start(p, 2) == 0x170)
p                  56 drivers/ide/ide-generic.c 		if (p->vendor == PCI_VENDOR_ID_CYRIX &&
p                  57 drivers/ide/ide-generic.c 		    (p->device == PCI_DEVICE_ID_CYRIX_5510 ||
p                  58 drivers/ide/ide-generic.c 		     p->device == PCI_DEVICE_ID_CYRIX_5520))
p                  62 drivers/ide/ide-generic.c 		if (p->vendor == PCI_VENDOR_ID_INTEL &&
p                  63 drivers/ide/ide-generic.c 		    p->device == PCI_DEVICE_ID_INTEL_82371MX) {
p                  64 drivers/ide/ide-generic.c 			pci_read_config_word(p, 0x6C, &val);
p                 196 drivers/ide/ide-ioctls.c 	void __user *p = (void __user *)arg;
p                 201 drivers/ide/ide-ioctls.c 	if (copy_from_user(args, p, 7))
p                 215 drivers/ide/ide-ioctls.c 	if (copy_to_user(p, args, 7))
p                  72 drivers/ide/ide-iops.c 	u8 *p, *end = &s[bytecount & ~1]; /* bytecount must be even */
p                  76 drivers/ide/ide-iops.c 		for (p = s ; p != end ; p += 2)
p                  77 drivers/ide/ide-iops.c 			be16_to_cpus((u16 *) p);
p                  81 drivers/ide/ide-iops.c 	p = s;
p                  87 drivers/ide/ide-iops.c 			*p++ = *(s-1);
p                  90 drivers/ide/ide-iops.c 	while (p != end)
p                  91 drivers/ide/ide-iops.c 		*p++ = '\0';
p                  89 drivers/ide/ide-pio-blacklist.c 	struct ide_pio_info *p;
p                  91 drivers/ide/ide-pio-blacklist.c 	for (p = ide_pio_blacklist; p->name != NULL; p++) {
p                  92 drivers/ide/ide-pio-blacklist.c 		if (strncmp(p->name, model, strlen(p->name)) == 0)
p                  93 drivers/ide/ide-pio-blacklist.c 			return p->pio;
p                 934 drivers/ide/ide-probe.c 	struct gendisk *p = data;
p                 936 drivers/ide/ide-probe.c 	return &disk_to_dev(p)->kobj;
p                 941 drivers/ide/ide-probe.c 	struct gendisk *p = data;
p                 943 drivers/ide/ide-probe.c 	if (!get_disk_and_module(p))
p                 324 drivers/ide/ide-proc.c 		char *p = s;
p                 328 drivers/ide/ide-proc.c 			char *q = p;
p                 330 drivers/ide/ide-proc.c 			while (n > 0 && *p != ':') {
p                 332 drivers/ide/ide-proc.c 				p++;
p                 334 drivers/ide/ide-proc.c 			if (*p != ':')
p                 336 drivers/ide/ide-proc.c 			if (p - q > MAX_LEN)
p                 338 drivers/ide/ide-proc.c 			memcpy(name, q, p - q);
p                 339 drivers/ide/ide-proc.c 			name[p - q] = 0;
p                 343 drivers/ide/ide-proc.c 				p++;
p                 347 drivers/ide/ide-proc.c 			val = simple_strtoul(p, &q, 10);
p                 348 drivers/ide/ide-proc.c 			n -= q - p;
p                 349 drivers/ide/ide-proc.c 			p = q;
p                 350 drivers/ide/ide-proc.c 			if (n > 0 && !isspace(*p))
p                 352 drivers/ide/ide-proc.c 			while (n > 0 && isspace(*p)) {
p                 354 drivers/ide/ide-proc.c 				++p;
p                 474 drivers/ide/ide-proc.c static void ide_add_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p, void *data)
p                 478 drivers/ide/ide-proc.c 	if (!dir || !p)
p                 480 drivers/ide/ide-proc.c 	while (p->name != NULL) {
p                 481 drivers/ide/ide-proc.c 		ent = proc_create_single_data(p->name, p->mode, dir, p->show, data);
p                 483 drivers/ide/ide-proc.c 		p++;
p                 487 drivers/ide/ide-proc.c static void ide_remove_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p)
p                 489 drivers/ide/ide-proc.c 	if (!dir || !p)
p                 491 drivers/ide/ide-proc.c 	while (p->name != NULL) {
p                 492 drivers/ide/ide-proc.c 		remove_proc_entry(p->name, dir);
p                 493 drivers/ide/ide-proc.c 		p++;
p                 607 drivers/ide/ide-proc.c static int ide_drivers_show(struct seq_file *s, void *p)
p                 135 drivers/ide/ide-timings.c 	struct ide_timing *s, p;
p                 154 drivers/ide/ide-timings.c 		memset(&p, 0, sizeof(p));
p                 158 drivers/ide/ide-timings.c 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
p                 161 drivers/ide/ide-timings.c 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
p                 163 drivers/ide/ide-timings.c 			p.cycle = id[ATA_ID_EIDE_DMA_MIN];
p                 165 drivers/ide/ide-timings.c 		ide_timing_merge(&p, t, t, IDE_TIMING_CYCLE | IDE_TIMING_CYC8B);
p                 179 drivers/ide/ide-timings.c 		ide_timing_compute(drive, drive->pio_mode, &p, T, UT);
p                 180 drivers/ide/ide-timings.c 		ide_timing_merge(&p, t, t, IDE_TIMING_ALL);
p                 207 drivers/ide/ide.c #define param_check_ide_dev_mask(name, p) param_check_uint(name, p)
p                 154 drivers/ide/qd65xx.c 	struct qd65xx_timing_s *p;
p                 164 drivers/ide/qd65xx.c 	for (p = qd65xx_timing ; p->offset != -1 ; p++) {
p                 165 drivers/ide/qd65xx.c 		if (!strncmp(p->model, model+p->offset, 4)) {
p                 167 drivers/ide/qd65xx.c 			*active_time = p->active;
p                 168 drivers/ide/qd65xx.c 			*recovery_time = p->recovery;
p                 181 drivers/ide/via82cxxx.c 	struct ide_timing t, p;
p                 198 drivers/ide/via82cxxx.c 		ide_timing_compute(peer, peer->pio_mode, &p, T, UT);
p                 199 drivers/ide/via82cxxx.c 		ide_timing_merge(&p, &t, &t, IDE_TIMING_8BIT);
p                 526 drivers/iio/accel/adxl372.c static irqreturn_t adxl372_trigger_handler(int irq, void  *p)
p                 528 drivers/iio/accel/adxl372.c 	struct iio_poll_func *pf = p;
p                 661 drivers/iio/accel/bma180.c static irqreturn_t bma180_trigger_handler(int irq, void *p)
p                 663 drivers/iio/accel/bma180.c 	struct iio_poll_func *pf = p;
p                  97 drivers/iio/accel/bma220_spi.c static irqreturn_t bma220_trigger_handler(int irq, void *p)
p                 100 drivers/iio/accel/bma220_spi.c 	struct iio_poll_func *pf = p;
p                1106 drivers/iio/accel/bmc150-accel-core.c static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p)
p                1108 drivers/iio/accel/bmc150-accel-core.c 	struct iio_poll_func *pf = p;
p                1031 drivers/iio/accel/kxcjk-1013.c static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p)
p                1033 drivers/iio/accel/kxcjk-1013.c 	struct iio_poll_func *pf = p;
p                 207 drivers/iio/accel/kxsd9.c static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
p                 209 drivers/iio/accel/kxsd9.c 	const struct iio_poll_func *pf = p;
p                  80 drivers/iio/accel/mma7455_core.c static irqreturn_t mma7455_trigger_handler(int irq, void *p)
p                  82 drivers/iio/accel/mma7455_core.c 	struct iio_poll_func *pf = p;
p                1048 drivers/iio/accel/mma8452.c static irqreturn_t mma8452_interrupt(int irq, void *p)
p                1050 drivers/iio/accel/mma8452.c 	struct iio_dev *indio_dev = p;
p                1089 drivers/iio/accel/mma8452.c static irqreturn_t mma8452_trigger_handler(int irq, void *p)
p                1091 drivers/iio/accel/mma8452.c 	struct iio_poll_func *pf = p;
p                 425 drivers/iio/accel/stk8312.c static irqreturn_t stk8312_trigger_handler(int irq, void *p)
p                 427 drivers/iio/accel/stk8312.c 	struct iio_poll_func *pf = p;
p                 311 drivers/iio/accel/stk8ba50.c static irqreturn_t stk8ba50_trigger_handler(int irq, void *p)
p                 313 drivers/iio/accel/stk8ba50.c 	struct iio_poll_func *pf = p;
p                  82 drivers/iio/adc/ad7266.c static irqreturn_t ad7266_trigger_handler(int irq, void *p)
p                  84 drivers/iio/adc/ad7266.c 	struct iio_poll_func *pf = p;
p                 153 drivers/iio/adc/ad7298.c static irqreturn_t ad7298_trigger_handler(int irq, void *p)
p                 155 drivers/iio/adc/ad7298.c 	struct iio_poll_func *pf = p;
p                  67 drivers/iio/adc/ad7476.c static irqreturn_t ad7476_trigger_handler(int irq, void  *p)
p                  69 drivers/iio/adc/ad7476.c 	struct iio_poll_func *pf = p;
p                 119 drivers/iio/adc/ad7606.c static irqreturn_t ad7606_trigger_handler(int irq, void *p)
p                 121 drivers/iio/adc/ad7606.c 	struct iio_poll_func *pf = p;
p                  67 drivers/iio/adc/ad7766.c static irqreturn_t ad7766_trigger_handler(int irq, void *p)
p                  69 drivers/iio/adc/ad7766.c 	struct iio_poll_func *pf = p;
p                 453 drivers/iio/adc/ad7768-1.c static irqreturn_t ad7768_trigger_handler(int irq, void *p)
p                 455 drivers/iio/adc/ad7768-1.c 	struct iio_poll_func *pf = p;
p                 112 drivers/iio/adc/ad7887.c static irqreturn_t ad7887_trigger_handler(int irq, void *p)
p                 114 drivers/iio/adc/ad7887.c 	struct iio_poll_func *pf = p;
p                 171 drivers/iio/adc/ad7923.c static irqreturn_t ad7923_trigger_handler(int irq, void *p)
p                 173 drivers/iio/adc/ad7923.c 	struct iio_poll_func *pf = p;
p                 176 drivers/iio/adc/ad799x.c static irqreturn_t ad799x_trigger_handler(int irq, void *p)
p                 178 drivers/iio/adc/ad799x.c 	struct iio_poll_func *pf = p;
p                 400 drivers/iio/adc/ad_sigma_delta.c static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
p                 402 drivers/iio/adc/ad_sigma_delta.c 	struct iio_poll_func *pf = p;
p                1099 drivers/iio/adc/at91-sama5d2_adc.c static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
p                1101 drivers/iio/adc/at91-sama5d2_adc.c 	struct iio_poll_func *pf = p;
p                 245 drivers/iio/adc/at91_adc.c static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
p                 247 drivers/iio/adc/at91_adc.c 	struct iio_poll_func *pf = p;
p                 135 drivers/iio/adc/cc10001_adc.c static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
p                 138 drivers/iio/adc/cc10001_adc.c 	struct iio_poll_func *pf = p;
p                  87 drivers/iio/adc/dln2-adc.c 	struct dln2_adc_demux_table *p = dln2->demux_count ?
p                  90 drivers/iio/adc/dln2-adc.c 	if (p && p->from + p->length == in_loc &&
p                  91 drivers/iio/adc/dln2-adc.c 		p->to + p->length == out_loc) {
p                  92 drivers/iio/adc/dln2-adc.c 		p->length += length;
p                  94 drivers/iio/adc/dln2-adc.c 		p = &dln2->demux[dln2->demux_count++];
p                  95 drivers/iio/adc/dln2-adc.c 		p->from = in_loc;
p                  96 drivers/iio/adc/dln2-adc.c 		p->to = out_loc;
p                  97 drivers/iio/adc/dln2-adc.c 		p->length = length;
p                 481 drivers/iio/adc/dln2-adc.c static irqreturn_t dln2_adc_trigger_h(int irq, void *p)
p                 483 drivers/iio/adc/dln2-adc.c 	struct iio_poll_func *pf = p;
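The dln2-adc.c entries above build a demux table by coalescing copy ranges: if a new (in_loc, out_loc, length) range continues the previous entry in both source and destination, the previous entry is simply grown, otherwise a fresh entry is appended. A self-contained sketch of that step; the table layout is simplified, bounds checking is omitted, and the names are illustrative.

#include <stddef.h>

struct demux_entry {
	unsigned int from;	/* source offset */
	unsigned int to;	/* destination offset */
	unsigned int length;	/* bytes to copy */
};

static void demux_add(struct demux_entry *table, size_t *count,
		      unsigned int in_loc, unsigned int out_loc,
		      unsigned int length)
{
	struct demux_entry *p = *count ? &table[*count - 1] : NULL;

	if (p && p->from + p->length == in_loc &&
	    p->to + p->length == out_loc) {
		p->length += length;		/* contiguous: extend the last entry */
	} else {
		p = &table[(*count)++];		/* otherwise open a new entry */
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
	}
}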
p                 368 drivers/iio/adc/hx711.c static irqreturn_t hx711_trigger(int irq, void *p)
p                 370 drivers/iio/adc/hx711.c 	struct iio_poll_func *pf = p;
p                 157 drivers/iio/adc/max1118.c static irqreturn_t max1118_trigger_handler(int irq, void *p)
p                 159 drivers/iio/adc/max1118.c 	struct iio_poll_func *pf = p;
p                1470 drivers/iio/adc/max1363.c static irqreturn_t max1363_trigger_handler(int irq, void *p)
p                1472 drivers/iio/adc/max1363.c 	struct iio_poll_func *pf = p;
p                 412 drivers/iio/adc/mxs-lradc-adc.c static irqreturn_t mxs_lradc_adc_trigger_handler(int irq, void *p)
p                 414 drivers/iio/adc/mxs-lradc-adc.c 	struct iio_poll_func *pf = p;
p                1529 drivers/iio/adc/stm32-adc.c static irqreturn_t stm32_adc_trigger_handler(int irq, void *p)
p                1531 drivers/iio/adc/stm32-adc.c 	struct iio_poll_func *pf = p;
p                1815 drivers/iio/adc/stm32-adc.c 	irqreturn_t (*handler)(int irq, void *p) = NULL;
p                 127 drivers/iio/adc/stm32-dfsdm-adc.c 	const struct stm32_dfsdm_str2field *p = list;
p                 129 drivers/iio/adc/stm32-dfsdm-adc.c 	for (p = list; p && p->name; p++)
p                 130 drivers/iio/adc/stm32-dfsdm-adc.c 		if (!strcmp(p->name, str))
p                 131 drivers/iio/adc/stm32-dfsdm-adc.c 			return p->val;
p                 198 drivers/iio/adc/stm32-dfsdm-adc.c 	unsigned int p = fl->ford;	/* filter order (ford) */
p                 213 drivers/iio/adc/stm32-dfsdm-adc.c 		p = 2;
p                 227 drivers/iio/adc/stm32-dfsdm-adc.c 				d = fosr * (iosr - 1 + p) + p;
p                 242 drivers/iio/adc/stm32-dfsdm-adc.c 			for (i = p - 1; i > 0; i--) {
p                 126 drivers/iio/adc/ti-adc081c.c static irqreturn_t adc081c_trigger_handler(int irq, void *p)
p                 128 drivers/iio/adc/ti-adc081c.c 	struct iio_poll_func *pf = p;
p                 197 drivers/iio/adc/ti-adc0832.c static irqreturn_t adc0832_trigger_handler(int irq, void *p)
p                 199 drivers/iio/adc/ti-adc0832.c 	struct iio_poll_func *pf = p;
p                  71 drivers/iio/adc/ti-adc084s021.c 	u16 *p = data;
p                  79 drivers/iio/adc/ti-adc084s021.c 		*(p + i) = adc->rx_buf[i + 1];
p                 139 drivers/iio/adc/ti-adc108s102.c static irqreturn_t adc108s102_trigger_handler(int irq, void *p)
p                 141 drivers/iio/adc/ti-adc108s102.c 	struct iio_poll_func *pf = p;
p                 327 drivers/iio/adc/ti-adc12138.c static irqreturn_t adc12138_trigger_handler(int irq, void *p)
p                 329 drivers/iio/adc/ti-adc12138.c 	struct iio_poll_func *pf = p;
p                 383 drivers/iio/adc/ti-adc12138.c static irqreturn_t adc12138_eoc_handler(int irq, void *p)
p                 385 drivers/iio/adc/ti-adc12138.c 	struct iio_dev *indio_dev = p;
p                 376 drivers/iio/adc/ti-ads1015.c static irqreturn_t ads1015_trigger_handler(int irq, void *p)
p                 378 drivers/iio/adc/ti-ads1015.c 	struct iio_poll_func *pf = p;
p                 268 drivers/iio/adc/ti-ads124s08.c static irqreturn_t ads124s_trigger_handler(int irq, void *p)
p                 270 drivers/iio/adc/ti-ads124s08.c 	struct iio_poll_func *pf = p;
p                 304 drivers/iio/adc/ti-ads7950.c static irqreturn_t ti_ads7950_trigger_handler(int irq, void *p)
p                 306 drivers/iio/adc/ti-ads7950.c 	struct iio_poll_func *pf = p;
p                 382 drivers/iio/adc/ti-ads8688.c static irqreturn_t ads8688_trigger_handler(int irq, void *p)
p                 384 drivers/iio/adc/ti-ads8688.c 	struct iio_poll_func *pf = p;
p                  90 drivers/iio/adc/ti-tlc4541.c static irqreturn_t tlc4541_trigger_handler(int irq, void *p)
p                  92 drivers/iio/adc/ti-tlc4541.c 	struct iio_poll_func *pf = p;
p                 381 drivers/iio/adc/ti_am335x_adc.c 	irqreturn_t (*pollfunc_bh)(int irq, void *p),
p                 382 drivers/iio/adc/ti_am335x_adc.c 	irqreturn_t (*pollfunc_th)(int irq, void *p),
p                 628 drivers/iio/adc/xilinx-xadc-core.c static irqreturn_t xadc_trigger_handler(int irq, void *p)
p                 630 drivers/iio/adc/xilinx-xadc-core.c 	struct iio_poll_func *pf = p;
p                  42 drivers/iio/buffer/industrialio-triggered-buffer.c 	irqreturn_t (*h)(int irq, void *p),
p                  43 drivers/iio/buffer/industrialio-triggered-buffer.c 	irqreturn_t (*thread)(int irq, void *p),
p                 105 drivers/iio/buffer/industrialio-triggered-buffer.c 				    irqreturn_t (*h)(int irq, void *p),
p                 106 drivers/iio/buffer/industrialio-triggered-buffer.c 				    irqreturn_t (*thread)(int irq, void *p),
p                 303 drivers/iio/chemical/ccs811.c static irqreturn_t ccs811_trigger_handler(int irq, void *p)
p                 305 drivers/iio/chemical/ccs811.c 	struct iio_poll_func *pf = p;
p                 106 drivers/iio/chemical/pms7003.c static irqreturn_t pms7003_trigger_handler(int irq, void *p)
p                 108 drivers/iio/chemical/pms7003.c 	struct iio_poll_func *pf = p;
p                 303 drivers/iio/chemical/sgp30.c static int sgp_iaq_threadfn(void *p)
p                 305 drivers/iio/chemical/sgp30.c 	struct sgp_data *data = (struct sgp_data *)p;
p                 227 drivers/iio/chemical/sps30.c static irqreturn_t sps30_trigger_handler(int irq, void *p)
p                 229 drivers/iio/chemical/sps30.c 	struct iio_poll_func *pf = p;
p                 447 drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c irqreturn_t cros_ec_sensors_capture(int irq, void *p)
p                 449 drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c 	struct iio_poll_func *pf = p;
p                  51 drivers/iio/common/st_sensors/st_sensors_buffer.c irqreturn_t st_sensors_trigger_handler(int irq, void *p)
p                  54 drivers/iio/common/st_sensors/st_sensors_buffer.c 	struct iio_poll_func *pf = p;
p                  24 drivers/iio/common/st_sensors/st_sensors_core.c static inline u32 st_sensors_get_unaligned_le24(const u8 *p)
p                  26 drivers/iio/common/st_sensors/st_sensors_core.c 	return (s32)((p[0] | p[1] << 8 | p[2] << 16) << 8) >> 8;
p                  60 drivers/iio/common/st_sensors/st_sensors_trigger.c static irqreturn_t st_sensors_irq_handler(int irq, void *p)
p                  62 drivers/iio/common/st_sensors/st_sensors_trigger.c 	struct iio_trigger *trig = p;
p                  76 drivers/iio/common/st_sensors/st_sensors_trigger.c static irqreturn_t st_sensors_irq_thread(int irq, void *p)
p                  78 drivers/iio/common/st_sensors/st_sensors_trigger.c 	struct iio_trigger *trig = p;
p                  91 drivers/iio/common/st_sensors/st_sensors_trigger.c 		iio_trigger_poll_chained(p);
p                 115 drivers/iio/common/st_sensors/st_sensors_trigger.c 		iio_trigger_poll_chained(p);
p                  44 drivers/iio/dummy/iio_simple_dummy_buffer.c static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
p                  46 drivers/iio/dummy/iio_simple_dummy_buffer.c 	struct iio_poll_func *pf = p;
p                 874 drivers/iio/gyro/bmg160_core.c static irqreturn_t bmg160_trigger_handler(int irq, void *p)
p                 876 drivers/iio/gyro/bmg160_core.c 	struct iio_poll_func *pf = p;
p                 664 drivers/iio/gyro/fxas21002c_core.c static irqreturn_t fxas21002c_trigger_handler(int irq, void *p)
p                 666 drivers/iio/gyro/fxas21002c_core.c 	struct iio_poll_func *pf = p;
p                  44 drivers/iio/gyro/itg3200_buffer.c static irqreturn_t itg3200_trigger_handler(int irq, void *p)
p                  46 drivers/iio/gyro/itg3200_buffer.c 	struct iio_poll_func *pf = p;
p                 458 drivers/iio/gyro/mpu3050-core.c static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
p                 460 drivers/iio/gyro/mpu3050-core.c 	const struct iio_poll_func *pf = p;
p                 897 drivers/iio/gyro/mpu3050-core.c static irqreturn_t mpu3050_irq_handler(int irq, void *p)
p                 899 drivers/iio/gyro/mpu3050-core.c 	struct iio_trigger *trig = p;
p                 912 drivers/iio/gyro/mpu3050-core.c static irqreturn_t mpu3050_irq_thread(int irq, void *p)
p                 914 drivers/iio/gyro/mpu3050-core.c 	struct iio_trigger *trig = p;
p                 929 drivers/iio/gyro/mpu3050-core.c 	iio_trigger_poll_chained(p);
p                 154 drivers/iio/humidity/am2315.c static irqreturn_t am2315_trigger_handler(int irq, void *p)
p                 159 drivers/iio/humidity/am2315.c 	struct iio_poll_func *pf = p;
p                 314 drivers/iio/humidity/hdc100x.c static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
p                 316 drivers/iio/humidity/hdc100x.c 	struct iio_poll_func *pf = p;
p                 163 drivers/iio/humidity/hts221_buffer.c static irqreturn_t hts221_buffer_handler_thread(int irq, void *p)
p                 166 drivers/iio/humidity/hts221_buffer.c 	struct iio_poll_func *pf = p;
p                 645 drivers/iio/imu/adis16400.c static irqreturn_t adis16400_trigger_handler(int irq, void *p)
p                 647 drivers/iio/imu/adis16400.c 	struct iio_poll_func *pf = p;
p                 121 drivers/iio/imu/adis_buffer.c static irqreturn_t adis_trigger_handler(int irq, void *p)
p                 123 drivers/iio/imu/adis_buffer.c 	struct iio_poll_func *pf = p;
p                 409 drivers/iio/imu/bmi160/bmi160_core.c static irqreturn_t bmi160_trigger_handler(int irq, void *p)
p                 411 drivers/iio/imu/bmi160/bmi160_core.c 	struct iio_poll_func *pf = p;
p                 344 drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h irqreturn_t inv_mpu6050_read_fifo(int irq, void *p);
p                 161 drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
p                 163 drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c 	struct iio_poll_func *pf = p;
p                1194 drivers/iio/imu/kmx61.c static irqreturn_t kmx61_trigger_handler(int irq, void *p)
p                1196 drivers/iio/imu/kmx61.c 	struct iio_poll_func *pf = p;
p                 794 drivers/iio/industrialio-buffer.c 	struct iio_demux_table *p, *q;
p                 795 drivers/iio/industrialio-buffer.c 	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
p                 796 drivers/iio/industrialio-buffer.c 		list_del(&p->l);
p                 797 drivers/iio/industrialio-buffer.c 		kfree(p);
p                 802 drivers/iio/industrialio-buffer.c 	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
p                 806 drivers/iio/industrialio-buffer.c 	if (*p && (*p)->from + (*p)->length == in_loc &&
p                 807 drivers/iio/industrialio-buffer.c 		(*p)->to + (*p)->length == out_loc) {
p                 808 drivers/iio/industrialio-buffer.c 		(*p)->length += length;
p                 810 drivers/iio/industrialio-buffer.c 		*p = kmalloc(sizeof(**p), GFP_KERNEL);
p                 811 drivers/iio/industrialio-buffer.c 		if (*p == NULL)
p                 813 drivers/iio/industrialio-buffer.c 		(*p)->from = in_loc;
p                 814 drivers/iio/industrialio-buffer.c 		(*p)->to = out_loc;
p                 815 drivers/iio/industrialio-buffer.c 		(*p)->length = length;
p                 816 drivers/iio/industrialio-buffer.c 		list_add_tail(&(*p)->l, &buffer->demux_list);
p                 827 drivers/iio/industrialio-buffer.c 	struct iio_demux_table *p = NULL;
p                 858 drivers/iio/industrialio-buffer.c 		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
p                 869 drivers/iio/industrialio-buffer.c 		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
p                1233 drivers/iio/industrialio-buffer.c 	struct iio_dev_attr *p;
p                1327 drivers/iio/industrialio-buffer.c 	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
p                1328 drivers/iio/industrialio-buffer.c 		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
p                1222 drivers/iio/industrialio-core.c 	struct iio_dev_attr *p, *n;
p                1224 drivers/iio/industrialio-core.c 	list_for_each_entry_safe(p, n, attr_list, l) {
p                1225 drivers/iio/industrialio-core.c 		kfree(p->dev_attr.attr.name);
p                1226 drivers/iio/industrialio-core.c 		list_del(&p->l);
p                1227 drivers/iio/industrialio-core.c 		kfree(p);
p                1324 drivers/iio/industrialio-core.c 	struct iio_dev_attr *p;
p                1375 drivers/iio/industrialio-core.c 	list_for_each_entry(p, &indio_dev->channel_attr_list, l)
p                1376 drivers/iio/industrialio-core.c 		indio_dev->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
p                 472 drivers/iio/industrialio-event.c 	struct iio_dev_attr *p;
p                 516 drivers/iio/industrialio-event.c 	list_for_each_entry(p,
p                 520 drivers/iio/industrialio-event.c 			&p->dev_attr.attr;
p                 315 drivers/iio/industrialio-trigger.c irqreturn_t iio_pollfunc_store_time(int irq, void *p)
p                 317 drivers/iio/industrialio-trigger.c 	struct iio_poll_func *pf = p;
p                 324 drivers/iio/industrialio-trigger.c *iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
p                 325 drivers/iio/industrialio-trigger.c 		    irqreturn_t (*thread)(int irq, void *p),
p                  31 drivers/iio/industrialio-triggered-event.c 			      irqreturn_t (*h)(int irq, void *p),
p                  32 drivers/iio/industrialio-triggered-event.c 			      irqreturn_t (*thread)(int irq, void *p))
p                 113 drivers/iio/light/adjd_s311.c static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
p                 115 drivers/iio/light/adjd_s311.c 	struct iio_poll_func *pf = p;
p                 173 drivers/iio/light/isl29125.c static irqreturn_t isl29125_trigger_handler(int irq, void *p)
p                 175 drivers/iio/light/isl29125.c 	struct iio_poll_func *pf = p;
p                1240 drivers/iio/light/ltr501.c static irqreturn_t ltr501_trigger_handler(int irq, void *p)
p                1242 drivers/iio/light/ltr501.c 	struct iio_poll_func *pf = p;
p                 486 drivers/iio/light/max44000.c static irqreturn_t max44000_trigger_handler(int irq, void *p)
p                 488 drivers/iio/light/max44000.c 	struct iio_poll_func *pf = p;
p                 470 drivers/iio/light/max44009.c static irqreturn_t max44009_threaded_irq_handler(int irq, void *p)
p                 472 drivers/iio/light/max44009.c 	struct iio_dev *indio_dev = p;
p                 433 drivers/iio/light/rpr0521.c static irqreturn_t rpr0521_trigger_consumer_store_time(int irq, void *p)
p                 435 drivers/iio/light/rpr0521.c 	struct iio_poll_func *pf = p;
p                 445 drivers/iio/light/rpr0521.c static irqreturn_t rpr0521_trigger_consumer_handler(int irq, void *p)
p                 447 drivers/iio/light/rpr0521.c 	struct iio_poll_func *pf = p;
p                 235 drivers/iio/light/st_uvis25_core.c static irqreturn_t st_uvis25_buffer_handler_thread(int irq, void *p)
p                 238 drivers/iio/light/st_uvis25_core.c 	struct iio_poll_func *pf = p;
p                 198 drivers/iio/light/tcs3414.c static irqreturn_t tcs3414_trigger_handler(int irq, void *p)
p                 200 drivers/iio/light/tcs3414.c 	struct iio_poll_func *pf = p;
p                 371 drivers/iio/light/tcs3472.c static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
p                 373 drivers/iio/light/tcs3472.c 	struct iio_poll_func *pf = p;
p                 170 drivers/iio/light/tsl2583.c 	struct tsl2583_lux *p;
p                 240 drivers/iio/light/tsl2583.c 	for (p = (struct tsl2583_lux *)chip->als_settings.als_device_lux;
p                 241 drivers/iio/light/tsl2583.c 	     p->ratio != 0 && p->ratio < ratio; p++)
p                 244 drivers/iio/light/tsl2583.c 	if (p->ratio == 0) {
p                 249 drivers/iio/light/tsl2583.c 		ch0lux = ((ch0 * p->ch0) +
p                 252 drivers/iio/light/tsl2583.c 		ch1lux = ((ch1 * p->ch1) +
p                 412 drivers/iio/light/tsl2772.c 	struct tsl2772_lux *p;
p                 461 drivers/iio/light/tsl2772.c 	for (p = (struct tsl2772_lux *)chip->tsl2772_device_lux; p->ch0 != 0;
p                 462 drivers/iio/light/tsl2772.c 	     p++) {
p                 465 drivers/iio/light/tsl2772.c 		lux = ((chip->als_cur_info.als_ch0 * p->ch0) -
p                 466 drivers/iio/light/tsl2772.c 		       (chip->als_cur_info.als_ch1 * p->ch1)) /
p                 100 drivers/iio/light/vcnl4035.c static irqreturn_t vcnl4035_trigger_consumer_handler(int irq, void *p)
p                 102 drivers/iio/light/vcnl4035.c 	struct iio_poll_func *pf = p;
p                 609 drivers/iio/magnetometer/ak8974.c static irqreturn_t ak8974_handle_trigger(int irq, void *p)
p                 611 drivers/iio/magnetometer/ak8974.c 	const struct iio_poll_func *pf = p;
p                 844 drivers/iio/magnetometer/ak8975.c static irqreturn_t ak8975_handle_trigger(int irq, void *p)
p                 846 drivers/iio/magnetometer/ak8975.c 	const struct iio_poll_func *pf = p;
p                 668 drivers/iio/magnetometer/bmc150_magn.c static irqreturn_t bmc150_magn_trigger_handler(int irq, void *p)
p                 670 drivers/iio/magnetometer/bmc150_magn.c 	struct iio_poll_func *pf = p;
p                 434 drivers/iio/magnetometer/hmc5843_core.c static irqreturn_t hmc5843_trigger_handler(int irq, void *p)
p                 436 drivers/iio/magnetometer/hmc5843_core.c 	struct iio_poll_func *pf = p;
p                 385 drivers/iio/magnetometer/mag3110.c static irqreturn_t mag3110_trigger_handler(int irq, void *p)
p                 387 drivers/iio/magnetometer/mag3110.c 	struct iio_poll_func *pf = p;
p                 470 drivers/iio/magnetometer/rm3100-core.c static irqreturn_t rm3100_trigger_handler(int irq, void *p)
p                 472 drivers/iio/magnetometer/rm3100-core.c 	struct iio_poll_func *pf = p;
p                 303 drivers/iio/pressure/bmp280-core.c 	s64 var1, var2, p;
p                 317 drivers/iio/pressure/bmp280-core.c 	p = ((((s64)1048576 - adc_press) << 31) - var2) * 3125;
p                 318 drivers/iio/pressure/bmp280-core.c 	p = div64_s64(p, var1);
p                 319 drivers/iio/pressure/bmp280-core.c 	var1 = (((s64)calib->P9) * (p >> 13) * (p >> 13)) >> 25;
p                 320 drivers/iio/pressure/bmp280-core.c 	var2 = ((s64)(calib->P8) * p) >> 19;
p                 321 drivers/iio/pressure/bmp280-core.c 	p = ((p + var1 + var2) >> 8) + (((s64)calib->P7) << 4);
p                 323 drivers/iio/pressure/bmp280-core.c 	return (u32)p;
p                 868 drivers/iio/pressure/bmp280-core.c 	s32 x1, x2, x3, p;
p                 885 drivers/iio/pressure/bmp280-core.c 		p = (b7 * 2) / b4;
p                 887 drivers/iio/pressure/bmp280-core.c 		p = (b7 / b4) * 2;
p                 889 drivers/iio/pressure/bmp280-core.c 	x1 = (p >> 8) * (p >> 8);
p                 891 drivers/iio/pressure/bmp280-core.c 	x2 = (-7357 * p) >> 16;
p                 893 drivers/iio/pressure/bmp280-core.c 	return p + ((x1 + x2 + 3791) >> 4);
p                 474 drivers/iio/pressure/dps310.c 	s64 p;
p                 500 drivers/iio/pressure/dps310.c 	p = (s64)data->pressure_raw;
p                 506 drivers/iio/pressure/dps310.c 	nums[1] = p * (s64)data->c10;
p                 508 drivers/iio/pressure/dps310.c 	nums[2] = p * p * (s64)data->c20;
p                 510 drivers/iio/pressure/dps310.c 	nums[3] = p * p * p * (s64)data->c30;
p                 514 drivers/iio/pressure/dps310.c 	nums[5] = t * p * (s64)data->c11;
p                 516 drivers/iio/pressure/dps310.c 	nums[6] = t * p * p * (s64)data->c21;
p                 142 drivers/iio/pressure/mpl3115.c static irqreturn_t mpl3115_trigger_handler(int irq, void *p)
p                 144 drivers/iio/pressure/mpl3115.c 	struct iio_poll_func *pf = p;
p                 125 drivers/iio/pressure/ms5611_core.c 	s32 t = *temp, p = *pressure;
p                 153 drivers/iio/pressure/ms5611_core.c 	*pressure = (((p * sens) >> 21) - off) >> 15;
p                 161 drivers/iio/pressure/ms5611_core.c 	s32 t = *temp, p = *pressure;
p                 189 drivers/iio/pressure/ms5611_core.c 	*pressure = (((p * sens) >> 21) - off) >> 15;
p                 210 drivers/iio/pressure/ms5611_core.c static irqreturn_t ms5611_trigger_handler(int irq, void *p)
p                 212 drivers/iio/pressure/ms5611_core.c 	struct iio_poll_func *pf = p;
p                 935 drivers/iio/proximity/isl29501.c static irqreturn_t isl29501_trigger_handler(int irq, void *p)
p                 937 drivers/iio/proximity/isl29501.c 	struct iio_poll_func *pf = p;
p                 111 drivers/iio/proximity/mb1232.c static irqreturn_t mb1232_trigger_handler(int irq, void *p)
p                 113 drivers/iio/proximity/mb1232.c 	struct iio_poll_func *pf = p;
p                 180 drivers/iio/proximity/srf08.c static irqreturn_t srf08_trigger_handler(int irq, void *p)
p                 182 drivers/iio/proximity/srf08.c 	struct iio_poll_func *pf = p;
p                 889 drivers/infiniband/core/cache.c 	unsigned int p;
p                 891 drivers/infiniband/core/cache.c 	rdma_for_each_port (ib_dev, p) {
p                 892 drivers/infiniband/core/cache.c 		release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
p                 893 drivers/infiniband/core/cache.c 		ib_dev->port_data[p].cache.gid = NULL;
p                 920 drivers/infiniband/core/cache.c 	unsigned int p;
p                 922 drivers/infiniband/core/cache.c 	rdma_for_each_port (ib_dev, p)
p                 923 drivers/infiniband/core/cache.c 		cleanup_gid_table_port(ib_dev, p,
p                 924 drivers/infiniband/core/cache.c 				       ib_dev->port_data[p].cache.gid);
p                1002 drivers/infiniband/core/cache.c 	unsigned int p;
p                1007 drivers/infiniband/core/cache.c 	rdma_for_each_port(device, p) {
p                1012 drivers/infiniband/core/cache.c 		table = device->port_data[p].cache.gid;
p                1536 drivers/infiniband/core/cache.c 	unsigned int p;
p                1545 drivers/infiniband/core/cache.c 	rdma_for_each_port (device, p) {
p                1546 drivers/infiniband/core/cache.c 		err = ib_cache_update(device, p, true);
p                1556 drivers/infiniband/core/cache.c 	unsigned int p;
p                1564 drivers/infiniband/core/cache.c 	rdma_for_each_port (device, p)
p                1565 drivers/infiniband/core/cache.c 		kfree(device->port_data[p].cache.pkey);
p                 136 drivers/infiniband/core/cma.c 	const void *p;
p                 140 drivers/infiniband/core/cma.c 		p = ev->param.conn.private_data;
p                 143 drivers/infiniband/core/cma.c 		p = NULL;
p                 145 drivers/infiniband/core/cma.c 	return p;
p                 816 drivers/infiniband/core/cma.c 	u8 p;
p                 827 drivers/infiniband/core/cma.c 		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
p                 828 drivers/infiniband/core/cma.c 			if (!rdma_cap_af_ib(cur_dev->device, p))
p                 831 drivers/infiniband/core/cma.c 			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
p                 834 drivers/infiniband/core/cma.c 			if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
p                 837 drivers/infiniband/core/cma.c 						    p, i, &gid);
p                 842 drivers/infiniband/core/cma.c 					id_priv->id.port_num = p;
p                 851 drivers/infiniband/core/cma.c 					id_priv->id.port_num = p;
p                2974 drivers/infiniband/core/cma.c 	u8 p;
p                2986 drivers/infiniband/core/cma.c 		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
p                2987 drivers/infiniband/core/cma.c 			if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) &&
p                3000 drivers/infiniband/core/cma.c 	p = 1;
p                3003 drivers/infiniband/core/cma.c 	ret = rdma_query_gid(cma_dev->device, p, 0, &gid);
p                3007 drivers/infiniband/core/cma.c 	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
p                3012 drivers/infiniband/core/cma.c 		(rdma_protocol_ib(cma_dev->device, p)) ?
p                3017 drivers/infiniband/core/cma.c 	id_priv->id.port_num = p;
p                 193 drivers/infiniband/core/iwcm.c 	void *p;
p                 195 drivers/infiniband/core/iwcm.c 	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
p                 196 drivers/infiniband/core/iwcm.c 	if (!p)
p                 198 drivers/infiniband/core/iwcm.c 	event->private_data = p;
p                 997 drivers/infiniband/core/nldev.c 	unsigned int p;
p                1009 drivers/infiniband/core/nldev.c 	rdma_for_each_port (device, p) {
p                1031 drivers/infiniband/core/nldev.c 		if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
p                 105 drivers/infiniband/core/sysfs.c 	struct ib_port *p = container_of(kobj, struct ib_port, kobj);
p                 110 drivers/infiniband/core/sysfs.c 	return port_attr->show(p, port_attr, buf);
p                 119 drivers/infiniband/core/sysfs.c 	struct ib_port *p = container_of(kobj, struct ib_port, kobj);
p                 123 drivers/infiniband/core/sysfs.c 	return port_attr->store(p, port_attr, buf, count);
p                 136 drivers/infiniband/core/sysfs.c 	struct ib_port *p = container_of(kobj, struct gid_attr_group,
p                 142 drivers/infiniband/core/sysfs.c 	return port_attr->show(p, port_attr, buf);
p                 149 drivers/infiniband/core/sysfs.c static ssize_t state_show(struct ib_port *p, struct port_attribute *unused,
p                 164 drivers/infiniband/core/sysfs.c 	ret = ib_query_port(p->ibdev, p->port_num, &attr);
p                 173 drivers/infiniband/core/sysfs.c static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
p                 179 drivers/infiniband/core/sysfs.c 	ret = ib_query_port(p->ibdev, p->port_num, &attr);
p                 186 drivers/infiniband/core/sysfs.c static ssize_t lid_mask_count_show(struct ib_port *p,
p                 193 drivers/infiniband/core/sysfs.c 	ret = ib_query_port(p->ibdev, p->port_num, &attr);
p                 200 drivers/infiniband/core/sysfs.c static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
p                 206 drivers/infiniband/core/sysfs.c 	ret = ib_query_port(p->ibdev, p->port_num, &attr);
p                 213 drivers/infiniband/core/sysfs.c static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
p                 219 drivers/infiniband/core/sysfs.c 	ret = ib_query_port(p->ibdev, p->port_num, &attr);
p                 226 drivers/infiniband/core/sysfs.c static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
p                 232 drivers/infiniband/core/sysfs.c 	ret = ib_query_port(p->ibdev, p->port_num, &attr);
p                 239 drivers/infiniband/core/sysfs.c static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
p                 247 drivers/infiniband/core/sysfs.c 	ret = ib_query_port(p->ibdev, p->port_num, &attr);
p                 310 drivers/infiniband/core/sysfs.c static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
p                 317 drivers/infiniband/core/sysfs.c 	ret = ib_query_port(p->ibdev, p->port_num, &attr);
p                 325 drivers/infiniband/core/sysfs.c static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused,
p                 328 drivers/infiniband/core/sysfs.c 	switch (rdma_port_get_link_layer(p->ibdev, p->port_num)) {
p                 380 drivers/infiniband/core/sysfs.c 	struct ib_port *p, struct port_attribute *attr, char *buf,
p                 388 drivers/infiniband/core/sysfs.c 	gid_attr = rdma_get_gid_attr(p->ibdev, p->port_num, tab_attr->index);
p                 397 drivers/infiniband/core/sysfs.c static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr,
p                 405 drivers/infiniband/core/sysfs.c 	gid_attr = rdma_get_gid_attr(p->ibdev, p->port_num, tab_attr->index);
p                 426 drivers/infiniband/core/sysfs.c static ssize_t show_port_gid_attr_ndev(struct ib_port *p,
p                 429 drivers/infiniband/core/sysfs.c 	return _show_port_gid_attr(p, attr, buf, print_ndev);
p                 432 drivers/infiniband/core/sysfs.c static ssize_t show_port_gid_attr_gid_type(struct ib_port *p,
p                 436 drivers/infiniband/core/sysfs.c 	return _show_port_gid_attr(p, attr, buf, print_gid_type);
p                 439 drivers/infiniband/core/sysfs.c static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
p                 447 drivers/infiniband/core/sysfs.c 	ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
p                 518 drivers/infiniband/core/sysfs.c static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
p                 528 drivers/infiniband/core/sysfs.c 	ret = get_perf_mad(p->ibdev, p->port_num, tab_attr->attr_id, &data,
p                 675 drivers/infiniband/core/sysfs.c 	struct ib_port *p = container_of(kobj, struct ib_port, kobj);
p                 679 drivers/infiniband/core/sysfs.c 	if (p->gid_group.attrs) {
p                 680 drivers/infiniband/core/sysfs.c 		for (i = 0; (a = p->gid_group.attrs[i]); ++i)
p                 683 drivers/infiniband/core/sysfs.c 		kfree(p->gid_group.attrs);
p                 686 drivers/infiniband/core/sysfs.c 	if (p->pkey_group.attrs) {
p                 687 drivers/infiniband/core/sysfs.c 		for (i = 0; (a = p->pkey_group.attrs[i]); ++i)
p                 690 drivers/infiniband/core/sysfs.c 		kfree(p->pkey_group.attrs);
p                 693 drivers/infiniband/core/sysfs.c 	kfree(p);
p                 867 drivers/infiniband/core/sysfs.c 		struct ib_port *p = container_of(kobj, struct ib_port, kobj);
p                 869 drivers/infiniband/core/sysfs.c 		stats = p->hw_stats;
p                 902 drivers/infiniband/core/sysfs.c 		struct ib_port *p = container_of(kobj, struct ib_port, kobj);
p                 904 drivers/infiniband/core/sysfs.c 		stats = p->hw_stats;
p                1043 drivers/infiniband/core/sysfs.c 	struct ib_port *p;
p                1052 drivers/infiniband/core/sysfs.c 	p = kzalloc(sizeof *p, GFP_KERNEL);
p                1053 drivers/infiniband/core/sysfs.c 	if (!p)
p                1056 drivers/infiniband/core/sysfs.c 	p->ibdev      = device;
p                1057 drivers/infiniband/core/sysfs.c 	p->port_num   = port_num;
p                1059 drivers/infiniband/core/sysfs.c 	ret = kobject_init_and_add(&p->kobj, &port_type,
p                1063 drivers/infiniband/core/sysfs.c 		kfree(p);
p                1067 drivers/infiniband/core/sysfs.c 	p->gid_attr_group = kzalloc(sizeof(*p->gid_attr_group), GFP_KERNEL);
p                1068 drivers/infiniband/core/sysfs.c 	if (!p->gid_attr_group) {
p                1073 drivers/infiniband/core/sysfs.c 	p->gid_attr_group->port = p;
p                1074 drivers/infiniband/core/sysfs.c 	ret = kobject_init_and_add(&p->gid_attr_group->kobj, &gid_attr_type,
p                1075 drivers/infiniband/core/sysfs.c 				   &p->kobj, "gid_attrs");
p                1077 drivers/infiniband/core/sysfs.c 		kfree(p->gid_attr_group);
p                1082 drivers/infiniband/core/sysfs.c 		p->pma_table = get_counter_table(device, port_num);
p                1083 drivers/infiniband/core/sysfs.c 		ret = sysfs_create_group(&p->kobj, p->pma_table);
p                1088 drivers/infiniband/core/sysfs.c 	p->gid_group.name  = "gids";
p                1089 drivers/infiniband/core/sysfs.c 	p->gid_group.attrs = alloc_group_attrs(show_port_gid, attr.gid_tbl_len);
p                1090 drivers/infiniband/core/sysfs.c 	if (!p->gid_group.attrs) {
p                1095 drivers/infiniband/core/sysfs.c 	ret = sysfs_create_group(&p->kobj, &p->gid_group);
p                1099 drivers/infiniband/core/sysfs.c 	p->gid_attr_group->ndev.name = "ndevs";
p                1100 drivers/infiniband/core/sysfs.c 	p->gid_attr_group->ndev.attrs = alloc_group_attrs(show_port_gid_attr_ndev,
p                1102 drivers/infiniband/core/sysfs.c 	if (!p->gid_attr_group->ndev.attrs) {
p                1107 drivers/infiniband/core/sysfs.c 	ret = sysfs_create_group(&p->gid_attr_group->kobj,
p                1108 drivers/infiniband/core/sysfs.c 				 &p->gid_attr_group->ndev);
p                1112 drivers/infiniband/core/sysfs.c 	p->gid_attr_group->type.name = "types";
p                1113 drivers/infiniband/core/sysfs.c 	p->gid_attr_group->type.attrs = alloc_group_attrs(show_port_gid_attr_gid_type,
p                1115 drivers/infiniband/core/sysfs.c 	if (!p->gid_attr_group->type.attrs) {
p                1120 drivers/infiniband/core/sysfs.c 	ret = sysfs_create_group(&p->gid_attr_group->kobj,
p                1121 drivers/infiniband/core/sysfs.c 				 &p->gid_attr_group->type);
p                1125 drivers/infiniband/core/sysfs.c 	p->pkey_group.name  = "pkeys";
p                1126 drivers/infiniband/core/sysfs.c 	p->pkey_group.attrs = alloc_group_attrs(show_port_pkey,
p                1128 drivers/infiniband/core/sysfs.c 	if (!p->pkey_group.attrs) {
p                1133 drivers/infiniband/core/sysfs.c 	ret = sysfs_create_group(&p->kobj, &p->pkey_group);
p                1138 drivers/infiniband/core/sysfs.c 		ret = device->ops.init_port(device, port_num, &p->kobj);
p                1149 drivers/infiniband/core/sysfs.c 		setup_hw_stats(device, p, port_num);
p                1151 drivers/infiniband/core/sysfs.c 	list_add_tail(&p->kobj.entry, &coredev->port_list);
p                1153 drivers/infiniband/core/sysfs.c 	kobject_uevent(&p->kobj, KOBJ_ADD);
p                1157 drivers/infiniband/core/sysfs.c 	sysfs_remove_group(&p->kobj, &p->pkey_group);
p                1161 drivers/infiniband/core/sysfs.c 		kfree(p->pkey_group.attrs[i]);
p                1163 drivers/infiniband/core/sysfs.c 	kfree(p->pkey_group.attrs);
p                1164 drivers/infiniband/core/sysfs.c 	p->pkey_group.attrs = NULL;
p                1167 drivers/infiniband/core/sysfs.c 	sysfs_remove_group(&p->gid_attr_group->kobj,
p                1168 drivers/infiniband/core/sysfs.c 			   &p->gid_attr_group->type);
p                1172 drivers/infiniband/core/sysfs.c 		kfree(p->gid_attr_group->type.attrs[i]);
p                1174 drivers/infiniband/core/sysfs.c 	kfree(p->gid_attr_group->type.attrs);
p                1175 drivers/infiniband/core/sysfs.c 	p->gid_attr_group->type.attrs = NULL;
p                1178 drivers/infiniband/core/sysfs.c 	sysfs_remove_group(&p->gid_attr_group->kobj,
p                1179 drivers/infiniband/core/sysfs.c 			   &p->gid_attr_group->ndev);
p                1183 drivers/infiniband/core/sysfs.c 		kfree(p->gid_attr_group->ndev.attrs[i]);
p                1185 drivers/infiniband/core/sysfs.c 	kfree(p->gid_attr_group->ndev.attrs);
p                1186 drivers/infiniband/core/sysfs.c 	p->gid_attr_group->ndev.attrs = NULL;
p                1189 drivers/infiniband/core/sysfs.c 	sysfs_remove_group(&p->kobj, &p->gid_group);
p                1193 drivers/infiniband/core/sysfs.c 		kfree(p->gid_group.attrs[i]);
p                1195 drivers/infiniband/core/sysfs.c 	kfree(p->gid_group.attrs);
p                1196 drivers/infiniband/core/sysfs.c 	p->gid_group.attrs = NULL;
p                1199 drivers/infiniband/core/sysfs.c 	if (p->pma_table)
p                1200 drivers/infiniband/core/sysfs.c 		sysfs_remove_group(&p->kobj, p->pma_table);
p                1203 drivers/infiniband/core/sysfs.c 	kobject_put(&p->gid_attr_group->kobj);
p                1206 drivers/infiniband/core/sysfs.c 	kobject_put(&p->kobj);
p                1310 drivers/infiniband/core/sysfs.c 	struct kobject *p, *t;
p                1312 drivers/infiniband/core/sysfs.c 	list_for_each_entry_safe(p, t, &coredev->port_list, entry) {
p                1313 drivers/infiniband/core/sysfs.c 		struct ib_port *port = container_of(p, struct ib_port, kobj);
p                1315 drivers/infiniband/core/sysfs.c 		list_del(&p->entry);
p                1323 drivers/infiniband/core/sysfs.c 			sysfs_remove_group(p, port->pma_table);
p                1324 drivers/infiniband/core/sysfs.c 		sysfs_remove_group(p, &port->pkey_group);
p                1325 drivers/infiniband/core/sysfs.c 		sysfs_remove_group(p, &port->gid_group);
p                1331 drivers/infiniband/core/sysfs.c 		kobject_put(p);
p                1398 drivers/infiniband/core/sysfs.c 	struct kobject *p, *t;
p                1401 drivers/infiniband/core/sysfs.c 	list_for_each_entry_safe(p, t, &device->coredev.port_list, entry) {
p                1402 drivers/infiniband/core/sysfs.c 		struct ib_port *port = container_of(p, struct ib_port, kobj);
p                 597 drivers/infiniband/core/umem_odp.c 	phys_addr_t p = 0;
p                 663 drivers/infiniband/core/umem_odp.c 				p += PAGE_SIZE;
p                 664 drivers/infiniband/core/umem_odp.c 				if (page_to_phys(local_page_list[j]) != p) {
p                 683 drivers/infiniband/core/umem_odp.c 			p = page_to_phys(local_page_list[j]);
p                 482 drivers/infiniband/core/uverbs_cmd.c 	struct rb_node **p = &dev->xrcd_tree.rb_node;
p                 492 drivers/infiniband/core/uverbs_cmd.c 	while (*p) {
p                 493 drivers/infiniband/core/uverbs_cmd.c 		parent = *p;
p                 497 drivers/infiniband/core/uverbs_cmd.c 			p = &(*p)->rb_left;
p                 499 drivers/infiniband/core/uverbs_cmd.c 			p = &(*p)->rb_right;
p                 506 drivers/infiniband/core/uverbs_cmd.c 	rb_link_node(&entry->node, parent, p);
p                 516 drivers/infiniband/core/uverbs_cmd.c 	struct rb_node *p = dev->xrcd_tree.rb_node;
p                 518 drivers/infiniband/core/uverbs_cmd.c 	while (p) {
p                 519 drivers/infiniband/core/uverbs_cmd.c 		entry = rb_entry(p, struct xrcd_table_entry, node);
p                 522 drivers/infiniband/core/uverbs_cmd.c 			p = p->rb_left;
p                 524 drivers/infiniband/core/uverbs_cmd.c 			p = p->rb_right;
p                 294 drivers/infiniband/core/uverbs_ioctl.c 			void *p;
p                 296 drivers/infiniband/core/uverbs_ioctl.c 			p = uverbs_alloc(&pbundle->bundle, uattr->len);
p                 297 drivers/infiniband/core/uverbs_ioctl.c 			if (IS_ERR(p))
p                 298 drivers/infiniband/core/uverbs_ioctl.c 				return PTR_ERR(p);
p                 300 drivers/infiniband/core/uverbs_ioctl.c 			e->ptr_attr.ptr = p;
p                 302 drivers/infiniband/core/uverbs_ioctl.c 			if (copy_from_user(p, u64_to_user_ptr(uattr->data),
p                 180 drivers/infiniband/hw/bnxt_re/main.c static void bnxt_re_stop(void *p)
p                 184 drivers/infiniband/hw/bnxt_re/main.c static void bnxt_re_start(void *p)
p                 188 drivers/infiniband/hw/bnxt_re/main.c static void bnxt_re_sriov_config(void *p, int num_vfs)
p                 190 drivers/infiniband/hw/bnxt_re/main.c 	struct bnxt_re_dev *rdev = p;
p                 201 drivers/infiniband/hw/bnxt_re/main.c static void bnxt_re_shutdown(void *p)
p                 203 drivers/infiniband/hw/bnxt_re/main.c 	struct bnxt_re_dev *rdev = p;
p                 154 drivers/infiniband/hw/cxgb3/iwch_qp.c 	__be64 *p;
p                 169 drivers/infiniband/hw/cxgb3/iwch_qp.c 	p = &wqe->fastreg.pbl_addrs[0];
p                 170 drivers/infiniband/hw/cxgb3/iwch_qp.c 	for (i = 0; i < mhp->npages; i++, p++) {
p                 182 drivers/infiniband/hw/cxgb3/iwch_qp.c 			p = &wqe->pbl_frag.pbl_addrs[0];
p                 184 drivers/infiniband/hw/cxgb3/iwch_qp.c 		*p = cpu_to_be64((u64)mhp->pages[i]);
p                 422 drivers/infiniband/hw/cxgb4/device.c 	void *p;
p                 435 drivers/infiniband/hw/cxgb4/device.c 	xa_for_each(&stagd->devp->mrs, index, p)
p                 446 drivers/infiniband/hw/cxgb4/device.c 	xa_for_each(&stagd->devp->mrs, index, p)
p                  64 drivers/infiniband/hw/cxgb4/ev.c 	__be64 *p = (void *)err_cqe;
p                  74 drivers/infiniband/hw/cxgb4/ev.c 		 be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
p                  75 drivers/infiniband/hw/cxgb4/ev.c 		 be64_to_cpu(p[3]), be64_to_cpu(p[4]), be64_to_cpu(p[5]),
p                  76 drivers/infiniband/hw/cxgb4/ev.c 		 be64_to_cpu(p[6]), be64_to_cpu(p[7]));
p                 791 drivers/infiniband/hw/cxgb4/qp.c 	__be64 *p = (__be64 *)fr->pbl;
p                 813 drivers/infiniband/hw/cxgb4/qp.c 	p[0] = cpu_to_be64((u64)mhp->mpl[0]);
p                 814 drivers/infiniband/hw/cxgb4/qp.c 	p[1] = cpu_to_be64((u64)mhp->mpl[1]);
p                 824 drivers/infiniband/hw/cxgb4/qp.c 	__be64 *p;
p                 863 drivers/infiniband/hw/cxgb4/qp.c 		p = (__be64 *)(imdp + 1);
p                 866 drivers/infiniband/hw/cxgb4/qp.c 			*p = cpu_to_be64((u64)mhp->mpl[i]);
p                 867 drivers/infiniband/hw/cxgb4/qp.c 			rem -= sizeof(*p);
p                 868 drivers/infiniband/hw/cxgb4/qp.c 			if (++p == (__be64 *)&sq->queue[sq->size])
p                 869 drivers/infiniband/hw/cxgb4/qp.c 				p = (__be64 *)sq->queue;
p                 872 drivers/infiniband/hw/cxgb4/qp.c 			*p = 0;
p                 873 drivers/infiniband/hw/cxgb4/qp.c 			rem -= sizeof(*p);
p                 874 drivers/infiniband/hw/cxgb4/qp.c 			if (++p == (__be64 *)&sq->queue[sq->size])
p                 875 drivers/infiniband/hw/cxgb4/qp.c 				p = (__be64 *)sq->queue;
p                5262 drivers/infiniband/hw/hfi1/chip.c 	char *p = *curp;
p                5268 drivers/infiniband/hw/hfi1/chip.c 	if (p != buf) {
p                5273 drivers/infiniband/hw/hfi1/chip.c 		*p++ = ',';
p                5283 drivers/infiniband/hw/hfi1/chip.c 		*p++ = c;
p                5289 drivers/infiniband/hw/hfi1/chip.c 	*curp = p;
p                5303 drivers/infiniband/hw/hfi1/chip.c 	char *p = buf;
p                5315 drivers/infiniband/hw/hfi1/chip.c 			no_room = append_str(buf, &p, &len, table[i].str);
p                5325 drivers/infiniband/hw/hfi1/chip.c 		no_room = append_str(buf, &p, &len, extra);
p                5332 drivers/infiniband/hw/hfi1/chip.c 			--p;
p                5333 drivers/infiniband/hw/hfi1/chip.c 		*p++ = '*';
p                5337 drivers/infiniband/hw/hfi1/chip.c 	*p = 0;
p                12482 drivers/infiniband/hw/hfi1/chip.c 	char *p;
p                12558 drivers/infiniband/hw/hfi1/chip.c 	for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
p                12566 drivers/infiniband/hw/hfi1/chip.c 				memcpy(p, name, strlen(name));
p                12567 drivers/infiniband/hw/hfi1/chip.c 				p += strlen(name);
p                12571 drivers/infiniband/hw/hfi1/chip.c 					memcpy(p, bit_type_32, bit_type_32_sz);
p                12572 drivers/infiniband/hw/hfi1/chip.c 					p += bit_type_32_sz;
p                12575 drivers/infiniband/hw/hfi1/chip.c 				*p++ = '\n';
p                12581 drivers/infiniband/hw/hfi1/chip.c 				memcpy(p, name, strlen(name));
p                12582 drivers/infiniband/hw/hfi1/chip.c 				p += strlen(name);
p                12586 drivers/infiniband/hw/hfi1/chip.c 					memcpy(p, bit_type_32, bit_type_32_sz);
p                12587 drivers/infiniband/hw/hfi1/chip.c 					p += bit_type_32_sz;
p                12590 drivers/infiniband/hw/hfi1/chip.c 				*p++ = '\n';
p                12593 drivers/infiniband/hw/hfi1/chip.c 			memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
p                12594 drivers/infiniband/hw/hfi1/chip.c 			p += strlen(dev_cntrs[i].name);
p                12598 drivers/infiniband/hw/hfi1/chip.c 				memcpy(p, bit_type_32, bit_type_32_sz);
p                12599 drivers/infiniband/hw/hfi1/chip.c 				p += bit_type_32_sz;
p                12602 drivers/infiniband/hw/hfi1/chip.c 			*p++ = '\n';
p                12660 drivers/infiniband/hw/hfi1/chip.c 	for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
p                12668 drivers/infiniband/hw/hfi1/chip.c 				memcpy(p, name, strlen(name));
p                12669 drivers/infiniband/hw/hfi1/chip.c 				p += strlen(name);
p                12673 drivers/infiniband/hw/hfi1/chip.c 					memcpy(p, bit_type_32, bit_type_32_sz);
p                12674 drivers/infiniband/hw/hfi1/chip.c 					p += bit_type_32_sz;
p                12677 drivers/infiniband/hw/hfi1/chip.c 				*p++ = '\n';
p                12680 drivers/infiniband/hw/hfi1/chip.c 			memcpy(p, port_cntrs[i].name,
p                12682 drivers/infiniband/hw/hfi1/chip.c 			p += strlen(port_cntrs[i].name);
p                12686 drivers/infiniband/hw/hfi1/chip.c 				memcpy(p, bit_type_32, bit_type_32_sz);
p                12687 drivers/infiniband/hw/hfi1/chip.c 				p += bit_type_32_sz;
p                12690 drivers/infiniband/hw/hfi1/chip.c 			*p++ = '\n';
p                 528 drivers/infiniband/hw/hfi1/debugfs.c static void check_dyn_flag(u64 scratch0, char *p, int size, int *used,
p                 535 drivers/infiniband/hw/hfi1/debugfs.c 		*used += scnprintf(p + *used, size - *used,
p                 255 drivers/infiniband/hw/hfi1/eprom.c 	void *p;
p                 276 drivers/infiniband/hw/hfi1/eprom.c 	p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE);
p                 277 drivers/infiniband/hw/hfi1/eprom.c 	if (p)
p                 278 drivers/infiniband/hw/hfi1/eprom.c 		length = p - buffer;
p                2020 drivers/infiniband/hw/hfi1/hfi.h void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
p                1028 drivers/infiniband/hw/hfi1/mad.c 	__be16 *p;
p                1061 drivers/infiniband/hw/hfi1/mad.c 	p = (__be16 *)data;
p                1067 drivers/infiniband/hw/hfi1/mad.c 			p[i] = cpu_to_be16(q[i]);
p                1727 drivers/infiniband/hw/hfi1/mad.c 	u16 *p = (u16 *)data;
p                1760 drivers/infiniband/hw/hfi1/mad.c 		p[i] = be16_to_cpu(q[i]);
p                1762 drivers/infiniband/hw/hfi1/mad.c 	if (start_block == 0 && set_pkeys(dd, port, p) != 0) {
p                1832 drivers/infiniband/hw/hfi1/mad.c 	u8 *p = data;
p                1842 drivers/infiniband/hw/hfi1/mad.c 		*p++ = ibp->sl_to_sc[i];
p                1855 drivers/infiniband/hw/hfi1/mad.c 	u8 *p = data;
p                1866 drivers/infiniband/hw/hfi1/mad.c 		sc = *p++;
p                1884 drivers/infiniband/hw/hfi1/mad.c 	u8 *p = data;
p                1894 drivers/infiniband/hw/hfi1/mad.c 		*p++ = ibp->sc_to_sl[i];
p                1908 drivers/infiniband/hw/hfi1/mad.c 	u8 *p = data;
p                1917 drivers/infiniband/hw/hfi1/mad.c 		ibp->sc_to_sl[i] = *p++;
p                2193 drivers/infiniband/hw/hfi1/mad.c 	struct buffer_control *p = (struct buffer_control *)data;
p                2202 drivers/infiniband/hw/hfi1/mad.c 	fm_get_table(ppd, FM_TBL_BUFFER_CONTROL, p);
p                2203 drivers/infiniband/hw/hfi1/mad.c 	trace_bct_get(dd, p);
p                2217 drivers/infiniband/hw/hfi1/mad.c 	struct buffer_control *p = (struct buffer_control *)data;
p                2219 drivers/infiniband/hw/hfi1/mad.c 	if (num_ports != 1 || smp_length_check(sizeof(*p), max_len)) {
p                2224 drivers/infiniband/hw/hfi1/mad.c 	trace_bct_set(dd, p);
p                2225 drivers/infiniband/hw/hfi1/mad.c 	if (fm_set_table(ppd, FM_TBL_BUFFER_CONTROL, p) < 0) {
p                2241 drivers/infiniband/hw/hfi1/mad.c 	u8 *p = data;
p                2251 drivers/infiniband/hw/hfi1/mad.c 		fm_get_table(ppd, FM_TBL_VL_LOW_ARB, p);
p                2254 drivers/infiniband/hw/hfi1/mad.c 		fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, p);
p                2257 drivers/infiniband/hw/hfi1/mad.c 		fm_get_table(ppd, FM_TBL_VL_PREEMPT_ELEMS, p);
p                2260 drivers/infiniband/hw/hfi1/mad.c 		fm_get_table(ppd, FM_TBL_VL_PREEMPT_MATRIX, p);
p                2283 drivers/infiniband/hw/hfi1/mad.c 	u8 *p = data;
p                2293 drivers/infiniband/hw/hfi1/mad.c 		(void)fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p);
p                2296 drivers/infiniband/hw/hfi1/mad.c 		(void)fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p);
p                2606 drivers/infiniband/hw/hfi1/mad.c 	struct opa_class_port_info *p =
p                2614 drivers/infiniband/hw/hfi1/mad.c 	p->base_version = OPA_MGMT_BASE_VERSION;
p                2615 drivers/infiniband/hw/hfi1/mad.c 	p->class_version = OPA_SM_CLASS_VERSION;
p                2619 drivers/infiniband/hw/hfi1/mad.c 	p->cap_mask2_resp_time = cpu_to_be32(18);
p                2622 drivers/infiniband/hw/hfi1/mad.c 		*resp_len += sizeof(*p);
p                3129 drivers/infiniband/hw/hfi1/mad.c 	struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *)
p                3133 drivers/infiniband/hw/hfi1/mad.c 	if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
p                3141 drivers/infiniband/hw/hfi1/mad.c 	p->port_xmit_data = rsp.port_xmit_data;
p                3142 drivers/infiniband/hw/hfi1/mad.c 	p->port_rcv_data = rsp.port_rcv_data;
p                3143 drivers/infiniband/hw/hfi1/mad.c 	p->port_xmit_packets = rsp.port_xmit_pkts;
p                3144 drivers/infiniband/hw/hfi1/mad.c 	p->port_rcv_packets = rsp.port_rcv_pkts;
p                3145 drivers/infiniband/hw/hfi1/mad.c 	p->port_unicast_xmit_packets = 0;
p                3146 drivers/infiniband/hw/hfi1/mad.c 	p->port_unicast_rcv_packets =  0;
p                3147 drivers/infiniband/hw/hfi1/mad.c 	p->port_multicast_xmit_packets = rsp.port_multicast_xmit_pkts;
p                3148 drivers/infiniband/hw/hfi1/mad.c 	p->port_multicast_rcv_packets = rsp.port_multicast_rcv_pkts;
p                3289 drivers/infiniband/hw/hfi1/mad.c 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
p                3299 drivers/infiniband/hw/hfi1/mad.c 	if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
p                3304 drivers/infiniband/hw/hfi1/mad.c 	p->symbol_error_counter = 0; /* N/A for OPA */
p                3308 drivers/infiniband/hw/hfi1/mad.c 		p->link_error_recovery_counter = 0xFF;
p                3310 drivers/infiniband/hw/hfi1/mad.c 		p->link_error_recovery_counter = (u8)temp_32;
p                3314 drivers/infiniband/hw/hfi1/mad.c 		p->link_downed_counter = 0xFF;
p                3316 drivers/infiniband/hw/hfi1/mad.c 		p->link_downed_counter = (u8)temp_32;
p                3320 drivers/infiniband/hw/hfi1/mad.c 		p->port_rcv_errors = cpu_to_be16(0xFFFF);
p                3322 drivers/infiniband/hw/hfi1/mad.c 		p->port_rcv_errors = cpu_to_be16((u16)temp_64);
p                3326 drivers/infiniband/hw/hfi1/mad.c 		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
p                3328 drivers/infiniband/hw/hfi1/mad.c 		p->port_rcv_remphys_errors = cpu_to_be16((u16)temp_64);
p                3331 drivers/infiniband/hw/hfi1/mad.c 	p->port_rcv_switch_relay_errors = cpu_to_be16((u16)temp_64);
p                3335 drivers/infiniband/hw/hfi1/mad.c 		p->port_xmit_discards = cpu_to_be16(0xFFFF);
p                3337 drivers/infiniband/hw/hfi1/mad.c 		p->port_xmit_discards = cpu_to_be16((u16)temp_64);
p                3341 drivers/infiniband/hw/hfi1/mad.c 		p->port_xmit_constraint_errors = 0xFF;
p                3343 drivers/infiniband/hw/hfi1/mad.c 		p->port_xmit_constraint_errors = (u8)temp_64;
p                3347 drivers/infiniband/hw/hfi1/mad.c 		p->port_rcv_constraint_errors = 0xFFUL;
p                3349 drivers/infiniband/hw/hfi1/mad.c 		p->port_rcv_constraint_errors = (u8)temp_64;
p                3363 drivers/infiniband/hw/hfi1/mad.c 	p->link_overrun_errors = (u8)temp_link_overrun_errors;
p                3365 drivers/infiniband/hw/hfi1/mad.c 	p->vl15_dropped = 0; /* N/A for OPA */
p                3709 drivers/infiniband/hw/hfi1/mad.c 	struct opa_congestion_info_attr *p =
p                3714 drivers/infiniband/hw/hfi1/mad.c 	if (smp_length_check(sizeof(*p), max_len)) {
p                3719 drivers/infiniband/hw/hfi1/mad.c 	p->congestion_info = 0;
p                3720 drivers/infiniband/hw/hfi1/mad.c 	p->control_table_cap = ppd->cc_max_table_entries;
p                3721 drivers/infiniband/hw/hfi1/mad.c 	p->congestion_log_length = OPA_CONG_LOG_ELEMS;
p                3724 drivers/infiniband/hw/hfi1/mad.c 		*resp_len += sizeof(*p);
p                3734 drivers/infiniband/hw/hfi1/mad.c 	struct opa_congestion_setting_attr *p =
p                3741 drivers/infiniband/hw/hfi1/mad.c 	if (smp_length_check(sizeof(*p), max_len)) {
p                3756 drivers/infiniband/hw/hfi1/mad.c 	p->port_control = cpu_to_be16(cc_state->cong_setting.port_control);
p                3757 drivers/infiniband/hw/hfi1/mad.c 	p->control_map = cpu_to_be32(cc_state->cong_setting.control_map);
p                3759 drivers/infiniband/hw/hfi1/mad.c 		p->entries[i].ccti_increase = entries[i].ccti_increase;
p                3760 drivers/infiniband/hw/hfi1/mad.c 		p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
p                3761 drivers/infiniband/hw/hfi1/mad.c 		p->entries[i].trigger_threshold =
p                3763 drivers/infiniband/hw/hfi1/mad.c 		p->entries[i].ccti_min = entries[i].ccti_min;
p                3769 drivers/infiniband/hw/hfi1/mad.c 		*resp_len += sizeof(*p);
p                3826 drivers/infiniband/hw/hfi1/mad.c 	struct opa_congestion_setting_attr *p =
p                3833 drivers/infiniband/hw/hfi1/mad.c 	if (smp_length_check(sizeof(*p), max_len)) {
p                3843 drivers/infiniband/hw/hfi1/mad.c 	ppd->cc_sl_control_map = be32_to_cpu(p->control_map);
p                3847 drivers/infiniband/hw/hfi1/mad.c 		entries[i].ccti_increase = p->entries[i].ccti_increase;
p                3848 drivers/infiniband/hw/hfi1/mad.c 		entries[i].ccti_timer = be16_to_cpu(p->entries[i].ccti_timer);
p                3850 drivers/infiniband/hw/hfi1/mad.c 			p->entries[i].trigger_threshold;
p                3851 drivers/infiniband/hw/hfi1/mad.c 		entries[i].ccti_min = p->entries[i].ccti_min;
p                3984 drivers/infiniband/hw/hfi1/mad.c 	struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data;
p                4004 drivers/infiniband/hw/hfi1/mad.c 		 (be16_to_cpu(p->ccti_limit)) % IB_CCT_ENTRIES + 1;
p                4007 drivers/infiniband/hw/hfi1/mad.c 	ccti_limit = be16_to_cpu(p->ccti_limit);
p                4021 drivers/infiniband/hw/hfi1/mad.c 		entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
p                4045 drivers/infiniband/hw/hfi1/mad.c 	struct opa_led_info *p = (struct opa_led_info *)data;
p                4049 drivers/infiniband/hw/hfi1/mad.c 	if (nport != 1 || smp_length_check(sizeof(*p), max_len)) {
p                4061 drivers/infiniband/hw/hfi1/mad.c 	p->rsvd_led_mask = cpu_to_be32(is_beaconing_active << OPA_LED_SHIFT);
p                4074 drivers/infiniband/hw/hfi1/mad.c 	struct opa_led_info *p = (struct opa_led_info *)data;
p                4076 drivers/infiniband/hw/hfi1/mad.c 	int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK);
p                4078 drivers/infiniband/hw/hfi1/mad.c 	if (nport != 1 || smp_length_check(sizeof(*p), max_len)) {
p                  63 drivers/infiniband/hw/hfi1/rc.c 	u8 i, p;
p                  66 drivers/infiniband/hw/hfi1/rc.c 	for (i = qp->r_head_ack_queue; ; i = p) {
p                  70 drivers/infiniband/hw/hfi1/rc.c 			p = i - 1;
p                  72 drivers/infiniband/hw/hfi1/rc.c 			p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
p                  73 drivers/infiniband/hw/hfi1/rc.c 		if (p == qp->r_head_ack_queue) {
p                  77 drivers/infiniband/hw/hfi1/rc.c 		e = &qp->s_ack_queue[p];
p                  83 drivers/infiniband/hw/hfi1/rc.c 			if (p == qp->s_tail_ack_queue &&
p                  90 drivers/infiniband/hw/hfi1/rc.c 		*prev = p;
p                 157 drivers/infiniband/hw/hfi1/tid_rdma.c static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
p                 160 drivers/infiniband/hw/hfi1/tid_rdma.c 		(((u64)p->qp & TID_OPFN_QP_CTXT_MASK) <<
p                 162 drivers/infiniband/hw/hfi1/tid_rdma.c 		((((u64)p->qp >> 16) & TID_OPFN_QP_KDETH_MASK) <<
p                 164 drivers/infiniband/hw/hfi1/tid_rdma.c 		(((u64)((p->max_len >> PAGE_SHIFT) - 1) &
p                 166 drivers/infiniband/hw/hfi1/tid_rdma.c 		(((u64)p->timeout & TID_OPFN_TIMEOUT_MASK) <<
p                 168 drivers/infiniband/hw/hfi1/tid_rdma.c 		(((u64)p->urg & TID_OPFN_URG_MASK) << TID_OPFN_URG_SHIFT) |
p                 169 drivers/infiniband/hw/hfi1/tid_rdma.c 		(((u64)p->jkey & TID_OPFN_JKEY_MASK) << TID_OPFN_JKEY_SHIFT) |
p                 170 drivers/infiniband/hw/hfi1/tid_rdma.c 		(((u64)p->max_read & TID_OPFN_MAX_READ_MASK) <<
p                 172 drivers/infiniband/hw/hfi1/tid_rdma.c 		(((u64)p->max_write & TID_OPFN_MAX_WRITE_MASK) <<
p                 176 drivers/infiniband/hw/hfi1/tid_rdma.c static void tid_rdma_opfn_decode(struct tid_rdma_params *p, u64 data)
p                 178 drivers/infiniband/hw/hfi1/tid_rdma.c 	p->max_len = (((data >> TID_OPFN_MAX_LEN_SHIFT) &
p                 180 drivers/infiniband/hw/hfi1/tid_rdma.c 	p->jkey = (data >> TID_OPFN_JKEY_SHIFT) & TID_OPFN_JKEY_MASK;
p                 181 drivers/infiniband/hw/hfi1/tid_rdma.c 	p->max_write = (data >> TID_OPFN_MAX_WRITE_SHIFT) &
p                 183 drivers/infiniband/hw/hfi1/tid_rdma.c 	p->max_read = (data >> TID_OPFN_MAX_READ_SHIFT) &
p                 185 drivers/infiniband/hw/hfi1/tid_rdma.c 	p->qp =
p                 189 drivers/infiniband/hw/hfi1/tid_rdma.c 	p->urg = (data >> TID_OPFN_URG_SHIFT) & TID_OPFN_URG_MASK;
p                 190 drivers/infiniband/hw/hfi1/tid_rdma.c 	p->timeout = (data >> TID_OPFN_TIMEOUT_SHIFT) & TID_OPFN_TIMEOUT_MASK;
p                 193 drivers/infiniband/hw/hfi1/tid_rdma.c void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p)
p                 197 drivers/infiniband/hw/hfi1/tid_rdma.c 	p->qp = (kdeth_qp << 16) | priv->rcd->ctxt;
p                 198 drivers/infiniband/hw/hfi1/tid_rdma.c 	p->max_len = TID_RDMA_MAX_SEGMENT_SIZE;
p                 199 drivers/infiniband/hw/hfi1/tid_rdma.c 	p->jkey = priv->rcd->jkey;
p                 200 drivers/infiniband/hw/hfi1/tid_rdma.c 	p->max_read = TID_RDMA_MAX_READ_SEGS_PER_REQ;
p                 201 drivers/infiniband/hw/hfi1/tid_rdma.c 	p->max_write = TID_RDMA_MAX_WRITE_SEGS_PER_REQ;
p                 202 drivers/infiniband/hw/hfi1/tid_rdma.c 	p->timeout = qp->timeout;
p                 203 drivers/infiniband/hw/hfi1/tid_rdma.c 	p->urg = is_urg_masked(priv->rcd);
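The tid_rdma_opfn_encode()/tid_rdma_opfn_decode() entries above pack several small QP parameters (qp, max_len, timeout, urg, jkey, max_read, max_write) into a single u64 with shift/mask pairs, and unpack them the same way. Below is a minimal standalone sketch of that encode/decode round trip; the field names, widths, and shift values are hypothetical stand-ins, not the driver's real TID_OPFN_* layout.

/* Sketch only: hypothetical field layout, not the real TID_OPFN_* masks. */
#include <stdint.h>
#include <stdio.h>

#define F_QP_MASK       0xffffffULL /* 24 bits, hypothetical */
#define F_QP_SHIFT      0
#define F_TIMEOUT_MASK  0x1fULL     /* 5 bits, hypothetical */
#define F_TIMEOUT_SHIFT 24
#define F_JKEY_MASK     0xffffULL   /* 16 bits, hypothetical */
#define F_JKEY_SHIFT    29

struct params {
	uint32_t qp;
	uint8_t  timeout;
	uint16_t jkey;
};

static uint64_t params_encode(const struct params *p)
{
	return (((uint64_t)p->qp      & F_QP_MASK)      << F_QP_SHIFT) |
	       (((uint64_t)p->timeout & F_TIMEOUT_MASK) << F_TIMEOUT_SHIFT) |
	       (((uint64_t)p->jkey    & F_JKEY_MASK)    << F_JKEY_SHIFT);
}

static void params_decode(struct params *p, uint64_t data)
{
	p->qp      = (uint32_t)((data >> F_QP_SHIFT)     & F_QP_MASK);
	p->timeout = (uint8_t)((data >> F_TIMEOUT_SHIFT) & F_TIMEOUT_MASK);
	p->jkey    = (uint16_t)((data >> F_JKEY_SHIFT)   & F_JKEY_MASK);
}

int main(void)
{
	struct params in = { .qp = 0x12345, .timeout = 17, .jkey = 0xbeef };
	struct params out;

	params_decode(&out, params_encode(&in));
	printf("qp=%#x timeout=%u jkey=%#x\n",
	       (unsigned)out.qp, (unsigned)out.timeout, (unsigned)out.jkey);
	return 0;
}

The non-overlapping masks are the whole trick: as long as each field is masked to its width before shifting, encode followed by decode is lossless.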
p                1616 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_swqe_priv *p = wqe->priv;
p                1618 drivers/infiniband/hw/hfi1/tid_rdma.c 	hfi1_kern_exp_rcv_free_flows(&p->tid_req);
p                 207 drivers/infiniband/hw/hfi1/tid_rdma.h void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p);
p                 226 drivers/infiniband/hw/hfi1/trace.c const char *hfi1_trace_fmt_lrh(struct trace_seq *p, bool bypass,
p                 232 drivers/infiniband/hw/hfi1/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 234 drivers/infiniband/hw/hfi1/trace.c 	trace_seq_printf(p, LRH_PRN, len, sc, dlid, slid);
p                 237 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, LRH_16B_PRN,
p                 241 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, LRH_9B_PRN,
p                 243 drivers/infiniband/hw/hfi1/trace.c 	trace_seq_putc(p, 0);
p                 256 drivers/infiniband/hw/hfi1/trace.c const char *hfi1_trace_fmt_rest(struct trace_seq *p, bool bypass, u8 l4,
p                 262 drivers/infiniband/hw/hfi1/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 266 drivers/infiniband/hw/hfi1/trace.c 			trace_seq_printf(p, L4_FM_16B_PRN,
p                 269 drivers/infiniband/hw/hfi1/trace.c 			trace_seq_printf(p, BTH_16B_PRN,
p                 274 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, BTH_9B_PRN,
p                 278 drivers/infiniband/hw/hfi1/trace.c 	trace_seq_putc(p, 0);
p                 284 drivers/infiniband/hw/hfi1/trace.c 	struct trace_seq *p,
p                 289 drivers/infiniband/hw/hfi1/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 292 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, "mgmt pkt");
p                 304 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, IMM_PRN,
p                 310 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, RETH_PRN " " IMM_PRN,
p                 322 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, RETH_PRN,
p                 331 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, AETH_PRN, be32_to_cpu(eh->aeth) >> 24,
p                 336 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, TID_RDMA_KDETH " " RETH_PRN " "
p                 346 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, TID_RDMA_KDETH " " AETH_PRN " "
p                 362 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, TID_RDMA_KDETH_DATA " " TID_WRITE_DATA_PRN,
p                 375 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, TID_RDMA_KDETH " " RETH_PRN " "
p                 387 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, TID_RDMA_KDETH_DATA " " AETH_PRN " "
p                 407 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, TID_RDMA_KDETH " " AETH_PRN " "
p                 423 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, TID_RDMA_KDETH " " TID_RESYNC_PRN,
p                 430 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, AETH_PRN " " ATOMICACKETH_PRN,
p                 439 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, ATOMICETH_PRN,
p                 448 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, DETH_PRN,
p                 455 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, IETH_PRN,
p                 460 drivers/infiniband/hw/hfi1/trace.c 	trace_seq_putc(p, 0);
p                 465 drivers/infiniband/hw/hfi1/trace.c 	struct trace_seq *p,
p                 468 drivers/infiniband/hw/hfi1/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 475 drivers/infiniband/hw/hfi1/trace.c 	trace_seq_printf(p, "%s", flags);
p                 477 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, " amode:%u aidx:%u alen:%u",
p                 488 drivers/infiniband/hw/hfi1/trace.c 	struct trace_seq *p,
p                 492 drivers/infiniband/hw/hfi1/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 495 drivers/infiniband/hw/hfi1/trace.c 		trace_seq_printf(p, "%s%#x", i == 0 ? "" : " ", arr[i]);
p                 496 drivers/infiniband/hw/hfi1/trace.c 	trace_seq_putc(p, 0);
p                 109 drivers/infiniband/hw/hfi1/trace_ibhdrs.h const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode,
p                 132 drivers/infiniband/hw/hfi1/trace_ibhdrs.h const char *hfi1_trace_fmt_lrh(struct trace_seq *p, bool bypass,
p                 138 drivers/infiniband/hw/hfi1/trace_ibhdrs.h const char *hfi1_trace_fmt_rest(struct trace_seq *p, bool bypass, u8 l4,
p                 147 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 			 parse_everbs_hdrs(p, op, l4, dest_qpn, src_qpn, ehdrs)
p                 264 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 			      hfi1_trace_fmt_lrh(p,
p                 282 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 			      hfi1_trace_fmt_rest(p,
p                 429 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 			      hfi1_trace_fmt_lrh(p,
p                 446 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 			      hfi1_trace_fmt_rest(p,
p                  57 drivers/infiniband/hw/hfi1/trace_tx.h const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);
p                  59 drivers/infiniband/hw/hfi1/trace_tx.h #define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)
p                 657 drivers/infiniband/hw/hfi1/trace_tx.h #define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
p                 118 drivers/infiniband/hw/hfi1/user_pages.c void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
p                 121 drivers/infiniband/hw/hfi1/user_pages.c 	put_user_pages_dirty_lock(p, npages, dirty);
p                1443 drivers/infiniband/hw/hfi1/user_sdma.c 		struct sdma_txreq *t, *p;
p                1445 drivers/infiniband/hw/hfi1/user_sdma.c 		list_for_each_entry_safe(t, p, &req->txps, list) {
p                 509 drivers/infiniband/hw/hfi1/verbs.c 		struct rvt_mcast_qp *p;
p                 519 drivers/infiniband/hw/hfi1/verbs.c 		list_for_each_entry_rcu(p, &mcast->qp_list, list) {
p                 520 drivers/infiniband/hw/hfi1/verbs.c 			packet->qp = p->qp;
p                1663 drivers/infiniband/hw/hfi1/verbs.c 	char *names_out, *p, **q;
p                1679 drivers/infiniband/hw/hfi1/verbs.c 	p = names_out + (n + num_extra_names) * sizeof(char *);
p                1680 drivers/infiniband/hw/hfi1/verbs.c 	memcpy(p, names_in, names_len);
p                1684 drivers/infiniband/hw/hfi1/verbs.c 		q[i] = p;
p                1685 drivers/infiniband/hw/hfi1/verbs.c 		p = strchr(p, '\n');
p                1686 drivers/infiniband/hw/hfi1/verbs.c 		*p++ = '\0';
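The hfi1 verbs.c entries above build an array of counter-name pointers by copying a newline-separated name blob after the pointer array and terminating each name in place with strchr(). A self-contained sketch of that split-in-place pattern follows; the layout (pointer array first, then the copied text) and the sample names are assumptions for the demo, not the driver's actual buffer.

/* Sketch only: split a newline-separated blob into strings in one allocation. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char **split_names(const char *names_in, size_t names_len, int n)
{
	char **q = malloc(n * sizeof(char *) + names_len + 1);
	char *p;
	int i;

	if (!q)
		return NULL;
	p = (char *)q + n * sizeof(char *);
	memcpy(p, names_in, names_len);
	p[names_len] = '\0';

	for (i = 0; i < n; i++) {
		q[i] = p;
		p = strchr(p, '\n');
		if (!p)
			break;		/* last name had no trailing newline */
		*p++ = '\0';		/* terminate this name in place */
	}
	return q;
}

int main(void)
{
	const char blob[] = "rx_pkts\ntx_pkts\nrx_err\n";
	char **names = split_names(blob, sizeof(blob) - 1, 3);
	int i;

	for (i = 0; names && i < 3; i++)
		printf("%d: %s\n", i, names[i]);
	free(names);
	return 0;
}

Keeping the pointers and the text in one allocation means a single free() releases everything, which is the appeal of the pattern.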
p                1744 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	u32 *p = NULL;
p                1751 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	p = (u32 *)&gid->raw[0];
p                1752 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
p                1755 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	p = (u32 *)&gid->raw[4];
p                1756 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
p                1759 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	p = (u32 *)&gid->raw[8];
p                1760 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
p                1763 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	p = (u32 *)&gid->raw[0xc];
p                1764 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
p                1779 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	u32 *p;
p                1795 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	p = (u32 *)(&addr[0]);
p                1796 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	reg_smac_l = *p;
p                2143 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	u32 *p;
p                2154 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	p = (u32 *)&gid->raw[0];
p                2155 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	sgid_tb->vf_sgid_l = cpu_to_le32(*p);
p                2157 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	p = (u32 *)&gid->raw[4];
p                2158 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	sgid_tb->vf_sgid_ml = cpu_to_le32(*p);
p                2160 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	p = (u32 *)&gid->raw[8];
p                2161 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	sgid_tb->vf_sgid_mh = cpu_to_le32(*p);
p                2163 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	p = (u32 *)&gid->raw[0xc];
p                2164 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	sgid_tb->vf_sgid_h = cpu_to_le32(*p);
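Both hns_roce variants above program a 16-byte GID by viewing its raw bytes as four 32-bit words (low, middle-low, middle-high, high) before handing them to the hardware. The standalone sketch below mirrors that split; memcpy replaces the driver's pointer casts purely to keep the example alias-safe, and the word names are illustrative.

/* Sketch only: splitting a 16-byte GID into four 32-bit register words. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void split_gid(const uint8_t raw[16], uint32_t w[4])
{
	int i;

	/* memcpy instead of a u32 * cast keeps the standalone demo alias-safe */
	for (i = 0; i < 4; i++)
		memcpy(&w[i], &raw[i * 4], sizeof(uint32_t));
}

int main(void)
{
	uint8_t gid[16];
	uint32_t w[4];
	int i;

	for (i = 0; i < 16; i++)
		gid[i] = (uint8_t)i;
	split_gid(gid, w);
	printf("l=%#x ml=%#x mh=%#x h=%#x\n",
	       (unsigned)w[0], (unsigned)w[1], (unsigned)w[2], (unsigned)w[3]);
	return 0;
}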
p                1110 drivers/infiniband/hw/hns/hns_roce_qp.c 	int p;
p                1112 drivers/infiniband/hw/hns/hns_roce_qp.c 	p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
p                1113 drivers/infiniband/hw/hns/hns_roce_qp.c 	active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
p                1132 drivers/infiniband/hw/hns/hns_roce_qp.c 	int p;
p                1143 drivers/infiniband/hw/hns/hns_roce_qp.c 		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
p                1144 drivers/infiniband/hw/hns/hns_roce_qp.c 		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
p                 279 drivers/infiniband/hw/i40iw/i40iw_utils.c 	__be32 *p;
p                 290 drivers/infiniband/hw/i40iw/i40iw_utils.c 		p = (__be32 *)neigh->primary_key;
p                 291 drivers/infiniband/hw/i40iw/i40iw_utils.c 		i40iw_copy_ip_ntohl(local_ipaddr, p);
p                1034 drivers/infiniband/hw/mlx4/mad.c 	int p, q;
p                1038 drivers/infiniband/hw/mlx4/mad.c 	for (p = 0; p < dev->num_ports; ++p) {
p                1039 drivers/infiniband/hw/mlx4/mad.c 		ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
p                1042 drivers/infiniband/hw/mlx4/mad.c 				agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
p                1050 drivers/infiniband/hw/mlx4/mad.c 				dev->send_agent[p][q] = agent;
p                1052 drivers/infiniband/hw/mlx4/mad.c 				dev->send_agent[p][q] = NULL;
p                1059 drivers/infiniband/hw/mlx4/mad.c 	for (p = 0; p < dev->num_ports; ++p)
p                1061 drivers/infiniband/hw/mlx4/mad.c 			if (dev->send_agent[p][q])
p                1062 drivers/infiniband/hw/mlx4/mad.c 				ib_unregister_mad_agent(dev->send_agent[p][q]);
p                1070 drivers/infiniband/hw/mlx4/mad.c 	int p, q;
p                1072 drivers/infiniband/hw/mlx4/mad.c 	for (p = 0; p < dev->num_ports; ++p) {
p                1074 drivers/infiniband/hw/mlx4/mad.c 			agent = dev->send_agent[p][q];
p                1076 drivers/infiniband/hw/mlx4/mad.c 				dev->send_agent[p][q] = NULL;
p                1081 drivers/infiniband/hw/mlx4/mad.c 		if (dev->sm_ah[p])
p                1082 drivers/infiniband/hw/mlx4/mad.c 			rdma_destroy_ah(dev->sm_ah[p], 0);
p                3016 drivers/infiniband/hw/mlx4/main.c 	int p;
p                3040 drivers/infiniband/hw/mlx4/main.c 	for (p = 0; p < ibdev->num_ports; ++p)
p                3041 drivers/infiniband/hw/mlx4/main.c 		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
p                3043 drivers/infiniband/hw/mlx4/main.c 	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
p                3044 drivers/infiniband/hw/mlx4/main.c 		mlx4_CLOSE_PORT(dev, p);
p                3246 drivers/infiniband/hw/mlx4/main.c 	int p = 0;
p                3263 drivers/infiniband/hw/mlx4/main.c 		p = (int) param;
p                3267 drivers/infiniband/hw/mlx4/main.c 		if (p > ibdev->num_ports)
p                3270 drivers/infiniband/hw/mlx4/main.c 		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
p                3273 drivers/infiniband/hw/mlx4/main.c 				mlx4_ib_invalidate_all_guid_record(ibdev, p);
p                3276 drivers/infiniband/hw/mlx4/main.c 				mlx4_sched_ib_sl2vl_update_work(ibdev, p);
p                3282 drivers/infiniband/hw/mlx4/main.c 		if (p > ibdev->num_ports)
p                3310 drivers/infiniband/hw/mlx4/main.c 		do_slave_init(ibdev, p, 1);
p                3318 drivers/infiniband/hw/mlx4/main.c 								       p, i,
p                3332 drivers/infiniband/hw/mlx4/main.c 								       p, i,
p                3337 drivers/infiniband/hw/mlx4/main.c 		do_slave_init(ibdev, p, 0);
p                3345 drivers/infiniband/hw/mlx4/main.c 	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
p                1076 drivers/infiniband/hw/mlx4/mcg.c 	struct rb_node *p;
p                1088 drivers/infiniband/hw/mlx4/mcg.c 		for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p))
p                1102 drivers/infiniband/hw/mlx4/mcg.c 	while ((p = rb_first(&ctx->mcg_table)) != NULL) {
p                1103 drivers/infiniband/hw/mlx4/mcg.c 		group = rb_entry(p, struct mcast_group, node);
p                1228 drivers/infiniband/hw/mlx4/mcg.c 	struct rb_node *p;
p                1231 drivers/infiniband/hw/mlx4/mcg.c 	for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) {
p                1232 drivers/infiniband/hw/mlx4/mcg.c 		group = rb_entry(p, struct mcast_group, node);
p                2792 drivers/infiniband/hw/mlx4/qp.c 		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
p                2793 drivers/infiniband/hw/mlx4/qp.c 		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) {
p                 378 drivers/infiniband/hw/mlx4/sysfs.c 	struct mlx4_port *p = container_of(kobj, struct mlx4_port, kobj);
p                 382 drivers/infiniband/hw/mlx4/sysfs.c 	for (i = 0; (a = p->pkey_group.attrs[i]); ++i)
p                 384 drivers/infiniband/hw/mlx4/sysfs.c 	kfree(p->pkey_group.attrs);
p                 385 drivers/infiniband/hw/mlx4/sysfs.c 	for (i = 0; (a = p->gid_group.attrs[i]); ++i)
p                 387 drivers/infiniband/hw/mlx4/sysfs.c 	kfree(p->gid_group.attrs);
p                 388 drivers/infiniband/hw/mlx4/sysfs.c 	kfree(p);
p                 403 drivers/infiniband/hw/mlx4/sysfs.c 	struct mlx4_port *p = container_of(kobj, struct mlx4_port, kobj);
p                 407 drivers/infiniband/hw/mlx4/sysfs.c 	return port_attr->show(p, port_attr, buf);
p                 416 drivers/infiniband/hw/mlx4/sysfs.c 	struct mlx4_port *p = container_of(kobj, struct mlx4_port, kobj);
p                 420 drivers/infiniband/hw/mlx4/sysfs.c 	return port_attr->store(p, port_attr, buf, size);
p                 439 drivers/infiniband/hw/mlx4/sysfs.c static ssize_t show_port_pkey(struct mlx4_port *p, struct port_attribute *attr,
p                 446 drivers/infiniband/hw/mlx4/sysfs.c 	if (p->dev->pkeys.virt2phys_pkey[p->slave][p->port_num - 1][tab_attr->index] >=
p                 447 drivers/infiniband/hw/mlx4/sysfs.c 	    (p->dev->dev->caps.pkey_table_len[p->port_num]))
p                 451 drivers/infiniband/hw/mlx4/sysfs.c 			      p->dev->pkeys.virt2phys_pkey[p->slave]
p                 452 drivers/infiniband/hw/mlx4/sysfs.c 			      [p->port_num - 1][tab_attr->index]);
p                 456 drivers/infiniband/hw/mlx4/sysfs.c static ssize_t store_port_pkey(struct mlx4_port *p, struct port_attribute *attr,
p                 465 drivers/infiniband/hw/mlx4/sysfs.c 	if (p->slave == mlx4_master_func_num(p->dev->dev))
p                 469 drivers/infiniband/hw/mlx4/sysfs.c 		idx = p->dev->dev->phys_caps.pkey_phys_table_len[p->port_num] - 1;
p                 471 drivers/infiniband/hw/mlx4/sysfs.c 		 idx >= p->dev->dev->caps.pkey_table_len[p->port_num] ||
p                 475 drivers/infiniband/hw/mlx4/sysfs.c 	p->dev->pkeys.virt2phys_pkey[p->slave][p->port_num - 1]
p                 477 drivers/infiniband/hw/mlx4/sysfs.c 	mlx4_sync_pkey_table(p->dev->dev, p->slave, p->port_num,
p                 479 drivers/infiniband/hw/mlx4/sysfs.c 	err = mlx4_gen_pkey_eqe(p->dev->dev, p->slave, p->port_num);
p                 482 drivers/infiniband/hw/mlx4/sysfs.c 		       " port %d, index %d\n", p->slave, p->port_num, idx);
p                 488 drivers/infiniband/hw/mlx4/sysfs.c static ssize_t show_port_gid_idx(struct mlx4_port *p,
p                 491 drivers/infiniband/hw/mlx4/sysfs.c 	return sprintf(buf, "%d\n", p->slave);
p                 543 drivers/infiniband/hw/mlx4/sysfs.c 	struct mlx4_port *p =
p                 547 drivers/infiniband/hw/mlx4/sysfs.c 	if (mlx4_vf_smi_enabled(p->dev->dev, p->slave, p->port_num))
p                 559 drivers/infiniband/hw/mlx4/sysfs.c 	struct mlx4_port *p =
p                 563 drivers/infiniband/hw/mlx4/sysfs.c 	if (mlx4_vf_get_enable_smi_admin(p->dev->dev, p->slave, p->port_num))
p                 575 drivers/infiniband/hw/mlx4/sysfs.c 	struct mlx4_port *p =
p                 583 drivers/infiniband/hw/mlx4/sysfs.c 	if (mlx4_vf_set_enable_smi_admin(p->dev->dev, p->slave, p->port_num, enable))
p                 588 drivers/infiniband/hw/mlx4/sysfs.c static int add_vf_smi_entries(struct mlx4_port *p)
p                 590 drivers/infiniband/hw/mlx4/sysfs.c 	int is_eth = rdma_port_get_link_layer(&p->dev->ib_dev, p->port_num) ==
p                 595 drivers/infiniband/hw/mlx4/sysfs.c 	if (is_eth || p->slave == mlx4_master_func_num(p->dev->dev))
p                 598 drivers/infiniband/hw/mlx4/sysfs.c 	sysfs_attr_init(&p->smi_enabled.attr);
p                 599 drivers/infiniband/hw/mlx4/sysfs.c 	p->smi_enabled.show = sysfs_show_smi_enabled;
p                 600 drivers/infiniband/hw/mlx4/sysfs.c 	p->smi_enabled.store = NULL;
p                 601 drivers/infiniband/hw/mlx4/sysfs.c 	p->smi_enabled.attr.name = "smi_enabled";
p                 602 drivers/infiniband/hw/mlx4/sysfs.c 	p->smi_enabled.attr.mode = 0444;
p                 603 drivers/infiniband/hw/mlx4/sysfs.c 	ret = sysfs_create_file(&p->kobj, &p->smi_enabled.attr);
p                 609 drivers/infiniband/hw/mlx4/sysfs.c 	sysfs_attr_init(&p->enable_smi_admin.attr);
p                 610 drivers/infiniband/hw/mlx4/sysfs.c 	p->enable_smi_admin.show = sysfs_show_enable_smi_admin;
p                 611 drivers/infiniband/hw/mlx4/sysfs.c 	p->enable_smi_admin.store = sysfs_store_enable_smi_admin;
p                 612 drivers/infiniband/hw/mlx4/sysfs.c 	p->enable_smi_admin.attr.name = "enable_smi_admin";
p                 613 drivers/infiniband/hw/mlx4/sysfs.c 	p->enable_smi_admin.attr.mode = 0644;
p                 614 drivers/infiniband/hw/mlx4/sysfs.c 	ret = sysfs_create_file(&p->kobj, &p->enable_smi_admin.attr);
p                 617 drivers/infiniband/hw/mlx4/sysfs.c 		sysfs_remove_file(&p->kobj, &p->smi_enabled.attr);
p                 623 drivers/infiniband/hw/mlx4/sysfs.c static void remove_vf_smi_entries(struct mlx4_port *p)
p                 625 drivers/infiniband/hw/mlx4/sysfs.c 	int is_eth = rdma_port_get_link_layer(&p->dev->ib_dev, p->port_num) ==
p                 628 drivers/infiniband/hw/mlx4/sysfs.c 	if (is_eth || p->slave == mlx4_master_func_num(p->dev->dev))
p                 631 drivers/infiniband/hw/mlx4/sysfs.c 	sysfs_remove_file(&p->kobj, &p->smi_enabled.attr);
p                 632 drivers/infiniband/hw/mlx4/sysfs.c 	sysfs_remove_file(&p->kobj, &p->enable_smi_admin.attr);
p                 637 drivers/infiniband/hw/mlx4/sysfs.c 	struct mlx4_port *p;
p                 643 drivers/infiniband/hw/mlx4/sysfs.c 	p = kzalloc(sizeof *p, GFP_KERNEL);
p                 644 drivers/infiniband/hw/mlx4/sysfs.c 	if (!p)
p                 647 drivers/infiniband/hw/mlx4/sysfs.c 	p->dev = dev;
p                 648 drivers/infiniband/hw/mlx4/sysfs.c 	p->port_num = port_num;
p                 649 drivers/infiniband/hw/mlx4/sysfs.c 	p->slave = slave;
p                 651 drivers/infiniband/hw/mlx4/sysfs.c 	ret = kobject_init_and_add(&p->kobj, &port_type,
p                 657 drivers/infiniband/hw/mlx4/sysfs.c 	p->pkey_group.name  = "pkey_idx";
p                 658 drivers/infiniband/hw/mlx4/sysfs.c 	p->pkey_group.attrs =
p                 662 drivers/infiniband/hw/mlx4/sysfs.c 	if (!p->pkey_group.attrs) {
p                 667 drivers/infiniband/hw/mlx4/sysfs.c 	ret = sysfs_create_group(&p->kobj, &p->pkey_group);
p                 671 drivers/infiniband/hw/mlx4/sysfs.c 	p->gid_group.name  = "gid_idx";
p                 672 drivers/infiniband/hw/mlx4/sysfs.c 	p->gid_group.attrs = alloc_group_attrs(show_port_gid_idx, NULL, 1);
p                 673 drivers/infiniband/hw/mlx4/sysfs.c 	if (!p->gid_group.attrs) {
p                 678 drivers/infiniband/hw/mlx4/sysfs.c 	ret = sysfs_create_group(&p->kobj, &p->gid_group);
p                 682 drivers/infiniband/hw/mlx4/sysfs.c 	ret = add_vf_smi_entries(p);
p                 686 drivers/infiniband/hw/mlx4/sysfs.c 	list_add_tail(&p->kobj.entry, &dev->pkeys.pkey_port_list[slave]);
p                 690 drivers/infiniband/hw/mlx4/sysfs.c 	kfree(p->gid_group.attrs[0]);
p                 691 drivers/infiniband/hw/mlx4/sysfs.c 	kfree(p->gid_group.attrs);
p                 695 drivers/infiniband/hw/mlx4/sysfs.c 		kfree(p->pkey_group.attrs[i]);
p                 696 drivers/infiniband/hw/mlx4/sysfs.c 	kfree(p->pkey_group.attrs);
p                 700 drivers/infiniband/hw/mlx4/sysfs.c 	kfree(p);
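The mlx4 sysfs add_port() entries above allocate per-port attribute groups and, on failure, walk back over whatever was already allocated before freeing the container itself. A small userspace sketch of that allocate-then-unwind pattern follows; alloc_group_attrs() and the attribute strings here are hypothetical and not the driver's sysfs objects.

/* Sketch only: NULL-terminated attribute array with error-path unwinding. */
#include <stdio.h>
#include <stdlib.h>

static char **alloc_group_attrs(int n)
{
	/* one extra slot keeps the array NULL-terminated for the unwind loop */
	char **attrs = calloc(n + 1, sizeof(*attrs));
	int i;

	if (!attrs)
		return NULL;
	for (i = 0; i < n; i++) {
		attrs[i] = malloc(16);
		if (!attrs[i])
			goto err;
		snprintf(attrs[i], 16, "attr%d", i);
	}
	return attrs;

err:
	for (i = 0; attrs[i]; i++)
		free(attrs[i]);
	free(attrs);
	return NULL;
}

int main(void)
{
	char **g = alloc_group_attrs(3);
	int i;

	for (i = 0; g && g[i]; i++)
		printf("%s\n", g[i]);
	if (g) {
		for (i = 0; g[i]; i++)
			free(g[i]);
		free(g);
	}
	return 0;
}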
p                 709 drivers/infiniband/hw/mlx4/sysfs.c 	struct kobject *p, *t;
p                 746 drivers/infiniband/hw/mlx4/sysfs.c 	list_for_each_entry_safe(p, t,
p                 749 drivers/infiniband/hw/mlx4/sysfs.c 		list_del(&p->entry);
p                 750 drivers/infiniband/hw/mlx4/sysfs.c 		mport = container_of(p, struct mlx4_port, kobj);
p                 751 drivers/infiniband/hw/mlx4/sysfs.c 		sysfs_remove_group(p, &mport->pkey_group);
p                 752 drivers/infiniband/hw/mlx4/sysfs.c 		sysfs_remove_group(p, &mport->gid_group);
p                 754 drivers/infiniband/hw/mlx4/sysfs.c 		kobject_put(p);
p                 784 drivers/infiniband/hw/mlx4/sysfs.c 	struct kobject *p, *t;
p                 791 drivers/infiniband/hw/mlx4/sysfs.c 		list_for_each_entry_safe(p, t,
p                 794 drivers/infiniband/hw/mlx4/sysfs.c 			list_del(&p->entry);
p                 795 drivers/infiniband/hw/mlx4/sysfs.c 			port = container_of(p, struct mlx4_port, kobj);
p                 796 drivers/infiniband/hw/mlx4/sysfs.c 			sysfs_remove_group(p, &port->pkey_group);
p                 797 drivers/infiniband/hw/mlx4/sysfs.c 			sysfs_remove_group(p, &port->gid_group);
p                 799 drivers/infiniband/hw/mlx4/sysfs.c 			kobject_put(p);
p                 853 drivers/infiniband/hw/mlx4/sysfs.c 	struct mlx4_ib_iov_port *p;
p                 860 drivers/infiniband/hw/mlx4/sysfs.c 		p = &device->iov_ports[i];
p                 861 drivers/infiniband/hw/mlx4/sysfs.c 		kobject_put(p->admin_alias_parent);
p                 862 drivers/infiniband/hw/mlx4/sysfs.c 		kobject_put(p->gids_parent);
p                 863 drivers/infiniband/hw/mlx4/sysfs.c 		kobject_put(p->pkeys_parent);
p                 864 drivers/infiniband/hw/mlx4/sysfs.c 		kobject_put(p->mcgs_parent);
p                 865 drivers/infiniband/hw/mlx4/sysfs.c 		kobject_put(p->cur_port);
p                 866 drivers/infiniband/hw/mlx4/sysfs.c 		kobject_put(p->cur_port);
p                 867 drivers/infiniband/hw/mlx4/sysfs.c 		kobject_put(p->cur_port);
p                 868 drivers/infiniband/hw/mlx4/sysfs.c 		kobject_put(p->cur_port);
p                 869 drivers/infiniband/hw/mlx4/sysfs.c 		kobject_put(p->cur_port);
p                 870 drivers/infiniband/hw/mlx4/sysfs.c 		kobject_put(p->dev->ports_parent);
p                 871 drivers/infiniband/hw/mlx4/sysfs.c 		kfree(p->dentr_ar);
p                 132 drivers/infiniband/hw/mlx5/mad.c #define MLX5_SUM_CNT(p, cntr1, cntr2)	\
p                 133 drivers/infiniband/hw/mlx5/mad.c 	(MLX5_GET64(query_vport_counter_out, p, cntr1) + \
p                 134 drivers/infiniband/hw/mlx5/mad.c 	MLX5_GET64(query_vport_counter_out, p, cntr2))
p                  53 drivers/infiniband/hw/mlx5/mem.c 	u64 base = ~0, p = 0;
p                  68 drivers/infiniband/hw/mlx5/mem.c 		if (base + p != pfn) {
p                  72 drivers/infiniband/hw/mlx5/mem.c 			tmp = (unsigned long)(pfn | p);
p                  77 drivers/infiniband/hw/mlx5/mem.c 			p = 0;
p                  80 drivers/infiniband/hw/mlx5/mem.c 		p += len;
p                3503 drivers/infiniband/hw/mlx5/qp.c 				u8 p = mlx5_core_native_port_num(dev->mdev) - 1;
p                3504 drivers/infiniband/hw/mlx5/qp.c 				tx_affinity = get_tx_affinity(dev, pd, base, p,
p                4420 drivers/infiniband/hw/mlx5/qp.c 	u8 *p = wqe;
p                4425 drivers/infiniband/hw/mlx5/qp.c 		res ^= p[i];
p                4879 drivers/infiniband/hw/mlx5/qp.c 	__be32 *p = NULL;
p                4885 drivers/infiniband/hw/mlx5/qp.c 			p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
p                4886 drivers/infiniband/hw/mlx5/qp.c 			pr_debug("WQBB at %p:\n", (void *)p);
p                4890 drivers/infiniband/hw/mlx5/qp.c 		pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
p                4891 drivers/infiniband/hw/mlx5/qp.c 			 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
p                4892 drivers/infiniband/hw/mlx5/qp.c 			 be32_to_cpu(p[j + 3]));
p                 120 drivers/infiniband/hw/mthca/mthca_allocator.c 	int p = (index * sizeof (void *)) >> PAGE_SHIFT;
p                 122 drivers/infiniband/hw/mthca/mthca_allocator.c 	if (array->page_list[p].page)
p                 123 drivers/infiniband/hw/mthca/mthca_allocator.c 		return array->page_list[p].page[index & MTHCA_ARRAY_MASK];
p                 130 drivers/infiniband/hw/mthca/mthca_allocator.c 	int p = (index * sizeof (void *)) >> PAGE_SHIFT;
p                 133 drivers/infiniband/hw/mthca/mthca_allocator.c 	if (!array->page_list[p].page)
p                 134 drivers/infiniband/hw/mthca/mthca_allocator.c 		array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC);
p                 136 drivers/infiniband/hw/mthca/mthca_allocator.c 	if (!array->page_list[p].page)
p                 139 drivers/infiniband/hw/mthca/mthca_allocator.c 	array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;
p                 140 drivers/infiniband/hw/mthca/mthca_allocator.c 	++array->page_list[p].used;
p                 147 drivers/infiniband/hw/mthca/mthca_allocator.c 	int p = (index * sizeof (void *)) >> PAGE_SHIFT;
p                 149 drivers/infiniband/hw/mthca/mthca_allocator.c 	if (--array->page_list[p].used == 0) {
p                 150 drivers/infiniband/hw/mthca/mthca_allocator.c 		free_page((unsigned long) array->page_list[p].page);
p                 151 drivers/infiniband/hw/mthca/mthca_allocator.c 		array->page_list[p].page = NULL;
p                 153 drivers/infiniband/hw/mthca/mthca_allocator.c 		array->page_list[p].page[index & MTHCA_ARRAY_MASK] = NULL;
p                 155 drivers/infiniband/hw/mthca/mthca_allocator.c 	if (array->page_list[p].used < 0)
p                 157 drivers/infiniband/hw/mthca/mthca_allocator.c 			 array, index, p, array->page_list[p].used);
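The mthca_allocator entries above index a sparse array by splitting a flat index into a page number ((index * sizeof(void *)) >> PAGE_SHIFT) and an in-page slot (index & MTHCA_ARRAY_MASK), allocating pages lazily on first store. The sketch below shows the same two-level lookup in plain userspace C; the page size, mask, and structure names are stand-ins, not the driver's types.

/* Sketch only: userspace stand-in for a lazily allocated two-level table. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PG_SIZE   4096u
#define PER_PAGE  (PG_SIZE / sizeof(void *))
#define PG_MASK   (PER_PAGE - 1)

struct two_level {
	void  ***pages;		/* one slot table per "page", allocated on demand */
	size_t   npages;
};

static void *tl_get(const struct two_level *a, size_t index)
{
	size_t p = index / PER_PAGE;	/* equivalent to the shift-based formula */

	if (p >= a->npages || !a->pages[p])
		return NULL;
	return a->pages[p][index & PG_MASK];
}

static int tl_set(struct two_level *a, size_t index, void *value)
{
	size_t p = index / PER_PAGE;

	if (p >= a->npages)
		return -1;
	if (!a->pages[p]) {
		a->pages[p] = calloc(PER_PAGE, sizeof(void *));
		if (!a->pages[p])
			return -1;
	}
	a->pages[p][index & PG_MASK] = value;
	return 0;
}

int main(void)
{
	struct two_level a;
	int x = 42;

	a.npages = 4;
	a.pages = calloc(a.npages, sizeof(*a.pages));
	if (!a.pages)
		return 1;
	tl_set(&a, 777, &x);
	printf("slot 777 -> %d\n", *(int *)tl_get(&a, 777));
	return 0;
}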
p                 298 drivers/infiniband/hw/mthca/mthca_mad.c 	int p, q;
p                 303 drivers/infiniband/hw/mthca/mthca_mad.c 	for (p = 0; p < dev->limits.num_ports; ++p)
p                 305 drivers/infiniband/hw/mthca/mthca_mad.c 			agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
p                 313 drivers/infiniband/hw/mthca/mthca_mad.c 			dev->send_agent[p][q] = agent;
p                 317 drivers/infiniband/hw/mthca/mthca_mad.c 	for (p = 1; p <= dev->limits.num_ports; ++p) {
p                 318 drivers/infiniband/hw/mthca/mthca_mad.c 		ret = mthca_update_rate(dev, p);
p                 321 drivers/infiniband/hw/mthca/mthca_mad.c 				  " aborting.\n", p);
p                 329 drivers/infiniband/hw/mthca/mthca_mad.c 	for (p = 0; p < dev->limits.num_ports; ++p)
p                 331 drivers/infiniband/hw/mthca/mthca_mad.c 			if (dev->send_agent[p][q])
p                 332 drivers/infiniband/hw/mthca/mthca_mad.c 				ib_unregister_mad_agent(dev->send_agent[p][q]);
p                 340 drivers/infiniband/hw/mthca/mthca_mad.c 	int p, q;
p                 342 drivers/infiniband/hw/mthca/mthca_mad.c 	for (p = 0; p < dev->limits.num_ports; ++p) {
p                 344 drivers/infiniband/hw/mthca/mthca_mad.c 			agent = dev->send_agent[p][q];
p                 345 drivers/infiniband/hw/mthca/mthca_mad.c 			dev->send_agent[p][q] = NULL;
p                 349 drivers/infiniband/hw/mthca/mthca_mad.c 		if (dev->sm_ah[p])
p                 350 drivers/infiniband/hw/mthca/mthca_mad.c 			rdma_destroy_ah(dev->sm_ah[p],
p                1084 drivers/infiniband/hw/mthca/mthca_main.c 	int p;
p                1090 drivers/infiniband/hw/mthca/mthca_main.c 		for (p = 1; p <= mdev->limits.num_ports; ++p)
p                1091 drivers/infiniband/hw/mthca/mthca_main.c 			mthca_CLOSE_IB(mdev, p);
p                2918 drivers/infiniband/hw/qedr/verbs.c static void swap_wqe_data64(u64 *p)
p                2922 drivers/infiniband/hw/qedr/verbs.c 	for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
p                2923 drivers/infiniband/hw/qedr/verbs.c 		*p = cpu_to_be64(cpu_to_le64(*p));
p                1427 drivers/infiniband/hw/qib/qib.h int qib_map_page(struct pci_dev *d, struct page *p, dma_addr_t *daddr);
p                  86 drivers/infiniband/hw/qib/qib_file_ops.c static u64 cvt_kvaddr(void *p)
p                  91 drivers/infiniband/hw/qib/qib_file_ops.c 	page = vmalloc_to_page(p);
p                 535 drivers/infiniband/hw/qib/qib_file_ops.c 			struct page *p;
p                 538 drivers/infiniband/hw/qib/qib_file_ops.c 			p = dd->pageshadow[ctxttid + tid];
p                 549 drivers/infiniband/hw/qib/qib_file_ops.c 			qib_release_user_pages(&p, 1);
p                1774 drivers/infiniband/hw/qib/qib_file_ops.c 		struct page *p = dd->pageshadow[i];
p                1777 drivers/infiniband/hw/qib/qib_file_ops.c 		if (!p)
p                1785 drivers/infiniband/hw/qib/qib_file_ops.c 		qib_release_user_pages(&p, 1);
p                1202 drivers/infiniband/hw/qib/qib_init.c 					  void *p)
p                 318 drivers/infiniband/hw/qib/qib_mad.c 	__be64 *p = (__be64 *) smp->data;
p                 336 drivers/infiniband/hw/qib/qib_mad.c 			p[0] = g;
p                 338 drivers/infiniband/hw/qib/qib_mad.c 				p[i] = ibp->guids[i - 1];
p                 600 drivers/infiniband/hw/qib/qib_mad.c 	u16 *p = (u16 *) smp->data;
p                 610 drivers/infiniband/hw/qib/qib_mad.c 		get_pkeys(dd, port, p);
p                 613 drivers/infiniband/hw/qib/qib_mad.c 			q[i] = cpu_to_be16(p[i]);
p                 625 drivers/infiniband/hw/qib/qib_mad.c 	__be64 *p = (__be64 *) smp->data;
p                 637 drivers/infiniband/hw/qib/qib_mad.c 			ibp->guids[i - 1] = p[i];
p                1043 drivers/infiniband/hw/qib/qib_mad.c 	__be16 *p = (__be16 *) smp->data;
p                1049 drivers/infiniband/hw/qib/qib_mad.c 		q[i] = be16_to_cpu(p[i]);
p                1061 drivers/infiniband/hw/qib/qib_mad.c 	u8 *p = (u8 *) smp->data;
p                1070 drivers/infiniband/hw/qib/qib_mad.c 			*p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];
p                1079 drivers/infiniband/hw/qib/qib_mad.c 	u8 *p = (u8 *) smp->data;
p                1087 drivers/infiniband/hw/qib/qib_mad.c 	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
p                1088 drivers/infiniband/hw/qib/qib_mad.c 		ibp->sl_to_vl[i] = *p >> 4;
p                1089 drivers/infiniband/hw/qib/qib_mad.c 		ibp->sl_to_vl[i + 1] = *p & 0xF;
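The qib_mad SL-to-VL entries above store two 4-bit VL values per byte: the get path emits (sl_to_vl[i] << 4) | sl_to_vl[i + 1], and the set path splits each byte back with a shift and a 0xF mask. The round trip below demonstrates exactly that packing; the table size and function names are illustrative only.

/* Sketch only: two 4-bit VL values per byte, as in the get/set pair above. */
#include <stdint.h>
#include <stdio.h>

#define N_SL 16	/* hypothetical table size; must be even */

static void pack_sl_to_vl(const uint8_t sl_to_vl[N_SL], uint8_t out[N_SL / 2])
{
	int i;

	for (i = 0; i < N_SL; i += 2)
		out[i / 2] = (uint8_t)((sl_to_vl[i] << 4) | (sl_to_vl[i + 1] & 0xF));
}

static void unpack_sl_to_vl(uint8_t sl_to_vl[N_SL], const uint8_t in[N_SL / 2])
{
	int i;

	for (i = 0; i < N_SL; i += 2) {
		sl_to_vl[i]     = in[i / 2] >> 4;
		sl_to_vl[i + 1] = in[i / 2] & 0xF;
	}
}

int main(void)
{
	uint8_t tbl[N_SL], packed[N_SL / 2], back[N_SL];
	int i, ok = 1;

	for (i = 0; i < N_SL; i++)
		tbl[i] = (uint8_t)(i & 0xF);
	pack_sl_to_vl(tbl, packed);
	unpack_sl_to_vl(back, packed);
	for (i = 0; i < N_SL; i++)
		ok &= (back[i] == tbl[i]);
	printf("round trip ok: %d\n", ok);
	return 0;
}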
p                1154 drivers/infiniband/hw/qib/qib_mad.c 	struct ib_class_port_info *p =
p                1164 drivers/infiniband/hw/qib/qib_mad.c 	p->base_version = 1;
p                1165 drivers/infiniband/hw/qib/qib_mad.c 	p->class_version = 1;
p                1166 drivers/infiniband/hw/qib/qib_mad.c 	p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
p                1171 drivers/infiniband/hw/qib/qib_mad.c 	ib_set_cpi_capmask2(p,
p                1177 drivers/infiniband/hw/qib/qib_mad.c 	ib_set_cpi_resp_time(p, 18);
p                1185 drivers/infiniband/hw/qib/qib_mad.c 	struct ib_pma_portsamplescontrol *p =
p                1192 drivers/infiniband/hw/qib/qib_mad.c 	u8 port_select = p->port_select;
p                1196 drivers/infiniband/hw/qib/qib_mad.c 	p->port_select = port_select;
p                1202 drivers/infiniband/hw/qib/qib_mad.c 	p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
p                1203 drivers/infiniband/hw/qib/qib_mad.c 	p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
p                1204 drivers/infiniband/hw/qib/qib_mad.c 	p->counter_width = 4;   /* 32 bit counters */
p                1205 drivers/infiniband/hw/qib/qib_mad.c 	p->counter_mask0_9 = COUNTER_MASK0_9;
p                1206 drivers/infiniband/hw/qib/qib_mad.c 	p->sample_start = cpu_to_be32(ibp->rvp.pma_sample_start);
p                1207 drivers/infiniband/hw/qib/qib_mad.c 	p->sample_interval = cpu_to_be32(ibp->rvp.pma_sample_interval);
p                1208 drivers/infiniband/hw/qib/qib_mad.c 	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
p                1209 drivers/infiniband/hw/qib/qib_mad.c 	p->counter_select[0] = ibp->rvp.pma_counter_select[0];
p                1210 drivers/infiniband/hw/qib/qib_mad.c 	p->counter_select[1] = ibp->rvp.pma_counter_select[1];
p                1211 drivers/infiniband/hw/qib/qib_mad.c 	p->counter_select[2] = ibp->rvp.pma_counter_select[2];
p                1212 drivers/infiniband/hw/qib/qib_mad.c 	p->counter_select[3] = ibp->rvp.pma_counter_select[3];
p                1213 drivers/infiniband/hw/qib/qib_mad.c 	p->counter_select[4] = ibp->rvp.pma_counter_select[4];
p                1223 drivers/infiniband/hw/qib/qib_mad.c 	struct ib_pma_portsamplescontrol *p =
p                1233 drivers/infiniband/hw/qib/qib_mad.c 	if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
p                1248 drivers/infiniband/hw/qib/qib_mad.c 		ibp->rvp.pma_sample_start = be32_to_cpu(p->sample_start);
p                1249 drivers/infiniband/hw/qib/qib_mad.c 		ibp->rvp.pma_sample_interval = be32_to_cpu(p->sample_interval);
p                1250 drivers/infiniband/hw/qib/qib_mad.c 		ibp->rvp.pma_tag = be16_to_cpu(p->tag);
p                1251 drivers/infiniband/hw/qib/qib_mad.c 		ibp->rvp.pma_counter_select[0] = p->counter_select[0];
p                1252 drivers/infiniband/hw/qib/qib_mad.c 		ibp->rvp.pma_counter_select[1] = p->counter_select[1];
p                1253 drivers/infiniband/hw/qib/qib_mad.c 		ibp->rvp.pma_counter_select[2] = p->counter_select[2];
p                1254 drivers/infiniband/hw/qib/qib_mad.c 		ibp->rvp.pma_counter_select[3] = p->counter_select[3];
p                1255 drivers/infiniband/hw/qib/qib_mad.c 		ibp->rvp.pma_counter_select[4] = p->counter_select[4];
p                1352 drivers/infiniband/hw/qib/qib_mad.c 	struct ib_pma_portsamplesresult *p =
p                1364 drivers/infiniband/hw/qib/qib_mad.c 	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
p                1366 drivers/infiniband/hw/qib/qib_mad.c 		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
p                1369 drivers/infiniband/hw/qib/qib_mad.c 		p->sample_status = cpu_to_be16(status);
p                1380 drivers/infiniband/hw/qib/qib_mad.c 		p->counter[i] = cpu_to_be32(
p                1391 drivers/infiniband/hw/qib/qib_mad.c 	struct ib_pma_portsamplesresult_ext *p =
p                1404 drivers/infiniband/hw/qib/qib_mad.c 	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
p                1406 drivers/infiniband/hw/qib/qib_mad.c 		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
p                1409 drivers/infiniband/hw/qib/qib_mad.c 		p->sample_status = cpu_to_be16(status);
p                1411 drivers/infiniband/hw/qib/qib_mad.c 		p->extended_width = cpu_to_be32(0x80000000);
p                1422 drivers/infiniband/hw/qib/qib_mad.c 		p->counter[i] = cpu_to_be64(
p                1433 drivers/infiniband/hw/qib/qib_mad.c 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
p                1438 drivers/infiniband/hw/qib/qib_mad.c 	u8 port_select = p->port_select;
p                1463 drivers/infiniband/hw/qib/qib_mad.c 	p->port_select = port_select;
p                1468 drivers/infiniband/hw/qib/qib_mad.c 		p->symbol_error_counter = cpu_to_be16(0xFFFF);
p                1470 drivers/infiniband/hw/qib/qib_mad.c 		p->symbol_error_counter =
p                1473 drivers/infiniband/hw/qib/qib_mad.c 		p->link_error_recovery_counter = 0xFF;
p                1475 drivers/infiniband/hw/qib/qib_mad.c 		p->link_error_recovery_counter =
p                1478 drivers/infiniband/hw/qib/qib_mad.c 		p->link_downed_counter = 0xFF;
p                1480 drivers/infiniband/hw/qib/qib_mad.c 		p->link_downed_counter = (u8)cntrs.link_downed_counter;
p                1482 drivers/infiniband/hw/qib/qib_mad.c 		p->port_rcv_errors = cpu_to_be16(0xFFFF);
p                1484 drivers/infiniband/hw/qib/qib_mad.c 		p->port_rcv_errors =
p                1487 drivers/infiniband/hw/qib/qib_mad.c 		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
p                1489 drivers/infiniband/hw/qib/qib_mad.c 		p->port_rcv_remphys_errors =
p                1492 drivers/infiniband/hw/qib/qib_mad.c 		p->port_xmit_discards = cpu_to_be16(0xFFFF);
p                1494 drivers/infiniband/hw/qib/qib_mad.c 		p->port_xmit_discards =
p                1500 drivers/infiniband/hw/qib/qib_mad.c 	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
p                1503 drivers/infiniband/hw/qib/qib_mad.c 		p->vl15_dropped = cpu_to_be16(0xFFFF);
p                1505 drivers/infiniband/hw/qib/qib_mad.c 		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
p                1507 drivers/infiniband/hw/qib/qib_mad.c 		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
p                1509 drivers/infiniband/hw/qib/qib_mad.c 		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
p                1511 drivers/infiniband/hw/qib/qib_mad.c 		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
p                1513 drivers/infiniband/hw/qib/qib_mad.c 		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
p                1515 drivers/infiniband/hw/qib/qib_mad.c 		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
p                1517 drivers/infiniband/hw/qib/qib_mad.c 		p->port_xmit_packets =
p                1520 drivers/infiniband/hw/qib/qib_mad.c 		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
p                1522 drivers/infiniband/hw/qib/qib_mad.c 		p->port_rcv_packets =
p                1532 drivers/infiniband/hw/qib/qib_mad.c 	struct ib_pma_portcounters_cong *p =
p                1583 drivers/infiniband/hw/qib/qib_mad.c 	p->port_check_rate =
p                1587 drivers/infiniband/hw/qib/qib_mad.c 	p->port_adr_events = cpu_to_be64(0);
p                1588 drivers/infiniband/hw/qib/qib_mad.c 	p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
p                1589 drivers/infiniband/hw/qib/qib_mad.c 	p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
p                1590 drivers/infiniband/hw/qib/qib_mad.c 	p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
p                1591 drivers/infiniband/hw/qib/qib_mad.c 	p->port_xmit_packets =
p                1593 drivers/infiniband/hw/qib/qib_mad.c 	p->port_rcv_packets =
p                1596 drivers/infiniband/hw/qib/qib_mad.c 		p->symbol_error_counter = cpu_to_be16(0xFFFF);
p                1598 drivers/infiniband/hw/qib/qib_mad.c 		p->symbol_error_counter =
p                1602 drivers/infiniband/hw/qib/qib_mad.c 		p->link_error_recovery_counter = 0xFF;
p                1604 drivers/infiniband/hw/qib/qib_mad.c 		p->link_error_recovery_counter =
p                1607 drivers/infiniband/hw/qib/qib_mad.c 		p->link_downed_counter = 0xFF;
p                1609 drivers/infiniband/hw/qib/qib_mad.c 		p->link_downed_counter =
p                1612 drivers/infiniband/hw/qib/qib_mad.c 		p->port_rcv_errors = cpu_to_be16(0xFFFF);
p                1614 drivers/infiniband/hw/qib/qib_mad.c 		p->port_rcv_errors =
p                1617 drivers/infiniband/hw/qib/qib_mad.c 		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
p                1619 drivers/infiniband/hw/qib/qib_mad.c 		p->port_rcv_remphys_errors =
p                1623 drivers/infiniband/hw/qib/qib_mad.c 		p->port_xmit_discards = cpu_to_be16(0xFFFF);
p                1625 drivers/infiniband/hw/qib/qib_mad.c 		p->port_xmit_discards =
p                1631 drivers/infiniband/hw/qib/qib_mad.c 	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
p                1634 drivers/infiniband/hw/qib/qib_mad.c 		p->vl15_dropped = cpu_to_be16(0xFFFF);
p                1636 drivers/infiniband/hw/qib/qib_mad.c 		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
p                1645 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_pma_counters *p;
p                1650 drivers/infiniband/hw/qib/qib_mad.c 		p = per_cpu_ptr(ibp->pmastats, cpu);
p                1651 drivers/infiniband/hw/qib/qib_mad.c 		pmacounters->n_unicast_xmit += p->n_unicast_xmit;
p                1652 drivers/infiniband/hw/qib/qib_mad.c 		pmacounters->n_unicast_rcv += p->n_unicast_rcv;
p                1653 drivers/infiniband/hw/qib/qib_mad.c 		pmacounters->n_multicast_xmit += p->n_multicast_xmit;
p                1654 drivers/infiniband/hw/qib/qib_mad.c 		pmacounters->n_multicast_rcv += p->n_multicast_rcv;
p                1661 drivers/infiniband/hw/qib/qib_mad.c 	struct ib_pma_portcounters_ext *p =
p                1667 drivers/infiniband/hw/qib/qib_mad.c 	u8 port_select = p->port_select;
p                1671 drivers/infiniband/hw/qib/qib_mad.c 	p->port_select = port_select;
p                1685 drivers/infiniband/hw/qib/qib_mad.c 	p->port_xmit_data = cpu_to_be64(swords);
p                1686 drivers/infiniband/hw/qib/qib_mad.c 	p->port_rcv_data = cpu_to_be64(rwords);
p                1687 drivers/infiniband/hw/qib/qib_mad.c 	p->port_xmit_packets = cpu_to_be64(spkts);
p                1688 drivers/infiniband/hw/qib/qib_mad.c 	p->port_rcv_packets = cpu_to_be64(rpkts);
p                1692 drivers/infiniband/hw/qib/qib_mad.c 	p->port_unicast_xmit_packets = cpu_to_be64(pma.n_unicast_xmit
p                1694 drivers/infiniband/hw/qib/qib_mad.c 	p->port_unicast_rcv_packets = cpu_to_be64(pma.n_unicast_rcv
p                1696 drivers/infiniband/hw/qib/qib_mad.c 	p->port_multicast_xmit_packets = cpu_to_be64(pma.n_multicast_xmit
p                1698 drivers/infiniband/hw/qib/qib_mad.c 	p->port_multicast_rcv_packets = cpu_to_be64(pma.n_multicast_rcv
p                1708 drivers/infiniband/hw/qib/qib_mad.c 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
p                1720 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
p                1723 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
p                1727 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
p                1730 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
p                1733 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
p                1737 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
p                1740 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
p                1744 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
p                1748 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
p                1753 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
p                1756 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
p                1759 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
p                1762 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
p                1822 drivers/infiniband/hw/qib/qib_mad.c 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
p                1831 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
p                1834 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
p                1837 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
p                1840 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
p                1845 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
p                1848 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
p                1851 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
p                1854 drivers/infiniband/hw/qib/qib_mad.c 	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
p                2098 drivers/infiniband/hw/qib/qib_mad.c 	struct ib_cc_classportinfo_attr *p =
p                2103 drivers/infiniband/hw/qib/qib_mad.c 	p->base_version = 1;
p                2104 drivers/infiniband/hw/qib/qib_mad.c 	p->class_version = 1;
p                2105 drivers/infiniband/hw/qib/qib_mad.c 	p->cap_mask = 0;
p                2110 drivers/infiniband/hw/qib/qib_mad.c 	p->resp_time_value = 18;
p                2118 drivers/infiniband/hw/qib/qib_mad.c 	struct ib_cc_info_attr *p =
p                2125 drivers/infiniband/hw/qib/qib_mad.c 	p->congestion_info = 0;
p                2126 drivers/infiniband/hw/qib/qib_mad.c 	p->control_table_cap = ppd->cc_max_table_entries;
p                2135 drivers/infiniband/hw/qib/qib_mad.c 	struct ib_cc_congestion_setting_attr *p =
p                2146 drivers/infiniband/hw/qib/qib_mad.c 	p->port_control = cpu_to_be16(
p                2148 drivers/infiniband/hw/qib/qib_mad.c 	p->control_map = cpu_to_be16(
p                2151 drivers/infiniband/hw/qib/qib_mad.c 		p->entries[i].ccti_increase = entries[i].ccti_increase;
p                2152 drivers/infiniband/hw/qib/qib_mad.c 		p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
p                2153 drivers/infiniband/hw/qib/qib_mad.c 		p->entries[i].trigger_threshold = entries[i].trigger_threshold;
p                2154 drivers/infiniband/hw/qib/qib_mad.c 		p->entries[i].ccti_min = entries[i].ccti_min;
p                2165 drivers/infiniband/hw/qib/qib_mad.c 	struct ib_cc_table_attr *p =
p                2198 drivers/infiniband/hw/qib/qib_mad.c 	p->ccti_limit = cpu_to_be16(cct_entry);
p                2205 drivers/infiniband/hw/qib/qib_mad.c 		p->ccti_entries[i].entry = cpu_to_be16(entries[i].entry);
p                2218 drivers/infiniband/hw/qib/qib_mad.c 	struct ib_cc_congestion_setting_attr *p =
p                2224 drivers/infiniband/hw/qib/qib_mad.c 	ppd->cc_sl_control_map = be16_to_cpu(p->control_map);
p                2228 drivers/infiniband/hw/qib/qib_mad.c 			p->entries[i].ccti_increase;
p                2231 drivers/infiniband/hw/qib/qib_mad.c 			be16_to_cpu(p->entries[i].ccti_timer);
p                2234 drivers/infiniband/hw/qib/qib_mad.c 			p->entries[i].trigger_threshold;
p                2237 drivers/infiniband/hw/qib/qib_mad.c 			p->entries[i].ccti_min;
p                2246 drivers/infiniband/hw/qib/qib_mad.c 	struct ib_cc_table_attr *p =
p                2262 drivers/infiniband/hw/qib/qib_mad.c 	if (be16_to_cpu(p->ccti_limit) < IB_CCT_ENTRIES)
p                2265 drivers/infiniband/hw/qib/qib_mad.c 	cct_entry = (be16_to_cpu(p->ccti_limit))%IB_CCT_ENTRIES;
p                2273 drivers/infiniband/hw/qib/qib_mad.c 	ppd->ccti_limit = be16_to_cpu(p->ccti_limit);
p                2278 drivers/infiniband/hw/qib/qib_mad.c 		entries[i].entry = be16_to_cpu(p->ccti_entries[i].entry);
p                  40 drivers/infiniband/hw/qib/qib_user_pages.c static void __qib_release_user_pages(struct page **p, size_t num_pages,
p                  43 drivers/infiniband/hw/qib/qib_user_pages.c 	put_user_pages_dirty_lock(p, num_pages, dirty);
p                  95 drivers/infiniband/hw/qib/qib_user_pages.c 		       struct page **p)
p                 114 drivers/infiniband/hw/qib/qib_user_pages.c 				     p + got, NULL);
p                 124 drivers/infiniband/hw/qib/qib_user_pages.c 	__qib_release_user_pages(p, got, 0);
p                 130 drivers/infiniband/hw/qib/qib_user_pages.c void qib_release_user_pages(struct page **p, size_t num_pages)
p                 132 drivers/infiniband/hw/qib/qib_user_pages.c 	__qib_release_user_pages(p, num_pages, 1);
p                 324 drivers/infiniband/hw/qib/qib_verbs.c 		struct rvt_mcast_qp *p;
p                 333 drivers/infiniband/hw/qib/qib_verbs.c 		list_for_each_entry_rcu(p, &mcast->qp_list, list)
p                 334 drivers/infiniband/hw/qib/qib_verbs.c 			qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
p                 106 drivers/infiniband/sw/rdmavt/mad.c 	int p;
p                 109 drivers/infiniband/sw/rdmavt/mad.c 	for (p = 0; p < rdi->dparms.nports; p++) {
p                 110 drivers/infiniband/sw/rdmavt/mad.c 		rvp = rdi->ports[p];
p                 111 drivers/infiniband/sw/rdmavt/mad.c 		agent = ib_register_mad_agent(&rdi->ibdev, p + 1,
p                 123 drivers/infiniband/sw/rdmavt/mad.c 			rdi->driver_f.notify_create_mad_agent(rdi, p);
p                 129 drivers/infiniband/sw/rdmavt/mad.c 	for (p = 0; p < rdi->dparms.nports; p++) {
p                 130 drivers/infiniband/sw/rdmavt/mad.c 		rvp = rdi->ports[p];
p                 136 drivers/infiniband/sw/rdmavt/mad.c 				rdi->driver_f.notify_free_mad_agent(rdi, p);
p                 153 drivers/infiniband/sw/rdmavt/mad.c 	int p;
p                 155 drivers/infiniband/sw/rdmavt/mad.c 	for (p = 0; p < rdi->dparms.nports; p++) {
p                 156 drivers/infiniband/sw/rdmavt/mad.c 		rvp = rdi->ports[p];
p                 169 drivers/infiniband/sw/rdmavt/mad.c 			rdi->driver_f.notify_free_mad_agent(rdi, p);
p                 128 drivers/infiniband/sw/rdmavt/mcast.c 	struct rvt_mcast_qp *p, *tmp;
p                 130 drivers/infiniband/sw/rdmavt/mcast.c 	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
p                 131 drivers/infiniband/sw/rdmavt/mcast.c 		rvt_mcast_qp_free(p);
p                 204 drivers/infiniband/sw/rdmavt/mcast.c 		struct rvt_mcast_qp *p;
p                 227 drivers/infiniband/sw/rdmavt/mcast.c 		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
p                 228 drivers/infiniband/sw/rdmavt/mcast.c 			if (p->qp == mqp->qp) {
p                 349 drivers/infiniband/sw/rdmavt/mcast.c 	struct rvt_mcast_qp *p, *tmp, *delp = NULL;
p                 385 drivers/infiniband/sw/rdmavt/mcast.c 	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
p                 386 drivers/infiniband/sw/rdmavt/mcast.c 		if (p->qp != qp)
p                 392 drivers/infiniband/sw/rdmavt/mcast.c 		list_del_rcu(&p->list);
p                 394 drivers/infiniband/sw/rdmavt/mcast.c 		delp = p;
p                 178 drivers/infiniband/sw/rdmavt/srq.c 		struct rvt_rwqe *p;
p                 237 drivers/infiniband/sw/rdmavt/srq.c 		p = tmp_rq.kwq->curr_wq;
p                 243 drivers/infiniband/sw/rdmavt/srq.c 			p->wr_id = wqe->wr_id;
p                 244 drivers/infiniband/sw/rdmavt/srq.c 			p->num_sge = wqe->num_sge;
p                 246 drivers/infiniband/sw/rdmavt/srq.c 				p->sg_list[i] = wqe->sg_list[i];
p                 248 drivers/infiniband/sw/rdmavt/srq.c 			p = (struct rvt_rwqe *)((char *)p + sz);
p                 479 drivers/infiniband/sw/rxe/rxe_req.c 	u32 *p;
p                 510 drivers/infiniband/sw/rxe/rxe_req.c 	p = payload_addr(pkt) + paylen + bth_pad(pkt);
p                 512 drivers/infiniband/sw/rxe/rxe_req.c 	*p = ~crc;
p                 597 drivers/infiniband/sw/rxe/rxe_resp.c 	u32 *p;
p                 647 drivers/infiniband/sw/rxe/rxe_resp.c 		p = payload_addr(ack) + payload + bth_pad(ack);
p                 648 drivers/infiniband/sw/rxe/rxe_resp.c 		*p = ~crc;
p                 669 drivers/infiniband/sw/rxe/rxe_resp.c 	u32 *p;
p                 742 drivers/infiniband/sw/rxe/rxe_resp.c 	p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
p                 743 drivers/infiniband/sw/rxe/rxe_resp.c 	*p = ~icrc;
p                 593 drivers/infiniband/sw/rxe/rxe_verbs.c 	u8 *p;
p                 603 drivers/infiniband/sw/rxe/rxe_verbs.c 		p = wqe->dma.inline_data;
p                 607 drivers/infiniband/sw/rxe/rxe_verbs.c 			memcpy(p, (void *)(uintptr_t)sge->addr,
p                 610 drivers/infiniband/sw/rxe/rxe_verbs.c 			p += sge->length;
p                1922 drivers/infiniband/sw/siw/siw_cm.c 	struct list_head *p, *tmp;
p                1928 drivers/infiniband/sw/siw/siw_cm.c 	list_for_each_safe(p, tmp, (struct list_head *)id->provider_data) {
p                1929 drivers/infiniband/sw/siw/siw_cm.c 		struct siw_cep *cep = list_entry(p, struct siw_cep, listenq);
p                1931 drivers/infiniband/sw/siw/siw_cm.c 		list_del(p);
p                  35 drivers/infiniband/sw/siw/siw_qp_rx.c 		struct page *p;
p                  39 drivers/infiniband/sw/siw/siw_qp_rx.c 		p = siw_get_upage(umem, dest_addr);
p                  40 drivers/infiniband/sw/siw/siw_qp_rx.c 		if (unlikely(!p)) {
p                  54 drivers/infiniband/sw/siw/siw_qp_rx.c 		siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes);
p                  56 drivers/infiniband/sw/siw/siw_qp_rx.c 		dest = kmap_atomic(p);
p                  66 drivers/infiniband/sw/siw/siw_qp_rx.c 				qp_id(rx_qp(srx)), __func__, len, p, rv);
p                  67 drivers/infiniband/sw/siw/siw_qp_tx.c 			struct page *p;
p                  72 drivers/infiniband/sw/siw/siw_qp_tx.c 				p = siw_get_upage(mem->umem, sge->laddr);
p                  74 drivers/infiniband/sw/siw/siw_qp_tx.c 				p = siw_get_pblpage(mem, sge->laddr, &pbl_idx);
p                  76 drivers/infiniband/sw/siw/siw_qp_tx.c 			if (unlikely(!p))
p                  79 drivers/infiniband/sw/siw/siw_qp_tx.c 			buffer = kmap(p);
p                  87 drivers/infiniband/sw/siw/siw_qp_tx.c 				kunmap(p);
p                  90 drivers/infiniband/sw/siw/siw_qp_tx.c 					p = siw_get_upage(mem->umem,
p                  93 drivers/infiniband/sw/siw/siw_qp_tx.c 					p = siw_get_pblpage(mem,
p                  96 drivers/infiniband/sw/siw/siw_qp_tx.c 				if (unlikely(!p))
p                  99 drivers/infiniband/sw/siw/siw_qp_tx.c 				buffer = kmap(p);
p                 102 drivers/infiniband/sw/siw/siw_qp_tx.c 			kunmap(p);
p                 490 drivers/infiniband/sw/siw/siw_qp_tx.c 				struct page *p;
p                 493 drivers/infiniband/sw/siw/siw_qp_tx.c 					p = siw_get_pblpage(
p                 497 drivers/infiniband/sw/siw/siw_qp_tx.c 					p = siw_get_upage(mem->umem,
p                 499 drivers/infiniband/sw/siw/siw_qp_tx.c 				if (unlikely(!p)) {
p                 505 drivers/infiniband/sw/siw/siw_qp_tx.c 				page_array[seg] = p;
p                 508 drivers/infiniband/sw/siw/siw_qp_tx.c 					iov[seg].iov_base = kmap(p) + fp_off;
p                 521 drivers/infiniband/sw/siw/siw_qp_tx.c 							    kmap(p) + fp_off,
p                 523 drivers/infiniband/sw/siw/siw_qp_tx.c 					kunmap(p);
p                 213 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	struct ipoib_cm_rx *p;
p                 225 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
p                 227 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, NULL))
p                 235 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	struct ipoib_cm_rx *p = ctx;
p                 236 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
p                 243 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	list_move(&p->list, &priv->cm.rx_flush_list);
p                 244 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	p->state = IPOIB_CM_RX_FLUSH;
p                 250 drivers/infiniband/ulp/ipoib/ipoib_cm.c 					   struct ipoib_cm_rx *p)
p                 262 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		.qp_context = p,
p                 447 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	struct ipoib_cm_rx *p;
p                 452 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 453 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (!p)
p                 455 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	p->dev = dev;
p                 456 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	p->id = cm_id;
p                 457 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	cm_id->context = p;
p                 458 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	p->state = IPOIB_CM_RX_LIVE;
p                 459 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	p->jiffies = jiffies;
p                 460 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	INIT_LIST_HEAD(&p->list);
p                 462 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	p->qp = ipoib_cm_create_rx_qp(dev, p);
p                 463 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (IS_ERR(p->qp)) {
p                 464 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		ret = PTR_ERR(p->qp);
p                 469 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
p                 474 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
p                 484 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	p->jiffies = jiffies;
p                 485 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (p->state == IPOIB_CM_RX_LIVE)
p                 486 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		list_move(&p->list, &priv->cm.passive_ids);
p                 489 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
p                 492 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
p                 498 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	ib_destroy_qp(p->qp);
p                 500 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	kfree(p);
p                 507 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	struct ipoib_cm_rx *p;
p                 517 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		p = cm_id->context;
p                 518 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		priv = ipoib_priv(p->dev);
p                 519 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
p                 566 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	struct ipoib_cm_rx *p;
p                 589 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	p = wc->qp->qp_context;
p                 592 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;
p                 604 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			if (!--p->recv_count) {
p                 606 drivers/infiniband/ulp/ipoib/ipoib_cm.c 				list_move(&p->list, &priv->cm.rx_reap_list);
p                 615 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
p                 617 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			p->jiffies = jiffies;
p                 620 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			if (p->state == IPOIB_CM_RX_LIVE)
p                 621 drivers/infiniband/ulp/ipoib/ipoib_cm.c 				list_move(&p->list, &priv->cm.passive_ids);
p                 685 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
p                 689 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			--p->recv_count;
p                 930 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	struct ipoib_cm_rx *p;
p                 942 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
p                 943 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		list_move(&p->list, &priv->cm.rx_error_list);
p                 944 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		p->state = IPOIB_CM_RX_ERROR;
p                 946 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
p                 988 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	struct ipoib_cm_tx *p = cm_id->context;
p                 989 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
p                 996 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	p->mtu = be32_to_cpu(data->mtu);
p                 998 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (p->mtu <= IPOIB_ENCAP_LEN) {
p                1000 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			   p->mtu, IPOIB_ENCAP_LEN);
p                1012 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
p                1024 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
p                1032 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	netif_tx_lock_bh(p->dev);
p                1034 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
p                1035 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (p->neigh)
p                1036 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		while ((skb = __skb_dequeue(&p->neigh->queue)))
p                1039 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	netif_tx_unlock_bh(p->dev);
p                1042 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		skb->dev = p->dev;
p                1144 drivers/infiniband/ulp/ipoib/ipoib_cm.c static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
p                1147 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
p                1152 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	p->tx_ring = vzalloc(array_size(ipoib_sendq_size, sizeof(*p->tx_ring)));
p                1153 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (!p->tx_ring) {
p                1159 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
p                1161 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (IS_ERR(p->qp)) {
p                1162 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		ret = PTR_ERR(p->qp);
p                1167 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
p                1168 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (IS_ERR(p->id)) {
p                1169 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		ret = PTR_ERR(p->id);
p                1174 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	ret = ipoib_cm_modify_tx_init(p->dev, p->id,  p->qp);
p                1180 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
p                1187 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		  p->qp->qp_num, pathrec->dgid.raw, qpn);
p                1192 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	ib_destroy_cm_id(p->id);
p                1194 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	p->id = NULL;
p                1195 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	ib_destroy_qp(p->qp);
p                1197 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	p->qp = NULL;
p                1198 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	vfree(p->tx_ring);
p                1203 drivers/infiniband/ulp/ipoib/ipoib_cm.c static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
p                1205 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
p                1210 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);
p                1212 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (p->id)
p                1213 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		ib_destroy_cm_id(p->id);
p                1215 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (p->tx_ring) {
p                1218 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		while ((int) p->tx_tail - (int) p->tx_head < 0) {
p                1221 drivers/infiniband/ulp/ipoib/ipoib_cm.c 					   p->tx_head - p->tx_tail);
p                1231 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	while ((int) p->tx_tail - (int) p->tx_head < 0) {
p                1232 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
p                1235 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		netif_tx_lock_bh(p->dev);
p                1236 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		++p->tx_tail;
p                1240 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		    netif_queue_stopped(p->dev) &&
p                1242 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			netif_wake_queue(p->dev);
p                1243 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		netif_tx_unlock_bh(p->dev);
p                1246 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (p->qp)
p                1247 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		ib_destroy_qp(p->qp);
p                1249 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	vfree(p->tx_ring);
p                1250 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	kfree(p);
p                1347 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	struct ipoib_cm_tx *p;
p                1359 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
p                1360 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		list_del_init(&p->list);
p                1361 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		neigh = p->neigh;
p                1380 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		ret = ipoib_cm_tx_init(p, qpn, &pathrec);
p                1387 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			neigh = p->neigh;
p                1392 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			list_del(&p->list);
p                1393 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			kfree(p);
p                1406 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	struct ipoib_cm_tx *p;
p                1413 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
p                1414 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		list_del_init(&p->list);
p                1417 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		ipoib_cm_tx_destroy(p);
p                1485 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	struct ipoib_cm_rx *p;
p                1492 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
p                1493 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
p                1495 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		list_move(&p->list, &priv->cm.rx_error_list);
p                1496 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		p->state = IPOIB_CM_RX_ERROR;
p                1498 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
p                 121 drivers/infiniband/ulp/ipoib/ipoib_ethtool.c 	u8 *p = (u8 *)net_stats;
p                 124 drivers/infiniband/ulp/ipoib/ipoib_ethtool.c 		data[i] = *(u64 *)(p + ipoib_gstrings_stats[i].stat_offset);
p                 130 drivers/infiniband/ulp/ipoib/ipoib_ethtool.c 	u8 *p = data;
p                 136 drivers/infiniband/ulp/ipoib/ipoib_ethtool.c 			memcpy(p, ipoib_gstrings_stats[i].stat_string,
p                 138 drivers/infiniband/ulp/ipoib/ipoib_ethtool.c 			p += ETH_GSTRING_LEN;
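
The ipoib_ethtool.c entries above show the usual ethtool statistics pattern: a table of {name, byte offset} descriptors, with each counter fetched by adding the stored offset to a byte pointer over the stats structure. A minimal userspace sketch of that pattern follows; the struct layout and stat names are hypothetical, not taken from the driver.

	/* Illustrative sketch of the {name, offset} stats-table pattern. */
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct demo_stats {
		uint64_t rx_packets;
		uint64_t tx_packets;
	};

	struct stat_desc {
		const char *name;
		size_t offset;		/* byte offset into struct demo_stats */
	};

	static const struct stat_desc demo_gstrings_stats[] = {
		{ "rx_packets", offsetof(struct demo_stats, rx_packets) },
		{ "tx_packets", offsetof(struct demo_stats, tx_packets) },
	};

	int main(void)
	{
		struct demo_stats stats = { .rx_packets = 42, .tx_packets = 7 };
		const uint8_t *p = (const uint8_t *)&stats;	/* byte base, like 'p' above */
		size_t i;

		for (i = 0; i < sizeof(demo_gstrings_stats) / sizeof(demo_gstrings_stats[0]); i++)
			printf("%-12s %llu\n", demo_gstrings_stats[i].name,
			       (unsigned long long)*(const uint64_t *)(p + demo_gstrings_stats[i].offset));
		return 0;
	}
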
p                2518 drivers/infiniband/ulp/ipoib/ipoib_main.c 	unsigned int p;
p                2527 drivers/infiniband/ulp/ipoib/ipoib_main.c 	rdma_for_each_port (device, p) {
p                2528 drivers/infiniband/ulp/ipoib/ipoib_main.c 		if (!rdma_protocol_ib(device, p))
p                2530 drivers/infiniband/ulp/ipoib/ipoib_main.c 		dev = ipoib_add_port("ib%d", device, p);
p                 153 drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c 		char *p = (char *)&vstats + vnic_gstrings_stats[i].stat_offset;
p                 156 drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c 			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
p                3495 drivers/infiniband/ulp/srp/ib_srp.c 	char *p;
p                3509 drivers/infiniband/ulp/srp/ib_srp.c 	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
p                3510 drivers/infiniband/ulp/srp/ib_srp.c 		if (!*p)
p                3513 drivers/infiniband/ulp/srp/ib_srp.c 		token = match_token(p, srp_opt_tokens, args);
p                3518 drivers/infiniband/ulp/srp/ib_srp.c 			p = match_strdup(args);
p                3519 drivers/infiniband/ulp/srp/ib_srp.c 			if (!p) {
p                3523 drivers/infiniband/ulp/srp/ib_srp.c 			ret = kstrtoull(p, 16, &ull);
p                3525 drivers/infiniband/ulp/srp/ib_srp.c 				pr_warn("invalid id_ext parameter '%s'\n", p);
p                3526 drivers/infiniband/ulp/srp/ib_srp.c 				kfree(p);
p                3530 drivers/infiniband/ulp/srp/ib_srp.c 			kfree(p);
p                3534 drivers/infiniband/ulp/srp/ib_srp.c 			p = match_strdup(args);
p                3535 drivers/infiniband/ulp/srp/ib_srp.c 			if (!p) {
p                3539 drivers/infiniband/ulp/srp/ib_srp.c 			ret = kstrtoull(p, 16, &ull);
p                3541 drivers/infiniband/ulp/srp/ib_srp.c 				pr_warn("invalid ioc_guid parameter '%s'\n", p);
p                3542 drivers/infiniband/ulp/srp/ib_srp.c 				kfree(p);
p                3546 drivers/infiniband/ulp/srp/ib_srp.c 			kfree(p);
p                3550 drivers/infiniband/ulp/srp/ib_srp.c 			p = match_strdup(args);
p                3551 drivers/infiniband/ulp/srp/ib_srp.c 			if (!p) {
p                3555 drivers/infiniband/ulp/srp/ib_srp.c 			if (strlen(p) != 32) {
p                3556 drivers/infiniband/ulp/srp/ib_srp.c 				pr_warn("bad dest GID parameter '%s'\n", p);
p                3557 drivers/infiniband/ulp/srp/ib_srp.c 				kfree(p);
p                3561 drivers/infiniband/ulp/srp/ib_srp.c 			ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
p                3562 drivers/infiniband/ulp/srp/ib_srp.c 			kfree(p);
p                3569 drivers/infiniband/ulp/srp/ib_srp.c 				pr_warn("bad P_Key parameter '%s'\n", p);
p                3576 drivers/infiniband/ulp/srp/ib_srp.c 			p = match_strdup(args);
p                3577 drivers/infiniband/ulp/srp/ib_srp.c 			if (!p) {
p                3581 drivers/infiniband/ulp/srp/ib_srp.c 			ret = kstrtoull(p, 16, &ull);
p                3583 drivers/infiniband/ulp/srp/ib_srp.c 				pr_warn("bad service_id parameter '%s'\n", p);
p                3584 drivers/infiniband/ulp/srp/ib_srp.c 				kfree(p);
p                3588 drivers/infiniband/ulp/srp/ib_srp.c 			kfree(p);
p                3592 drivers/infiniband/ulp/srp/ib_srp.c 			p = match_strdup(args);
p                3593 drivers/infiniband/ulp/srp/ib_srp.c 			if (!p) {
p                3597 drivers/infiniband/ulp/srp/ib_srp.c 			ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
p                3600 drivers/infiniband/ulp/srp/ib_srp.c 				pr_warn("bad source parameter '%s'\n", p);
p                3601 drivers/infiniband/ulp/srp/ib_srp.c 				kfree(p);
p                3605 drivers/infiniband/ulp/srp/ib_srp.c 			kfree(p);
p                3609 drivers/infiniband/ulp/srp/ib_srp.c 			p = match_strdup(args);
p                3610 drivers/infiniband/ulp/srp/ib_srp.c 			if (!p) {
p                3614 drivers/infiniband/ulp/srp/ib_srp.c 			ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
p                3619 drivers/infiniband/ulp/srp/ib_srp.c 				pr_warn("bad dest parameter '%s'\n", p);
p                3620 drivers/infiniband/ulp/srp/ib_srp.c 				kfree(p);
p                3624 drivers/infiniband/ulp/srp/ib_srp.c 			kfree(p);
p                3629 drivers/infiniband/ulp/srp/ib_srp.c 				pr_warn("bad max sect parameter '%s'\n", p);
p                3637 drivers/infiniband/ulp/srp/ib_srp.c 				pr_warn("bad queue_size parameter '%s'\n", p);
p                3650 drivers/infiniband/ulp/srp/ib_srp.c 					p);
p                3659 drivers/infiniband/ulp/srp/ib_srp.c 					p);
p                3667 drivers/infiniband/ulp/srp/ib_srp.c 				pr_warn("bad IO class parameter '%s'\n", p);
p                3681 drivers/infiniband/ulp/srp/ib_srp.c 			p = match_strdup(args);
p                3682 drivers/infiniband/ulp/srp/ib_srp.c 			if (!p) {
p                3686 drivers/infiniband/ulp/srp/ib_srp.c 			ret = kstrtoull(p, 16, &ull);
p                3688 drivers/infiniband/ulp/srp/ib_srp.c 				pr_warn("bad initiator_ext value '%s'\n", p);
p                3689 drivers/infiniband/ulp/srp/ib_srp.c 				kfree(p);
p                3693 drivers/infiniband/ulp/srp/ib_srp.c 			kfree(p);
p                3699 drivers/infiniband/ulp/srp/ib_srp.c 					p);
p                3707 drivers/infiniband/ulp/srp/ib_srp.c 				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
p                3717 drivers/infiniband/ulp/srp/ib_srp.c 					p);
p                3725 drivers/infiniband/ulp/srp/ib_srp.c 				pr_warn("bad comp_vector parameter '%s'\n", p);
p                3734 drivers/infiniband/ulp/srp/ib_srp.c 					p);
p                3742 drivers/infiniband/ulp/srp/ib_srp.c 				p);
p                4118 drivers/infiniband/ulp/srp/ib_srp.c 	unsigned int p;
p                4185 drivers/infiniband/ulp/srp/ib_srp.c 	rdma_for_each_port (device, p) {
p                4186 drivers/infiniband/ulp/srp/ib_srp.c 		host = srp_add_port(srp_dev, p);
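
The srp_parse_options() entries above walk a comma-separated option string with strsep(), match each token, duplicate its value, and parse hex numbers, warning and bailing out on malformed input. A minimal userspace sketch of that loop shape, assuming a glibc-style strsep() and plain strtoull() in place of the kernel's match_token()/kstrtoull() helpers; the option name handled here is only illustrative.

	/* Illustrative sketch of strsep()-based "key=value" option parsing. */
	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static int parse_options(char *opts, unsigned long long *id_ext)
	{
		char *p;

		while ((p = strsep(&opts, ",\n")) != NULL) {
			if (!*p)		/* skip empty tokens */
				continue;

			if (strncmp(p, "id_ext=", 7) == 0) {
				char *end;

				errno = 0;
				*id_ext = strtoull(p + 7, &end, 16);
				if (errno || *end != '\0') {
					fprintf(stderr, "invalid id_ext parameter '%s'\n", p + 7);
					return -1;
				}
			}
			/* further keys would be matched here */
		}
		return 0;
	}

	int main(void)
	{
		char opts[] = "id_ext=0x2,other=beef";	/* mutable copy for strsep() */
		unsigned long long id_ext = 0;

		if (parse_options(opts, &id_ext) == 0)
			printf("id_ext = 0x%llx\n", id_ext);
		return 0;
	}
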
p                3391 drivers/infiniband/ulp/srpt/ib_srpt.c 	const char *p;
p                3395 drivers/infiniband/ulp/srpt/ib_srpt.c 	p = name;
p                3396 drivers/infiniband/ulp/srpt/ib_srpt.c 	if (strncasecmp(p, "0x", 2) == 0)
p                3397 drivers/infiniband/ulp/srpt/ib_srpt.c 		p += 2;
p                3399 drivers/infiniband/ulp/srpt/ib_srpt.c 	len = strlen(p);
p                3405 drivers/infiniband/ulp/srpt/ib_srpt.c 	ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
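
The ib_srpt.c entries above strip an optional "0x" prefix with strncasecmp() and then convert the remaining hex text to bytes with hex2bin(). A standalone sketch of the same conversion, with a hand-rolled nibble decoder standing in for hex2bin() (illustrative only, not the driver's helper):

	/* Illustrative sketch: optional "0x" prefix, then hex text -> bytes. */
	#include <ctype.h>
	#include <stdio.h>
	#include <string.h>
	#include <strings.h>

	static int hex_nibble(char c)
	{
		if (c >= '0' && c <= '9')
			return c - '0';
		c = (char)tolower((unsigned char)c);
		if (c >= 'a' && c <= 'f')
			return c - 'a' + 10;
		return -1;
	}

	/* Convert "0x..." or "..." hex text into out[]; return byte count or -1. */
	static int parse_hex_id(const char *name, unsigned char *out, size_t out_len)
	{
		const char *p = name;
		size_t i, len;

		if (strncasecmp(p, "0x", 2) == 0)
			p += 2;
		len = strlen(p);
		if (len == 0 || len % 2 || len / 2 > out_len)
			return -1;

		for (i = 0; i < len / 2; i++) {
			int hi = hex_nibble(p[2 * i]);
			int lo = hex_nibble(p[2 * i + 1]);

			if (hi < 0 || lo < 0)
				return -1;
			out[i] = (unsigned char)((hi << 4) | lo);
		}
		return (int)(len / 2);
	}

	int main(void)
	{
		unsigned char id[16];
		int n = parse_hex_id("0xDEADbeef", id, sizeof(id));

		if (n > 0)
			printf("%d bytes, first = 0x%02x\n", n, id[0]);	/* 4 bytes, 0xde */
		return 0;
	}
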
p                 635 drivers/input/evdev.c 			unsigned int maxlen, void __user *p, int compat)
p                 645 drivers/input/evdev.c 			if (copy_to_user((compat_long_t __user *) p + i,
p                 655 drivers/input/evdev.c 		if (copy_to_user(p, bits, len))
p                 663 drivers/input/evdev.c 			  unsigned int maxlen, const void __user *p, int compat)
p                 678 drivers/input/evdev.c 					   (compat_long_t __user *) p + i,
p                 692 drivers/input/evdev.c 		if (copy_from_user(bits, p, len))
p                 702 drivers/input/evdev.c 			unsigned int maxlen, void __user *p, int compat)
p                 711 drivers/input/evdev.c 	return copy_to_user(p, bits, len) ? -EFAULT : len;
p                 715 drivers/input/evdev.c 			  unsigned int maxlen, const void __user *p, int compat)
p                 728 drivers/input/evdev.c 	return copy_from_user(bits, p, len) ? -EFAULT : len;
p                 736 drivers/input/evdev.c 			unsigned int maxlen, void __user *p, int compat)
p                 743 drivers/input/evdev.c 	return copy_to_user(p, bits, len) ? -EFAULT : len;
p                 747 drivers/input/evdev.c 			  unsigned int maxlen, const void __user *p, int compat)
p                 758 drivers/input/evdev.c 	return copy_from_user(bits, p, len) ? -EFAULT : len;
p                 763 drivers/input/evdev.c static int str_to_user(const char *str, unsigned int maxlen, void __user *p)
p                 774 drivers/input/evdev.c 	return copy_to_user(p, str, len) ? -EFAULT : len;
p                 779 drivers/input/evdev.c 			    void __user *p, int compat_mode)
p                 798 drivers/input/evdev.c 	return bits_to_user(bits, len, size, p, compat_mode);
p                 801 drivers/input/evdev.c static int evdev_handle_get_keycode(struct input_dev *dev, void __user *p)
p                 807 drivers/input/evdev.c 	int __user *ip = (int __user *)p;
p                 811 drivers/input/evdev.c 	if (copy_from_user(ke.scancode, p, sizeof(unsigned int)))
p                 824 drivers/input/evdev.c static int evdev_handle_get_keycode_v2(struct input_dev *dev, void __user *p)
p                 829 drivers/input/evdev.c 	if (copy_from_user(&ke, p, sizeof(ke)))
p                 836 drivers/input/evdev.c 	if (copy_to_user(p, &ke, sizeof(ke)))
p                 842 drivers/input/evdev.c static int evdev_handle_set_keycode(struct input_dev *dev, void __user *p)
p                 848 drivers/input/evdev.c 	int __user *ip = (int __user *)p;
p                 850 drivers/input/evdev.c 	if (copy_from_user(ke.scancode, p, sizeof(unsigned int)))
p                 859 drivers/input/evdev.c static int evdev_handle_set_keycode_v2(struct input_dev *dev, void __user *p)
p                 863 drivers/input/evdev.c 	if (copy_from_user(&ke, p, sizeof(ke)))
p                 888 drivers/input/evdev.c 				unsigned int maxlen, void __user *p,
p                 909 drivers/input/evdev.c 	ret = bits_to_user(mem, maxbit, maxlen, p, compat);
p                1029 drivers/input/evdev.c 			   void __user *p, int compat_mode)
p                1037 drivers/input/evdev.c 	int __user *ip = (int __user *)p;
p                1049 drivers/input/evdev.c 		if (copy_to_user(p, &dev->id, sizeof(struct input_id)))
p                1076 drivers/input/evdev.c 		return input_ff_erase(dev, (int)(unsigned long) p, file);
p                1086 drivers/input/evdev.c 		if (p)
p                1092 drivers/input/evdev.c 		if (p)
p                1100 drivers/input/evdev.c 		if (copy_from_user(&mask, p, sizeof(mask)))
p                1112 drivers/input/evdev.c 		if (copy_from_user(&mask, p, sizeof(mask)))
p                1122 drivers/input/evdev.c 		if (copy_from_user(&i, p, sizeof(unsigned int)))
p                1128 drivers/input/evdev.c 		return evdev_handle_get_keycode(dev, p);
p                1131 drivers/input/evdev.c 		return evdev_handle_set_keycode(dev, p);
p                1134 drivers/input/evdev.c 		return evdev_handle_get_keycode_v2(dev, p);
p                1137 drivers/input/evdev.c 		return evdev_handle_set_keycode_v2(dev, p);
p                1148 drivers/input/evdev.c 				    size, p, compat_mode);
p                1155 drivers/input/evdev.c 					    KEY_MAX, size, p, compat_mode);
p                1159 drivers/input/evdev.c 					    LED_MAX, size, p, compat_mode);
p                1163 drivers/input/evdev.c 					    SND_MAX, size, p, compat_mode);
p                1167 drivers/input/evdev.c 					    SW_MAX, size, p, compat_mode);
p                1170 drivers/input/evdev.c 		return str_to_user(dev->name, size, p);
p                1173 drivers/input/evdev.c 		return str_to_user(dev->phys, size, p);
p                1176 drivers/input/evdev.c 		return str_to_user(dev->uniq, size, p);
p                1179 drivers/input/evdev.c 		if (input_ff_effect_from_user(p, size, &effect))
p                1186 drivers/input/evdev.c 		if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
p                1201 drivers/input/evdev.c 						p, compat_mode);
p                1211 drivers/input/evdev.c 			if (copy_to_user(p, &abs, min_t(size_t,
p                1228 drivers/input/evdev.c 			if (copy_from_user(&abs, p, min_t(size_t,
p                1256 drivers/input/evdev.c 				void __user *p, int compat_mode)
p                1271 drivers/input/evdev.c 	retval = evdev_do_ioctl(file, cmd, p, compat_mode);
p                 248 drivers/input/input-mt.c 			int p = input_mt_get_value(oldest, ABS_MT_PRESSURE);
p                 249 drivers/input/input-mt.c 			input_event(dev, EV_ABS, ABS_PRESSURE, p);
p                 317 drivers/input/input-mt.c 	int f, *p, s, c;
p                 323 drivers/input/input-mt.c 	p = begin + step;
p                 324 drivers/input/input-mt.c 	s = p == end ? f + 1 : *p;
p                 326 drivers/input/input-mt.c 	for (; p != end; p += step)
p                 327 drivers/input/input-mt.c 		if (*p < f)
p                 328 drivers/input/input-mt.c 			s = f, f = *p;
p                 329 drivers/input/input-mt.c 		else if (*p < s)
p                 330 drivers/input/input-mt.c 			s = *p;
p                 339 drivers/input/input-mt.c 	for (p = begin; p != end; p += step)
p                 340 drivers/input/input-mt.c 		*p -= c;
p                 364 drivers/input/input-mt.c 	const struct input_mt_pos *p;
p                 374 drivers/input/input-mt.c 		for (p = pos; p != pos + num_pos; p++) {
p                 375 drivers/input/input-mt.c 			int dx = x - p->x, dy = y - p->y;
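
The input-mt.c entries around line 317 scan an array once to find its smallest and second-smallest values (f and s) and later subtract a constant from every element. A minimal sketch of that one-pass scan (illustrative, not the kernel function):

	/* Illustrative sketch: smallest and second-smallest value in one pass. */
	#include <stdio.h>

	static void two_smallest(const int *begin, const int *end, int *lowest, int *second)
	{
		const int *p;
		int f = *begin;					/* smallest so far */
		int s = (begin + 1 == end) ? f + 1 : begin[1];	/* second smallest so far */

		for (p = begin + 1; p != end; p++) {
			if (*p < f) {
				s = f;
				f = *p;
			} else if (*p < s) {
				s = *p;
			}
		}
		*lowest = f;
		*second = s;
	}

	int main(void)
	{
		int v[] = { 7, 3, 9, 1, 4 };
		int f, s;

		two_smallest(v, v + 5, &f, &s);
		printf("min=%d second=%d\n", f, s);	/* min=1 second=3 */
		return 0;
	}
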
p                1102 drivers/input/input.c 	void *p;
p                  82 drivers/input/joystick/gf2k.c 	unsigned int t, p;
p                  86 drivers/input/joystick/gf2k.c 	p = gameport_time(gameport, GF2K_STROBE);
p                 100 drivers/input/joystick/gf2k.c 			t = p;
p                 142 drivers/input/joystick/gf2k.c #define GB(p,n,s)	gf2k_get_bits(data, p, n, s)
p                 134 drivers/input/joystick/tmdc.c 	int i[2], j[2], t[2], p, k;
p                 136 drivers/input/joystick/tmdc.c 	p = gameport_time(gameport, TMDC_MAX_STROBE);
p                 155 drivers/input/joystick/tmdc.c 				t[k] = p;
p                 117 drivers/input/joystick/walkera0701.c static inline int read_ack(struct pardevice *p)
p                 119 drivers/input/joystick/walkera0701.c 	return parport_read_status(p->port) & 0x40;
p                  91 drivers/input/keyboard/hil_kbd.c static bool hil_dev_is_command_response(hil_packet p)
p                  93 drivers/input/keyboard/hil_kbd.c 	if ((p & ~HIL_CMDCT_POL) == (HIL_ERR_INT | HIL_PKT_CMD | HIL_CMD_POL))
p                  96 drivers/input/keyboard/hil_kbd.c 	if ((p & ~HIL_CMDCT_RPL) == (HIL_ERR_INT | HIL_PKT_CMD | HIL_CMD_RPL))
p                 104 drivers/input/keyboard/hil_kbd.c 	hil_packet p;
p                 109 drivers/input/keyboard/hil_kbd.c 	p = dev->data[idx - 1];
p                 111 drivers/input/keyboard/hil_kbd.c 	switch (p & HIL_PKT_DATA_MASK) {
p                 131 drivers/input/keyboard/hil_kbd.c 		if (p != (HIL_ERR_INT | HIL_PKT_CMD)) {
p                 133 drivers/input/keyboard/hil_kbd.c 			printk(KERN_WARNING PREFIX "Device sent unknown record %x\n", p);
p                 209 drivers/input/keyboard/hil_kbd.c 	hil_packet p = ptr->data[idx - 1];
p                 213 drivers/input/keyboard/hil_kbd.c 	if ((p & HIL_CMDCT_POL) != idx - 1) {
p                 215 drivers/input/keyboard/hil_kbd.c 			"Malformed poll packet %x (idx = %i)\n", p, idx);
p                 219 drivers/input/keyboard/hil_kbd.c 	i = (p & HIL_POL_AXIS_ALT) ? 3 : 0;
p                 220 drivers/input/keyboard/hil_kbd.c 	laxis = (p & HIL_POL_NUM_AXES_MASK) + i;
p                  47 drivers/input/keyboard/hilkbd.c  #define hil_readb(p)		gsc_readb(p)
p                  48 drivers/input/keyboard/hilkbd.c  #define hil_writeb(v,p)	gsc_writeb((v),(p))
p                  56 drivers/input/keyboard/hilkbd.c  #define hil_readb(p)		readb((const volatile void __iomem *)(p))
p                  57 drivers/input/keyboard/hilkbd.c  #define hil_writeb(v, p)	writeb((v), (volatile void __iomem *)(p))
p                  49 drivers/input/keyboard/sh_keysc.c static unsigned long sh_keysc_read(struct sh_keysc_priv *p, int reg_nr)
p                  51 drivers/input/keyboard/sh_keysc.c 	return ioread16(p->iomem_base + (reg_nr << 2));
p                  54 drivers/input/keyboard/sh_keysc.c static void sh_keysc_write(struct sh_keysc_priv *p, int reg_nr,
p                  57 drivers/input/keyboard/sh_keysc.c 	iowrite16(value, p->iomem_base + (reg_nr << 2));
p                  60 drivers/input/keyboard/sh_keysc.c static void sh_keysc_level_mode(struct sh_keysc_priv *p,
p                  63 drivers/input/keyboard/sh_keysc.c 	struct sh_keysc_info *pdata = &p->pdata;
p                  65 drivers/input/keyboard/sh_keysc.c 	sh_keysc_write(p, KYOUTDR, 0);
p                  66 drivers/input/keyboard/sh_keysc.c 	sh_keysc_write(p, KYCR2, KYCR2_IRQ_LEVEL | (keys_set << 8));
p                 254 drivers/input/misc/apanel.c 	const void __iomem *p;
p                 261 drivers/input/misc/apanel.c 	p = bios_signature(bios);
p                 262 drivers/input/misc/apanel.c 	if (!p) {
p                 268 drivers/input/misc/apanel.c 	p += 8;
p                 269 drivers/input/misc/apanel.c 	i2c_addr = readb(p + 3) >> 1;
p                 271 drivers/input/misc/apanel.c 	for ( ; (devno = readb(p)) & 0x7f; p += 4) {
p                 274 drivers/input/misc/apanel.c 		method = readb(p + 1);
p                 275 drivers/input/misc/apanel.c 		chip = readb(p + 2);
p                 276 drivers/input/misc/apanel.c 		slave = readb(p + 3) >> 1;
p                  91 drivers/input/misc/ati_remote2.c #define param_check_channel_mask(name, p) __param_check(name, p, unsigned int)
p                 100 drivers/input/misc/ati_remote2.c #define param_check_mode_mask(name, p) __param_check(name, p, unsigned int)
p                 816 drivers/input/misc/uinput.c 	char __user *p = dest;
p                 829 drivers/input/misc/uinput.c 	ret = copy_to_user(p, str, len);
p                 834 drivers/input/misc/uinput.c 	ret = put_user(0, p + len - 1);
p                 839 drivers/input/misc/uinput.c 				 unsigned long arg, void __user *p)
p                 864 drivers/input/misc/uinput.c 		if (put_user(UINPUT_VERSION, (unsigned int __user *)p))
p                 877 drivers/input/misc/uinput.c 		retval = uinput_dev_setup(udev, p);
p                 928 drivers/input/misc/uinput.c 		phys = strndup_user(p, 1024);
p                 939 drivers/input/misc/uinput.c 		retval = uinput_ff_upload_from_user(p, &ff_up);
p                 957 drivers/input/misc/uinput.c 		retval = uinput_ff_upload_to_user(p, &ff_up);
p                 961 drivers/input/misc/uinput.c 		if (copy_from_user(&ff_erase, p, sizeof(ff_erase))) {
p                 974 drivers/input/misc/uinput.c 		if (copy_to_user(p, &ff_erase, sizeof(ff_erase))) {
p                 982 drivers/input/misc/uinput.c 		retval = uinput_ff_upload_from_user(p, &ff_up);
p                 998 drivers/input/misc/uinput.c 		if (copy_from_user(&ff_erase, p, sizeof(ff_erase))) {
p                1024 drivers/input/misc/uinput.c 		retval = uinput_str_to_user(p, name, size);
p                1028 drivers/input/misc/uinput.c 		retval = uinput_abs_setup(udev, p, size);
p                  68 drivers/input/misc/yealink.c  	  .u = { .p = { .name = (n), .a = (h), .m = (hm) } } }
p                  76 drivers/input/misc/yealink.c 		}	p;
p                 142 drivers/input/misc/yealink.c 		a = lcdMap[el].u.p.a;
p                 143 drivers/input/misc/yealink.c 		m = lcdMap[el].u.p.m;
p                 247 drivers/input/misc/yealink.c static int yealink_cmd(struct yealink_dev *yld, struct yld_ctl_packet *p)
p                 249 drivers/input/misc/yealink.c 	u8	*buf = (u8 *)p;
p                 255 drivers/input/misc/yealink.c 	p->sum = sum;
p                 261 drivers/input/misc/yealink.c 			p, sizeof(*p),
p                 281 drivers/input/misc/yealink.c 	struct yld_ctl_packet *p = yld->ctl_data;
p                 292 drivers/input/misc/yealink.c 	yealink_cmd(yld, p);
p                 297 drivers/input/misc/yealink.c 	p->cmd = CMD_RING_NOTE;
p                 301 drivers/input/misc/yealink.c 		if (len > sizeof(p->data))
p                 302 drivers/input/misc/yealink.c 			len = sizeof(p->data);
p                 303 drivers/input/misc/yealink.c 		p->size	  = len;
p                 304 drivers/input/misc/yealink.c 		p->offset = cpu_to_be16(ix);
p                 305 drivers/input/misc/yealink.c 		memcpy(p->data, &buf[ix], len);
p                 306 drivers/input/misc/yealink.c 		yealink_cmd(yld, p);
p                 693 drivers/input/misc/yealink.c 				lcdMap[i].u.p.name);
p                 716 drivers/input/misc/yealink.c 		if (strncmp(buf, lcdMap[i].u.p.name, count) == 0) {
p                 623 drivers/input/mouse/alps.c static void alps_decode_buttons_v3(struct alps_fields *f, unsigned char *p)
p                 625 drivers/input/mouse/alps.c 	f->left = !!(p[3] & 0x01);
p                 626 drivers/input/mouse/alps.c 	f->right = !!(p[3] & 0x02);
p                 627 drivers/input/mouse/alps.c 	f->middle = !!(p[3] & 0x04);
p                 629 drivers/input/mouse/alps.c 	f->ts_left = !!(p[3] & 0x10);
p                 630 drivers/input/mouse/alps.c 	f->ts_right = !!(p[3] & 0x20);
p                 631 drivers/input/mouse/alps.c 	f->ts_middle = !!(p[3] & 0x40);
p                 634 drivers/input/mouse/alps.c static int alps_decode_pinnacle(struct alps_fields *f, unsigned char *p,
p                 637 drivers/input/mouse/alps.c 	f->first_mp = !!(p[4] & 0x40);
p                 638 drivers/input/mouse/alps.c 	f->is_mp = !!(p[0] & 0x40);
p                 641 drivers/input/mouse/alps.c 		f->fingers = (p[5] & 0x3) + 1;
p                 642 drivers/input/mouse/alps.c 		f->x_map = ((p[4] & 0x7e) << 8) |
p                 643 drivers/input/mouse/alps.c 			   ((p[1] & 0x7f) << 2) |
p                 644 drivers/input/mouse/alps.c 			   ((p[0] & 0x30) >> 4);
p                 645 drivers/input/mouse/alps.c 		f->y_map = ((p[3] & 0x70) << 4) |
p                 646 drivers/input/mouse/alps.c 			   ((p[2] & 0x7f) << 1) |
p                 647 drivers/input/mouse/alps.c 			   (p[4] & 0x01);
p                 649 drivers/input/mouse/alps.c 		f->st.x = ((p[1] & 0x7f) << 4) | ((p[4] & 0x30) >> 2) |
p                 650 drivers/input/mouse/alps.c 		       ((p[0] & 0x30) >> 4);
p                 651 drivers/input/mouse/alps.c 		f->st.y = ((p[2] & 0x7f) << 4) | (p[4] & 0x0f);
p                 652 drivers/input/mouse/alps.c 		f->pressure = p[5] & 0x7f;
p                 654 drivers/input/mouse/alps.c 		alps_decode_buttons_v3(f, p);
p                 660 drivers/input/mouse/alps.c static int alps_decode_rushmore(struct alps_fields *f, unsigned char *p,
p                 663 drivers/input/mouse/alps.c 	f->first_mp = !!(p[4] & 0x40);
p                 664 drivers/input/mouse/alps.c 	f->is_mp = !!(p[5] & 0x40);
p                 667 drivers/input/mouse/alps.c 		f->fingers = max((p[5] & 0x3), ((p[5] >> 2) & 0x3)) + 1;
p                 668 drivers/input/mouse/alps.c 		f->x_map = ((p[5] & 0x10) << 11) |
p                 669 drivers/input/mouse/alps.c 			   ((p[4] & 0x7e) << 8) |
p                 670 drivers/input/mouse/alps.c 			   ((p[1] & 0x7f) << 2) |
p                 671 drivers/input/mouse/alps.c 			   ((p[0] & 0x30) >> 4);
p                 672 drivers/input/mouse/alps.c 		f->y_map = ((p[5] & 0x20) << 6) |
p                 673 drivers/input/mouse/alps.c 			   ((p[3] & 0x70) << 4) |
p                 674 drivers/input/mouse/alps.c 			   ((p[2] & 0x7f) << 1) |
p                 675 drivers/input/mouse/alps.c 			   (p[4] & 0x01);
p                 677 drivers/input/mouse/alps.c 		f->st.x = ((p[1] & 0x7f) << 4) | ((p[4] & 0x30) >> 2) |
p                 678 drivers/input/mouse/alps.c 		       ((p[0] & 0x30) >> 4);
p                 679 drivers/input/mouse/alps.c 		f->st.y = ((p[2] & 0x7f) << 4) | (p[4] & 0x0f);
p                 680 drivers/input/mouse/alps.c 		f->pressure = p[5] & 0x7f;
p                 682 drivers/input/mouse/alps.c 		alps_decode_buttons_v3(f, p);
p                 688 drivers/input/mouse/alps.c static int alps_decode_dolphin(struct alps_fields *f, unsigned char *p,
p                 694 drivers/input/mouse/alps.c 	f->first_mp = !!(p[0] & 0x02);
p                 695 drivers/input/mouse/alps.c 	f->is_mp = !!(p[0] & 0x20);
p                 698 drivers/input/mouse/alps.c 		f->st.x = ((p[1] & 0x7f) | ((p[4] & 0x0f) << 7));
p                 699 drivers/input/mouse/alps.c 		f->st.y = ((p[2] & 0x7f) | ((p[4] & 0xf0) << 3));
p                 700 drivers/input/mouse/alps.c 		f->pressure = (p[0] & 4) ? 0 : p[5] & 0x7f;
p                 701 drivers/input/mouse/alps.c 		alps_decode_buttons_v3(f, p);
p                 703 drivers/input/mouse/alps.c 		f->fingers = ((p[0] & 0x6) >> 1 |
p                 704 drivers/input/mouse/alps.c 		     (p[0] & 0x10) >> 2);
p                 706 drivers/input/mouse/alps.c 		palm_data = (p[1] & 0x7f) |
p                 707 drivers/input/mouse/alps.c 			    ((p[2] & 0x7f) << 7) |
p                 708 drivers/input/mouse/alps.c 			    ((p[4] & 0x7f) << 14) |
p                 709 drivers/input/mouse/alps.c 			    ((p[5] & 0x7f) << 21) |
p                 710 drivers/input/mouse/alps.c 			    ((p[3] & 0x07) << 28) |
p                 711 drivers/input/mouse/alps.c 			    (((u64)p[3] & 0x70) << 27) |
p                 712 drivers/input/mouse/alps.c 			    (((u64)p[0] & 0x01) << 34);
p                1027 drivers/input/mouse/alps.c 				  unsigned char *p,
p                1033 drivers/input/mouse/alps.c 	pkt_id = alps_get_packet_id_v7(p);
p                1059 drivers/input/mouse/alps.c 	alps_get_finger_coordinate_v7(f->mt, p, pkt_id);
p                1064 drivers/input/mouse/alps.c 		f->fingers = 3 + (p[5] & 0x03);
p                1066 drivers/input/mouse/alps.c 	f->left = (p[0] & 0x80) >> 7;
p                1068 drivers/input/mouse/alps.c 		if (p[0] & 0x20)
p                1070 drivers/input/mouse/alps.c 		if (p[0] & 0x10)
p                1073 drivers/input/mouse/alps.c 		f->right = (p[0] & 0x20) >> 5;
p                1074 drivers/input/mouse/alps.c 		f->middle = (p[0] & 0x10) >> 4;
p                1178 drivers/input/mouse/alps.c 			      unsigned char *p, struct psmouse *psmouse)
p                1184 drivers/input/mouse/alps.c 	pkt_id = alps_get_pkt_id_ss4_v2(p);
p                1189 drivers/input/mouse/alps.c 		f->mt[0].x = SS4_1F_X_V2(p);
p                1190 drivers/input/mouse/alps.c 		f->mt[0].y = SS4_1F_Y_V2(p);
p                1191 drivers/input/mouse/alps.c 		f->pressure = ((SS4_1F_Z_V2(p)) * 2) & 0x7f;
p                1206 drivers/input/mouse/alps.c 				f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
p                1207 drivers/input/mouse/alps.c 				f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
p                1209 drivers/input/mouse/alps.c 				f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
p                1210 drivers/input/mouse/alps.c 				f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
p                1212 drivers/input/mouse/alps.c 			f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0);
p                1213 drivers/input/mouse/alps.c 			f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1);
p                1216 drivers/input/mouse/alps.c 				f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
p                1217 drivers/input/mouse/alps.c 				f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
p                1219 drivers/input/mouse/alps.c 				f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
p                1220 drivers/input/mouse/alps.c 				f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
p                1222 drivers/input/mouse/alps.c 			f->mt[0].y = SS4_STD_MF_Y_V2(p, 0);
p                1223 drivers/input/mouse/alps.c 			f->mt[1].y = SS4_STD_MF_Y_V2(p, 1);
p                1225 drivers/input/mouse/alps.c 		f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0;
p                1227 drivers/input/mouse/alps.c 		if (SS4_IS_MF_CONTINUE(p)) {
p                1240 drivers/input/mouse/alps.c 				f->mt[2].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
p                1241 drivers/input/mouse/alps.c 				f->mt[3].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
p                1244 drivers/input/mouse/alps.c 				f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
p                1245 drivers/input/mouse/alps.c 				f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
p                1250 drivers/input/mouse/alps.c 			f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
p                1251 drivers/input/mouse/alps.c 			f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
p                1254 drivers/input/mouse/alps.c 				f->mt[2].x = SS4_PLUS_STD_MF_X_V2(p, 0);
p                1255 drivers/input/mouse/alps.c 				f->mt[3].x = SS4_PLUS_STD_MF_X_V2(p, 1);
p                1258 drivers/input/mouse/alps.c 				f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
p                1259 drivers/input/mouse/alps.c 				f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
p                1264 drivers/input/mouse/alps.c 			f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
p                1265 drivers/input/mouse/alps.c 			f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
p                1271 drivers/input/mouse/alps.c 		if (SS4_IS_5F_DETECTED(p)) {
p                1300 drivers/input/mouse/alps.c 		f->ts_left = !!(SS4_BTN_V2(p) & 0x01);
p                1301 drivers/input/mouse/alps.c 		f->ts_right = !!(SS4_BTN_V2(p) & 0x02);
p                1302 drivers/input/mouse/alps.c 		f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
p                1304 drivers/input/mouse/alps.c 		f->left = !!(SS4_BTN_V2(p) & 0x01);
p                1306 drivers/input/mouse/alps.c 			f->right = !!(SS4_BTN_V2(p) & 0x02);
p                1307 drivers/input/mouse/alps.c 			f->middle = !!(SS4_BTN_V2(p) & 0x04);
p                 311 drivers/input/mouse/alps.h 	int (*decode_fields)(struct alps_fields *f, unsigned char *p,
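
The alps.c decode helpers above rebuild coordinates from 7-bit fields scattered across the packet bytes; the Dolphin variant, for instance, forms x as ((p[1] & 0x7f) | ((p[4] & 0x0f) << 7)). A tiny sketch of that kind of field extraction, fed with a made-up packet (illustrative only, not an ALPS protocol description):

	/* Illustrative sketch: assembling a coordinate from masked packet bytes. */
	#include <stdio.h>

	/* Field layout borrowed from the Dolphin x decode shown above. */
	static unsigned int decode_x(const unsigned char *p)
	{
		return (p[1] & 0x7f) | ((p[4] & 0x0f) << 7);
	}

	int main(void)
	{
		unsigned char pkt[6] = { 0, 0x35, 0, 0, 0x02, 0 };

		printf("x = %u\n", decode_x(pkt));	/* 0x35 | (0x2 << 7) = 309 */
		return 0;
	}
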
p                 266 drivers/input/mouse/bcm5974.c 	struct bcm5974_param p;	/* finger pressure limits */
p                 509 drivers/input/mouse/bcm5974.c 		    const struct bcm5974_param *p)
p                 511 drivers/input/mouse/bcm5974.c 	int fuzz = p->snratio ? (p->max - p->min) / p->snratio : 0;
p                 512 drivers/input/mouse/bcm5974.c 	input_set_abs_params(input, code, p->min, p->max, fuzz, 0);
p                 592 drivers/input/mouse/bcm5974.c 		int p = raw2int(f->touch_major);
p                 594 drivers/input/mouse/bcm5974.c 		if (p > 0 && raw2int(f->origin)) {
p                 595 drivers/input/mouse/bcm5974.c 			abs_p = clamp_val(256 * p / cfg->p.max, 0, 255);
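
The bcm5974.c entries above derive the input fuzz from a signal-to-noise ratio, (max - min) / snratio, and scale raw touch size into 0..255 with clamp_val(). A standalone sketch of both calculations with invented limits (illustrative only):

	/* Illustrative sketch: fuzz from snratio and pressure scaled to 0..255. */
	#include <stdio.h>

	struct param {
		int snratio, min, max;
	};

	static int clamp_val(int v, int lo, int hi)
	{
		return v < lo ? lo : (v > hi ? hi : v);
	}

	int main(void)
	{
		struct param p = { .snratio = 10, .min = 0, .max = 300 };	/* invented limits */
		int raw = 180;
		int fuzz = p.snratio ? (p.max - p.min) / p.snratio : 0;
		int abs_p = clamp_val(256 * raw / p.max, 0, 255);

		printf("fuzz=%d abs_pressure=%d\n", fuzz, abs_p);	/* fuzz=30 abs_pressure=153 */
		return 0;
	}
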
p                 231 drivers/input/mouse/lifebook.c 	u8 p;
p                 236 drivers/input/mouse/lifebook.c 	p = params[resolution / 100];
p                 237 drivers/input/mouse/lifebook.c 	ps2_command(&psmouse->ps2dev, &p, PSMOUSE_CMD_SETRES);
p                 238 drivers/input/mouse/lifebook.c 	psmouse->resolution = 50 << p;
p                  53 drivers/input/mouse/psmouse-base.c #define param_check_proto_abbrev(name, p)	__param_check(name, p, unsigned int)
p                 456 drivers/input/mouse/psmouse-base.c 	u8 p;
p                 461 drivers/input/mouse/psmouse-base.c 	p = params[resolution / 50];
p                 462 drivers/input/mouse/psmouse-base.c 	ps2_command(&psmouse->ps2dev, &p, PSMOUSE_CMD_SETRES);
p                 463 drivers/input/mouse/psmouse-base.c 	psmouse->resolution = 25 << p;
p                 518 drivers/input/mouse/psmouse-base.c 	char *p, *fw_id_copy, *save_ptr;
p                 531 drivers/input/mouse/psmouse-base.c 	while ((p = strsep(&fw_id_copy, " ")) != NULL) {
p                 532 drivers/input/mouse/psmouse-base.c 		if (psmouse_check_pnp_id(p, ids)) {
p                 945 drivers/input/mouse/psmouse-base.c 	const struct psmouse_protocol *p;
p                 949 drivers/input/mouse/psmouse-base.c 		p = &psmouse_protocols[i];
p                 951 drivers/input/mouse/psmouse-base.c 		if ((strlen(p->name) == len && !strncmp(p->name, name, len)) ||
p                 952 drivers/input/mouse/psmouse-base.c 		    (strlen(p->alias) == len && !strncmp(p->alias, name, len)))
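
The psmouse-base.c entries above look the requested resolution up in a small code table, program the device with PSMOUSE_CMD_SETRES, and record 25 << p counts per mm. A minimal sketch of that mapping; the code table used here is an assumption based on the standard PS/2 resolution steps (25, 50, 100, 200 counts/mm), not copied from the driver:

	/* Illustrative sketch: resolution request -> PS/2 SETRES code -> counts/mm. */
	#include <stdio.h>

	int main(void)
	{
		/* Assumed code table for requests of 0, 50, 100, 150, 200 counts/mm. */
		static const unsigned char params[] = { 0, 1, 2, 2, 3 };
		unsigned int resolution = 200;
		unsigned char p = params[resolution / 50];

		printf("SETRES code %u -> %u counts/mm\n", p, 25u << p);	/* 3 -> 200 */
		return 0;
	}
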
p                 265 drivers/input/mousedev.c 	struct mousedev_motion *p;
p                 275 drivers/input/mousedev.c 		p = &client->packets[client->head];
p                 276 drivers/input/mousedev.c 		if (client->ready && p->buttons != mousedev->packet.buttons) {
p                 279 drivers/input/mousedev.c 				p = &client->packets[client->head = new_head];
p                 280 drivers/input/mousedev.c 				memset(p, 0, sizeof(struct mousedev_motion));
p                 285 drivers/input/mousedev.c 			p->dx += packet->x - client->pos_x;
p                 286 drivers/input/mousedev.c 			p->dy += packet->y - client->pos_y;
p                 297 drivers/input/mousedev.c 		p->dx += packet->dx;
p                 298 drivers/input/mousedev.c 		p->dy += packet->dy;
p                 299 drivers/input/mousedev.c 		p->dz += packet->dz;
p                 300 drivers/input/mousedev.c 		p->buttons = mousedev->packet.buttons;
p                 302 drivers/input/mousedev.c 		if (p->dx || p->dy || p->dz ||
p                 303 drivers/input/mousedev.c 		    p->buttons != client->last_buttons)
p                 573 drivers/input/mousedev.c 	struct mousedev_motion *p = &client->packets[client->tail];
p                 576 drivers/input/mousedev.c 	dx = clamp_val(p->dx, -127, 127);
p                 577 drivers/input/mousedev.c 	p->dx -= dx;
p                 579 drivers/input/mousedev.c 	dy = clamp_val(p->dy, -127, 127);
p                 580 drivers/input/mousedev.c 	p->dy -= dy;
p                 584 drivers/input/mousedev.c 	ps2_data[0] |= p->buttons & 0x07;
p                 590 drivers/input/mousedev.c 		dz = clamp_val(p->dz, -7, 7);
p                 591 drivers/input/mousedev.c 		p->dz -= dz;
p                 593 drivers/input/mousedev.c 		ps2_data[3] = (dz & 0x0f) | ((p->buttons & 0x18) << 1);
p                 598 drivers/input/mousedev.c 		dz = clamp_val(p->dz, -127, 127);
p                 599 drivers/input/mousedev.c 		p->dz -= dz;
p                 601 drivers/input/mousedev.c 		ps2_data[0] |= ((p->buttons & 0x10) >> 3) |
p                 602 drivers/input/mousedev.c 			       ((p->buttons & 0x08) >> 1);
p                 610 drivers/input/mousedev.c 		p->dz = 0;
p                 612 drivers/input/mousedev.c 		ps2_data[0] |= ((p->buttons & 0x10) >> 3) |
p                 613 drivers/input/mousedev.c 			       ((p->buttons & 0x08) >> 1);
p                 619 drivers/input/mousedev.c 	if (!p->dx && !p->dy && !p->dz) {
p                 622 drivers/input/mousedev.c 			client->last_buttons = p->buttons;
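
The mousedev.c entries above accumulate motion per packet, clamp what is emitted to the signed range a PS/2 report can carry, and subtract the emitted amount so the remainder carries into the next packet. A minimal sketch of that clamp-and-carry pattern (illustrative only):

	/* Illustrative sketch: emit clamped deltas, carry the overflow forward. */
	#include <stdio.h>

	static int clamp_val(int v, int lo, int hi)
	{
		return v < lo ? lo : (v > hi ? hi : v);
	}

	/* Drain one packet's worth of motion from the accumulator. */
	static signed char emit_delta(int *accum)
	{
		int d = clamp_val(*accum, -127, 127);

		*accum -= d;		/* remainder waits for the next packet */
		return (signed char)d;
	}

	int main(void)
	{
		int dx = 300;		/* accumulated motion, too big for one report */

		while (dx)
			printf("packet dx=%d, remaining=%d\n", emit_delta(&dx), dx);
		return 0;
	}
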
p                 176 drivers/input/serio/hil_mlc.c 		hil_packet p;
p                 178 drivers/input/serio/hil_mlc.c 		p = mlc->ipacket[i];
p                 179 drivers/input/serio/hil_mlc.c 		if (did != (p & HIL_PKT_ADDR_MASK) >> 8) {
p                 187 drivers/input/serio/hil_mlc.c 			did = (p & HIL_PKT_ADDR_MASK) >> 8;
p                 197 drivers/input/serio/hil_mlc.c 			drv->interrupt(serio, (p >> 24), 0);
p                 198 drivers/input/serio/hil_mlc.c 			drv->interrupt(serio, (p >> 16) & 0xff, 0);
p                 199 drivers/input/serio/hil_mlc.c 			drv->interrupt(serio, (p >> 8) & ~HIL_PKT_ADDR_MASK, 0);
p                 200 drivers/input/serio/hil_mlc.c 			drv->interrupt(serio, p & 0xff, 0);
p                  79 drivers/input/serio/hp_sdc.c # define sdc_readb(p)		gsc_readb(p)
p                  80 drivers/input/serio/hp_sdc.c # define sdc_writeb(v,p)	gsc_writeb((v),(p))
p                  83 drivers/input/serio/hp_sdc.c # define sdc_readb(p)		in_8(p)
p                  84 drivers/input/serio/hp_sdc.c # define sdc_writeb(v,p)	out_8((p),(v))
p                  76 drivers/input/serio/i8042.c #define param_check_reset_param(name, p)	\
p                  77 drivers/input/serio/i8042.c 	__param_check(name, p, enum i8042_controller_reset_mode)
p                  85 drivers/input/serio/parkbd.c 	unsigned char p;
p                  89 drivers/input/serio/parkbd.c         p = c ^ (c >> 4);
p                  90 drivers/input/serio/parkbd.c 	p = p ^ (p >> 2);
p                  91 drivers/input/serio/parkbd.c 	p = p ^ (p >> 1);
p                  95 drivers/input/serio/parkbd.c 	parkbd_buffer = c | (((int) (~p & 1)) << 8) | 0x600;
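
The parkbd.c entries above XOR-fold a byte onto itself to compute its parity bit before framing it for the parallel port. A standalone sketch of the same fold (illustrative, not the driver code):

	/* Illustrative sketch: parity of a byte by successive XOR folds. */
	#include <stdio.h>

	static unsigned char byte_parity(unsigned char c)
	{
		unsigned char p = c ^ (c >> 4);	/* fold high nibble into low */
		p ^= p >> 2;			/* fold 4 bits down to 2 */
		p ^= p >> 1;			/* fold 2 bits down to 1 */
		return p & 1;			/* 1 if c has an odd number of set bits */
	}

	int main(void)
	{
		printf("parity of 0x2a = %u\n", byte_parity(0x2a));	/* 3 bits set -> 1 */
		return 0;
	}
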
p                 355 drivers/input/tablet/aiptek.c 	const struct aiptek_map *p;
p                 360 drivers/input/tablet/aiptek.c 	for (p = map; p->string; p++)
p                 361 drivers/input/tablet/aiptek.c 	        if (!strncmp(str, p->string, count))
p                 362 drivers/input/tablet/aiptek.c 			return p->value;
p                 369 drivers/input/tablet/aiptek.c 	const struct aiptek_map *p;
p                 371 drivers/input/tablet/aiptek.c 	for (p = map; p->value != AIPTEK_INVALID_VALUE; p++)
p                 372 drivers/input/tablet/aiptek.c 		if (val == p->value)
p                 373 drivers/input/tablet/aiptek.c 			return p->string;
p                 419 drivers/input/tablet/aiptek.c 	int retval, macro, x, y, z, left, right, middle, p, dv, tip, bs, pck;
p                 515 drivers/input/tablet/aiptek.c 			p = (data[5] & 0x02) != 0 ? 1 : 0;
p                 544 drivers/input/tablet/aiptek.c 				if (p != 0) {
p                 576 drivers/input/tablet/aiptek.c 				input_report_abs(inputdev, ABS_MISC, p | AIPTEK_REPORT_TOOL_STYLUS);
p                 601 drivers/input/tablet/aiptek.c 			p = (data[5] & 0x02) != 0 ? 1 : 0;
p                 621 drivers/input/tablet/aiptek.c 				if (p != 0) {
p                 639 drivers/input/tablet/aiptek.c 				input_report_abs(inputdev, ABS_MISC, p | AIPTEK_REPORT_TOOL_MOUSE);
p                 655 drivers/input/tablet/aiptek.c 		p = (data[1] & 0x02) != 0 ? 1 : 0;
p                 660 drivers/input/tablet/aiptek.c 		macro = dv && p && tip && !(data[3] & 1) ? (data[3] >> 1) : -1;
p                 689 drivers/input/tablet/aiptek.c 				 p | AIPTEK_REPORT_TOOL_STYLUS);
p                 698 drivers/input/tablet/aiptek.c 		p = (data[1] & 0x02) != 0 ? 1 : 0;
p                 702 drivers/input/tablet/aiptek.c 		macro = dv && p && left && !(data[3] & 1) ? (data[3] >> 1) : 0;
p                 729 drivers/input/tablet/aiptek.c 				 p | AIPTEK_REPORT_TOOL_MOUSE);
p                 114 drivers/input/tablet/hanwang.c 	u16 p;
p                 165 drivers/input/tablet/hanwang.c 				p = (data[6] << 3) |
p                 172 drivers/input/tablet/hanwang.c 				p = (data[7] >> 6) | (data[6] << 2);
p                 176 drivers/input/tablet/hanwang.c 				p = 0;
p                 184 drivers/input/tablet/hanwang.c 			input_report_abs(input_dev, ABS_PRESSURE, p);
p                 191 drivers/input/tablet/wacom_serial4.c 	char *p;
p                 193 drivers/input/tablet/wacom_serial4.c 	p = strrchr(wacom->data, 'V');
p                 194 drivers/input/tablet/wacom_serial4.c 	if (p)
p                 195 drivers/input/tablet/wacom_serial4.c 		r = sscanf(p + 1, "%u.%u", &major_v, &minor_v);
p                2283 drivers/input/touchscreen/atmel_mxt_ts.c 	struct t37_debug *p;
p                2287 drivers/input/touchscreen/atmel_mxt_ts.c 		p = dbg->t37_buf + page;
p                2314 drivers/input/touchscreen/atmel_mxt_ts.c 				     sizeof(struct t37_debug), p);
p                2318 drivers/input/touchscreen/atmel_mxt_ts.c 		if (p->mode != mode || p->page != page) {
p                 191 drivers/input/touchscreen/cyttsp4_core.c 	void *p;
p                 205 drivers/input/touchscreen/cyttsp4_core.c 	p = krealloc(si->si_ptrs.cydata, si->si_ofs.cydata_size, GFP_KERNEL);
p                 206 drivers/input/touchscreen/cyttsp4_core.c 	if (p == NULL) {
p                 211 drivers/input/touchscreen/cyttsp4_core.c 	si->si_ptrs.cydata = p;
p                 268 drivers/input/touchscreen/cyttsp4_core.c 	void *p;
p                 280 drivers/input/touchscreen/cyttsp4_core.c 	p = krealloc(si->si_ptrs.test, si->si_ofs.test_size, GFP_KERNEL);
p                 281 drivers/input/touchscreen/cyttsp4_core.c 	if (p == NULL) {
p                 286 drivers/input/touchscreen/cyttsp4_core.c 	si->si_ptrs.test = p;
p                 327 drivers/input/touchscreen/cyttsp4_core.c 	void *p;
p                 339 drivers/input/touchscreen/cyttsp4_core.c 	p = krealloc(si->si_ptrs.pcfg, si->si_ofs.pcfg_size, GFP_KERNEL);
p                 340 drivers/input/touchscreen/cyttsp4_core.c 	if (p == NULL) {
p                 345 drivers/input/touchscreen/cyttsp4_core.c 	si->si_ptrs.pcfg = p;
p                 379 drivers/input/touchscreen/cyttsp4_core.c 	void *p;
p                 391 drivers/input/touchscreen/cyttsp4_core.c 	p = krealloc(si->si_ptrs.opcfg, si->si_ofs.opcfg_size, GFP_KERNEL);
p                 392 drivers/input/touchscreen/cyttsp4_core.c 	if (p == NULL) {
p                 397 drivers/input/touchscreen/cyttsp4_core.c 	si->si_ptrs.opcfg = p;
p                 475 drivers/input/touchscreen/cyttsp4_core.c 	void *p;
p                 480 drivers/input/touchscreen/cyttsp4_core.c 	p = krealloc(si->si_ptrs.ddata, si->si_ofs.ddata_size, GFP_KERNEL);
p                 481 drivers/input/touchscreen/cyttsp4_core.c 	if (p == NULL) {
p                 485 drivers/input/touchscreen/cyttsp4_core.c 	si->si_ptrs.ddata = p;
p                 502 drivers/input/touchscreen/cyttsp4_core.c 	void *p;
p                 507 drivers/input/touchscreen/cyttsp4_core.c 	p = krealloc(si->si_ptrs.mdata, si->si_ofs.mdata_size, GFP_KERNEL);
p                 508 drivers/input/touchscreen/cyttsp4_core.c 	if (p == NULL) {
p                 512 drivers/input/touchscreen/cyttsp4_core.c 	si->si_ptrs.mdata = p;
p                 532 drivers/input/touchscreen/cyttsp4_core.c 	void *p;
p                 539 drivers/input/touchscreen/cyttsp4_core.c 		p = krealloc(si->btn, si->si_ofs.btn_keys_size,
p                 541 drivers/input/touchscreen/cyttsp4_core.c 		if (p == NULL) {
p                 546 drivers/input/touchscreen/cyttsp4_core.c 		si->btn = p;
p                 582 drivers/input/touchscreen/cyttsp4_core.c 	void *p;
p                 584 drivers/input/touchscreen/cyttsp4_core.c 	p = krealloc(si->xy_mode, si->si_ofs.mode_size, GFP_KERNEL|__GFP_ZERO);
p                 585 drivers/input/touchscreen/cyttsp4_core.c 	if (p == NULL)
p                 587 drivers/input/touchscreen/cyttsp4_core.c 	si->xy_mode = p;
p                 589 drivers/input/touchscreen/cyttsp4_core.c 	p = krealloc(si->xy_data, si->si_ofs.data_size, GFP_KERNEL|__GFP_ZERO);
p                 590 drivers/input/touchscreen/cyttsp4_core.c 	if (p == NULL)
p                 592 drivers/input/touchscreen/cyttsp4_core.c 	si->xy_data = p;
p                 594 drivers/input/touchscreen/cyttsp4_core.c 	p = krealloc(si->btn_rec_data,
p                 597 drivers/input/touchscreen/cyttsp4_core.c 	if (p == NULL)
p                 599 drivers/input/touchscreen/cyttsp4_core.c 	si->btn_rec_data = p;
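
The cyttsp4_core.c entries above repeat one defensive pattern: krealloc() into a temporary pointer, bail out if it returns NULL, and only then store the new pointer, so a failed reallocation never loses or leaks the original buffer. A userspace sketch of the same pattern with realloc() (illustrative only):

	/* Illustrative sketch: grow a buffer without losing it on failure. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static int grow_buffer(char **buf, size_t new_size)
	{
		char *p = realloc(*buf, new_size);	/* temporary, like 'p' above */

		if (p == NULL)
			return -1;	/* *buf is still valid; caller decides what to do */

		*buf = p;		/* commit only on success */
		return 0;
	}

	int main(void)
	{
		char *buf = malloc(16);

		if (!buf)
			return 1;
		strcpy(buf, "hello");
		if (grow_buffer(&buf, 4096) == 0)
			printf("grew buffer, still holds \"%s\"\n", buf);
		free(buf);
		return 0;
	}
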
p                 804 drivers/input/touchscreen/edt-ft5x06.c 	char *p;
p                 831 drivers/input/touchscreen/edt-ft5x06.c 		p = strchr(rdbuf, '*');
p                 832 drivers/input/touchscreen/edt-ft5x06.c 		if (p)
p                 833 drivers/input/touchscreen/edt-ft5x06.c 			*p++ = '\0';
p                 835 drivers/input/touchscreen/edt-ft5x06.c 		strlcpy(fw_version, p ? p : "", EDT_NAME_LEN);
p                 845 drivers/input/touchscreen/edt-ft5x06.c 		p = strchr(rdbuf, '*');
p                 846 drivers/input/touchscreen/edt-ft5x06.c 		if (p)
p                 847 drivers/input/touchscreen/edt-ft5x06.c 			*p++ = '\0';
p                 849 drivers/input/touchscreen/edt-ft5x06.c 		strlcpy(fw_version, p ? p : "", EDT_NAME_LEN);
p                 796 drivers/input/touchscreen/elants_i2c.c 			unsigned int x, y, p, w;
p                 802 drivers/input/touchscreen/elants_i2c.c 			p = buf[FW_POS_PRESSURE + i];
p                 806 drivers/input/touchscreen/elants_i2c.c 				i, x, y, p, w);
p                 812 drivers/input/touchscreen/elants_i2c.c 			input_event(input, EV_ABS, ABS_MT_PRESSURE, p);
p                 122 drivers/input/touchscreen/mainstone-wm97xx.c 	u16 x, y, p = 0x100 | WM97XX_ADCSEL_PRES;
p                 148 drivers/input/touchscreen/mainstone-wm97xx.c 			p = MODR;
p                 151 drivers/input/touchscreen/mainstone-wm97xx.c 			x, y, p);
p                 156 drivers/input/touchscreen/mainstone-wm97xx.c 		    (p & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_PRES)
p                 163 drivers/input/touchscreen/mainstone-wm97xx.c 		input_report_abs(wm->input_dev, ABS_PRESSURE, p & 0xfff);
p                 164 drivers/input/touchscreen/mainstone-wm97xx.c 		input_report_key(wm->input_dev, BTN_TOUCH, (p != 0));
p                1028 drivers/input/touchscreen/sur40.c 			    struct v4l2_streamparm *p)
p                1030 drivers/input/touchscreen/sur40.c 	if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
p                1033 drivers/input/touchscreen/sur40.c 	p->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
p                1034 drivers/input/touchscreen/sur40.c 	p->parm.capture.timeperframe.numerator = 1;
p                1035 drivers/input/touchscreen/sur40.c 	p->parm.capture.timeperframe.denominator = 60;
p                1036 drivers/input/touchscreen/sur40.c 	p->parm.capture.readbuffers = 3;
p                 188 drivers/input/touchscreen/ucb1400_ts.c 	unsigned int x, y, p;
p                 204 drivers/input/touchscreen/ucb1400_ts.c 		p = ucb1400_ts_read_pressure(ucb);
p                 207 drivers/input/touchscreen/ucb1400_ts.c 		ucb1400_ts_report_event(ucb->ts_idev, p, x, y);
p                 958 drivers/input/touchscreen/wdt87xx_i2c.c 	u8 p;
p                 971 drivers/input/touchscreen/wdt87xx_i2c.c 	p = buf[FINGER_EV_V1_OFFSET_P];
p                 988 drivers/input/touchscreen/wdt87xx_i2c.c 	input_report_abs(input, ABS_MT_PRESSURE, p);
p                 281 drivers/input/touchscreen/wm9705.c 		rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_PRES | WM97XX_PEN_DOWN, &data->p);
p                 285 drivers/input/touchscreen/wm9705.c 		data->p = DEFAULT_PRESSURE;
p                 354 drivers/input/touchscreen/wm9712.c 		data->p = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
p                 356 drivers/input/touchscreen/wm9712.c 		data->p = DEFAULT_PRESSURE;
p                 364 drivers/input/touchscreen/wm9712.c 	if (pil && !(data->p & WM97XX_ADCSEL_PRES))
p                 400 drivers/input/touchscreen/wm9712.c 						&data->p);
p                 404 drivers/input/touchscreen/wm9712.c 			data->p = DEFAULT_PRESSURE;
p                 365 drivers/input/touchscreen/wm9713.c 		data->p = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
p                 367 drivers/input/touchscreen/wm9713.c 		data->p = DEFAULT_PRESSURE;
p                 375 drivers/input/touchscreen/wm9713.c 	if (pil && !(data->p & WM97XX_ADCSEL_PRES))
p                 407 drivers/input/touchscreen/wm9713.c 						&data->p);
p                 411 drivers/input/touchscreen/wm9713.c 			data->p = DEFAULT_PRESSURE;
p                 440 drivers/input/touchscreen/wm97xx-core.c 			data.y & 0xfff, data.p >> 12, data.p & 0xfff);
p                 453 drivers/input/touchscreen/wm97xx-core.c 		input_report_abs(wm->input_dev, ABS_PRESSURE, data.p & 0xfff);
p                  88 drivers/input/touchscreen/zylonite-wm97xx.c 	u16 x, y, p = 0x100 | WM97XX_ADCSEL_PRES;
p                 115 drivers/input/touchscreen/zylonite-wm97xx.c 			p = MODR;
p                 118 drivers/input/touchscreen/zylonite-wm97xx.c 			x, y, p);
p                 123 drivers/input/touchscreen/zylonite-wm97xx.c 		    (p & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_PRES)
p                 130 drivers/input/touchscreen/zylonite-wm97xx.c 		input_report_abs(wm->input_dev, ABS_PRESSURE, p & 0xfff);
p                 131 drivers/input/touchscreen/zylonite-wm97xx.c 		input_report_key(wm->input_dev, BTN_TOUCH, (p != 0));
p                 202 drivers/interconnect/core.c 	struct icc_provider *p = node->provider;
p                 208 drivers/interconnect/core.c 	if (p->pre_aggregate)
p                 209 drivers/interconnect/core.c 		p->pre_aggregate(node);
p                 212 drivers/interconnect/core.c 		p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
p                 161 drivers/iommu/amd_iommu.c 	struct acpihid_map_entry *p;
p                 163 drivers/iommu/amd_iommu.c 	list_for_each_entry(p, &acpihid_map, list) {
p                 164 drivers/iommu/amd_iommu.c 		if (!match_hid_uid(dev, p)) {
p                 166 drivers/iommu/amd_iommu.c 				*entry = p;
p                 167 drivers/iommu/amd_iommu.c 			return p->devid;
p                 313 drivers/iommu/amd_iommu.c 	struct acpihid_map_entry *p, *entry = NULL;
p                 320 drivers/iommu/amd_iommu.c 	list_for_each_entry(p, &acpihid_map, list) {
p                 321 drivers/iommu/amd_iommu.c 		if ((devid == p->devid) && p->group)
p                 322 drivers/iommu/amd_iommu.c 			entry->group = p->group;
p                1367 drivers/iommu/amd_iommu.c 		unsigned long p = (unsigned long)page_address(freelist);
p                1369 drivers/iommu/amd_iommu.c 		free_page(p);
p                1375 drivers/iommu/amd_iommu.c 	struct page *p = virt_to_page((void *)pt);
p                1377 drivers/iommu/amd_iommu.c 	p->freelist = freelist;
p                1379 drivers/iommu/amd_iommu.c 	return p;
p                1385 drivers/iommu/amd_iommu.c 	unsigned long p;							\
p                1401 drivers/iommu/amd_iommu.c 		p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);			\
p                1402 drivers/iommu/amd_iommu.c 		freelist = FN(p, freelist);					\
p                2558 drivers/iommu/amd_iommu.c 		int p, n;
p                2561 drivers/iommu/amd_iommu.c 		p = npages % boundary_size;
p                2563 drivers/iommu/amd_iommu.c 		if (p + n > boundary_size)
p                2564 drivers/iommu/amd_iommu.c 			npages += boundary_size - p;
p                 500 drivers/iommu/amd_iommu_init.c 	u8 *p = (void *)h, *end = (void *)h;
p                 510 drivers/iommu/amd_iommu_init.c 	p += ivhd_size;
p                 513 drivers/iommu/amd_iommu_init.c 	while (p < end) {
p                 514 drivers/iommu/amd_iommu_init.c 		dev = (struct ivhd_entry *)p;
p                 530 drivers/iommu/amd_iommu_init.c 		p += ivhd_entry_length(p);
p                 533 drivers/iommu/amd_iommu_init.c 	WARN_ON(p != end);
p                 541 drivers/iommu/amd_iommu_init.c 	u8 checksum = 0, *p = (u8 *)table;
p                 544 drivers/iommu/amd_iommu_init.c 		checksum += p[i];
p                 561 drivers/iommu/amd_iommu_init.c 	u8 *p = (u8 *)table, *end = (u8 *)table;
p                 564 drivers/iommu/amd_iommu_init.c 	p += IVRS_HEADER_LENGTH;
p                 567 drivers/iommu/amd_iommu_init.c 	while (p < end) {
p                 568 drivers/iommu/amd_iommu_init.c 		h = (struct ivhd_header *)p;
p                 575 drivers/iommu/amd_iommu_init.c 		p += h->length;
p                 577 drivers/iommu/amd_iommu_init.c 	WARN_ON(p != end);
p                1143 drivers/iommu/amd_iommu_init.c 	u8 *p = (u8 *)h;
p                1144 drivers/iommu/amd_iommu_init.c 	u8 *end = p, flags = 0;
p                1173 drivers/iommu/amd_iommu_init.c 	p += ivhd_size;
p                1178 drivers/iommu/amd_iommu_init.c 	while (p < end) {
p                1179 drivers/iommu/amd_iommu_init.c 		e = (struct ivhd_entry *)p;
p                1401 drivers/iommu/amd_iommu_init.c 		p += ivhd_entry_length(p);
p                1606 drivers/iommu/amd_iommu_init.c 		u8 *p = (u8 *) ivhd;
p                1610 drivers/iommu/amd_iommu_init.c 		ivhd = (struct ivhd_header *)(p + ivhd->length);
p                1622 drivers/iommu/amd_iommu_init.c 	u8 *p = (u8 *)table, *end = (u8 *)table;
p                1628 drivers/iommu/amd_iommu_init.c 	p += IVRS_HEADER_LENGTH;
p                1630 drivers/iommu/amd_iommu_init.c 	while (p < end) {
p                1631 drivers/iommu/amd_iommu_init.c 		h = (struct ivhd_header *)p;
p                1632 drivers/iommu/amd_iommu_init.c 		if (*p == amd_iommu_target_ivhd_type) {
p                1650 drivers/iommu/amd_iommu_init.c 		p += h->length;
p                1653 drivers/iommu/amd_iommu_init.c 	WARN_ON(p != end);
p                2153 drivers/iommu/amd_iommu_init.c 	u8 *p = (u8 *)table, *end = (u8 *)table;
p                2157 drivers/iommu/amd_iommu_init.c 	p += IVRS_HEADER_LENGTH;
p                2159 drivers/iommu/amd_iommu_init.c 	while (p < end) {
p                2160 drivers/iommu/amd_iommu_init.c 		m = (struct ivmd_header *)p;
p                2164 drivers/iommu/amd_iommu_init.c 		p += m->length;
p                3038 drivers/iommu/amd_iommu_init.c 	char *hid, *uid, *p;
p                3048 drivers/iommu/amd_iommu_init.c 	p = acpiid;
p                3049 drivers/iommu/amd_iommu_init.c 	hid = strsep(&p, ":");
p                3050 drivers/iommu/amd_iommu_init.c 	uid = p;
p                 184 drivers/iommu/arm-smmu-v3.c #define Q_IDX(llq, p)			((p) & ((1 << (llq)->max_n_shift) - 1))
p                 185 drivers/iommu/arm-smmu-v3.c #define Q_WRP(llq, p)			((p) & (1 << (llq)->max_n_shift))
p                 187 drivers/iommu/arm-smmu-v3.c #define Q_OVF(p)			((p) & Q_OVERFLOW_FLAG)
p                 188 drivers/iommu/arm-smmu-v3.c #define Q_ENT(q, p)			((q)->base +			\
p                 189 drivers/iommu/arm-smmu-v3.c 					 Q_IDX(&((q)->llq), p) *	\
p                 759 drivers/iommu/fsl_pamu.c 		void __iomem *p = data->pamu_reg_base + i * PAMU_OFFSET;
p                 760 drivers/iommu/fsl_pamu.c 		u32 pics = in_be32(p + PAMU_PICS);
p                 763 drivers/iommu/fsl_pamu.c 			u32 avs1 = in_be32(p + PAMU_AVS1);
p                 766 drivers/iommu/fsl_pamu.c 			pr_emerg("POES1=%08x\n", in_be32(p + PAMU_POES1));
p                 767 drivers/iommu/fsl_pamu.c 			pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2));
p                 769 drivers/iommu/fsl_pamu.c 			pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2));
p                 771 drivers/iommu/fsl_pamu.c 				 make64(in_be32(p + PAMU_AVAH),
p                 772 drivers/iommu/fsl_pamu.c 					in_be32(p + PAMU_AVAL)));
p                 773 drivers/iommu/fsl_pamu.c 			pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD));
p                 775 drivers/iommu/fsl_pamu.c 				 make64(in_be32(p + PAMU_POEAH),
p                 776 drivers/iommu/fsl_pamu.c 					in_be32(p + PAMU_POEAL)));
p                 778 drivers/iommu/fsl_pamu.c 			phys = make64(in_be32(p + PAMU_POEAH),
p                 779 drivers/iommu/fsl_pamu.c 				      in_be32(p + PAMU_POEAL));
p                 792 drivers/iommu/fsl_pamu.c 			out_be32(p + PAMU_AVS1, avs1 & PAMU_AV_MASK);
p                 811 drivers/iommu/fsl_pamu.c 			out_be32((p + PAMU_PICS), pics);
p                1008 drivers/iommu/fsl_pamu.c 	struct page *p;
p                1084 drivers/iommu/fsl_pamu.c 	p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
p                1085 drivers/iommu/fsl_pamu.c 	if (!p) {
p                1091 drivers/iommu/fsl_pamu.c 	ppaact = page_address(p);
p                1092 drivers/iommu/fsl_pamu.c 	ppaact_phys = page_to_phys(p);
p                 166 drivers/iommu/intel-iommu.c static inline unsigned long virt_to_dma_pfn(void *p)
p                 168 drivers/iommu/intel-iommu.c 	return page_to_dma_pfn(virt_to_page(p));
p                  57 drivers/iommu/intel-pasid.c 	void *p;
p                  60 drivers/iommu/intel-pasid.c 	p = idr_find(&pasid_idr, pasid);
p                  63 drivers/iommu/intel-pasid.c 	return p;
p                 235 drivers/iommu/io-pgtable-arm.c 	struct page *p;
p                 240 drivers/iommu/io-pgtable-arm.c 	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
p                 242 drivers/iommu/io-pgtable-arm.c 	if (!p)
p                 245 drivers/iommu/io-pgtable-arm.c 	pages = page_address(p);
p                 265 drivers/iommu/io-pgtable-arm.c 	__free_pages(p, order);
p                  35 drivers/iommu/omap-iommu-debug.c 		bytes = snprintf(p, maxcol, str, __stringify(name),	\
p                  37 drivers/iommu/omap-iommu-debug.c 		p += bytes;						\
p                  46 drivers/iommu/omap-iommu-debug.c 	char *p = buf;
p                  65 drivers/iommu/omap-iommu-debug.c 	return p - buf;
p                  87 drivers/iommu/omap-iommu-debug.c 	char *p, *buf;
p                  96 drivers/iommu/omap-iommu-debug.c 	p = buf;
p                 100 drivers/iommu/omap-iommu-debug.c 	bytes = omap_iommu_dump_ctx(obj, p, count);
p                 115 drivers/iommu/omap-iommu-debug.c 	struct cr_regs *p = crs;
p                 123 drivers/iommu/omap-iommu-debug.c 		*p++ = tmp;
p                 129 drivers/iommu/omap-iommu-debug.c 	return  p - crs;
p                  87 drivers/iommu/omap-iommu.c 	u32 *p;
p                  95 drivers/iommu/omap-iommu.c 		p = obj->ctx;
p                  97 drivers/iommu/omap-iommu.c 			p[i] = iommu_read_reg(obj, i * sizeof(u32));
p                  99 drivers/iommu/omap-iommu.c 				p[i]);
p                 117 drivers/iommu/omap-iommu.c 	u32 *p;
p                 125 drivers/iommu/omap-iommu.c 		p = obj->ctx;
p                 127 drivers/iommu/omap-iommu.c 			iommu_write_reg(obj, p[i], i * sizeof(u32));
p                 129 drivers/iommu/omap-iommu.c 				p[i]);
p                1820 drivers/iommu/omap-iommu.c 	struct kmem_cache *p;
p                1832 drivers/iommu/omap-iommu.c 	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
p                1834 drivers/iommu/omap-iommu.c 	if (!p)
p                1836 drivers/iommu/omap-iommu.c 	iopte_cachep = p;
p                 115 drivers/irqchip/irq-atmel-aic-common.c 	const __be32 *p;
p                 123 drivers/irqchip/irq-atmel-aic-common.c 	of_property_for_each_u32(node, "atmel,external-irqs", prop, p, hwirq) {
p                  74 drivers/irqchip/irq-ls-scfg-msi.c static int __init early_parse_ls_scfg_msi(char *p)
p                  76 drivers/irqchip/irq-ls-scfg-msi.c 	if (p && strncmp(p, "no-affinity", 11) == 0)
p                  95 drivers/irqchip/irq-partition-percpu.c static void partition_irq_print_chip(struct irq_data *d, struct seq_file *p)
p                 101 drivers/irqchip/irq-partition-percpu.c 	seq_printf(p, " %5s-%lu", chip->name, data->hwirq);
p                 195 drivers/irqchip/irq-pic32-evic.c 	const __le32 *p;
p                 200 drivers/irqchip/irq-pic32-evic.c 	of_property_for_each_u32(node, pname, prop, p, hwirq) {
p                  58 drivers/irqchip/irq-renesas-intc-irqpin.c 	struct intc_irqpin_priv *p;
p                  98 drivers/irqchip/irq-renesas-intc-irqpin.c static inline unsigned long intc_irqpin_read(struct intc_irqpin_priv *p,
p                 101 drivers/irqchip/irq-renesas-intc-irqpin.c 	struct intc_irqpin_iomem *i = &p->iomem[reg];
p                 106 drivers/irqchip/irq-renesas-intc-irqpin.c static inline void intc_irqpin_write(struct intc_irqpin_priv *p,
p                 109 drivers/irqchip/irq-renesas-intc-irqpin.c 	struct intc_irqpin_iomem *i = &p->iomem[reg];
p                 114 drivers/irqchip/irq-renesas-intc-irqpin.c static inline unsigned long intc_irqpin_hwirq_mask(struct intc_irqpin_priv *p,
p                 117 drivers/irqchip/irq-renesas-intc-irqpin.c 	return BIT((p->iomem[reg].width - 1) - hw_irq);
p                 120 drivers/irqchip/irq-renesas-intc-irqpin.c static inline void intc_irqpin_irq_write_hwirq(struct intc_irqpin_priv *p,
p                 123 drivers/irqchip/irq-renesas-intc-irqpin.c 	intc_irqpin_write(p, reg, intc_irqpin_hwirq_mask(p, reg, hw_irq));
p                 128 drivers/irqchip/irq-renesas-intc-irqpin.c static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p,
p                 137 drivers/irqchip/irq-renesas-intc-irqpin.c 	tmp = intc_irqpin_read(p, reg);
p                 140 drivers/irqchip/irq-renesas-intc-irqpin.c 	intc_irqpin_write(p, reg, tmp);
p                 145 drivers/irqchip/irq-renesas-intc-irqpin.c static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
p                 152 drivers/irqchip/irq-renesas-intc-irqpin.c 	intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO,
p                 157 drivers/irqchip/irq-renesas-intc-irqpin.c static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value)
p                 160 drivers/irqchip/irq-renesas-intc-irqpin.c 	int bitfield_width = p->sense_bitfield_width;
p                 163 drivers/irqchip/irq-renesas-intc-irqpin.c 	dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value);
p                 168 drivers/irqchip/irq-renesas-intc-irqpin.c 	intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_SENSE, shift,
p                 175 drivers/irqchip/irq-renesas-intc-irqpin.c 	dev_dbg(&i->p->pdev->dev, "%s (%d:%d:%d)\n",
p                 181 drivers/irqchip/irq-renesas-intc-irqpin.c 	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
p                 184 drivers/irqchip/irq-renesas-intc-irqpin.c 	intc_irqpin_dbg(&p->irq[hw_irq], "enable");
p                 185 drivers/irqchip/irq-renesas-intc-irqpin.c 	intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_CLEAR, hw_irq);
p                 190 drivers/irqchip/irq-renesas-intc-irqpin.c 	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
p                 193 drivers/irqchip/irq-renesas-intc-irqpin.c 	intc_irqpin_dbg(&p->irq[hw_irq], "disable");
p                 194 drivers/irqchip/irq-renesas-intc-irqpin.c 	intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq);
p                 199 drivers/irqchip/irq-renesas-intc-irqpin.c 	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
p                 202 drivers/irqchip/irq-renesas-intc-irqpin.c 	intc_irqpin_dbg(&p->irq[hw_irq], "shared enable");
p                 203 drivers/irqchip/irq-renesas-intc-irqpin.c 	intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_CLEAR, hw_irq);
p                 205 drivers/irqchip/irq-renesas-intc-irqpin.c 	p->shared_irq_mask &= ~BIT(hw_irq);
p                 210 drivers/irqchip/irq-renesas-intc-irqpin.c 	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
p                 213 drivers/irqchip/irq-renesas-intc-irqpin.c 	intc_irqpin_dbg(&p->irq[hw_irq], "shared disable");
p                 214 drivers/irqchip/irq-renesas-intc-irqpin.c 	intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq);
p                 216 drivers/irqchip/irq-renesas-intc-irqpin.c 	p->shared_irq_mask |= BIT(hw_irq);
p                 221 drivers/irqchip/irq-renesas-intc-irqpin.c 	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
p                 222 drivers/irqchip/irq-renesas-intc-irqpin.c 	int irq = p->irq[irqd_to_hwirq(d)].requested_irq;
p                 235 drivers/irqchip/irq-renesas-intc-irqpin.c 	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
p                 236 drivers/irqchip/irq-renesas-intc-irqpin.c 	int irq = p->irq[irqd_to_hwirq(d)].requested_irq;
p                 260 drivers/irqchip/irq-renesas-intc-irqpin.c 	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
p                 265 drivers/irqchip/irq-renesas-intc-irqpin.c 	return intc_irqpin_set_sense(p, irqd_to_hwirq(d),
p                 271 drivers/irqchip/irq-renesas-intc-irqpin.c 	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
p                 274 drivers/irqchip/irq-renesas-intc-irqpin.c 	irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
p                 276 drivers/irqchip/irq-renesas-intc-irqpin.c 		atomic_inc(&p->wakeup_path);
p                 278 drivers/irqchip/irq-renesas-intc-irqpin.c 		atomic_dec(&p->wakeup_path);
p                 286 drivers/irqchip/irq-renesas-intc-irqpin.c 	struct intc_irqpin_priv *p = i->p;
p                 290 drivers/irqchip/irq-renesas-intc-irqpin.c 	bit = intc_irqpin_hwirq_mask(p, INTC_IRQPIN_REG_SOURCE, i->hw_irq);
p                 292 drivers/irqchip/irq-renesas-intc-irqpin.c 	if (intc_irqpin_read(p, INTC_IRQPIN_REG_SOURCE) & bit) {
p                 293 drivers/irqchip/irq-renesas-intc-irqpin.c 		intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, ~bit);
p                 303 drivers/irqchip/irq-renesas-intc-irqpin.c 	struct intc_irqpin_priv *p = dev_id;
p                 304 drivers/irqchip/irq-renesas-intc-irqpin.c 	unsigned int reg_source = intc_irqpin_read(p, INTC_IRQPIN_REG_SOURCE);
p                 310 drivers/irqchip/irq-renesas-intc-irqpin.c 			if (BIT(k) & p->shared_irq_mask)
p                 313 drivers/irqchip/irq-renesas-intc-irqpin.c 			status |= intc_irqpin_irq_handler(irq, &p->irq[k]);
p                 332 drivers/irqchip/irq-renesas-intc-irqpin.c 	struct intc_irqpin_priv *p = h->host_data;
p                 334 drivers/irqchip/irq-renesas-intc-irqpin.c 	p->irq[hw].domain_irq = virq;
p                 335 drivers/irqchip/irq-renesas-intc-irqpin.c 	p->irq[hw].hw_irq = hw;
p                 337 drivers/irqchip/irq-renesas-intc-irqpin.c 	intc_irqpin_dbg(&p->irq[hw], "map");
p                 341 drivers/irqchip/irq-renesas-intc-irqpin.c 	irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
p                 377 drivers/irqchip/irq-renesas-intc-irqpin.c 	struct intc_irqpin_priv *p;
p                 391 drivers/irqchip/irq-renesas-intc-irqpin.c 	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
p                 392 drivers/irqchip/irq-renesas-intc-irqpin.c 	if (!p)
p                 397 drivers/irqchip/irq-renesas-intc-irqpin.c 			     &p->sense_bitfield_width);
p                 399 drivers/irqchip/irq-renesas-intc-irqpin.c 	if (!p->sense_bitfield_width)
p                 400 drivers/irqchip/irq-renesas-intc-irqpin.c 		p->sense_bitfield_width = 4; /* default to 4 bits */
p                 402 drivers/irqchip/irq-renesas-intc-irqpin.c 	p->pdev = pdev;
p                 403 drivers/irqchip/irq-renesas-intc-irqpin.c 	platform_set_drvdata(pdev, p);
p                 427 drivers/irqchip/irq-renesas-intc-irqpin.c 		p->irq[k].p = p;
p                 428 drivers/irqchip/irq-renesas-intc-irqpin.c 		p->irq[k].requested_irq = irq->start;
p                 440 drivers/irqchip/irq-renesas-intc-irqpin.c 		i = &p->iomem[k];
p                 475 drivers/irqchip/irq-renesas-intc-irqpin.c 			intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_IRLM,
p                 483 drivers/irqchip/irq-renesas-intc-irqpin.c 		intc_irqpin_mask_unmask_prio(p, k, 1);
p                 486 drivers/irqchip/irq-renesas-intc-irqpin.c 	intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, 0x0);
p                 489 drivers/irqchip/irq-renesas-intc-irqpin.c 	ref_irq = p->irq[0].requested_irq;
p                 490 drivers/irqchip/irq-renesas-intc-irqpin.c 	p->shared_irqs = 1;
p                 492 drivers/irqchip/irq-renesas-intc-irqpin.c 		if (ref_irq != p->irq[k].requested_irq) {
p                 493 drivers/irqchip/irq-renesas-intc-irqpin.c 			p->shared_irqs = 0;
p                 502 drivers/irqchip/irq-renesas-intc-irqpin.c 	} else if (!p->shared_irqs) {
p                 510 drivers/irqchip/irq-renesas-intc-irqpin.c 	irq_chip = &p->irq_chip;
p                 519 drivers/irqchip/irq-renesas-intc-irqpin.c 	p->irq_domain = irq_domain_add_simple(dev->of_node, nirqs, 0,
p                 520 drivers/irqchip/irq-renesas-intc-irqpin.c 					      &intc_irqpin_irq_domain_ops, p);
p                 521 drivers/irqchip/irq-renesas-intc-irqpin.c 	if (!p->irq_domain) {
p                 527 drivers/irqchip/irq-renesas-intc-irqpin.c 	if (p->shared_irqs) {
p                 529 drivers/irqchip/irq-renesas-intc-irqpin.c 		if (devm_request_irq(dev, p->irq[0].requested_irq,
p                 531 drivers/irqchip/irq-renesas-intc-irqpin.c 				IRQF_SHARED, name, p)) {
p                 539 drivers/irqchip/irq-renesas-intc-irqpin.c 			if (devm_request_irq(dev, p->irq[k].requested_irq,
p                 541 drivers/irqchip/irq-renesas-intc-irqpin.c 					     &p->irq[k])) {
p                 551 drivers/irqchip/irq-renesas-intc-irqpin.c 		intc_irqpin_mask_unmask_prio(p, k, 0);
p                 558 drivers/irqchip/irq-renesas-intc-irqpin.c 	irq_domain_remove(p->irq_domain);
p                 567 drivers/irqchip/irq-renesas-intc-irqpin.c 	struct intc_irqpin_priv *p = platform_get_drvdata(pdev);
p                 569 drivers/irqchip/irq-renesas-intc-irqpin.c 	irq_domain_remove(p->irq_domain);
p                 577 drivers/irqchip/irq-renesas-intc-irqpin.c 	struct intc_irqpin_priv *p = dev_get_drvdata(dev);
p                 579 drivers/irqchip/irq-renesas-intc-irqpin.c 	if (atomic_read(&p->wakeup_path))
p                  42 drivers/irqchip/irq-renesas-irqc.c 	struct irqc_priv *p;
p                  63 drivers/irqchip/irq-renesas-irqc.c 	dev_dbg(i->p->dev, "%s (%d:%d)\n", str, i->requested_irq, i->hw_irq);
p                  76 drivers/irqchip/irq-renesas-irqc.c 	struct irqc_priv *p = irq_data_to_priv(d);
p                  81 drivers/irqchip/irq-renesas-irqc.c 	irqc_dbg(&p->irq[hw_irq], "sense");
p                  86 drivers/irqchip/irq-renesas-irqc.c 	tmp = ioread32(p->iomem + IRQC_CONFIG(hw_irq));
p                  89 drivers/irqchip/irq-renesas-irqc.c 	iowrite32(tmp, p->iomem + IRQC_CONFIG(hw_irq));
p                  95 drivers/irqchip/irq-renesas-irqc.c 	struct irqc_priv *p = irq_data_to_priv(d);
p                  98 drivers/irqchip/irq-renesas-irqc.c 	irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
p                 100 drivers/irqchip/irq-renesas-irqc.c 		atomic_inc(&p->wakeup_path);
p                 102 drivers/irqchip/irq-renesas-irqc.c 		atomic_dec(&p->wakeup_path);
p                 110 drivers/irqchip/irq-renesas-irqc.c 	struct irqc_priv *p = i->p;
p                 115 drivers/irqchip/irq-renesas-irqc.c 	if (ioread32(p->iomem + DETECT_STATUS) & bit) {
p                 116 drivers/irqchip/irq-renesas-irqc.c 		iowrite32(bit, p->iomem + DETECT_STATUS);
p                 118 drivers/irqchip/irq-renesas-irqc.c 		generic_handle_irq(irq_find_mapping(p->irq_domain, i->hw_irq));
p                 128 drivers/irqchip/irq-renesas-irqc.c 	struct irqc_priv *p;
p                 133 drivers/irqchip/irq-renesas-irqc.c 	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
p                 134 drivers/irqchip/irq-renesas-irqc.c 	if (!p)
p                 137 drivers/irqchip/irq-renesas-irqc.c 	p->dev = dev;
p                 138 drivers/irqchip/irq-renesas-irqc.c 	platform_set_drvdata(pdev, p);
p                 149 drivers/irqchip/irq-renesas-irqc.c 		p->irq[k].p = p;
p                 150 drivers/irqchip/irq-renesas-irqc.c 		p->irq[k].hw_irq = k;
p                 151 drivers/irqchip/irq-renesas-irqc.c 		p->irq[k].requested_irq = irq->start;
p                 154 drivers/irqchip/irq-renesas-irqc.c 	p->number_of_irqs = k;
p                 155 drivers/irqchip/irq-renesas-irqc.c 	if (p->number_of_irqs < 1) {
p                 162 drivers/irqchip/irq-renesas-irqc.c 	p->iomem = devm_platform_ioremap_resource(pdev, 0);
p                 163 drivers/irqchip/irq-renesas-irqc.c 	if (IS_ERR(p->iomem)) {
p                 164 drivers/irqchip/irq-renesas-irqc.c 		ret = PTR_ERR(p->iomem);
p                 168 drivers/irqchip/irq-renesas-irqc.c 	p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */
p                 170 drivers/irqchip/irq-renesas-irqc.c 	p->irq_domain = irq_domain_add_linear(dev->of_node, p->number_of_irqs,
p                 171 drivers/irqchip/irq-renesas-irqc.c 					      &irq_generic_chip_ops, p);
p                 172 drivers/irqchip/irq-renesas-irqc.c 	if (!p->irq_domain) {
p                 178 drivers/irqchip/irq-renesas-irqc.c 	ret = irq_alloc_domain_generic_chips(p->irq_domain, p->number_of_irqs,
p                 186 drivers/irqchip/irq-renesas-irqc.c 	p->gc = irq_get_domain_generic_chip(p->irq_domain, 0);
p                 187 drivers/irqchip/irq-renesas-irqc.c 	p->gc->reg_base = p->cpu_int_base;
p                 188 drivers/irqchip/irq-renesas-irqc.c 	p->gc->chip_types[0].regs.enable = IRQC_EN_SET;
p                 189 drivers/irqchip/irq-renesas-irqc.c 	p->gc->chip_types[0].regs.disable = IRQC_EN_STS;
p                 190 drivers/irqchip/irq-renesas-irqc.c 	p->gc->chip_types[0].chip.parent_device = dev;
p                 191 drivers/irqchip/irq-renesas-irqc.c 	p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
p                 192 drivers/irqchip/irq-renesas-irqc.c 	p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
p                 193 drivers/irqchip/irq-renesas-irqc.c 	p->gc->chip_types[0].chip.irq_set_type	= irqc_irq_set_type;
p                 194 drivers/irqchip/irq-renesas-irqc.c 	p->gc->chip_types[0].chip.irq_set_wake	= irqc_irq_set_wake;
p                 195 drivers/irqchip/irq-renesas-irqc.c 	p->gc->chip_types[0].chip.flags	= IRQCHIP_MASK_ON_SUSPEND;
p                 198 drivers/irqchip/irq-renesas-irqc.c 	for (k = 0; k < p->number_of_irqs; k++) {
p                 199 drivers/irqchip/irq-renesas-irqc.c 		if (devm_request_irq(dev, p->irq[k].requested_irq,
p                 200 drivers/irqchip/irq-renesas-irqc.c 				     irqc_irq_handler, 0, name, &p->irq[k])) {
p                 207 drivers/irqchip/irq-renesas-irqc.c 	dev_info(dev, "driving %d irqs\n", p->number_of_irqs);
p                 212 drivers/irqchip/irq-renesas-irqc.c 	irq_domain_remove(p->irq_domain);
p                 221 drivers/irqchip/irq-renesas-irqc.c 	struct irqc_priv *p = platform_get_drvdata(pdev);
p                 223 drivers/irqchip/irq-renesas-irqc.c 	irq_domain_remove(p->irq_domain);
p                 231 drivers/irqchip/irq-renesas-irqc.c 	struct irqc_priv *p = dev_get_drvdata(dev);
p                 233 drivers/irqchip/irq-renesas-irqc.c 	if (atomic_read(&p->wakeup_path))
p                 162 drivers/isdn/capi/capi.c 	struct ackqueue_entry *p, *tmp;
p                 165 drivers/isdn/capi/capi.c 	list_for_each_entry_safe(p, tmp, &mp->ackqueue, list) {
p                 166 drivers/isdn/capi/capi.c 		if (p->datahandle == datahandle) {
p                 167 drivers/isdn/capi/capi.c 			list_del(&p->list);
p                 170 drivers/isdn/capi/capi.c 			kfree(p);
p                 180 drivers/isdn/capi/capi.c 	struct ackqueue_entry *p, *tmp;
p                 182 drivers/isdn/capi/capi.c 	list_for_each_entry_safe(p, tmp, &mp->ackqueue, list) {
p                 183 drivers/isdn/capi/capi.c 		list_del(&p->list);
p                 184 drivers/isdn/capi/capi.c 		kfree(p);
p                 229 drivers/isdn/capi/capiutil.c #define TYP (cdef[cmsg->par[cmsg->p]].typ)
p                 230 drivers/isdn/capi/capiutil.c #define OFF (((u8 *)cmsg) + cdef[cmsg->par[cmsg->p]].off)
p                 235 drivers/isdn/capi/capiutil.c 	for (cmsg->p++, layer = 1; layer;) {
p                 237 drivers/isdn/capi/capiutil.c 		cmsg->p++;
p                 252 drivers/isdn/capi/capiutil.c 	for (; TYP != _CEND; cmsg->p++) {
p                 291 drivers/isdn/capi/capiutil.c 				cmsg->p++;
p                 319 drivers/isdn/capi/capiutil.c 	cmsg->p = 0;
p                 338 drivers/isdn/capi/capiutil.c 	for (; TYP != _CEND; cmsg->p++) {
p                 371 drivers/isdn/capi/capiutil.c 				cmsg->p++;
p                 392 drivers/isdn/capi/capiutil.c 	cmsg->p = 0;
p                 578 drivers/isdn/capi/capiutil.c 	n = vsnprintf(cdb->p, r, fmt, f);
p                 596 drivers/isdn/capi/capiutil.c 		cdb->p = cdb->buf + cdb->pos;
p                 600 drivers/isdn/capi/capiutil.c 		n = vsnprintf(cdb->p, r, fmt, f);
p                 603 drivers/isdn/capi/capiutil.c 	cdb->p += n;
p                 648 drivers/isdn/capi/capiutil.c #define NAME (pnames[cmsg->par[cmsg->p]])
p                 655 drivers/isdn/capi/capiutil.c 	for (; TYP != _CEND; cmsg->p++) {
p                 703 drivers/isdn/capi/capiutil.c 				cmsg->p++;
p                 736 drivers/isdn/capi/capiutil.c 	cdb->p = cdb->buf;
p                 786 drivers/isdn/capi/capiutil.c 	cmsg->p = 0;
p                 824 drivers/isdn/capi/capiutil.c 	cmsg->p = 0;
p                 852 drivers/isdn/capi/capiutil.c 	g_debbuf->p = g_debbuf->buf;
p                 170 drivers/isdn/hardware/mISDN/avmfritz.c ReadISAC_V1(void *p, u8 offset)
p                 172 drivers/isdn/hardware/mISDN/avmfritz.c 	struct fritzcard *fc = p;
p                 180 drivers/isdn/hardware/mISDN/avmfritz.c WriteISAC_V1(void *p, u8 offset, u8 value)
p                 182 drivers/isdn/hardware/mISDN/avmfritz.c 	struct fritzcard *fc = p;
p                 190 drivers/isdn/hardware/mISDN/avmfritz.c ReadFiFoISAC_V1(void *p, u8 off, u8 *data, int size)
p                 192 drivers/isdn/hardware/mISDN/avmfritz.c 	struct fritzcard *fc = p;
p                 199 drivers/isdn/hardware/mISDN/avmfritz.c WriteFiFoISAC_V1(void *p, u8 off, u8 *data, int size)
p                 201 drivers/isdn/hardware/mISDN/avmfritz.c 	struct fritzcard *fc = p;
p                 208 drivers/isdn/hardware/mISDN/avmfritz.c ReadISAC_V2(void *p, u8 offset)
p                 210 drivers/isdn/hardware/mISDN/avmfritz.c 	struct fritzcard *fc = p;
p                 217 drivers/isdn/hardware/mISDN/avmfritz.c WriteISAC_V2(void *p, u8 offset, u8 value)
p                 219 drivers/isdn/hardware/mISDN/avmfritz.c 	struct fritzcard *fc = p;
p                 226 drivers/isdn/hardware/mISDN/avmfritz.c ReadFiFoISAC_V2(void *p, u8 off, u8 *data, int size)
p                 228 drivers/isdn/hardware/mISDN/avmfritz.c 	struct fritzcard *fc = p;
p                 237 drivers/isdn/hardware/mISDN/avmfritz.c WriteFiFoISAC_V2(void *p, u8 off, u8 *data, int size)
p                 239 drivers/isdn/hardware/mISDN/avmfritz.c 	struct fritzcard *fc = p;
p                 393 drivers/isdn/hardware/mISDN/avmfritz.c 	u8 *p;
p                 400 drivers/isdn/hardware/mISDN/avmfritz.c 		p = NULL;
p                 409 drivers/isdn/hardware/mISDN/avmfritz.c 		p = skb_put(bch->rx_skb, count);
p                 411 drivers/isdn/hardware/mISDN/avmfritz.c 	ptr = (u32 *)p;
p                 422 drivers/isdn/hardware/mISDN/avmfritz.c 		if (p) {
p                 428 drivers/isdn/hardware/mISDN/avmfritz.c 	if (p && (debug & DEBUG_HW_BFIFO)) {
p                 431 drivers/isdn/hardware/mISDN/avmfritz.c 		print_hex_dump_bytes(fc->log, DUMP_PREFIX_OFFSET, p, count);
p                 442 drivers/isdn/hardware/mISDN/avmfritz.c 	u8 *p;
p                 453 drivers/isdn/hardware/mISDN/avmfritz.c 		p = bch->fill;
p                 459 drivers/isdn/hardware/mISDN/avmfritz.c 		p = bch->tx_skb->data + bch->tx_idx;
p                 468 drivers/isdn/hardware/mISDN/avmfritz.c 	ptr = (u32 *)p;
p                 502 drivers/isdn/hardware/mISDN/avmfritz.c 		print_hex_dump_bytes(fc->log, DUMP_PREFIX_OFFSET, p, count);
p                1165 drivers/isdn/hardware/mISDN/hfcsusb.c 	__u8 threshbit, *p;
p                1277 drivers/isdn/hardware/mISDN/hfcsusb.c 				p = context_iso_urb->buffer + tx_offset + 1;
p                1279 drivers/isdn/hardware/mISDN/hfcsusb.c 					memset(p, fifo->bch->fill[0],
p                1282 drivers/isdn/hardware/mISDN/hfcsusb.c 					memcpy(p, (tx_skb->data + *tx_idx),
p                  26 drivers/isdn/hardware/mISDN/iohelper.h 	static u8 Read##name##_IO(void *p, u8 off) {			\
p                  27 drivers/isdn/hardware/mISDN/iohelper.h 		struct hws *hw = p;					\
p                  30 drivers/isdn/hardware/mISDN/iohelper.h 	static void Write##name##_IO(void *p, u8 off, u8 val) {		\
p                  31 drivers/isdn/hardware/mISDN/iohelper.h 		struct hws *hw = p;					\
p                  34 drivers/isdn/hardware/mISDN/iohelper.h 	static void ReadFiFo##name##_IO(void *p, u8 off, u8 *dp, int size) { \
p                  35 drivers/isdn/hardware/mISDN/iohelper.h 		struct hws *hw = p;					\
p                  38 drivers/isdn/hardware/mISDN/iohelper.h 	static void WriteFiFo##name##_IO(void *p, u8 off, u8 *dp, int size) { \
p                  39 drivers/isdn/hardware/mISDN/iohelper.h 		struct hws *hw = p;					\
p                  44 drivers/isdn/hardware/mISDN/iohelper.h 	static u8 Read##name##_IND(void *p, u8 off) {			\
p                  45 drivers/isdn/hardware/mISDN/iohelper.h 		struct hws *hw = p;					\
p                  49 drivers/isdn/hardware/mISDN/iohelper.h 	static void Write##name##_IND(void *p, u8 off, u8 val) {	\
p                  50 drivers/isdn/hardware/mISDN/iohelper.h 		struct hws *hw = p;					\
p                  54 drivers/isdn/hardware/mISDN/iohelper.h 	static void ReadFiFo##name##_IND(void *p, u8 off, u8 *dp, int size) { \
p                  55 drivers/isdn/hardware/mISDN/iohelper.h 		struct hws *hw = p;					\
p                  59 drivers/isdn/hardware/mISDN/iohelper.h 	static void WriteFiFo##name##_IND(void *p, u8 off, u8 *dp, int size) { \
p                  60 drivers/isdn/hardware/mISDN/iohelper.h 		struct hws *hw = p;					\
p                  66 drivers/isdn/hardware/mISDN/iohelper.h 	static u8 Read##name##_MIO(void *p, u8 off) {			\
p                  67 drivers/isdn/hardware/mISDN/iohelper.h 		struct hws *hw = p;					\
p                  70 drivers/isdn/hardware/mISDN/iohelper.h 	static void Write##name##_MIO(void *p, u8 off, u8 val) {	\
p                  71 drivers/isdn/hardware/mISDN/iohelper.h 		struct hws *hw = p;					\
p                  74 drivers/isdn/hardware/mISDN/iohelper.h 	static void ReadFiFo##name##_MIO(void *p, u8 off, u8 *dp, int size) { \
p                  75 drivers/isdn/hardware/mISDN/iohelper.h 		struct hws *hw = p;					\
p                  79 drivers/isdn/hardware/mISDN/iohelper.h 	static void WriteFiFo##name##_MIO(void *p, u8 off, u8 *dp, int size) { \
p                  80 drivers/isdn/hardware/mISDN/iohelper.h 		struct hws *hw = p;					\
p                  80 drivers/isdn/hardware/mISDN/mISDNinfineon.c 		void __iomem	*p;
p                  89 drivers/isdn/hardware/mISDN/mISDNinfineon.c 	void __iomem	*p;
p                 261 drivers/isdn/hardware/mISDN/mISDNinfineon.c IOFUNC_MEMIO(ISAC, inf_hw, u32, isac.a.p)
p                 262 drivers/isdn/hardware/mISDN/mISDNinfineon.c IOFUNC_MEMIO(IPAC, inf_hw, u32, hscx.a.p)
p                 289 drivers/isdn/hardware/mISDN/mISDNinfineon.c 	val = readb(hw->cfg.p);
p                 296 drivers/isdn/hardware/mISDN/mISDNinfineon.c 	writeb(PITA_INT0_STATUS, hw->cfg.p); /* ACK PITA INT0 */
p                 395 drivers/isdn/hardware/mISDN/mISDNinfineon.c 		writel(PITA_INT0_ENABLE, hw->cfg.p);
p                 439 drivers/isdn/hardware/mISDN/mISDNinfineon.c 		writel(0, hw->cfg.p);
p                 503 drivers/isdn/hardware/mISDN/mISDNinfineon.c 		       hw->cfg.p + PITA_MISC_REG);
p                 505 drivers/isdn/hardware/mISDN/mISDNinfineon.c 		writel(PITA_PARA_MPX_MODE, hw->cfg.p + PITA_MISC_REG);
p                 510 drivers/isdn/hardware/mISDN/mISDNinfineon.c 		       hw->cfg.p + PITA_MISC_REG);
p                 513 drivers/isdn/hardware/mISDN/mISDNinfineon.c 		       hw->cfg.p + PITA_MISC_REG);
p                 633 drivers/isdn/hardware/mISDN/mISDNinfineon.c 		if (hw->cfg.p) {
p                 635 drivers/isdn/hardware/mISDN/mISDNinfineon.c 			iounmap(hw->cfg.p);
p                 641 drivers/isdn/hardware/mISDN/mISDNinfineon.c 		if (hw->addr.p) {
p                 643 drivers/isdn/hardware/mISDN/mISDNinfineon.c 			iounmap(hw->addr.p);
p                 674 drivers/isdn/hardware/mISDN/mISDNinfineon.c 			hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size);
p                 701 drivers/isdn/hardware/mISDN/mISDNinfineon.c 			hw->addr.p = ioremap(hw->addr.start, hw->addr.size);
p                 702 drivers/isdn/hardware/mISDN/mISDNinfineon.c 			if (unlikely(!hw->addr.p))
p                 728 drivers/isdn/hardware/mISDN/mISDNinfineon.c 		hw->isac.a.p = hw->addr.p;
p                 730 drivers/isdn/hardware/mISDN/mISDNinfineon.c 		hw->hscx.a.p = hw->addr.p;
p                 735 drivers/isdn/hardware/mISDN/mISDNinfineon.c 		hw->isac.a.p = hw->addr.p;
p                 737 drivers/isdn/hardware/mISDN/mISDNinfineon.c 		hw->hscx.a.p = hw->addr.p;
p                 925 drivers/isdn/hardware/mISDN/mISDNipac.c 	u8 *p;
p                 943 drivers/isdn/hardware/mISDN/mISDNipac.c 	p = skb_put(hscx->bch.rx_skb, count);
p                 947 drivers/isdn/hardware/mISDN/mISDNipac.c 				    hscx->off + IPACX_RFIFOB, p, count);
p                 950 drivers/isdn/hardware/mISDN/mISDNipac.c 				    hscx->off, p, count);
p                 957 drivers/isdn/hardware/mISDN/mISDNipac.c 		print_hex_dump_bytes(hscx->log, DUMP_PREFIX_OFFSET, p, count);
p                 965 drivers/isdn/hardware/mISDN/mISDNipac.c 	u8 *p;
p                 972 drivers/isdn/hardware/mISDN/mISDNipac.c 		p = hscx->log;
p                 973 drivers/isdn/hardware/mISDN/mISDNipac.c 		memset(p, hscx->bch.fill[0], count);
p                 978 drivers/isdn/hardware/mISDN/mISDNipac.c 		p = hscx->bch.tx_skb->data + hscx->bch.tx_idx;
p                 991 drivers/isdn/hardware/mISDN/mISDNipac.c 				     hscx->off + IPACX_XFIFOB, p, count);
p                 995 drivers/isdn/hardware/mISDN/mISDNipac.c 				     hscx->off, p, count);
p                1002 drivers/isdn/hardware/mISDN/mISDNipac.c 		print_hex_dump_bytes(hscx->log, DUMP_PREFIX_OFFSET, p, count);
p                 131 drivers/isdn/hardware/mISDN/netjet.c ReadISAC_nj(void *p, u8 offset)
p                 133 drivers/isdn/hardware/mISDN/netjet.c 	struct tiger_hw *card = p;
p                 144 drivers/isdn/hardware/mISDN/netjet.c WriteISAC_nj(void *p, u8 offset, u8 value)
p                 146 drivers/isdn/hardware/mISDN/netjet.c 	struct tiger_hw *card = p;
p                 155 drivers/isdn/hardware/mISDN/netjet.c ReadFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
p                 157 drivers/isdn/hardware/mISDN/netjet.c 	struct tiger_hw *card = p;
p                 165 drivers/isdn/hardware/mISDN/netjet.c WriteFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
p                 167 drivers/isdn/hardware/mISDN/netjet.c 	struct tiger_hw *card = p;
p                 368 drivers/isdn/hardware/mISDN/netjet.c 	u8 *p, *pn;
p                 388 drivers/isdn/hardware/mISDN/netjet.c 		p = skb_put(bc->bch.rx_skb, cnt);
p                 390 drivers/isdn/hardware/mISDN/netjet.c 		p = bc->hrbuf;
p                 398 drivers/isdn/hardware/mISDN/netjet.c 		p[i] = val & 0xff;
p                 411 drivers/isdn/hardware/mISDN/netjet.c 			p = skb_put(bc->bch.rx_skb, stat);
p                 417 drivers/isdn/hardware/mISDN/netjet.c 						     DUMP_PREFIX_OFFSET, p,
p                 492 drivers/isdn/hardware/mISDN/netjet.c 	u8  *p;
p                 506 drivers/isdn/hardware/mISDN/netjet.c 	p = bc->hsbuf;
p                 513 drivers/isdn/hardware/mISDN/netjet.c 		v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8;
p                 519 drivers/isdn/hardware/mISDN/netjet.c 		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
p                 529 drivers/isdn/hardware/mISDN/netjet.c 	u8  *p;
p                 538 drivers/isdn/hardware/mISDN/netjet.c 		p = bc->bch.fill;
p                 547 drivers/isdn/hardware/mISDN/netjet.c 		p = bc->bch.tx_skb->data + bc->bch.tx_idx;
p                 552 drivers/isdn/hardware/mISDN/netjet.c 		count = isdnhdlc_encode(&bc->hsend, p, count, &i,
p                 558 drivers/isdn/hardware/mISDN/netjet.c 		p = bc->hsbuf;
p                 568 drivers/isdn/hardware/mISDN/netjet.c 		n = p[0];
p                 585 drivers/isdn/hardware/mISDN/netjet.c 			n = p[i];
p                 593 drivers/isdn/hardware/mISDN/netjet.c 		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
p                 395 drivers/isdn/mISDN/dsp_audio.c 	u8 *p;
p                 414 drivers/isdn/mISDN/dsp_audio.c 	p = skb->data;
p                 417 drivers/isdn/mISDN/dsp_audio.c 		*p = volume_change[*p];
p                 418 drivers/isdn/mISDN/dsp_audio.c 		p++;
p                  77 drivers/isdn/mISDN/dsp_blowfish.c 	u32 p[18];
p                1184 drivers/isdn/mISDN/dsp_cmx.c 	u8 *d, *p;
p                1287 drivers/isdn/mISDN/dsp_cmx.c 	p = skb->data;
p                1293 drivers/isdn/mISDN/dsp_cmx.c 		d[w++ & CMX_BUFF_MASK] = *p++;
p                1314 drivers/isdn/mISDN/dsp_cmx.c 	u8 *d, *p, *q, *o_q;
p                1368 drivers/isdn/mISDN/dsp_cmx.c 	p = dsp->tx_buff; /* transmit data */
p                1394 drivers/isdn/mISDN/dsp_cmx.c 		sprintf(debugbuf, "TX sending (%04x-%04x)%p: ", t, tt, p);
p                1400 drivers/isdn/mISDN/dsp_cmx.c 					p[t]);
p                1402 drivers/isdn/mISDN/dsp_cmx.c 			*d++ = p[t]; /* write tx_buff */
p                1424 drivers/isdn/mISDN/dsp_cmx.c 				*d++ = p[t]; /* write tx_buff */
p                1441 drivers/isdn/mISDN/dsp_cmx.c 				*d++ = dsp_audio_mix_law[(p[t] << 8) | q[r]];
p                1477 drivers/isdn/mISDN/dsp_cmx.c 				*d++ = dsp_audio_mix_law[(p[t] << 8) | o_q[o_r]];
p                1492 drivers/isdn/mISDN/dsp_cmx.c 				sample = dsp_audio_law_to_s32[p[t]] +
p                1522 drivers/isdn/mISDN/dsp_cmx.c 			sample = dsp_audio_law_to_s32[p[t]] + *c++ -
p                1550 drivers/isdn/mISDN/dsp_cmx.c 			sample = dsp_audio_law_to_s32[p[t]] + *c++;
p                1636 drivers/isdn/mISDN/dsp_cmx.c 	u8 *p, *q;
p                1738 drivers/isdn/mISDN/dsp_cmx.c 		p = dsp->rx_buff;
p                1746 drivers/isdn/mISDN/dsp_cmx.c 				p[r] = dsp_silence;
p                1793 drivers/isdn/mISDN/dsp_cmx.c 					p[r] = dsp_silence;
p                1863 drivers/isdn/mISDN/dsp_cmx.c 	u8 *d, *p;
p                1872 drivers/isdn/mISDN/dsp_cmx.c 	p = dsp->tx_buff;
p                1896 drivers/isdn/mISDN/dsp_cmx.c 	sprintf(debugbuf, "TX getting (%04x-%04x)%p: ", w, ww, p);
p                1903 drivers/isdn/mISDN/dsp_cmx.c 		p[w] = *d++;
p                  25 drivers/isdn/mISDN/dsp_pipeline.c 	void                *p;
p                  44 drivers/isdn/mISDN/dsp_pipeline.c 	char *p = buf;
p                  48 drivers/isdn/mISDN/dsp_pipeline.c 		p += sprintf(p, "Name:        %s\n%s%s%sDescription: %s\n\n",
p                  55 drivers/isdn/mISDN/dsp_pipeline.c 	return p - buf;
p                 201 drivers/isdn/mISDN/dsp_pipeline.c 			entry->elem->free(entry->p);
p                 267 drivers/isdn/mISDN/dsp_pipeline.c 					pipeline_entry->p = elem->new(args);
p                 268 drivers/isdn/mISDN/dsp_pipeline.c 					if (pipeline_entry->p) {
p                 323 drivers/isdn/mISDN/dsp_pipeline.c 			entry->elem->process_tx(entry->p, data, len);
p                 336 drivers/isdn/mISDN/dsp_pipeline.c 			entry->elem->process_rx(entry->p, data, len, txlen);
p                 201 drivers/isdn/mISDN/hwchannel.c get_sapi_tei(u_char *p)
p                 205 drivers/isdn/mISDN/hwchannel.c 	sapi = *p >> 2;
p                 206 drivers/isdn/mISDN/hwchannel.c 	tei = p[1] >> 1;
p                 267 drivers/isdn/mISDN/l1oip_core.c 	u8 *p;
p                 275 drivers/isdn/mISDN/l1oip_core.c 	p = frame;
p                 295 drivers/isdn/mISDN/l1oip_core.c 	*p++ = (L1OIP_VERSION << 6) /* version and coding */
p                 300 drivers/isdn/mISDN/l1oip_core.c 		*p++ = hc->id >> 24; /* id */
p                 301 drivers/isdn/mISDN/l1oip_core.c 		*p++ = hc->id >> 16;
p                 302 drivers/isdn/mISDN/l1oip_core.c 		*p++ = hc->id >> 8;
p                 303 drivers/isdn/mISDN/l1oip_core.c 		*p++ = hc->id;
p                 305 drivers/isdn/mISDN/l1oip_core.c 	*p++ =  0x00 + channel; /* m-flag, channel */
p                 306 drivers/isdn/mISDN/l1oip_core.c 	*p++ = timebase >> 8; /* time base */
p                 307 drivers/isdn/mISDN/l1oip_core.c 	*p++ = timebase;
p                 311 drivers/isdn/mISDN/l1oip_core.c 			l1oip_ulaw_to_alaw(buf, len, p);
p                 313 drivers/isdn/mISDN/l1oip_core.c 			l1oip_alaw_to_ulaw(buf, len, p);
p                 315 drivers/isdn/mISDN/l1oip_core.c 			len = l1oip_law_to_4bit(buf, len, p,
p                 318 drivers/isdn/mISDN/l1oip_core.c 			memcpy(p, buf, len);
p                 320 drivers/isdn/mISDN/l1oip_core.c 	len += p - frame;
p                 356 drivers/isdn/mISDN/l1oip_core.c 	u8 *p;
p                 389 drivers/isdn/mISDN/l1oip_core.c 	p = skb_put(nskb, (remotecodec == 3) ? (len << 1) : len);
p                 392 drivers/isdn/mISDN/l1oip_core.c 		l1oip_alaw_to_ulaw(buf, len, p);
p                 394 drivers/isdn/mISDN/l1oip_core.c 		l1oip_ulaw_to_alaw(buf, len, p);
p                 396 drivers/isdn/mISDN/l1oip_core.c 		len = l1oip_4bit_to_law(buf, len, p);
p                 398 drivers/isdn/mISDN/l1oip_core.c 		memcpy(p, buf, len);
p                 872 drivers/isdn/mISDN/l1oip_core.c 	unsigned char		*p;
p                 887 drivers/isdn/mISDN/l1oip_core.c 		p = skb->data;
p                 896 drivers/isdn/mISDN/l1oip_core.c 					  hc->chan[dch->slot].tx_counter++, p, ll);
p                 897 drivers/isdn/mISDN/l1oip_core.c 			p += ll;
p                1091 drivers/isdn/mISDN/l1oip_core.c 	unsigned char		*p;
p                1129 drivers/isdn/mISDN/l1oip_core.c 		p = skb->data;
p                1138 drivers/isdn/mISDN/l1oip_core.c 					  hc->chan[bch->slot].tx_counter, p, ll);
p                1140 drivers/isdn/mISDN/l1oip_core.c 			p += ll;
p                 279 drivers/isdn/mISDN/socket.c data_sock_ioctl_bound(struct sock *sk, unsigned int cmd, void __user *p)
p                 292 drivers/isdn/mISDN/socket.c 		if (copy_from_user(&cq, p, sizeof(cq))) {
p                 310 drivers/isdn/mISDN/socket.c 		if (copy_to_user(p, &cq, sizeof(cq)))
p                 319 drivers/isdn/mISDN/socket.c 		if (get_user(val[1], (int __user *)p)) {
p                 333 drivers/isdn/mISDN/socket.c 		if (get_user(val[1], (int __user *)p)) {
p                 406 drivers/leds/led-class.c 	struct led_cdev **p = res;
p                 408 drivers/leds/led-class.c 	if (WARN_ON(!p || !*p))
p                 411 drivers/leds/led-class.c 	return *p == data;
p                  70 drivers/leds/leds-mt6323.c #define MT6323_CAL_HW_DUTY(o, p)	DIV_ROUND_CLOSEST((o) * 100000ul,\
p                  71 drivers/leds/leds-mt6323.c 					(p) * MT6323_UNIT_DUTY)
p                 248 drivers/leds/leds-powernv.c 	struct property *p;
p                 254 drivers/leds/leds-powernv.c 		p = of_find_property(np, "led-types", NULL);
p                 256 drivers/leds/leds-powernv.c 		while ((cur = of_prop_next_string(p, cur)) != NULL) {
p                  33 drivers/leds/leds-sunfire.c 	struct sunfire_led *p = to_sunfire_led(led_cdev);
p                  34 drivers/leds/leds-sunfire.c 	u8 reg = upa_readb(p->reg);
p                  51 drivers/leds/leds-sunfire.c 	upa_writeb(reg, p->reg);
p                  75 drivers/leds/leds-sunfire.c 	struct sunfire_led *p = to_sunfire_led(led_cdev);
p                  76 drivers/leds/leds-sunfire.c 	u32 reg = upa_readl(p->reg);
p                  93 drivers/leds/leds-sunfire.c 	upa_writel(reg, p->reg);
p                 129 drivers/leds/leds-sunfire.c 	struct sunfire_drvdata *p;
p                 138 drivers/leds/leds-sunfire.c 	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
p                 139 drivers/leds/leds-sunfire.c 	if (!p)
p                 143 drivers/leds/leds-sunfire.c 		struct led_classdev *lp = &p->leds[i].led_cdev;
p                 145 drivers/leds/leds-sunfire.c 		p->leds[i].reg = (void __iomem *) pdev->resource[0].start;
p                 156 drivers/leds/leds-sunfire.c 				led_classdev_unregister(&p->leds[i].led_cdev);
p                 161 drivers/leds/leds-sunfire.c 	platform_set_drvdata(pdev, p);
p                 168 drivers/leds/leds-sunfire.c 	struct sunfire_drvdata *p = platform_get_drvdata(pdev);
p                 172 drivers/leds/leds-sunfire.c 		led_classdev_unregister(&p->leds[i].led_cdev);
p                  28 drivers/leds/trigger/ledtrig-backlight.c static int fb_notifier_callback(struct notifier_block *p,
p                  31 drivers/leds/trigger/ledtrig-backlight.c 	struct bl_trig_notifier *n = container_of(p,
p                 568 drivers/lightnvm/core.c static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
p                 571 drivers/lightnvm/core.c 	struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
p                 572 drivers/lightnvm/core.c 	int lun_off = ch_map->lun_offs[p->a.lun];
p                 574 drivers/lightnvm/core.c 	p->a.ch += ch_map->ch_off;
p                 575 drivers/lightnvm/core.c 	p->a.lun += lun_off;
p                 578 drivers/lightnvm/core.c static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
p                 582 drivers/lightnvm/core.c 	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
p                 583 drivers/lightnvm/core.c 	int lun_roff = ch_rmap->lun_offs[p->a.lun];
p                 585 drivers/lightnvm/core.c 	p->a.ch -= ch_rmap->ch_off;
p                 586 drivers/lightnvm/core.c 	p->a.lun -= lun_roff;
p                  28 drivers/lightnvm/pblk-rb.c 	struct pblk_rb_pages *p, *t;
p                  31 drivers/lightnvm/pblk-rb.c 	list_for_each_entry_safe(p, t, &rb->pages, list) {
p                  32 drivers/lightnvm/pblk-rb.c 		free_pages((unsigned long)page_address(p->pages), p->order);
p                  33 drivers/lightnvm/pblk-rb.c 		list_del(&p->list);
p                  34 drivers/lightnvm/pblk-rb.c 		kfree(p);
p                 200 drivers/lightnvm/pblk-rb.c unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
p                 203 drivers/lightnvm/pblk-rb.c 	return (p + nr_entries) & (rb->nr_entries - 1);
p                 355 drivers/lightnvm/pblk-recovery.c 			       struct pblk_recov_alloc p)
p                 376 drivers/lightnvm/pblk-recovery.c 	ppa_list = p.ppa_list;
p                 377 drivers/lightnvm/pblk-recovery.c 	meta_list = p.meta_list;
p                 378 drivers/lightnvm/pblk-recovery.c 	rqd = p.rqd;
p                 379 drivers/lightnvm/pblk-recovery.c 	data = p.data;
p                 380 drivers/lightnvm/pblk-recovery.c 	dma_ppa_list = p.dma_ppa_list;
p                 381 drivers/lightnvm/pblk-recovery.c 	dma_meta_list = p.dma_meta_list;
p                 484 drivers/lightnvm/pblk-recovery.c 	struct pblk_recov_alloc p;
p                 505 drivers/lightnvm/pblk-recovery.c 	p.ppa_list = ppa_list;
p                 506 drivers/lightnvm/pblk-recovery.c 	p.meta_list = meta_list;
p                 507 drivers/lightnvm/pblk-recovery.c 	p.rqd = rqd;
p                 508 drivers/lightnvm/pblk-recovery.c 	p.data = data;
p                 509 drivers/lightnvm/pblk-recovery.c 	p.dma_ppa_list = dma_ppa_list;
p                 510 drivers/lightnvm/pblk-recovery.c 	p.dma_meta_list = dma_meta_list;
p                 512 drivers/lightnvm/pblk-recovery.c 	ret = pblk_recov_scan_oob(pblk, line, p);
p                 386 drivers/lightnvm/pblk-sysfs.c 	int p = bucket * 100;
p                 388 drivers/lightnvm/pblk-sysfs.c 	p = div_u64(p, total);
p                 390 drivers/lightnvm/pblk-sysfs.c 	return p;
p                 418 drivers/lightnvm/pblk-sysfs.c 		unsigned long long p;
p                 420 drivers/lightnvm/pblk-sysfs.c 		p = bucket_percentage(atomic64_read(&pblk->pad_dist[i]),
p                 423 drivers/lightnvm/pblk-sysfs.c 				i + 1, p);
p                 747 drivers/lightnvm/pblk.h unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
p                 960 drivers/lightnvm/pblk.h static inline int pblk_ppa_to_line_id(struct ppa_addr p)
p                 962 drivers/lightnvm/pblk.h 	return p.a.blk;
p                 966 drivers/lightnvm/pblk.h 						 struct ppa_addr p)
p                 968 drivers/lightnvm/pblk.h 	return &pblk->lines[pblk_ppa_to_line_id(p)];
p                 971 drivers/lightnvm/pblk.h static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
p                 973 drivers/lightnvm/pblk.h 	return p.a.lun * geo->num_ch + p.a.ch;
p                1017 drivers/lightnvm/pblk.h 							struct ppa_addr p)
p                1021 drivers/lightnvm/pblk.h 	struct pblk_line *line = pblk_ppa_to_line(pblk, p);
p                1022 drivers/lightnvm/pblk.h 	int pos = pblk_ppa_to_pos(geo, p);
p                1028 drivers/lightnvm/pblk.h 							struct ppa_addr p)
p                1032 drivers/lightnvm/pblk.h 	return dev_to_chunk_addr(dev->parent, &pblk->addrf, p);
p                1036 drivers/lightnvm/pblk.h 							struct ppa_addr p)
p                1045 drivers/lightnvm/pblk.h 		paddr = (u64)p.g.ch << ppaf->ch_offset;
p                1046 drivers/lightnvm/pblk.h 		paddr |= (u64)p.g.lun << ppaf->lun_offset;
p                1047 drivers/lightnvm/pblk.h 		paddr |= (u64)p.g.pg << ppaf->pg_offset;
p                1048 drivers/lightnvm/pblk.h 		paddr |= (u64)p.g.pl << ppaf->pln_offset;
p                1049 drivers/lightnvm/pblk.h 		paddr |= (u64)p.g.sec << ppaf->sec_offset;
p                1052 drivers/lightnvm/pblk.h 		u64 secs = p.m.sec;
p                1055 drivers/lightnvm/pblk.h 		paddr = (u64)p.m.grp * uaddrf->sec_stripe;
p                1056 drivers/lightnvm/pblk.h 		paddr += (u64)p.m.pu * uaddrf->sec_lun_stripe;
p                1139 drivers/lightnvm/pblk.h 	struct ppa_addr p;
p                1141 drivers/lightnvm/pblk.h 	p.c.line = addr;
p                1142 drivers/lightnvm/pblk.h 	p.c.is_cached = 1;
p                1144 drivers/lightnvm/pblk.h 	return p;
p                1192 drivers/lightnvm/pblk.h static inline void print_ppa(struct pblk *pblk, struct ppa_addr *p,
p                1197 drivers/lightnvm/pblk.h 	if (p->c.is_cached) {
p                1199 drivers/lightnvm/pblk.h 				msg, error, (u64)p->c.line);
p                1203 drivers/lightnvm/pblk.h 			p->g.ch, p->g.lun, p->g.blk,
p                1204 drivers/lightnvm/pblk.h 			p->g.pg, p->g.pl, p->g.sec);
p                1208 drivers/lightnvm/pblk.h 			p->m.grp, p->m.pu, p->m.chk, p->m.sec);
p                  60 drivers/macintosh/ans-lcd.c 	const char __user *p = buf;
p                  71 drivers/macintosh/ans-lcd.c 	for ( i = *ppos; count > 0; ++i, ++p, --count ) 
p                  74 drivers/macintosh/ans-lcd.c 		__get_user(c, p);
p                  79 drivers/macintosh/ans-lcd.c 	return p - buf;
p                 561 drivers/macintosh/via-cuda.c #define ARRAY_FULL(a, p)	((p) - (a) == ARRAY_SIZE(a))
p                  40 drivers/mailbox/arm_mhu.c static irqreturn_t mhu_rx_interrupt(int irq, void *p)
p                  42 drivers/mailbox/arm_mhu.c 	struct mbox_chan *chan = p;
p                 148 drivers/mailbox/hi6220-mailbox.c static irqreturn_t hi6220_mbox_interrupt(int irq, void *p)
p                 150 drivers/mailbox/hi6220-mailbox.c 	struct hi6220_mbox *mbox = p;
p                 107 drivers/mailbox/imx-mailbox.c static irqreturn_t imx_mu_isr(int irq, void *p)
p                 109 drivers/mailbox/imx-mailbox.c 	struct mbox_chan *chan = p;
p                 141 drivers/mailbox/mailbox-altera.c static irqreturn_t altera_mbox_tx_interrupt(int irq, void *p)
p                 143 drivers/mailbox/mailbox-altera.c 	struct mbox_chan *chan = (struct mbox_chan *)p;
p                 152 drivers/mailbox/mailbox-altera.c static irqreturn_t altera_mbox_rx_interrupt(int irq, void *p)
p                 154 drivers/mailbox/mailbox-altera.c 	struct mbox_chan *chan = (struct mbox_chan *)p;
p                 313 drivers/mailbox/omap-mailbox.c static irqreturn_t mbox_interrupt(int irq, void *p)
p                 315 drivers/mailbox/omap-mailbox.c 	struct omap_mbox *mbox = p;
p                 178 drivers/mailbox/pcc.c static irqreturn_t pcc_mbox_irq(int irq, void *p)
p                 182 drivers/mailbox/pcc.c 	struct mbox_chan *chan = p;
p                  46 drivers/mailbox/platform_mhu.c static irqreturn_t platform_mhu_rx_interrupt(int irq, void *p)
p                  48 drivers/mailbox/platform_mhu.c 	struct mbox_chan *chan = p;
p                 202 drivers/mailbox/ti-msgmgr.c static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
p                 204 drivers/mailbox/ti-msgmgr.c 	struct mbox_chan *chan = p;
p                 518 drivers/mailbox/ti-msgmgr.c 					    const struct of_phandle_args *p)
p                 536 drivers/mailbox/ti-msgmgr.c 	if (p->args_count != ncells) {
p                 538 drivers/mailbox/ti-msgmgr.c 			p->args_count, ncells);
p                 543 drivers/mailbox/ti-msgmgr.c 		req_pid = p->args[0];
p                 545 drivers/mailbox/ti-msgmgr.c 		req_qid = p->args[0];
p                 546 drivers/mailbox/ti-msgmgr.c 		req_pid = p->args[1];
p                 564 drivers/mailbox/ti-msgmgr.c 		req_qid, req_pid, p->np);
p                 404 drivers/mailbox/zynqmp-ipi-mailbox.c 					     const struct of_phandle_args *p)
p                 411 drivers/mailbox/zynqmp-ipi-mailbox.c 	chan_type = p->args[0];
p                  16 drivers/mcb/mcb-parse.c #define for_each_chameleon_cell(dtype, p)	\
p                  17 drivers/mcb/mcb-parse.c 	for ((dtype) = get_next_dtype((p));	\
p                  19 drivers/mcb/mcb-parse.c 	     (dtype) = get_next_dtype((p)))
p                  21 drivers/mcb/mcb-parse.c static inline uint32_t get_next_dtype(void __iomem *p)
p                  25 drivers/mcb/mcb-parse.c 	dtype = readl(p);
p                 119 drivers/mcb/mcb-parse.c 	char __iomem *p = base;
p                 123 drivers/mcb/mcb-parse.c 	p += sizeof(__le32);
p                 126 drivers/mcb/mcb-parse.c 		cb[i].addr = readl(p);
p                 127 drivers/mcb/mcb-parse.c 		cb[i].size = readl(p + 4);
p                 129 drivers/mcb/mcb-parse.c 		p += sizeof(struct chameleon_bar);
p                 182 drivers/mcb/mcb-parse.c 	char __iomem *p = base;
p                 196 drivers/mcb/mcb-parse.c 	memcpy_fromio(header, p, hsize);
p                 205 drivers/mcb/mcb-parse.c 	p += hsize;
p                 213 drivers/mcb/mcb-parse.c 	bar_count = chameleon_get_bar(&p, mapbase, &cb);
p                 219 drivers/mcb/mcb-parse.c 	for_each_chameleon_cell(dtype, p) {
p                 222 drivers/mcb/mcb-parse.c 			ret = chameleon_parse_gdd(bus, cb, p, bar_count);
p                 225 drivers/mcb/mcb-parse.c 			p += sizeof(struct chameleon_gdd);
p                 228 drivers/mcb/mcb-parse.c 			chameleon_parse_bdd(bus, cb, p);
p                 229 drivers/mcb/mcb-parse.c 			p += sizeof(struct chameleon_bdd);
p                  69 drivers/md/bcache/bset.c 	struct bkey *k, *p = NULL;
p                  76 drivers/md/bcache/bset.c 			if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
p                  83 drivers/md/bcache/bset.c 			if (p && bkey_cmp(p, &START_KEY(k)) > 0)
p                  90 drivers/md/bcache/bset.c 			if (p && !bkey_cmp(p, k))
p                  93 drivers/md/bcache/bset.c 		p = k;
p                  97 drivers/md/bcache/bset.c 	if (p && bkey_cmp(p, &b->key) > 0)
p                 588 drivers/md/bcache/bset.c 	const uint64_t *p = &k->low - (f->exponent >> 6);
p                 590 drivers/md/bcache/bset.c 	return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
p                 597 drivers/md/bcache/bset.c 	struct bkey *p = tree_to_prev_bkey(t, j);
p                 608 drivers/md/bcache/bset.c 	BUG_ON(bkey_next(p) != m);
p                 632 drivers/md/bcache/bset.c 	if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
p                 970 drivers/md/bcache/bset.c 		unsigned int p = n << 4;
p                 972 drivers/md/bcache/bset.c 		if (p < t->size)
p                 973 drivers/md/bcache/bset.c 			prefetch(&t->tree[p]);
p                1920 drivers/md/bcache/btree.c 	struct bkey *k, *p = NULL;
p                1943 drivers/md/bcache/btree.c 			if (p)
p                1944 drivers/md/bcache/btree.c 				ret = btree(check_recurse, p, b, op);
p                1946 drivers/md/bcache/btree.c 			p = k;
p                1947 drivers/md/bcache/btree.c 		} while (p && !ret);
p                2580 drivers/md/bcache/btree.c 	struct keybuf_key *p, *w, s;
p                2592 drivers/md/bcache/btree.c 		p = w;
p                2595 drivers/md/bcache/btree.c 		if (p->private)
p                2598 drivers/md/bcache/btree.c 			__bch_keybuf_del(buf, p);
p                 109 drivers/md/bcache/closure.c 	struct task_struct *p;
p                 112 drivers/md/bcache/closure.c 	p = READ_ONCE(s->task);
p                 114 drivers/md/bcache/closure.c 	wake_up_process(p);
p                 104 drivers/md/bcache/extents.c 	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));
p                 108 drivers/md/bcache/extents.c 			p(", ");
p                 111 drivers/md/bcache/extents.c 			p("check dev");
p                 113 drivers/md/bcache/extents.c 			p("%llu:%llu gen %llu", PTR_DEV(k, i),
p                 117 drivers/md/bcache/extents.c 	p("]");
p                 120 drivers/md/bcache/extents.c 		p(" dirty");
p                 122 drivers/md/bcache/extents.c 		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
p                 302 drivers/md/bcache/journal.c 	atomic_t p = { 0 };
p                 321 drivers/md/bcache/journal.c 				fifo_push_front(&j->pin, p);
p                 326 drivers/md/bcache/journal.c 			fifo_push_front(&j->pin, p);
p                 420 drivers/md/bcache/journal.c #define nr_to_fifo_front(p, front_p, mask)	(((p) - (front_p)) & (mask))
p                 645 drivers/md/bcache/journal.c 	atomic_t p __maybe_unused;
p                 650 drivers/md/bcache/journal.c 		fifo_pop(&c->journal.pin, p);
p                 703 drivers/md/bcache/journal.c 	atomic_t p = { 1 };
p                 713 drivers/md/bcache/journal.c 	BUG_ON(!fifo_push(&j->pin, p));
p                 566 drivers/md/bcache/super.c 		struct prio_set *p = ca->disk_buckets;
p                 567 drivers/md/bcache/super.c 		struct bucket_disk *d = p->data;
p                 577 drivers/md/bcache/super.c 		p->next_bucket	= ca->prio_buckets[i + 1];
p                 578 drivers/md/bcache/super.c 		p->magic	= pset_magic(&ca->sb);
p                 579 drivers/md/bcache/super.c 		p->csum		= bch_crc64(&p->magic, bucket_bytes(ca) - 8);
p                 615 drivers/md/bcache/super.c 	struct prio_set *p = ca->disk_buckets;
p                 616 drivers/md/bcache/super.c 	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
p                 630 drivers/md/bcache/super.c 			if (p->csum !=
p                 631 drivers/md/bcache/super.c 			    bch_crc64(&p->magic, bucket_bytes(ca) - 8))
p                 634 drivers/md/bcache/super.c 			if (p->magic != pset_magic(&ca->sb))
p                 637 drivers/md/bcache/super.c 			bucket = p->next_bucket;
p                 638 drivers/md/bcache/super.c 			d = p->data;
p                 680 drivers/md/bcache/sysfs.c 		struct hlist_node *p;
p                 682 drivers/md/bcache/sysfs.c 		hlist_for_each(p, h)
p                1022 drivers/md/bcache/sysfs.c 		uint16_t q[31], *p, *cached;
p                1025 drivers/md/bcache/sysfs.c 		cached = p = vmalloc(array_size(sizeof(uint16_t),
p                1027 drivers/md/bcache/sysfs.c 		if (!p)
p                1043 drivers/md/bcache/sysfs.c 			p[i] = ca->buckets[i].prio;
p                1046 drivers/md/bcache/sysfs.c 		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);
p                1052 drivers/md/bcache/sysfs.c 		while (cached < p + n &&
p                1066 drivers/md/bcache/sysfs.c 		vfree(p);
p                 124 drivers/md/bcache/util.c bool bch_is_zero(const char *p, size_t n)
p                 129 drivers/md/bcache/util.c 		if (p[i])
p                  79 drivers/md/bcache/util.h 		size_t p = (i - 1) / 2;					\
p                  80 drivers/md/bcache/util.h 		if (cmp((h)->data[i], (h)->data[p]))			\
p                  82 drivers/md/bcache/util.h 		heap_swap(h, i, p);					\
p                  83 drivers/md/bcache/util.h 		i = p;							\
p                 172 drivers/md/bcache/util.h #define fifo_idx(fifo, p)	(((p) - &fifo_front(fifo)) & (fifo)->mask)
p                 364 drivers/md/bcache/util.h bool bch_is_zero(const char *p, size_t n);
p                 545 drivers/md/bcache/util.h static inline uint64_t bch_crc64(const void *p, size_t len)
p                 549 drivers/md/bcache/util.h 	crc = crc64_be(crc, p, len);
p                 554 drivers/md/bcache/util.h 					const void *p,
p                 557 drivers/md/bcache/util.h 	crc = crc64_be(crc, p, len);
p                 138 drivers/md/dm-cache-metadata.h int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);
p                  15 drivers/md/dm-cache-policy-internal.h static inline int policy_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
p                  18 drivers/md/dm-cache-policy-internal.h 	return p->lookup(p, oblock, cblock, data_dir, fast_copy, background_queued);
p                  21 drivers/md/dm-cache-policy-internal.h static inline int policy_lookup_with_work(struct dm_cache_policy *p,
p                  26 drivers/md/dm-cache-policy-internal.h 	if (!p->lookup_with_work) {
p                  28 drivers/md/dm-cache-policy-internal.h 		return p->lookup(p, oblock, cblock, data_dir, fast_copy, NULL);
p                  31 drivers/md/dm-cache-policy-internal.h 	return p->lookup_with_work(p, oblock, cblock, data_dir, fast_copy, work);
p                  34 drivers/md/dm-cache-policy-internal.h static inline int policy_get_background_work(struct dm_cache_policy *p,
p                  37 drivers/md/dm-cache-policy-internal.h 	return p->get_background_work(p, idle, result);
p                  40 drivers/md/dm-cache-policy-internal.h static inline void policy_complete_background_work(struct dm_cache_policy *p,
p                  44 drivers/md/dm-cache-policy-internal.h 	return p->complete_background_work(p, work, success);
p                  47 drivers/md/dm-cache-policy-internal.h static inline void policy_set_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
p                  49 drivers/md/dm-cache-policy-internal.h 	p->set_dirty(p, cblock);
p                  52 drivers/md/dm-cache-policy-internal.h static inline void policy_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
p                  54 drivers/md/dm-cache-policy-internal.h 	p->clear_dirty(p, cblock);
p                  57 drivers/md/dm-cache-policy-internal.h static inline int policy_load_mapping(struct dm_cache_policy *p,
p                  61 drivers/md/dm-cache-policy-internal.h 	return p->load_mapping(p, oblock, cblock, dirty, hint, hint_valid);
p                  64 drivers/md/dm-cache-policy-internal.h static inline int policy_invalidate_mapping(struct dm_cache_policy *p,
p                  67 drivers/md/dm-cache-policy-internal.h 	return p->invalidate_mapping(p, cblock);
p                  70 drivers/md/dm-cache-policy-internal.h static inline uint32_t policy_get_hint(struct dm_cache_policy *p,
p                  73 drivers/md/dm-cache-policy-internal.h 	return p->get_hint ? p->get_hint(p, cblock) : 0;
p                  76 drivers/md/dm-cache-policy-internal.h static inline dm_cblock_t policy_residency(struct dm_cache_policy *p)
p                  78 drivers/md/dm-cache-policy-internal.h 	return p->residency(p);
p                  81 drivers/md/dm-cache-policy-internal.h static inline void policy_tick(struct dm_cache_policy *p, bool can_block)
p                  83 drivers/md/dm-cache-policy-internal.h 	if (p->tick)
p                  84 drivers/md/dm-cache-policy-internal.h 		return p->tick(p, can_block);
p                  87 drivers/md/dm-cache-policy-internal.h static inline int policy_emit_config_values(struct dm_cache_policy *p, char *result,
p                  91 drivers/md/dm-cache-policy-internal.h 	if (p->emit_config_values)
p                  92 drivers/md/dm-cache-policy-internal.h 		return p->emit_config_values(p, result, maxlen, sz_ptr);
p                  99 drivers/md/dm-cache-policy-internal.h static inline int policy_set_config_value(struct dm_cache_policy *p,
p                 102 drivers/md/dm-cache-policy-internal.h 	return p->set_config_value ? p->set_config_value(p, key, value) : -EINVAL;
p                 105 drivers/md/dm-cache-policy-internal.h static inline void policy_allow_migrations(struct dm_cache_policy *p, bool allow)
p                 107 drivers/md/dm-cache-policy-internal.h 	return p->allow_migrations(p, allow);
p                 150 drivers/md/dm-cache-policy-internal.h void dm_cache_policy_destroy(struct dm_cache_policy *p);
p                 155 drivers/md/dm-cache-policy-internal.h const char *dm_cache_policy_get_name(struct dm_cache_policy *p);
p                 157 drivers/md/dm-cache-policy-internal.h const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p);
p                 159 drivers/md/dm-cache-policy-internal.h size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p);
p                1125 drivers/md/dm-cache-policy-smq.c static unsigned percent_to_target(struct smq_policy *mq, unsigned p)
p                1127 drivers/md/dm-cache-policy-smq.c 	return from_cblock(mq->cache_size) * p / 100u;
p                1345 drivers/md/dm-cache-policy-smq.c static struct smq_policy *to_smq_policy(struct dm_cache_policy *p)
p                1347 drivers/md/dm-cache-policy-smq.c 	return container_of(p, struct smq_policy, policy);
p                1350 drivers/md/dm-cache-policy-smq.c static void smq_destroy(struct dm_cache_policy *p)
p                1352 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
p                1400 drivers/md/dm-cache-policy-smq.c static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
p                1406 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
p                1417 drivers/md/dm-cache-policy-smq.c static int smq_lookup_with_work(struct dm_cache_policy *p,
p                1425 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
p                1434 drivers/md/dm-cache-policy-smq.c static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
p                1439 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
p                1504 drivers/md/dm-cache-policy-smq.c static void smq_complete_background_work(struct dm_cache_policy *p,
p                1509 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
p                1530 drivers/md/dm-cache-policy-smq.c static void smq_set_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
p                1533 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
p                1540 drivers/md/dm-cache-policy-smq.c static void smq_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
p                1542 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
p                1555 drivers/md/dm-cache-policy-smq.c static int smq_load_mapping(struct dm_cache_policy *p,
p                1559 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
p                1577 drivers/md/dm-cache-policy-smq.c static int smq_invalidate_mapping(struct dm_cache_policy *p, dm_cblock_t cblock)
p                1579 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
p                1592 drivers/md/dm-cache-policy-smq.c static uint32_t smq_get_hint(struct dm_cache_policy *p, dm_cblock_t cblock)
p                1594 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
p                1603 drivers/md/dm-cache-policy-smq.c static dm_cblock_t smq_residency(struct dm_cache_policy *p)
p                1607 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
p                1616 drivers/md/dm-cache-policy-smq.c static void smq_tick(struct dm_cache_policy *p, bool can_block)
p                1618 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
p                1629 drivers/md/dm-cache-policy-smq.c static void smq_allow_migrations(struct dm_cache_policy *p, bool allow)
p                1631 drivers/md/dm-cache-policy-smq.c 	struct smq_policy *mq = to_smq_policy(p);
p                1640 drivers/md/dm-cache-policy-smq.c static int mq_set_config_value(struct dm_cache_policy *p,
p                1660 drivers/md/dm-cache-policy-smq.c static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
p                 116 drivers/md/dm-cache-policy.c 	struct dm_cache_policy *p = NULL;
p                 125 drivers/md/dm-cache-policy.c 	p = type->create(cache_size, origin_size, cache_block_size);
p                 126 drivers/md/dm-cache-policy.c 	if (!p) {
p                 130 drivers/md/dm-cache-policy.c 	p->private = type;
p                 132 drivers/md/dm-cache-policy.c 	return p;
p                 136 drivers/md/dm-cache-policy.c void dm_cache_policy_destroy(struct dm_cache_policy *p)
p                 138 drivers/md/dm-cache-policy.c 	struct dm_cache_policy_type *t = p->private;
p                 140 drivers/md/dm-cache-policy.c 	p->destroy(p);
p                 145 drivers/md/dm-cache-policy.c const char *dm_cache_policy_get_name(struct dm_cache_policy *p)
p                 147 drivers/md/dm-cache-policy.c 	struct dm_cache_policy_type *t = p->private;
p                 157 drivers/md/dm-cache-policy.c const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p)
p                 159 drivers/md/dm-cache-policy.c 	struct dm_cache_policy_type *t = p->private;
p                 165 drivers/md/dm-cache-policy.c size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p)
p                 167 drivers/md/dm-cache-policy.c 	struct dm_cache_policy_type *t = p->private;
p                  43 drivers/md/dm-cache-policy.h 	void (*destroy)(struct dm_cache_policy *p);
p                  56 drivers/md/dm-cache-policy.h 	int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
p                  68 drivers/md/dm-cache-policy.h 	int (*lookup_with_work)(struct dm_cache_policy *p,
p                  77 drivers/md/dm-cache-policy.h 	int (*get_background_work)(struct dm_cache_policy *p, bool idle,
p                  84 drivers/md/dm-cache-policy.h 	void (*complete_background_work)(struct dm_cache_policy *p,
p                  88 drivers/md/dm-cache-policy.h 	void (*set_dirty)(struct dm_cache_policy *p, dm_cblock_t cblock);
p                  89 drivers/md/dm-cache-policy.h 	void (*clear_dirty)(struct dm_cache_policy *p, dm_cblock_t cblock);
p                  95 drivers/md/dm-cache-policy.h 	int (*load_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock,
p                 103 drivers/md/dm-cache-policy.h 	int (*invalidate_mapping)(struct dm_cache_policy *p, dm_cblock_t cblock);
p                 109 drivers/md/dm-cache-policy.h 	uint32_t (*get_hint)(struct dm_cache_policy *p, dm_cblock_t cblock);
p                 114 drivers/md/dm-cache-policy.h 	dm_cblock_t (*residency)(struct dm_cache_policy *p);
p                 125 drivers/md/dm-cache-policy.h 	void (*tick)(struct dm_cache_policy *p, bool can_block);
p                 130 drivers/md/dm-cache-policy.h 	int (*emit_config_values)(struct dm_cache_policy *p, char *result,
p                 132 drivers/md/dm-cache-policy.h 	int (*set_config_value)(struct dm_cache_policy *p,
p                 135 drivers/md/dm-cache-policy.h 	void (*allow_migrations)(struct dm_cache_policy *p, bool allow);
p                2394 drivers/md/dm-cache-target.c 	struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
p                2398 drivers/md/dm-cache-target.c 	if (IS_ERR(p)) {
p                2400 drivers/md/dm-cache-target.c 		return PTR_ERR(p);
p                2402 drivers/md/dm-cache-target.c 	cache->policy = p;
p                1842 drivers/md/dm-crypt.c static void crypt_copy_authenckey(char *p, const void *key,
p                1848 drivers/md/dm-crypt.c 	rta = (struct rtattr *)p;
p                1853 drivers/md/dm-crypt.c 	p += RTA_SPACE(sizeof(*param));
p                1854 drivers/md/dm-crypt.c 	memcpy(p, key + enckeylen, authkeylen);
p                1855 drivers/md/dm-crypt.c 	p += authkeylen;
p                1856 drivers/md/dm-crypt.c 	memcpy(p, key, enckeylen);
p                  74 drivers/md/dm-exception-store.c 	char *p, *type_name_dup;
p                  89 drivers/md/dm-exception-store.c 		p = strrchr(type_name_dup, '-');
p                  90 drivers/md/dm-exception-store.c 		if (!p)
p                  92 drivers/md/dm-exception-store.c 		p[0] = '\0';
p                 163 drivers/md/dm-io.c 			 struct page **p, unsigned long *len, unsigned *offset);
p                 180 drivers/md/dm-io.c 		  struct page **p, unsigned long *len, unsigned *offset)
p                 185 drivers/md/dm-io.c 	*p = pl->page;
p                 208 drivers/md/dm-io.c static void bio_get_page(struct dpages *dp, struct page **p,
p                 214 drivers/md/dm-io.c 	*p = bvec.bv_page;
p                 247 drivers/md/dm-io.c 		 struct page **p, unsigned long *len, unsigned *offset)
p                 249 drivers/md/dm-io.c 	*p = vmalloc_to_page(dp->context_ptr);
p                 271 drivers/md/dm-io.c static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
p                 274 drivers/md/dm-io.c 	*p = virt_to_page(dp->context_ptr);
p                  67 drivers/md/dm-log.c 	char *p, *type_name_dup;
p                  86 drivers/md/dm-log.c 		p = strrchr(type_name_dup, '-');
p                  87 drivers/md/dm-log.c 		if (!p)
p                  89 drivers/md/dm-log.c 		p[0] = '\0';
p                 840 drivers/md/dm-mpath.c 	struct pgpath *p;
p                 851 drivers/md/dm-mpath.c 	p = alloc_pgpath();
p                 852 drivers/md/dm-mpath.c 	if (!p)
p                 856 drivers/md/dm-mpath.c 			  &p->path.dev);
p                 862 drivers/md/dm-mpath.c 	q = bdev_get_queue(p->path.dev->bdev);
p                 865 drivers/md/dm-mpath.c 		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
p                 866 drivers/md/dm-mpath.c 		r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
p                 869 drivers/md/dm-mpath.c 			dm_put_device(ti, p->path.dev);
p                 874 drivers/md/dm-mpath.c 	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
p                 876 drivers/md/dm-mpath.c 		dm_put_device(ti, p->path.dev);
p                 880 drivers/md/dm-mpath.c 	return p;
p                 882 drivers/md/dm-mpath.c 	free_pgpath(p);
p                 986 drivers/md/dm-mpath.c 		char *p;
p                 991 drivers/md/dm-mpath.c 		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
p                 992 drivers/md/dm-mpath.c 		if (!p) {
p                 997 drivers/md/dm-mpath.c 		j = sprintf(p, "%d", hw_argc - 1);
p                 998 drivers/md/dm-mpath.c 		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
p                 999 drivers/md/dm-mpath.c 			j = sprintf(p, "%s", as->argv[i]);
p                1672 drivers/md/dm-mpath.c 	struct pgpath *p;
p                1747 drivers/md/dm-mpath.c 			list_for_each_entry(p, &pg->pgpaths, list) {
p                1748 drivers/md/dm-mpath.c 				DMEMIT("%s %s %u ", p->path.dev->name,
p                1749 drivers/md/dm-mpath.c 				       p->is_active ? "A" : "F",
p                1750 drivers/md/dm-mpath.c 				       p->fail_count);
p                1753 drivers/md/dm-mpath.c 					      &p->path, type, result + sz,
p                1773 drivers/md/dm-mpath.c 			list_for_each_entry(p, &pg->pgpaths, list) {
p                1774 drivers/md/dm-mpath.c 				DMEMIT("%s ", p->path.dev->name);
p                1777 drivers/md/dm-mpath.c 					      &p->path, type, result + sz,
p                1902 drivers/md/dm-mpath.c 	struct pgpath *p;
p                1906 drivers/md/dm-mpath.c 		list_for_each_entry(p, &pg->pgpaths, list) {
p                1907 drivers/md/dm-mpath.c 			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
p                  60 drivers/md/dm-path-selector.h 	void (*fail_path) (struct path_selector *ps, struct dm_path *p);
p                  65 drivers/md/dm-path-selector.h 	int (*reinstate_path) (struct path_selector *ps, struct dm_path *p);
p                  30 drivers/md/dm-raid1.c #define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
p                  31 drivers/md/dm-raid1.c #define keep_log(p)		((p)->features & DM_RAID1_KEEP_LOG)
p                 157 drivers/md/dm-round-robin.c static void rr_fail_path(struct path_selector *ps, struct dm_path *p)
p                 161 drivers/md/dm-round-robin.c 	struct path_info *pi = p->pscontext;
p                 168 drivers/md/dm-round-robin.c static int rr_reinstate_path(struct path_selector *ps, struct dm_path *p)
p                 172 drivers/md/dm-round-robin.c 	struct path_info *pi = p->pscontext;
p                1764 drivers/md/dm-snap.c 		struct rb_node **p = &s->out_of_order_tree.rb_node;
p                1767 drivers/md/dm-snap.c 		while (*p) {
p                1768 drivers/md/dm-snap.c 			pe2 = rb_entry(*p, struct dm_snap_pending_exception, out_of_order_node);
p                1769 drivers/md/dm-snap.c 			parent = *p;
p                1773 drivers/md/dm-snap.c 				p = &((*p)->rb_left);
p                1775 drivers/md/dm-snap.c 				p = &((*p)->rb_right);
p                1778 drivers/md/dm-snap.c 		rb_link_node(&pe->out_of_order_node, parent, p);
p                 145 drivers/md/dm-stats.c 	void *p;
p                 150 drivers/md/dm-stats.c 	p = kvzalloc_node(alloc_size, GFP_KERNEL | __GFP_NOMEMALLOC, node);
p                 151 drivers/md/dm-stats.c 	if (p)
p                 152 drivers/md/dm-stats.c 		return p;
p                 250 drivers/md/dm-stats.c 	struct dm_stat_percpu *p;
p                 332 drivers/md/dm-stats.c 		p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
p                 333 drivers/md/dm-stats.c 		if (!p) {
p                 337 drivers/md/dm-stats.c 		s->stat_percpu[cpu] = p;
p                 346 drivers/md/dm-stats.c 				p[ni].histogram = hi;
p                 484 drivers/md/dm-stats.c 			  struct dm_stat_percpu *p)
p                 504 drivers/md/dm-stats.c 		p->io_ticks[READ] += difference;
p                 506 drivers/md/dm-stats.c 		p->io_ticks[WRITE] += difference;
p                 508 drivers/md/dm-stats.c 		p->io_ticks_total += difference;
p                 509 drivers/md/dm-stats.c 		p->time_in_queue += (in_flight_read + in_flight_write) * difference;
p                 520 drivers/md/dm-stats.c 	struct dm_stat_percpu *p;
p                 543 drivers/md/dm-stats.c 	p = &s->stat_percpu[smp_processor_id()][entry];
p                 546 drivers/md/dm-stats.c 		dm_stat_round(s, shared, p);
p                 550 drivers/md/dm-stats.c 		dm_stat_round(s, shared, p);
p                 552 drivers/md/dm-stats.c 		p->sectors[idx] += len;
p                 553 drivers/md/dm-stats.c 		p->ios[idx] += 1;
p                 554 drivers/md/dm-stats.c 		p->merges[idx] += stats_aux->merged;
p                 556 drivers/md/dm-stats.c 			p->ticks[idx] += duration_jiffies;
p                 559 drivers/md/dm-stats.c 			p->ticks[idx] += stats_aux->duration_ns;
p                 573 drivers/md/dm-stats.c 			p->histogram[lo]++;
p                 673 drivers/md/dm-stats.c 	struct dm_stat_percpu *p;
p                 676 drivers/md/dm-stats.c 	p = &s->stat_percpu[smp_processor_id()][x];
p                 677 drivers/md/dm-stats.c 	dm_stat_round(s, shared, p);
p                 697 drivers/md/dm-stats.c 		p = &s->stat_percpu[cpu][x];
p                 698 drivers/md/dm-stats.c 		shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
p                 699 drivers/md/dm-stats.c 		shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
p                 700 drivers/md/dm-stats.c 		shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
p                 701 drivers/md/dm-stats.c 		shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
p                 702 drivers/md/dm-stats.c 		shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
p                 703 drivers/md/dm-stats.c 		shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
p                 704 drivers/md/dm-stats.c 		shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
p                 705 drivers/md/dm-stats.c 		shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
p                 706 drivers/md/dm-stats.c 		shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
p                 707 drivers/md/dm-stats.c 		shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
p                 708 drivers/md/dm-stats.c 		shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
p                 709 drivers/md/dm-stats.c 		shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
p                 713 drivers/md/dm-stats.c 				shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
p                 723 drivers/md/dm-stats.c 	struct dm_stat_percpu *p;
p                 730 drivers/md/dm-stats.c 		p = &s->stat_percpu[smp_processor_id()][x];
p                 731 drivers/md/dm-stats.c 		p->sectors[READ] -= shared->tmp.sectors[READ];
p                 732 drivers/md/dm-stats.c 		p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
p                 733 drivers/md/dm-stats.c 		p->ios[READ] -= shared->tmp.ios[READ];
p                 734 drivers/md/dm-stats.c 		p->ios[WRITE] -= shared->tmp.ios[WRITE];
p                 735 drivers/md/dm-stats.c 		p->merges[READ] -= shared->tmp.merges[READ];
p                 736 drivers/md/dm-stats.c 		p->merges[WRITE] -= shared->tmp.merges[WRITE];
p                 737 drivers/md/dm-stats.c 		p->ticks[READ] -= shared->tmp.ticks[READ];
p                 738 drivers/md/dm-stats.c 		p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
p                 739 drivers/md/dm-stats.c 		p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
p                 740 drivers/md/dm-stats.c 		p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
p                 741 drivers/md/dm-stats.c 		p->io_ticks_total -= shared->tmp.io_ticks_total;
p                 742 drivers/md/dm-stats.c 		p->time_in_queue -= shared->tmp.time_in_queue;
p                 748 drivers/md/dm-stats.c 				p = &s->stat_percpu[smp_processor_id()][x];
p                 749 drivers/md/dm-stats.c 				p->histogram[i] -= shared->tmp.histogram[i];
p                 157 drivers/md/dm-switch.c 	sector_t p;
p                 159 drivers/md/dm-switch.c 	p = offset;
p                 161 drivers/md/dm-switch.c 		p >>= sctx->region_size_bits;
p                 163 drivers/md/dm-switch.c 		sector_div(p, sctx->region_size);
p                 165 drivers/md/dm-switch.c 	path_nr = switch_region_table_read(sctx, p);
p                 223 drivers/md/dm-writecache.c 	long p, da;
p                 235 drivers/md/dm-writecache.c 	p = s >> PAGE_SHIFT;
p                 236 drivers/md/dm-writecache.c 	if (!p) {
p                 240 drivers/md/dm-writecache.c 	if (p != s >> PAGE_SHIFT) {
p                 247 drivers/md/dm-writecache.c 	da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn);
p                 258 drivers/md/dm-writecache.c 	if (da != p) {
p                 261 drivers/md/dm-writecache.c 		pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
p                 269 drivers/md/dm-writecache.c 			daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
p                 279 drivers/md/dm-writecache.c 			while (daa-- && i < p) {
p                 283 drivers/md/dm-writecache.c 		} while (i < p);
p                 284 drivers/md/dm-writecache.c 		wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
p                2050 drivers/md/dm.c 	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
p                2074 drivers/md/dm.c 	BUG_ON(!p ||
p                2078 drivers/md/dm.c 	ret = bioset_init_from_src(&md->bs, &p->bs);
p                2081 drivers/md/dm.c 	ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
p                 239 drivers/md/md-multipath.c 	struct multipath_info *p;
p                 249 drivers/md/md-multipath.c 		if ((p=conf->multipaths+path)->rdev == NULL) {
p                 261 drivers/md/md-multipath.c 			rcu_assign_pointer(p->rdev, rdev);
p                 276 drivers/md/md-multipath.c 	struct multipath_info *p = conf->multipaths + number;
p                 280 drivers/md/md-multipath.c 	if (rdev == p->rdev) {
p                 287 drivers/md/md-multipath.c 		p->rdev = NULL;
p                 293 drivers/md/md-multipath.c 				p->rdev = rdev;
p                1994 drivers/md/md.c 		u64 *p = bb->page;
p                2005 drivers/md/md.c 				u64 internal_bb = p[i];
p                3771 drivers/md/md.c 	struct md_personality *p;
p                3774 drivers/md/md.c 	p = mddev->pers;
p                3775 drivers/md/md.c 	if (p)
p                3776 drivers/md/md.c 		ret = sprintf(page, "%s\n", p->name);
p                8147 drivers/md/md.c int register_md_personality(struct md_personality *p)
p                8150 drivers/md/md.c 		 p->name, p->level);
p                8152 drivers/md/md.c 	list_add_tail(&p->list, &pers_list);
p                8158 drivers/md/md.c int unregister_md_personality(struct md_personality *p)
p                8160 drivers/md/md.c 	pr_debug("md: %s personality unregistered\n", p->name);
p                8162 drivers/md/md.c 	list_del_init(&p->list);
p                 677 drivers/md/md.h static inline void safe_put_page(struct page *p)
p                 679 drivers/md/md.h 	if (p) put_page(p);
p                 682 drivers/md/md.h extern int register_md_personality(struct md_personality *p);
p                 683 drivers/md/md.h extern int unregister_md_personality(struct md_personality *p);
p                  51 drivers/md/persistent-data/dm-bitset.c 	struct packer_context *p = context;
p                  52 drivers/md/persistent-data/dm-bitset.c 	unsigned bit, nr = min(64u, p->nr_bits - (index * 64));
p                  57 drivers/md/persistent-data/dm-bitset.c 		r = p->fn(index * 64 + bit, &bv, p->context);
p                  75 drivers/md/persistent-data/dm-bitset.c 	struct packer_context p;
p                  76 drivers/md/persistent-data/dm-bitset.c 	p.fn = fn;
p                  77 drivers/md/persistent-data/dm-bitset.c 	p.nr_bits = size;
p                  78 drivers/md/persistent-data/dm-bitset.c 	p.context = context;
p                  80 drivers/md/persistent-data/dm-bitset.c 	return dm_array_new(&info->array_info, root, dm_div_up(size, 64), pack_bits, &p);
p                 460 drivers/md/persistent-data/dm-block-manager.c 	void *p;
p                 463 drivers/md/persistent-data/dm-block-manager.c 	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
p                 464 drivers/md/persistent-data/dm-block-manager.c 	if (IS_ERR(p))
p                 465 drivers/md/persistent-data/dm-block-manager.c 		return PTR_ERR(p);
p                 493 drivers/md/persistent-data/dm-block-manager.c 	void *p;
p                 499 drivers/md/persistent-data/dm-block-manager.c 	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
p                 500 drivers/md/persistent-data/dm-block-manager.c 	if (IS_ERR(p))
p                 501 drivers/md/persistent-data/dm-block-manager.c 		return PTR_ERR(p);
p                 529 drivers/md/persistent-data/dm-block-manager.c 	void *p;
p                 532 drivers/md/persistent-data/dm-block-manager.c 	p = dm_bufio_get(bm->bufio, b, (struct dm_buffer **) result);
p                 533 drivers/md/persistent-data/dm-block-manager.c 	if (IS_ERR(p))
p                 534 drivers/md/persistent-data/dm-block-manager.c 		return PTR_ERR(p);
p                 535 drivers/md/persistent-data/dm-block-manager.c 	if (unlikely(!p))
p                 563 drivers/md/persistent-data/dm-block-manager.c 	void *p;
p                 568 drivers/md/persistent-data/dm-block-manager.c 	p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
p                 569 drivers/md/persistent-data/dm-block-manager.c 	if (IS_ERR(p))
p                 570 drivers/md/persistent-data/dm-block-manager.c 		return PTR_ERR(p);
p                 572 drivers/md/persistent-data/dm-block-manager.c 	memset(p, 0, dm_bm_block_size(bm));
p                  36 drivers/md/persistent-data/dm-transaction-manager.c static void prefetch_wipe(struct prefetch_set *p)
p                  40 drivers/md/persistent-data/dm-transaction-manager.c 		p->blocks[i] = PREFETCH_SENTINEL;
p                  43 drivers/md/persistent-data/dm-transaction-manager.c static void prefetch_init(struct prefetch_set *p)
p                  45 drivers/md/persistent-data/dm-transaction-manager.c 	mutex_init(&p->lock);
p                  46 drivers/md/persistent-data/dm-transaction-manager.c 	prefetch_wipe(p);
p                  49 drivers/md/persistent-data/dm-transaction-manager.c static void prefetch_add(struct prefetch_set *p, dm_block_t b)
p                  53 drivers/md/persistent-data/dm-transaction-manager.c 	mutex_lock(&p->lock);
p                  54 drivers/md/persistent-data/dm-transaction-manager.c 	if (p->blocks[h] == PREFETCH_SENTINEL)
p                  55 drivers/md/persistent-data/dm-transaction-manager.c 		p->blocks[h] = b;
p                  57 drivers/md/persistent-data/dm-transaction-manager.c 	mutex_unlock(&p->lock);
p                  60 drivers/md/persistent-data/dm-transaction-manager.c static void prefetch_issue(struct prefetch_set *p, struct dm_block_manager *bm)
p                  64 drivers/md/persistent-data/dm-transaction-manager.c 	mutex_lock(&p->lock);
p                  67 drivers/md/persistent-data/dm-transaction-manager.c 		if (p->blocks[i] != PREFETCH_SENTINEL) {
p                  68 drivers/md/persistent-data/dm-transaction-manager.c 			dm_bm_prefetch(bm, p->blocks[i]);
p                  69 drivers/md/persistent-data/dm-transaction-manager.c 			p->blocks[i] = PREFETCH_SENTINEL;
p                  72 drivers/md/persistent-data/dm-transaction-manager.c 	mutex_unlock(&p->lock);
p                1747 drivers/md/raid1.c 	struct raid1_info *p;
p                1771 drivers/md/raid1.c 		p = conf->mirrors + mirror;
p                1772 drivers/md/raid1.c 		if (!p->rdev) {
p                1777 drivers/md/raid1.c 			p->head_position = 0;
p                1785 drivers/md/raid1.c 			rcu_assign_pointer(p->rdev, rdev);
p                1788 drivers/md/raid1.c 		if (test_bit(WantReplacement, &p->rdev->flags) &&
p                1789 drivers/md/raid1.c 		    p[conf->raid_disks].rdev == NULL) {
p                1796 drivers/md/raid1.c 			rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
p                1811 drivers/md/raid1.c 	struct raid1_info *p = conf->mirrors + number;
p                1813 drivers/md/raid1.c 	if (rdev != p->rdev)
p                1814 drivers/md/raid1.c 		p = conf->mirrors + conf->raid_disks + number;
p                1817 drivers/md/raid1.c 	if (rdev == p->rdev) {
p                1832 drivers/md/raid1.c 		p->rdev = NULL;
p                1838 drivers/md/raid1.c 				p->rdev = rdev;
p                1862 drivers/md/raid1.c 			p->rdev = repl;
p                1783 drivers/md/raid10.c 		struct raid10_info *p = &conf->mirrors[mirror];
p                1784 drivers/md/raid10.c 		if (p->recovery_disabled == mddev->recovery_disabled)
p                1786 drivers/md/raid10.c 		if (p->rdev) {
p                1787 drivers/md/raid10.c 			if (!test_bit(WantReplacement, &p->rdev->flags) ||
p                1788 drivers/md/raid10.c 			    p->replacement != NULL)
p                1798 drivers/md/raid10.c 			rcu_assign_pointer(p->replacement, rdev);
p                1806 drivers/md/raid10.c 		p->head_position = 0;
p                1807 drivers/md/raid10.c 		p->recovery_disabled = mddev->recovery_disabled - 1;
p                1812 drivers/md/raid10.c 		rcu_assign_pointer(p->rdev, rdev);
p                1828 drivers/md/raid10.c 	struct raid10_info *p = conf->mirrors + number;
p                1831 drivers/md/raid10.c 	if (rdev == p->rdev)
p                1832 drivers/md/raid10.c 		rdevp = &p->rdev;
p                1833 drivers/md/raid10.c 	else if (rdev == p->replacement)
p                1834 drivers/md/raid10.c 		rdevp = &p->replacement;
p                1847 drivers/md/raid10.c 	    mddev->recovery_disabled != p->recovery_disabled &&
p                1848 drivers/md/raid10.c 	    (!p->replacement || p->replacement == rdev) &&
p                1864 drivers/md/raid10.c 	if (p->replacement) {
p                1866 drivers/md/raid10.c 		p->rdev = p->replacement;
p                1867 drivers/md/raid10.c 		clear_bit(Replacement, &p->replacement->flags);
p                1871 drivers/md/raid10.c 		p->replacement = NULL;
p                2769 drivers/md/raid5-cache.c 			struct page *p = sh->dev[i].orig_page;
p                2775 drivers/md/raid5-cache.c 				put_page(p);
p                 453 drivers/md/raid5.c 	struct page *p;
p                 459 drivers/md/raid5.c 		p = sh->dev[i].page;
p                 460 drivers/md/raid5.c 		if (!p)
p                 463 drivers/md/raid5.c 		put_page(p);
p                2418 drivers/md/raid5.c 				struct page *p = alloc_page(GFP_NOIO);
p                2419 drivers/md/raid5.c 				nsh->dev[i].page = p;
p                2420 drivers/md/raid5.c 				nsh->dev[i].orig_page = p;
p                2421 drivers/md/raid5.c 				if (!p)
p                3932 drivers/md/raid5.c 				struct page *p = alloc_page(GFP_NOIO);
p                3934 drivers/md/raid5.c 				if (p) {
p                3935 drivers/md/raid5.c 					dev->orig_page = p;
p                7601 drivers/md/raid5.c 	struct disk_info *p = conf->disks + number;
p                7619 drivers/md/raid5.c 	if (rdev == p->rdev)
p                7620 drivers/md/raid5.c 		rdevp = &p->rdev;
p                7621 drivers/md/raid5.c 	else if (rdev == p->replacement)
p                7622 drivers/md/raid5.c 		rdevp = &p->replacement;
p                7641 drivers/md/raid5.c 	    (!p->replacement || p->replacement == rdev) &&
p                7660 drivers/md/raid5.c 	if (p->replacement) {
p                7662 drivers/md/raid5.c 		p->rdev = p->replacement;
p                7663 drivers/md/raid5.c 		clear_bit(Replacement, &p->replacement->flags);
p                7667 drivers/md/raid5.c 		p->replacement = NULL;
p                7670 drivers/md/raid5.c 			err = log_modify(conf, p->rdev, true);
p                7685 drivers/md/raid5.c 	struct disk_info *p;
p                7728 drivers/md/raid5.c 		p = conf->disks + disk;
p                7729 drivers/md/raid5.c 		if (p->rdev == NULL) {
p                7734 drivers/md/raid5.c 			rcu_assign_pointer(p->rdev, rdev);
p                7742 drivers/md/raid5.c 		p = conf->disks + disk;
p                7743 drivers/md/raid5.c 		if (test_bit(WantReplacement, &p->rdev->flags) &&
p                7744 drivers/md/raid5.c 		    p->replacement == NULL) {
p                7750 drivers/md/raid5.c 			rcu_assign_pointer(p->replacement, rdev);
p                 212 drivers/media/cec/cec-core.c 	char *p;
p                 217 drivers/media/cec/cec-core.c 	p = buf;
p                 218 drivers/media/cec/cec-core.c 	while (p && *p) {
p                 219 drivers/media/cec/cec-core.c 		p = skip_spaces(p);
p                 220 drivers/media/cec/cec-core.c 		line = strsep(&p, "\n");
p                  78 drivers/media/cec/cec-pin-error-inj.c 	char *p = line;
p                  88 drivers/media/cec/cec-pin-error-inj.c 	p = skip_spaces(p);
p                  89 drivers/media/cec/cec-pin-error-inj.c 	token = strsep(&p, delims);
p                 124 drivers/media/cec/cec-pin-error-inj.c 	if (!p)
p                 127 drivers/media/cec/cec-pin-error-inj.c 	p = skip_spaces(p);
p                 131 drivers/media/cec/cec-pin-error-inj.c 		if (kstrtou32(p, 0, &usecs) || usecs > 10000000)
p                 139 drivers/media/cec/cec-pin-error-inj.c 		if (kstrtou32(p, 0, &usecs) || usecs > 10000000)
p                 174 drivers/media/cec/cec-pin-error-inj.c 	token = strsep(&p, delims);
p                 175 drivers/media/cec/cec-pin-error-inj.c 	if (p) {
p                 176 drivers/media/cec/cec-pin-error-inj.c 		p = skip_spaces(p);
p                 177 drivers/media/cec/cec-pin-error-inj.c 		has_pos = !kstrtou8(p, 0, &pos);
p                 113 drivers/media/common/b2c2/flexcop-i2c.c 	u8 *p;
p                 136 drivers/media/common/b2c2/flexcop-i2c.c 	p = buf;
p                 145 drivers/media/common/b2c2/flexcop-i2c.c 			ret = flexcop_i2c_read4(i2c, r100, p);
p                 147 drivers/media/common/b2c2/flexcop-i2c.c 			ret = flexcop_i2c_write4(i2c->fc, r100, p);
p                 152 drivers/media/common/b2c2/flexcop-i2c.c 		p  += bytes_to_transfer;
p                 843 drivers/media/common/cx2341x.c const char * const *cx2341x_ctrl_get_menu(const struct cx2341x_mpeg_params *p, u32 id)
p                 876 drivers/media/common/cx2341x.c 		return (p->capabilities & CX2341X_CAP_HAS_TS) ?
p                 879 drivers/media/common/cx2341x.c 		return (p->capabilities & CX2341X_CAP_HAS_AC3) ?
p                1000 drivers/media/common/cx2341x.c void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p)
p                1002 drivers/media/common/cx2341x.c 	*p = default_params;
p                1003 drivers/media/common/cx2341x.c 	cx2341x_calc_audio_properties(p);
p                1187 drivers/media/common/cx2341x.c static const char *cx2341x_menu_item(const struct cx2341x_mpeg_params *p, u32 id)
p                1189 drivers/media/common/cx2341x.c 	const char * const *menu = cx2341x_ctrl_get_menu(p, id);
p                1195 drivers/media/common/cx2341x.c 	if (cx2341x_get_ctrl(p, &ctrl))
p                1206 drivers/media/common/cx2341x.c void cx2341x_log_status(const struct cx2341x_mpeg_params *p, const char *prefix)
p                1208 drivers/media/common/cx2341x.c 	int is_mpeg1 = p->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1;
p                1213 drivers/media/common/cx2341x.c 		cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_TYPE));
p                1214 drivers/media/common/cx2341x.c 	if (p->stream_insert_nav_packets)
p                1219 drivers/media/common/cx2341x.c 		cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_VBI_FMT));
p                1224 drivers/media/common/cx2341x.c 		p->width / (is_mpeg1 ? 2 : 1), p->height / (is_mpeg1 ? 2 : 1),
p                1225 drivers/media/common/cx2341x.c 		p->is_50hz ? 25 : 30,
p                1226 drivers/media/common/cx2341x.c 		(p->video_mute) ? " (muted)" : "");
p                1229 drivers/media/common/cx2341x.c 		cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ENCODING),
p                1230 drivers/media/common/cx2341x.c 		cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ASPECT),
p                1231 drivers/media/common/cx2341x.c 		cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_BITRATE_MODE),
p                1232 drivers/media/common/cx2341x.c 		p->video_bitrate);
p                1233 drivers/media/common/cx2341x.c 	if (p->video_bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
p                1234 drivers/media/common/cx2341x.c 		printk(KERN_CONT ", Peak %d", p->video_bitrate_peak);
p                1239 drivers/media/common/cx2341x.c 		p->video_gop_size, p->video_b_frames,
p                1240 drivers/media/common/cx2341x.c 		p->video_gop_closure ? "" : "No ");
p                1241 drivers/media/common/cx2341x.c 	if (p->video_temporal_decimation)
p                1243 drivers/media/common/cx2341x.c 			prefix, p->video_temporal_decimation);
p                1248 drivers/media/common/cx2341x.c 		cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ),
p                1249 drivers/media/common/cx2341x.c 		cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_ENCODING),
p                1250 drivers/media/common/cx2341x.c 		cx2341x_menu_item(p,
p                1251 drivers/media/common/cx2341x.c 			   p->audio_encoding == V4L2_MPEG_AUDIO_ENCODING_AC3
p                1254 drivers/media/common/cx2341x.c 		cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_MODE),
p                1255 drivers/media/common/cx2341x.c 		p->audio_mute ? " (muted)" : "");
p                1256 drivers/media/common/cx2341x.c 	if (p->audio_mode == V4L2_MPEG_AUDIO_MODE_JOINT_STEREO)
p                1257 drivers/media/common/cx2341x.c 		printk(KERN_CONT ", %s", cx2341x_menu_item(p,
p                1260 drivers/media/common/cx2341x.c 		cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_EMPHASIS),
p                1261 drivers/media/common/cx2341x.c 		cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_CRC));
p                1266 drivers/media/common/cx2341x.c 		cx2341x_menu_item(p,
p                1268 drivers/media/common/cx2341x.c 		cx2341x_menu_item(p,
p                1270 drivers/media/common/cx2341x.c 		cx2341x_menu_item(p,
p                1272 drivers/media/common/cx2341x.c 		p->video_spatial_filter);
p                1276 drivers/media/common/cx2341x.c 		cx2341x_menu_item(p,
p                1278 drivers/media/common/cx2341x.c 		p->video_temporal_filter);
p                1282 drivers/media/common/cx2341x.c 		cx2341x_menu_item(p,
p                1284 drivers/media/common/cx2341x.c 		p->video_luma_median_filter_bottom,
p                1285 drivers/media/common/cx2341x.c 		p->video_luma_median_filter_top,
p                1286 drivers/media/common/cx2341x.c 		p->video_chroma_median_filter_bottom,
p                1287 drivers/media/common/cx2341x.c 		p->video_chroma_median_filter_top);
p                 240 drivers/media/common/saa7146/saa7146_core.c 	int i,p;
p                 256 drivers/media/common/saa7146/saa7146_core.c 		for (p = 0; p * 4096 < list->length; p++, ptr++) {
p                 257 drivers/media/common/saa7146/saa7146_core.c 			*ptr = cpu_to_le32(sg_dma_address(list) + p * 4096);
p                 507 drivers/media/common/saa7146/saa7146_core.c 	}, *p;
p                 521 drivers/media/common/saa7146/saa7146_core.c 	for (p = dev_map; p->addr; p++)
p                 522 drivers/media/common/saa7146/saa7146_core.c 		pci_free_consistent(pdev, SAA7146_RPS_MEM, p->addr, p->dma);
p                 214 drivers/media/common/saa7146/saa7146_video.c 		int i,p,m1,m2,m3,o1,o2;
p                 250 drivers/media/common/saa7146/saa7146_video.c 			for (p = 0; p * 4096 < list->length; p++, ptr1++) {
p                 624 drivers/media/common/siano/smscoreapi.c 	cb->p = buffer;
p                1481 drivers/media/common/siano/smscoreapi.c 	struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) ((u8 *) cb->p
p                 119 drivers/media/common/siano/smscoreapi.h 	void *p;
p                  35 drivers/media/common/siano/smsdvb-debugfs.c 			    struct sms_stats *p)
p                  49 drivers/media/common/siano/smsdvb-debugfs.c 		      "is_rf_locked = %d\n", p->is_rf_locked);
p                  51 drivers/media/common/siano/smsdvb-debugfs.c 		      "is_demod_locked = %d\n", p->is_demod_locked);
p                  53 drivers/media/common/siano/smsdvb-debugfs.c 		      "is_external_lna_on = %d\n", p->is_external_lna_on);
p                  55 drivers/media/common/siano/smsdvb-debugfs.c 		      "SNR = %d\n", p->SNR);
p                  57 drivers/media/common/siano/smsdvb-debugfs.c 		      "ber = %d\n", p->ber);
p                  59 drivers/media/common/siano/smsdvb-debugfs.c 		      "FIB_CRC = %d\n", p->FIB_CRC);
p                  61 drivers/media/common/siano/smsdvb-debugfs.c 		      "ts_per = %d\n", p->ts_per);
p                  63 drivers/media/common/siano/smsdvb-debugfs.c 		      "MFER = %d\n", p->MFER);
p                  65 drivers/media/common/siano/smsdvb-debugfs.c 		      "RSSI = %d\n", p->RSSI);
p                  67 drivers/media/common/siano/smsdvb-debugfs.c 		      "in_band_pwr = %d\n", p->in_band_pwr);
p                  69 drivers/media/common/siano/smsdvb-debugfs.c 		      "carrier_offset = %d\n", p->carrier_offset);
p                  71 drivers/media/common/siano/smsdvb-debugfs.c 		      "modem_state = %d\n", p->modem_state);
p                  73 drivers/media/common/siano/smsdvb-debugfs.c 		      "frequency = %d\n", p->frequency);
p                  75 drivers/media/common/siano/smsdvb-debugfs.c 		      "bandwidth = %d\n", p->bandwidth);
p                  77 drivers/media/common/siano/smsdvb-debugfs.c 		      "transmission_mode = %d\n", p->transmission_mode);
p                  79 drivers/media/common/siano/smsdvb-debugfs.c 		      "modem_state = %d\n", p->modem_state);
p                  81 drivers/media/common/siano/smsdvb-debugfs.c 		      "guard_interval = %d\n", p->guard_interval);
p                  83 drivers/media/common/siano/smsdvb-debugfs.c 		      "code_rate = %d\n", p->code_rate);
p                  85 drivers/media/common/siano/smsdvb-debugfs.c 		      "lp_code_rate = %d\n", p->lp_code_rate);
p                  87 drivers/media/common/siano/smsdvb-debugfs.c 		      "hierarchy = %d\n", p->hierarchy);
p                  89 drivers/media/common/siano/smsdvb-debugfs.c 		      "constellation = %d\n", p->constellation);
p                  91 drivers/media/common/siano/smsdvb-debugfs.c 		      "burst_size = %d\n", p->burst_size);
p                  93 drivers/media/common/siano/smsdvb-debugfs.c 		      "burst_duration = %d\n", p->burst_duration);
p                  95 drivers/media/common/siano/smsdvb-debugfs.c 		      "burst_cycle_time = %d\n", p->burst_cycle_time);
p                  98 drivers/media/common/siano/smsdvb-debugfs.c 		      p->calc_burst_cycle_time);
p                 100 drivers/media/common/siano/smsdvb-debugfs.c 		      "num_of_rows = %d\n", p->num_of_rows);
p                 102 drivers/media/common/siano/smsdvb-debugfs.c 		      "num_of_padd_cols = %d\n", p->num_of_padd_cols);
p                 104 drivers/media/common/siano/smsdvb-debugfs.c 		      "num_of_punct_cols = %d\n", p->num_of_punct_cols);
p                 106 drivers/media/common/siano/smsdvb-debugfs.c 		      "error_ts_packets = %d\n", p->error_ts_packets);
p                 108 drivers/media/common/siano/smsdvb-debugfs.c 		      "total_ts_packets = %d\n", p->total_ts_packets);
p                 110 drivers/media/common/siano/smsdvb-debugfs.c 		      "num_of_valid_mpe_tlbs = %d\n", p->num_of_valid_mpe_tlbs);
p                 112 drivers/media/common/siano/smsdvb-debugfs.c 		      "num_of_invalid_mpe_tlbs = %d\n", p->num_of_invalid_mpe_tlbs);
p                 114 drivers/media/common/siano/smsdvb-debugfs.c 		      "num_of_corrected_mpe_tlbs = %d\n", p->num_of_corrected_mpe_tlbs);
p                 116 drivers/media/common/siano/smsdvb-debugfs.c 		      "ber_error_count = %d\n", p->ber_error_count);
p                 118 drivers/media/common/siano/smsdvb-debugfs.c 		      "ber_bit_count = %d\n", p->ber_bit_count);
p                 120 drivers/media/common/siano/smsdvb-debugfs.c 		      "sms_to_host_tx_errors = %d\n", p->sms_to_host_tx_errors);
p                 122 drivers/media/common/siano/smsdvb-debugfs.c 		      "pre_ber = %d\n", p->pre_ber);
p                 124 drivers/media/common/siano/smsdvb-debugfs.c 		      "cell_id = %d\n", p->cell_id);
p                 126 drivers/media/common/siano/smsdvb-debugfs.c 		      "dvbh_srv_ind_hp = %d\n", p->dvbh_srv_ind_hp);
p                 128 drivers/media/common/siano/smsdvb-debugfs.c 		      "dvbh_srv_ind_lp = %d\n", p->dvbh_srv_ind_lp);
p                 130 drivers/media/common/siano/smsdvb-debugfs.c 		      "num_mpe_received = %d\n", p->num_mpe_received);
p                 138 drivers/media/common/siano/smsdvb-debugfs.c 			     struct sms_isdbt_stats *p)
p                 152 drivers/media/common/siano/smsdvb-debugfs.c 		      "statistics_type = %d\t", p->statistics_type);
p                 154 drivers/media/common/siano/smsdvb-debugfs.c 		      "full_size = %d\n", p->full_size);
p                 157 drivers/media/common/siano/smsdvb-debugfs.c 		      "is_rf_locked = %d\t\t", p->is_rf_locked);
p                 159 drivers/media/common/siano/smsdvb-debugfs.c 		      "is_demod_locked = %d\t", p->is_demod_locked);
p                 161 drivers/media/common/siano/smsdvb-debugfs.c 		      "is_external_lna_on = %d\n", p->is_external_lna_on);
p                 163 drivers/media/common/siano/smsdvb-debugfs.c 		      "SNR = %d dB\t\t", p->SNR);
p                 165 drivers/media/common/siano/smsdvb-debugfs.c 		      "RSSI = %d dBm\t\t", p->RSSI);
p                 167 drivers/media/common/siano/smsdvb-debugfs.c 		      "in_band_pwr = %d dBm\n", p->in_band_pwr);
p                 169 drivers/media/common/siano/smsdvb-debugfs.c 		      "carrier_offset = %d\t", p->carrier_offset);
p                 171 drivers/media/common/siano/smsdvb-debugfs.c 		      "bandwidth = %d\t\t", p->bandwidth);
p                 173 drivers/media/common/siano/smsdvb-debugfs.c 		      "frequency = %d Hz\n", p->frequency);
p                 175 drivers/media/common/siano/smsdvb-debugfs.c 		      "transmission_mode = %d\t", p->transmission_mode);
p                 177 drivers/media/common/siano/smsdvb-debugfs.c 		      "modem_state = %d\t\t", p->modem_state);
p                 179 drivers/media/common/siano/smsdvb-debugfs.c 		      "guard_interval = %d\n", p->guard_interval);
p                 181 drivers/media/common/siano/smsdvb-debugfs.c 		      "system_type = %d\t\t", p->system_type);
p                 183 drivers/media/common/siano/smsdvb-debugfs.c 		      "partial_reception = %d\t", p->partial_reception);
p                 185 drivers/media/common/siano/smsdvb-debugfs.c 		      "num_of_layers = %d\n", p->num_of_layers);
p                 187 drivers/media/common/siano/smsdvb-debugfs.c 		      "sms_to_host_tx_errors = %d\n", p->sms_to_host_tx_errors);
p                 190 drivers/media/common/siano/smsdvb-debugfs.c 		if (p->layer_info[i].number_of_segments < 1 ||
p                 191 drivers/media/common/siano/smsdvb-debugfs.c 		    p->layer_info[i].number_of_segments > 13)
p                 196 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].code_rate);
p                 198 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].constellation);
p                 200 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].ber);
p                 202 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].ber_error_count);
p                 204 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].ber_bit_count);
p                 206 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].pre_ber);
p                 208 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].ts_per);
p                 210 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].error_ts_packets);
p                 212 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].total_ts_packets);
p                 214 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].ti_ldepth_i);
p                 217 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].number_of_segments);
p                 219 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].tmcc_errors);
p                 228 drivers/media/common/siano/smsdvb-debugfs.c 				struct sms_isdbt_stats_ex *p)
p                 242 drivers/media/common/siano/smsdvb-debugfs.c 		      "statistics_type = %d\t", p->statistics_type);
p                 244 drivers/media/common/siano/smsdvb-debugfs.c 		      "full_size = %d\n", p->full_size);
p                 247 drivers/media/common/siano/smsdvb-debugfs.c 		      "is_rf_locked = %d\t\t", p->is_rf_locked);
p                 249 drivers/media/common/siano/smsdvb-debugfs.c 		      "is_demod_locked = %d\t", p->is_demod_locked);
p                 251 drivers/media/common/siano/smsdvb-debugfs.c 		      "is_external_lna_on = %d\n", p->is_external_lna_on);
p                 253 drivers/media/common/siano/smsdvb-debugfs.c 		      "SNR = %d dB\t\t", p->SNR);
p                 255 drivers/media/common/siano/smsdvb-debugfs.c 		      "RSSI = %d dBm\t\t", p->RSSI);
p                 257 drivers/media/common/siano/smsdvb-debugfs.c 		      "in_band_pwr = %d dBm\n", p->in_band_pwr);
p                 259 drivers/media/common/siano/smsdvb-debugfs.c 		      "carrier_offset = %d\t", p->carrier_offset);
p                 261 drivers/media/common/siano/smsdvb-debugfs.c 		      "bandwidth = %d\t\t", p->bandwidth);
p                 263 drivers/media/common/siano/smsdvb-debugfs.c 		      "frequency = %d Hz\n", p->frequency);
p                 265 drivers/media/common/siano/smsdvb-debugfs.c 		      "transmission_mode = %d\t", p->transmission_mode);
p                 267 drivers/media/common/siano/smsdvb-debugfs.c 		      "modem_state = %d\t\t", p->modem_state);
p                 269 drivers/media/common/siano/smsdvb-debugfs.c 		      "guard_interval = %d\n", p->guard_interval);
p                 271 drivers/media/common/siano/smsdvb-debugfs.c 		      "system_type = %d\t\t", p->system_type);
p                 273 drivers/media/common/siano/smsdvb-debugfs.c 		      "partial_reception = %d\t", p->partial_reception);
p                 275 drivers/media/common/siano/smsdvb-debugfs.c 		      "num_of_layers = %d\n", p->num_of_layers);
p                 277 drivers/media/common/siano/smsdvb-debugfs.c 		      p->segment_number);
p                 279 drivers/media/common/siano/smsdvb-debugfs.c 		      p->tune_bw);
p                 282 drivers/media/common/siano/smsdvb-debugfs.c 		if (p->layer_info[i].number_of_segments < 1 ||
p                 283 drivers/media/common/siano/smsdvb-debugfs.c 		    p->layer_info[i].number_of_segments > 13)
p                 288 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].code_rate);
p                 290 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].constellation);
p                 292 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].ber);
p                 294 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].ber_error_count);
p                 296 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].ber_bit_count);
p                 298 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].pre_ber);
p                 300 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].ts_per);
p                 302 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].error_ts_packets);
p                 304 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].total_ts_packets);
p                 306 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].ti_ldepth_i);
p                 309 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].number_of_segments);
p                 311 drivers/media/common/siano/smsdvb-debugfs.c 			      p->layer_info[i].tmcc_errors);
p                 213 drivers/media/common/siano/smsdvb-main.c 				    struct sms_tx_stats *p)
p                 218 drivers/media/common/siano/smsdvb-main.c 	c->frequency = p->frequency;
p                 219 drivers/media/common/siano/smsdvb-main.c 	client->fe_status = sms_to_status(p->is_demod_locked, 0);
p                 220 drivers/media/common/siano/smsdvb-main.c 	c->bandwidth_hz = sms_to_bw(p->bandwidth);
p                 221 drivers/media/common/siano/smsdvb-main.c 	c->transmission_mode = sms_to_mode(p->transmission_mode);
p                 222 drivers/media/common/siano/smsdvb-main.c 	c->guard_interval = sms_to_guard_interval(p->guard_interval);
p                 223 drivers/media/common/siano/smsdvb-main.c 	c->code_rate_HP = sms_to_code_rate(p->code_rate);
p                 224 drivers/media/common/siano/smsdvb-main.c 	c->code_rate_LP = sms_to_code_rate(p->lp_code_rate);
p                 225 drivers/media/common/siano/smsdvb-main.c 	c->hierarchy = sms_to_hierarchy(p->hierarchy);
p                 226 drivers/media/common/siano/smsdvb-main.c 	c->modulation = sms_to_modulation(p->constellation);
p                 230 drivers/media/common/siano/smsdvb-main.c 				     struct RECEPTION_STATISTICS_PER_SLICES_S *p)
p                 236 drivers/media/common/siano/smsdvb-main.c 	client->fe_status = sms_to_status(p->is_demod_locked, p->is_rf_locked);
p                 237 drivers/media/common/siano/smsdvb-main.c 	c->modulation = sms_to_modulation(p->constellation);
p                 240 drivers/media/common/siano/smsdvb-main.c 	c->strength.stat[0].uvalue = p->in_band_power * 1000;
p                 243 drivers/media/common/siano/smsdvb-main.c 	c->cnr.stat[0].svalue = p->snr * 1000;
p                 246 drivers/media/common/siano/smsdvb-main.c 	if (!p->is_demod_locked)
p                 253 drivers/media/common/siano/smsdvb-main.c 	c->block_error.stat[0].uvalue += p->ets_packets;
p                 254 drivers/media/common/siano/smsdvb-main.c 	c->block_count.stat[0].uvalue += p->ets_packets + p->ts_packets;
p                 259 drivers/media/common/siano/smsdvb-main.c 	c->post_bit_error.stat[0].uvalue += p->ber_error_count;
p                 260 drivers/media/common/siano/smsdvb-main.c 	c->post_bit_count.stat[0].uvalue += p->ber_bit_count;
p                 263 drivers/media/common/siano/smsdvb-main.c 	tmp = p->ets_packets * 65535ULL;
p                 264 drivers/media/common/siano/smsdvb-main.c 	if (p->ts_packets + p->ets_packets)
p                 265 drivers/media/common/siano/smsdvb-main.c 		do_div(tmp, p->ts_packets + p->ets_packets);
p                 270 drivers/media/common/siano/smsdvb-main.c 				    struct sms_stats *p)
p                 276 drivers/media/common/siano/smsdvb-main.c 		client->prt_dvb_stats(client->debug_data, p);
p                 278 drivers/media/common/siano/smsdvb-main.c 	client->fe_status = sms_to_status(p->is_demod_locked, p->is_rf_locked);
p                 281 drivers/media/common/siano/smsdvb-main.c 	c->frequency = p->frequency;
p                 282 drivers/media/common/siano/smsdvb-main.c 	client->fe_status = sms_to_status(p->is_demod_locked, 0);
p                 283 drivers/media/common/siano/smsdvb-main.c 	c->bandwidth_hz = sms_to_bw(p->bandwidth);
p                 284 drivers/media/common/siano/smsdvb-main.c 	c->transmission_mode = sms_to_mode(p->transmission_mode);
p                 285 drivers/media/common/siano/smsdvb-main.c 	c->guard_interval = sms_to_guard_interval(p->guard_interval);
p                 286 drivers/media/common/siano/smsdvb-main.c 	c->code_rate_HP = sms_to_code_rate(p->code_rate);
p                 287 drivers/media/common/siano/smsdvb-main.c 	c->code_rate_LP = sms_to_code_rate(p->lp_code_rate);
p                 288 drivers/media/common/siano/smsdvb-main.c 	c->hierarchy = sms_to_hierarchy(p->hierarchy);
p                 289 drivers/media/common/siano/smsdvb-main.c 	c->modulation = sms_to_modulation(p->constellation);
p                 292 drivers/media/common/siano/smsdvb-main.c 	c->lna = p->is_external_lna_on ? 1 : 0;
p                 295 drivers/media/common/siano/smsdvb-main.c 	c->cnr.stat[0].svalue = p->SNR * 1000;
p                 298 drivers/media/common/siano/smsdvb-main.c 	c->strength.stat[0].uvalue = p->in_band_pwr * 1000;
p                 301 drivers/media/common/siano/smsdvb-main.c 	if (!p->is_demod_locked)
p                 308 drivers/media/common/siano/smsdvb-main.c 	c->block_error.stat[0].uvalue += p->error_ts_packets;
p                 309 drivers/media/common/siano/smsdvb-main.c 	c->block_count.stat[0].uvalue += p->total_ts_packets;
p                 314 drivers/media/common/siano/smsdvb-main.c 	c->post_bit_error.stat[0].uvalue += p->ber_error_count;
p                 315 drivers/media/common/siano/smsdvb-main.c 	c->post_bit_count.stat[0].uvalue += p->ber_bit_count;
p                 318 drivers/media/common/siano/smsdvb-main.c 	client->legacy_ber = p->ber;
p                 322 drivers/media/common/siano/smsdvb-main.c 				      struct sms_isdbt_stats *p)
p                 330 drivers/media/common/siano/smsdvb-main.c 		client->prt_isdb_stats(client->debug_data, p);
p                 332 drivers/media/common/siano/smsdvb-main.c 	client->fe_status = sms_to_status(p->is_demod_locked, p->is_rf_locked);
p                 339 drivers/media/common/siano/smsdvb-main.c 	if (p->statistics_type == 0) {
p                 340 drivers/media/common/siano/smsdvb-main.c 		c->strength.stat[0].uvalue = ((s32)p->transmission_mode) * 1000;
p                 346 drivers/media/common/siano/smsdvb-main.c 	c->frequency = p->frequency;
p                 347 drivers/media/common/siano/smsdvb-main.c 	c->bandwidth_hz = sms_to_bw(p->bandwidth);
p                 348 drivers/media/common/siano/smsdvb-main.c 	c->transmission_mode = sms_to_mode(p->transmission_mode);
p                 349 drivers/media/common/siano/smsdvb-main.c 	c->guard_interval = sms_to_guard_interval(p->guard_interval);
p                 350 drivers/media/common/siano/smsdvb-main.c 	c->isdbt_partial_reception = p->partial_reception ? 1 : 0;
p                 351 drivers/media/common/siano/smsdvb-main.c 	n_layers = p->num_of_layers;
p                 359 drivers/media/common/siano/smsdvb-main.c 	c->lna = p->is_external_lna_on ? 1 : 0;
p                 362 drivers/media/common/siano/smsdvb-main.c 	c->cnr.stat[0].svalue = p->SNR * 1000;
p                 365 drivers/media/common/siano/smsdvb-main.c 	c->strength.stat[0].uvalue = p->in_band_pwr * 1000;
p                 368 drivers/media/common/siano/smsdvb-main.c 	if (!p->is_demod_locked)
p                 384 drivers/media/common/siano/smsdvb-main.c 		lr = &p->layer_info[i];
p                 418 drivers/media/common/siano/smsdvb-main.c 					 struct sms_isdbt_stats_ex *p)
p                 426 drivers/media/common/siano/smsdvb-main.c 		client->prt_isdb_stats_ex(client->debug_data, p);
p                 429 drivers/media/common/siano/smsdvb-main.c 	c->frequency = p->frequency;
p                 430 drivers/media/common/siano/smsdvb-main.c 	client->fe_status = sms_to_status(p->is_demod_locked, 0);
p                 431 drivers/media/common/siano/smsdvb-main.c 	c->bandwidth_hz = sms_to_bw(p->bandwidth);
p                 432 drivers/media/common/siano/smsdvb-main.c 	c->transmission_mode = sms_to_mode(p->transmission_mode);
p                 433 drivers/media/common/siano/smsdvb-main.c 	c->guard_interval = sms_to_guard_interval(p->guard_interval);
p                 434 drivers/media/common/siano/smsdvb-main.c 	c->isdbt_partial_reception = p->partial_reception ? 1 : 0;
p                 435 drivers/media/common/siano/smsdvb-main.c 	n_layers = p->num_of_layers;
p                 443 drivers/media/common/siano/smsdvb-main.c 	c->lna = p->is_external_lna_on ? 1 : 0;
p                 446 drivers/media/common/siano/smsdvb-main.c 	c->cnr.stat[0].svalue = p->SNR * 1000;
p                 449 drivers/media/common/siano/smsdvb-main.c 	c->strength.stat[0].uvalue = p->in_band_pwr * 1000;
p                 452 drivers/media/common/siano/smsdvb-main.c 	if (!p->is_demod_locked)
p                 472 drivers/media/common/siano/smsdvb-main.c 		lr = &p->layer_info[i];
p                 508 drivers/media/common/siano/smsdvb-main.c 	struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) (((u8 *) cb->p)
p                 510 drivers/media/common/siano/smsdvb-main.c 	void *p = phdr + 1;
p                 522 drivers/media/common/siano/smsdvb-main.c 			dvb_dmx_swfilter(&client->demux, p,
p                 546 drivers/media/common/siano/smsdvb-main.c 		smsdvb_update_tx_params(client, p);
p                 552 drivers/media/common/siano/smsdvb-main.c 		smsdvb_update_per_slices(client, p);
p                 561 drivers/media/common/siano/smsdvb-main.c 			smsdvb_update_isdbt_stats(client, p);
p                 565 drivers/media/common/siano/smsdvb-main.c 			smsdvb_update_dvb_stats(client, p + sizeof(u32));
p                 574 drivers/media/common/siano/smsdvb-main.c 		smsdvb_update_isdbt_stats_ex(client, p + sizeof(u32));
p                  10 drivers/media/common/siano/smsdvb.h 				    struct sms_stats *p);
p                  13 drivers/media/common/siano/smsdvb.h 				     struct sms_isdbt_stats *p);
p                  17 drivers/media/common/siano/smsdvb.h 			 struct sms_isdbt_stats_ex *p);
p                 490 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	unsigned p;
p                 505 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	for (p = 0; p < tpg->planes; p++)
p                 506 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		tpg->bytesperline[p] = (width * tpg->twopixelsize[p]) /
p                 507 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 				       (2 * tpg->hdownsampling[p]);
p                1757 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	unsigned p;
p                1806 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			for (p = 0; p < tpg->planes; p++) {
p                1807 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 				unsigned twopixsize = tpg->twopixelsize[p];
p                1808 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 				unsigned hdiv = tpg->hdownsampling[p];
p                1809 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 				u8 *pos = tpg->lines[pat][p] + tpg_hdiv(tpg, p, x);
p                1811 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 				memcpy(pos, pix[p], twopixsize / hdiv);
p                1822 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			for (p = 1; p < tpg->planes; p++) {
p                1823 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 				unsigned w = tpg_hdiv(tpg, p, tpg->scaled_width * 2);
p                1824 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 				u8 *pos1 = tpg->lines[pat][p];
p                1825 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 				u8 *pos2 = tpg->lines[next_pat][p];
p                1826 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 				u8 *dest = tpg->downsampled_lines[pat][p];
p                1836 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	for (p = 0; p < tpg->planes; p++) {
p                1837 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		unsigned twopixsize = tpg->twopixelsize[p];
p                1838 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		u8 *pos = tpg->contrast_line[p];
p                1841 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			memcpy(pos, pix[p], twopixsize);
p                1846 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	for (p = 0; p < tpg->planes; p++) {
p                1847 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		unsigned twopixsize = tpg->twopixelsize[p];
p                1848 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		u8 *pos = tpg->black_line[p];
p                1851 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			memcpy(pos, pix[p], twopixsize);
p                1857 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		for (p = 0; p < tpg->planes; p++) {
p                1858 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			unsigned twopixsize = tpg->twopixelsize[p];
p                1859 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			u8 *pos = tpg->random_line[p] + x * twopixsize / 2;
p                1861 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			memcpy(pos, pix[p], twopixsize);
p                1875 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	unsigned vdiv = tpg->vdownsampling[p]; \
p                1876 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	unsigned hdiv = tpg->hdownsampling[p]; \
p                1880 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	memcpy(&fg, tpg->textfg[p], sizeof(PIXTYPE));	\
p                1881 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	memcpy(&bg, tpg->textbg[p], sizeof(PIXTYPE));	\
p                1885 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		PIXTYPE *pos = (PIXTYPE *)(basep[p][(line / vdiv) & 1] + \
p                1886 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			       ((y * step + l) / (vdiv * div)) * tpg->bytesperline[p] + \
p                1929 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			unsigned p, unsigned first, unsigned div, unsigned step,
p                1936 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			unsigned p, unsigned first, unsigned div, unsigned step,
p                1943 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			unsigned p, unsigned first, unsigned div, unsigned step,
p                1950 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			unsigned p, unsigned first, unsigned div, unsigned step,
p                1963 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	unsigned p;
p                1985 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	for (p = 0; p < tpg->planes; p++) {
p                1987 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		switch (tpg->twopixelsize[p]) {
p                1989 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			tpg_print_str_2(tpg, basep, p, first, div, step, y, x,
p                1993 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			tpg_print_str_4(tpg, basep, p, first, div, step, y, x,
p                1997 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			tpg_print_str_6(tpg, basep, p, first, div, step, y, x,
p                2001 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			tpg_print_str_8(tpg, basep, p, first, div, step, y, x,
p                2131 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		u8 *basep[TPG_MAX_PLANES][2], unsigned p, u8 *vbuf)
p                2133 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	unsigned stride = tpg->bytesperline[p];
p                2138 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	basep[p][0] = vbuf;
p                2139 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	basep[p][1] = vbuf;
p                2140 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	h /= tpg->vdownsampling[p];
p                2142 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		basep[p][1] += h * stride / 2;
p                2144 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		basep[p][0] += h * stride / 2;
p                2145 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	if (p == 0 && tpg->interleaved)
p                2231 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c static void tpg_fill_params_pattern(const struct tpg_data *tpg, unsigned p,
p                2235 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		tpg_hscale_div(tpg, p, tpg->mv_hor_count % tpg->src_width);
p                2237 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		tpg_hscale_div(tpg, p, (tpg->mv_hor_count + tpg->mv_hor_step) %
p                2245 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 				   unsigned p,
p                2255 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	params->wss_width = tpg_hscale_div(tpg, p, params->wss_width);
p                2263 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		left_pillar_width = tpg_hscale_div(tpg, p, left_pillar_width);
p                2272 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			tpg_hscale_div(tpg, p, right_pillar_start);
p                2284 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 				  unsigned p, unsigned h, u8 *vbuf)
p                2299 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		u8 *wss = tpg->random_line[p] + params->wss_random_offset;
p                2312 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			memcpy(vbuf + left, tpg->contrast_line[p],
p                2318 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 					tpg->contrast_line[p], twopixsize);
p                2322 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 					tpg->contrast_line[p], twopixsize);
p                2327 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		memcpy(vbuf, tpg->black_line[p], params->left_pillar_width);
p                2328 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		memcpy(vbuf + params->right_pillar_start, tpg->black_line[p],
p                2345 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		left = tpg_hscale_div(tpg, p, left);
p                2346 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		width = tpg_hscale_div(tpg, p, width);
p                2347 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		memcpy(vbuf + left, tpg->contrast_line[p], width);
p                2350 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		unsigned offset = tpg_hdiv(tpg, p, tpg->compose.width / 3);
p                2351 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		u8 *p = vbuf + offset;
p                2354 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		p[0] = 0xff;
p                2355 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		p[1] = 0;
p                2356 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		p[2] = 0;
p                2357 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		p[3] = 0x80 | (params->sav_eav_f << 6) |
p                2365 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		unsigned offset = tpg_hdiv(tpg, p, tpg->compose.width * 2 / 3);
p                2366 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		u8 *p = vbuf + offset;
p                2369 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		p[0] = 0xff;
p                2370 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		p[1] = 0;
p                2371 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		p[2] = 0;
p                2372 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		p[3] = 0x80 | (params->sav_eav_f << 6) |
p                2383 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 				   unsigned p, unsigned h, u8 *vbuf)
p                2393 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	unsigned line_offset = tpg_hscale_div(tpg, p, tpg->crop.left);
p                2419 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		linestart_older = tpg->contrast_line[p];
p                2420 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		linestart_newer = tpg->contrast_line[p];
p                2424 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		linestart_older = tpg->black_line[p];
p                2425 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		linestart_newer = tpg->black_line[p];
p                2427 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		linestart_older = tpg->random_line[p] +
p                2429 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		linestart_newer = tpg->random_line[p] +
p                2441 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		linestart_older = tpg->lines[pat_line_old][p] + mv_hor_old;
p                2442 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		linestart_newer = tpg->lines[pat_line_new][p] + mv_hor_new;
p                2444 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		if (tpg->vdownsampling[p] > 1 && frame_line != frame_line_next) {
p                2463 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 				linestart_older = tpg->downsampled_lines[avg_pat][p] + mv_hor_old;
p                2473 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 					linestart_older = tpg->downsampled_lines[avg_pat][p] +
p                2477 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 					linestart_newer = tpg->downsampled_lines[avg_pat][p] +
p                2525 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			   unsigned p, u8 *vbuf)
p                2541 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	params.twopixsize = tpg->twopixelsize[p];
p                2542 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	params.img_width = tpg_hdiv(tpg, p, tpg->compose.width);
p                2543 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	params.stride = tpg->bytesperline[p];
p                2546 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	tpg_fill_params_pattern(tpg, p, &params);
p                2547 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	tpg_fill_params_extras(tpg, p, &params);
p                2549 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 	vbuf += tpg_hdiv(tpg, p, tpg->compose.left);
p                2569 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			p = tpg_g_interleaved_plane(tpg, buf_line);
p                2571 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		if (tpg->vdownsampling[p] > 1) {
p                2598 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 			buf_line /= tpg->vdownsampling[p];
p                2600 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		tpg_fill_plane_pattern(tpg, &params, p, h,
p                2602 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		tpg_fill_plane_extras(tpg, &params, p, h,
p                2608 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std, unsigned p, u8 *vbuf)
p                2614 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c 		tpg_fill_plane_buffer(tpg, std, p, vbuf);
p                 272 drivers/media/common/videobuf2/videobuf2-core.c static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
p                 274 drivers/media/common/videobuf2/videobuf2-core.c 	if (!p->mem_priv)
p                 277 drivers/media/common/videobuf2/videobuf2-core.c 	if (p->dbuf_mapped)
p                 278 drivers/media/common/videobuf2/videobuf2-core.c 		call_void_memop(vb, unmap_dmabuf, p->mem_priv);
p                 280 drivers/media/common/videobuf2/videobuf2-core.c 	call_void_memop(vb, detach_dmabuf, p->mem_priv);
p                 281 drivers/media/common/videobuf2/videobuf2-core.c 	dma_buf_put(p->dbuf);
p                 282 drivers/media/common/videobuf2/videobuf2-core.c 	p->mem_priv = NULL;
p                 283 drivers/media/common/videobuf2/videobuf2-core.c 	p->dbuf = NULL;
p                 284 drivers/media/common/videobuf2/videobuf2-core.c 	p->dbuf_mapped = 0;
p                 311 drivers/media/common/videobuf2/videobuf2-core.c 		struct vb2_plane *p = &prev->planes[prev->num_planes - 1];
p                 313 drivers/media/common/videobuf2/videobuf2-core.c 		off = PAGE_ALIGN(p->m.offset + p->length);
p                 274 drivers/media/common/videobuf2/videobuf2-dvb.c 	struct dvb_frontend *p)
p                 284 drivers/media/common/videobuf2/videobuf2-dvb.c 		if (fe->dvb.frontend == p) {
p                 908 drivers/media/common/videobuf2/videobuf2-v4l2.c 			  struct v4l2_requestbuffers *p)
p                 911 drivers/media/common/videobuf2/videobuf2-v4l2.c 	int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
p                 913 drivers/media/common/videobuf2/videobuf2-v4l2.c 	fill_buf_caps(vdev->queue, &p->capabilities);
p                 918 drivers/media/common/videobuf2/videobuf2-v4l2.c 	res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count);
p                 922 drivers/media/common/videobuf2/videobuf2-v4l2.c 		vdev->queue->owner = p->count ? file->private_data : NULL;
p                 928 drivers/media/common/videobuf2/videobuf2-v4l2.c 			  struct v4l2_create_buffers *p)
p                 931 drivers/media/common/videobuf2/videobuf2-v4l2.c 	int res = vb2_verify_memory_type(vdev->queue, p->memory,
p                 932 drivers/media/common/videobuf2/videobuf2-v4l2.c 			p->format.type);
p                 934 drivers/media/common/videobuf2/videobuf2-v4l2.c 	p->index = vdev->queue->num_buffers;
p                 935 drivers/media/common/videobuf2/videobuf2-v4l2.c 	fill_buf_caps(vdev->queue, &p->capabilities);
p                 940 drivers/media/common/videobuf2/videobuf2-v4l2.c 	if (p->count == 0)
p                 947 drivers/media/common/videobuf2/videobuf2-v4l2.c 	res = vb2_create_bufs(vdev->queue, p);
p                 955 drivers/media/common/videobuf2/videobuf2-v4l2.c 			  struct v4l2_buffer *p)
p                 961 drivers/media/common/videobuf2/videobuf2-v4l2.c 	return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p);
p                 965 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
p                 970 drivers/media/common/videobuf2/videobuf2-v4l2.c 	return vb2_querybuf(vdev->queue, p);
p                 974 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
p                 980 drivers/media/common/videobuf2/videobuf2-v4l2.c 	return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p);
p                 984 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
p                 990 drivers/media/common/videobuf2/videobuf2-v4l2.c 	return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
p                1014 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
p                1020 drivers/media/common/videobuf2/videobuf2-v4l2.c 	return vb2_expbuf(vdev->queue, p);
p                 117 drivers/media/dvb-core/dvb_demux.c 	int p;
p                 124 drivers/media/dvb-core/dvb_demux.c 	p = 188 - count;
p                 140 drivers/media/dvb-core/dvb_demux.c 	return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts,
p                 300 drivers/media/dvb-core/dvb_demux.c 	u8 p, count;
p                 309 drivers/media/dvb-core/dvb_demux.c 	p = 188 - count;	/* payload start */
p                 352 drivers/media/dvb-core/dvb_demux.c 		if (count > 1 && buf[p] < count) {
p                 353 drivers/media/dvb-core/dvb_demux.c 			const u8 *before = &buf[p + 1];
p                 354 drivers/media/dvb-core/dvb_demux.c 			u8 before_len = buf[p];
p                 372 drivers/media/dvb-core/dvb_demux.c 		dvb_dmx_swfilter_section_copy_dump(feed, &buf[p], count);
p                 550 drivers/media/dvb-core/dvb_demux.c 	int p = 0, i, j;
p                 568 drivers/media/dvb-core/dvb_demux.c 		p += j;
p                 572 drivers/media/dvb-core/dvb_demux.c 		p = find_next_packet(buf, p, count, pktsize);
p                 573 drivers/media/dvb-core/dvb_demux.c 		if (p >= count)
p                 575 drivers/media/dvb-core/dvb_demux.c 		if (count - p < pktsize)
p                 578 drivers/media/dvb-core/dvb_demux.c 		q = &buf[p];
p                 586 drivers/media/dvb-core/dvb_demux.c 		p += pktsize;
p                 589 drivers/media/dvb-core/dvb_demux.c 	i = count - p;
p                 591 drivers/media/dvb-core/dvb_demux.c 		memcpy(demux->tsbuf, &buf[p], i);
p                1151 drivers/media/dvb-core/dvb_demux.c 	void *p;
p                1156 drivers/media/dvb-core/dvb_demux.c 	p = memdup_user(buf, count);
p                1157 drivers/media/dvb-core/dvb_demux.c 	if (IS_ERR(p))
p                1158 drivers/media/dvb-core/dvb_demux.c 		return PTR_ERR(p);
p                1160 drivers/media/dvb-core/dvb_demux.c 		kfree(p);
p                1163 drivers/media/dvb-core/dvb_demux.c 	dvb_dmx_swfilter(dvbdemux, p, count);
p                1164 drivers/media/dvb-core/dvb_demux.c 	kfree(p);
p                 180 drivers/media/dvb-core/dvb_frontend.c 				struct dvb_frontend_parameters *p);
p                1168 drivers/media/dvb-core/dvb_frontend.c 				   const struct dvb_frontend_parameters *p)
p                1170 drivers/media/dvb-core/dvb_frontend.c 	c->frequency = p->frequency;
p                1171 drivers/media/dvb-core/dvb_frontend.c 	c->inversion = p->inversion;
p                1176 drivers/media/dvb-core/dvb_frontend.c 		c->symbol_rate = p->u.qpsk.symbol_rate;
p                1177 drivers/media/dvb-core/dvb_frontend.c 		c->fec_inner = p->u.qpsk.fec_inner;
p                1181 drivers/media/dvb-core/dvb_frontend.c 		c->symbol_rate = p->u.qam.symbol_rate;
p                1182 drivers/media/dvb-core/dvb_frontend.c 		c->fec_inner = p->u.qam.fec_inner;
p                1183 drivers/media/dvb-core/dvb_frontend.c 		c->modulation = p->u.qam.modulation;
p                1188 drivers/media/dvb-core/dvb_frontend.c 		switch (p->u.ofdm.bandwidth) {
p                1211 drivers/media/dvb-core/dvb_frontend.c 		c->code_rate_HP = p->u.ofdm.code_rate_HP;
p                1212 drivers/media/dvb-core/dvb_frontend.c 		c->code_rate_LP = p->u.ofdm.code_rate_LP;
p                1213 drivers/media/dvb-core/dvb_frontend.c 		c->modulation = p->u.ofdm.constellation;
p                1214 drivers/media/dvb-core/dvb_frontend.c 		c->transmission_mode = p->u.ofdm.transmission_mode;
p                1215 drivers/media/dvb-core/dvb_frontend.c 		c->guard_interval = p->u.ofdm.guard_interval;
p                1216 drivers/media/dvb-core/dvb_frontend.c 		c->hierarchy = p->u.ofdm.hierarchy_information;
p                1220 drivers/media/dvb-core/dvb_frontend.c 		c->modulation = p->u.vsb.modulation;
p                1244 drivers/media/dvb-core/dvb_frontend.c 				struct dvb_frontend_parameters *p)
p                1246 drivers/media/dvb-core/dvb_frontend.c 	p->frequency = c->frequency;
p                1247 drivers/media/dvb-core/dvb_frontend.c 	p->inversion = c->inversion;
p                1257 drivers/media/dvb-core/dvb_frontend.c 		p->u.qpsk.symbol_rate = c->symbol_rate;
p                1258 drivers/media/dvb-core/dvb_frontend.c 		p->u.qpsk.fec_inner = c->fec_inner;
p                1262 drivers/media/dvb-core/dvb_frontend.c 		p->u.qam.symbol_rate = c->symbol_rate;
p                1263 drivers/media/dvb-core/dvb_frontend.c 		p->u.qam.fec_inner = c->fec_inner;
p                1264 drivers/media/dvb-core/dvb_frontend.c 		p->u.qam.modulation = c->modulation;
p                1270 drivers/media/dvb-core/dvb_frontend.c 			p->u.ofdm.bandwidth = BANDWIDTH_10_MHZ;
p                1273 drivers/media/dvb-core/dvb_frontend.c 			p->u.ofdm.bandwidth = BANDWIDTH_8_MHZ;
p                1276 drivers/media/dvb-core/dvb_frontend.c 			p->u.ofdm.bandwidth = BANDWIDTH_7_MHZ;
p                1279 drivers/media/dvb-core/dvb_frontend.c 			p->u.ofdm.bandwidth = BANDWIDTH_6_MHZ;
p                1282 drivers/media/dvb-core/dvb_frontend.c 			p->u.ofdm.bandwidth = BANDWIDTH_5_MHZ;
p                1285 drivers/media/dvb-core/dvb_frontend.c 			p->u.ofdm.bandwidth = BANDWIDTH_1_712_MHZ;
p                1289 drivers/media/dvb-core/dvb_frontend.c 			p->u.ofdm.bandwidth = BANDWIDTH_AUTO;
p                1291 drivers/media/dvb-core/dvb_frontend.c 		p->u.ofdm.code_rate_HP = c->code_rate_HP;
p                1292 drivers/media/dvb-core/dvb_frontend.c 		p->u.ofdm.code_rate_LP = c->code_rate_LP;
p                1293 drivers/media/dvb-core/dvb_frontend.c 		p->u.ofdm.constellation = c->modulation;
p                1294 drivers/media/dvb-core/dvb_frontend.c 		p->u.ofdm.transmission_mode = c->transmission_mode;
p                1295 drivers/media/dvb-core/dvb_frontend.c 		p->u.ofdm.guard_interval = c->guard_interval;
p                1296 drivers/media/dvb-core/dvb_frontend.c 		p->u.ofdm.hierarchy_information = c->hierarchy;
p                1300 drivers/media/dvb-core/dvb_frontend.c 		p->u.vsb.modulation = c->modulation;
p                 180 drivers/media/dvb-core/dvb_net.c static int ule_test_sndu( struct dvb_net_priv *p )
p                 185 drivers/media/dvb-core/dvb_net.c static int ule_bridged_sndu( struct dvb_net_priv *p )
p                 187 drivers/media/dvb-core/dvb_net.c 	struct ethhdr *hdr = (struct ethhdr*) p->ule_next_hdr;
p                 189 drivers/media/dvb-core/dvb_net.c 		int framelen = p->ule_sndu_len - ((p->ule_next_hdr+sizeof(struct ethhdr)) - p->ule_skb->data);
p                 201 drivers/media/dvb-core/dvb_net.c 	p->ule_bridged = 1;
p                 205 drivers/media/dvb-core/dvb_net.c static int ule_exthdr_padding(struct dvb_net_priv *p)
p                 216 drivers/media/dvb-core/dvb_net.c static int handle_one_ule_extension( struct dvb_net_priv *p )
p                 219 drivers/media/dvb-core/dvb_net.c 	static int (*ule_mandatory_ext_handlers[255])( struct dvb_net_priv *p ) =
p                 223 drivers/media/dvb-core/dvb_net.c 	static int (*ule_optional_ext_handlers[255])( struct dvb_net_priv *p ) =
p                 227 drivers/media/dvb-core/dvb_net.c 	unsigned char hlen = (p->ule_sndu_type & 0x0700) >> 8;
p                 228 drivers/media/dvb-core/dvb_net.c 	unsigned char htype = p->ule_sndu_type & 0x00FF;
p                 234 drivers/media/dvb-core/dvb_net.c 			ext_len = ule_mandatory_ext_handlers[htype]( p );
p                 236 drivers/media/dvb-core/dvb_net.c 				p->ule_next_hdr += ext_len;
p                 237 drivers/media/dvb-core/dvb_net.c 				if (!p->ule_bridged) {
p                 238 drivers/media/dvb-core/dvb_net.c 					p->ule_sndu_type = ntohs(*(__be16 *)p->ule_next_hdr);
p                 239 drivers/media/dvb-core/dvb_net.c 					p->ule_next_hdr += 2;
p                 241 drivers/media/dvb-core/dvb_net.c 					p->ule_sndu_type = ntohs(*(__be16 *)(p->ule_next_hdr + ((p->ule_dbit ? 2 : 3) * ETH_ALEN)));
p                 253 drivers/media/dvb-core/dvb_net.c 			(void)ule_optional_ext_handlers[htype]( p );
p                 254 drivers/media/dvb-core/dvb_net.c 		p->ule_next_hdr += ext_len;
p                 255 drivers/media/dvb-core/dvb_net.c 		p->ule_sndu_type = ntohs( *(__be16 *)(p->ule_next_hdr-2) );
p                 265 drivers/media/dvb-core/dvb_net.c static int handle_ule_extensions( struct dvb_net_priv *p )
p                 269 drivers/media/dvb-core/dvb_net.c 	p->ule_next_hdr = p->ule_skb->data;
p                 271 drivers/media/dvb-core/dvb_net.c 		l = handle_one_ule_extension( p );
p                 276 drivers/media/dvb-core/dvb_net.c 			 p->ule_next_hdr, (int)p->ule_sndu_type,
p                 279 drivers/media/dvb-core/dvb_net.c 	} while (p->ule_sndu_type < ETH_P_802_3_MIN);
p                 286 drivers/media/dvb-core/dvb_net.c static inline void reset_ule( struct dvb_net_priv *p )
p                 288 drivers/media/dvb-core/dvb_net.c 	p->ule_skb = NULL;
p                 289 drivers/media/dvb-core/dvb_net.c 	p->ule_next_hdr = NULL;
p                 290 drivers/media/dvb-core/dvb_net.c 	p->ule_sndu_len = 0;
p                 291 drivers/media/dvb-core/dvb_net.c 	p->ule_sndu_type = 0;
p                 292 drivers/media/dvb-core/dvb_net.c 	p->ule_sndu_type_1 = 0;
p                 293 drivers/media/dvb-core/dvb_net.c 	p->ule_sndu_remain = 0;
p                 294 drivers/media/dvb-core/dvb_net.c 	p->ule_dbit = 0xFF;
p                 295 drivers/media/dvb-core/dvb_net.c 	p->ule_bridged = 0;
p                1269 drivers/media/dvb-core/dvb_net.c static int dvb_net_set_mac (struct net_device *dev, void *p)
p                1272 drivers/media/dvb-core/dvb_net.c 	struct sockaddr *addr=p;
p                 271 drivers/media/dvb-frontends/ascot2e.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 274 drivers/media/dvb-frontends/ascot2e.c 	if (p->delivery_system == SYS_DVBT) {
p                 275 drivers/media/dvb-frontends/ascot2e.c 		if (p->bandwidth_hz <= 5000000)
p                 277 drivers/media/dvb-frontends/ascot2e.c 		else if (p->bandwidth_hz <= 6000000)
p                 279 drivers/media/dvb-frontends/ascot2e.c 		else if (p->bandwidth_hz <= 7000000)
p                 281 drivers/media/dvb-frontends/ascot2e.c 		else if (p->bandwidth_hz <= 8000000)
p                 285 drivers/media/dvb-frontends/ascot2e.c 			p->bandwidth_hz = 8000000;
p                 287 drivers/media/dvb-frontends/ascot2e.c 	} else if (p->delivery_system == SYS_DVBT2) {
p                 288 drivers/media/dvb-frontends/ascot2e.c 		if (p->bandwidth_hz <= 5000000)
p                 290 drivers/media/dvb-frontends/ascot2e.c 		else if (p->bandwidth_hz <= 6000000)
p                 292 drivers/media/dvb-frontends/ascot2e.c 		else if (p->bandwidth_hz <= 7000000)
p                 294 drivers/media/dvb-frontends/ascot2e.c 		else if (p->bandwidth_hz <= 8000000)
p                 298 drivers/media/dvb-frontends/ascot2e.c 			p->bandwidth_hz = 8000000;
p                 300 drivers/media/dvb-frontends/ascot2e.c 	} else if (p->delivery_system == SYS_DVBC_ANNEX_A) {
p                 301 drivers/media/dvb-frontends/ascot2e.c 		if (p->bandwidth_hz <= 6000000)
p                 303 drivers/media/dvb-frontends/ascot2e.c 		else if (p->bandwidth_hz <= 8000000)
p                 308 drivers/media/dvb-frontends/ascot2e.c 		__func__, (int)system, p->delivery_system, p->bandwidth_hz);
p                 317 drivers/media/dvb-frontends/ascot2e.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 321 drivers/media/dvb-frontends/ascot2e.c 		__func__, p->frequency / 1000);
p                 331 drivers/media/dvb-frontends/ascot2e.c 	frequency = roundup(p->frequency / 1000, 25);
p                  57 drivers/media/dvb-frontends/bsbe1.h 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                  64 drivers/media/dvb-frontends/bsbe1.h 	if ((p->frequency < 950000) || (p->frequency > 2150000))
p                  67 drivers/media/dvb-frontends/bsbe1.h 	div = p->frequency / 1000;
p                  89 drivers/media/dvb-frontends/bsru6.h 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                  95 drivers/media/dvb-frontends/bsru6.h 	if ((p->frequency < 950000) || (p->frequency > 2150000))
p                  98 drivers/media/dvb-frontends/bsru6.h 	div = (p->frequency + (125 - 1)) / 125;	/* round correctly */
p                 104 drivers/media/dvb-frontends/bsru6.h 	if (p->frequency > 1530000)
p                 113 drivers/media/dvb-frontends/cx22700.c 			   struct dtv_frontend_properties *p)
p                 121 drivers/media/dvb-frontends/cx22700.c 	if (p->code_rate_HP < FEC_1_2 || p->code_rate_HP > FEC_7_8)
p                 124 drivers/media/dvb-frontends/cx22700.c 	if (p->code_rate_LP < FEC_1_2 || p->code_rate_LP > FEC_7_8)
p                 127 drivers/media/dvb-frontends/cx22700.c 	if (p->code_rate_HP == FEC_4_5 || p->code_rate_LP == FEC_4_5)
p                 130 drivers/media/dvb-frontends/cx22700.c 	if ((int)p->guard_interval < GUARD_INTERVAL_1_32 ||
p                 131 drivers/media/dvb-frontends/cx22700.c 	    p->guard_interval > GUARD_INTERVAL_1_4)
p                 134 drivers/media/dvb-frontends/cx22700.c 	if (p->transmission_mode != TRANSMISSION_MODE_2K &&
p                 135 drivers/media/dvb-frontends/cx22700.c 	    p->transmission_mode != TRANSMISSION_MODE_8K)
p                 138 drivers/media/dvb-frontends/cx22700.c 	if (p->modulation != QPSK &&
p                 139 drivers/media/dvb-frontends/cx22700.c 	    p->modulation != QAM_16 &&
p                 140 drivers/media/dvb-frontends/cx22700.c 	    p->modulation != QAM_64)
p                 143 drivers/media/dvb-frontends/cx22700.c 	if ((int)p->hierarchy < HIERARCHY_NONE ||
p                 144 drivers/media/dvb-frontends/cx22700.c 	    p->hierarchy > HIERARCHY_4)
p                 147 drivers/media/dvb-frontends/cx22700.c 	if (p->bandwidth_hz > 8000000 || p->bandwidth_hz < 6000000)
p                 150 drivers/media/dvb-frontends/cx22700.c 	if (p->bandwidth_hz == 7000000)
p                 155 drivers/media/dvb-frontends/cx22700.c 	val = qam_tab[p->modulation - QPSK];
p                 156 drivers/media/dvb-frontends/cx22700.c 	val |= p->hierarchy - HIERARCHY_NONE;
p                 160 drivers/media/dvb-frontends/cx22700.c 	if (p->code_rate_HP - FEC_1_2 >= sizeof(fec_tab) ||
p                 161 drivers/media/dvb-frontends/cx22700.c 	    p->code_rate_LP - FEC_1_2 >= sizeof(fec_tab))
p                 163 drivers/media/dvb-frontends/cx22700.c 	val = fec_tab[p->code_rate_HP - FEC_1_2] << 3;
p                 164 drivers/media/dvb-frontends/cx22700.c 	val |= fec_tab[p->code_rate_LP - FEC_1_2];
p                 168 drivers/media/dvb-frontends/cx22700.c 	val = (p->guard_interval - GUARD_INTERVAL_1_32) << 2;
p                 169 drivers/media/dvb-frontends/cx22700.c 	val |= p->transmission_mode - TRANSMISSION_MODE_2K;
p                 180 drivers/media/dvb-frontends/cx22700.c 			   struct dtv_frontend_properties *p)
p                 196 drivers/media/dvb-frontends/cx22700.c 		p->hierarchy = HIERARCHY_AUTO;
p                 198 drivers/media/dvb-frontends/cx22700.c 		p->hierarchy = HIERARCHY_NONE + (val & 0x7);
p                 201 drivers/media/dvb-frontends/cx22700.c 		p->modulation = QAM_AUTO;
p                 203 drivers/media/dvb-frontends/cx22700.c 		p->modulation = qam_tab[(val >> 3) & 0x3];
p                 208 drivers/media/dvb-frontends/cx22700.c 		p->code_rate_HP = FEC_AUTO;
p                 210 drivers/media/dvb-frontends/cx22700.c 		p->code_rate_HP = fec_tab[(val >> 3) & 0x07];
p                 213 drivers/media/dvb-frontends/cx22700.c 		p->code_rate_LP = FEC_AUTO;
p                 215 drivers/media/dvb-frontends/cx22700.c 		p->code_rate_LP = fec_tab[val & 0x07];
p                 219 drivers/media/dvb-frontends/cx22700.c 	p->guard_interval = GUARD_INTERVAL_1_32 + ((val >> 6) & 0x3);
p                 220 drivers/media/dvb-frontends/cx22700.c 	p->transmission_mode = TRANSMISSION_MODE_2K + ((val >> 5) & 0x1);
p                 137 drivers/media/dvb-frontends/cx22702.c 			   struct dtv_frontend_properties *p)
p                 148 drivers/media/dvb-frontends/cx22702.c 		p->modulation = QPSK;
p                 151 drivers/media/dvb-frontends/cx22702.c 		p->modulation = QAM_16;
p                 154 drivers/media/dvb-frontends/cx22702.c 		p->modulation = QAM_64;
p                 159 drivers/media/dvb-frontends/cx22702.c 		p->hierarchy = HIERARCHY_NONE;
p                 162 drivers/media/dvb-frontends/cx22702.c 		p->hierarchy = HIERARCHY_1;
p                 165 drivers/media/dvb-frontends/cx22702.c 		p->hierarchy = HIERARCHY_2;
p                 168 drivers/media/dvb-frontends/cx22702.c 		p->hierarchy = HIERARCHY_4;
p                 176 drivers/media/dvb-frontends/cx22702.c 		p->code_rate_HP = FEC_1_2;
p                 179 drivers/media/dvb-frontends/cx22702.c 		p->code_rate_HP = FEC_2_3;
p                 182 drivers/media/dvb-frontends/cx22702.c 		p->code_rate_HP = FEC_3_4;
p                 185 drivers/media/dvb-frontends/cx22702.c 		p->code_rate_HP = FEC_5_6;
p                 188 drivers/media/dvb-frontends/cx22702.c 		p->code_rate_HP = FEC_7_8;
p                 193 drivers/media/dvb-frontends/cx22702.c 		p->code_rate_LP = FEC_1_2;
p                 196 drivers/media/dvb-frontends/cx22702.c 		p->code_rate_LP = FEC_2_3;
p                 199 drivers/media/dvb-frontends/cx22702.c 		p->code_rate_LP = FEC_3_4;
p                 202 drivers/media/dvb-frontends/cx22702.c 		p->code_rate_LP = FEC_5_6;
p                 205 drivers/media/dvb-frontends/cx22702.c 		p->code_rate_LP = FEC_7_8;
p                 212 drivers/media/dvb-frontends/cx22702.c 		p->guard_interval = GUARD_INTERVAL_1_32;
p                 215 drivers/media/dvb-frontends/cx22702.c 		p->guard_interval = GUARD_INTERVAL_1_16;
p                 218 drivers/media/dvb-frontends/cx22702.c 		p->guard_interval = GUARD_INTERVAL_1_8;
p                 221 drivers/media/dvb-frontends/cx22702.c 		p->guard_interval = GUARD_INTERVAL_1_4;
p                 226 drivers/media/dvb-frontends/cx22702.c 		p->transmission_mode = TRANSMISSION_MODE_2K;
p                 229 drivers/media/dvb-frontends/cx22702.c 		p->transmission_mode = TRANSMISSION_MODE_8K;
p                 253 drivers/media/dvb-frontends/cx22702.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 264 drivers/media/dvb-frontends/cx22702.c 	cx22702_set_inversion(state, p->inversion);
p                 268 drivers/media/dvb-frontends/cx22702.c 	switch (p->bandwidth_hz) {
p                 283 drivers/media/dvb-frontends/cx22702.c 	p->code_rate_LP = FEC_AUTO; /* temp hack as manual not working */
p                 286 drivers/media/dvb-frontends/cx22702.c 	if ((p->hierarchy == HIERARCHY_AUTO) ||
p                 287 drivers/media/dvb-frontends/cx22702.c 	   (p->modulation == QAM_AUTO) ||
p                 288 drivers/media/dvb-frontends/cx22702.c 	   (p->code_rate_HP == FEC_AUTO) ||
p                 289 drivers/media/dvb-frontends/cx22702.c 	   (p->code_rate_LP == FEC_AUTO) ||
p                 290 drivers/media/dvb-frontends/cx22702.c 	   (p->guard_interval == GUARD_INTERVAL_AUTO) ||
p                 291 drivers/media/dvb-frontends/cx22702.c 	   (p->transmission_mode == TRANSMISSION_MODE_AUTO)) {
p                 307 drivers/media/dvb-frontends/cx22702.c 	switch (p->modulation) {		/* mask 0x18 */
p                 321 drivers/media/dvb-frontends/cx22702.c 	switch (p->hierarchy) {	/* mask 0x07 */
p                 339 drivers/media/dvb-frontends/cx22702.c 	switch (p->code_rate_HP) {		/* mask 0x38 */
p                 360 drivers/media/dvb-frontends/cx22702.c 	switch (p->code_rate_LP) {		/* mask 0x07 */
p                 382 drivers/media/dvb-frontends/cx22702.c 	switch (p->guard_interval) {		/* mask 0x0c */
p                 399 drivers/media/dvb-frontends/cx22702.c 	switch (p->transmission_mode) {		/* mask 0x03 */
p                 525 drivers/media/dvb-frontends/cx24110.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 532 drivers/media/dvb-frontends/cx24110.c 	cx24110_set_inversion(state, p->inversion);
p                 533 drivers/media/dvb-frontends/cx24110.c 	cx24110_set_fec(state, p->fec_inner);
p                 534 drivers/media/dvb-frontends/cx24110.c 	cx24110_set_symbolrate(state, p->symbol_rate);
p                 541 drivers/media/dvb-frontends/cx24110.c 				struct dtv_frontend_properties *p)
p                 560 drivers/media/dvb-frontends/cx24110.c 	p->frequency += afc;
p                 561 drivers/media/dvb-frontends/cx24110.c 	p->inversion = (cx24110_readreg (state, 0x22) & 0x10) ?
p                 563 drivers/media/dvb-frontends/cx24110.c 	p->fec_inner = cx24110_get_fec(state);
p                 504 drivers/media/dvb-frontends/cx24123.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 524 drivers/media/dvb-frontends/cx24123.c 		if ((agcv->symbolrate_low <= p->symbol_rate) &&
p                 525 drivers/media/dvb-frontends/cx24123.c 		    (agcv->symbolrate_high >= p->symbol_rate)) {
p                 536 drivers/media/dvb-frontends/cx24123.c 			if ((bsv->freq_low <= p->frequency) &&
p                 537 drivers/media/dvb-frontends/cx24123.c 				(bsv->freq_high >= p->frequency))
p                 547 drivers/media/dvb-frontends/cx24123.c 	if (p->frequency < (cx24123_bandselect_vals[band].freq_low +
p                 556 drivers/media/dvb-frontends/cx24123.c 	ndiv = (((p->frequency * vco_div * 10) /
p                 558 drivers/media/dvb-frontends/cx24123.c 	adiv = (((p->frequency * vco_div * 10) /
p                 636 drivers/media/dvb-frontends/cx24123.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 640 drivers/media/dvb-frontends/cx24123.c 	dprintk("frequency=%i\n", p->frequency);
p                 903 drivers/media/dvb-frontends/cx24123.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 910 drivers/media/dvb-frontends/cx24123.c 	state->currentfreq = p->frequency;
p                 911 drivers/media/dvb-frontends/cx24123.c 	state->currentsymbolrate = p->symbol_rate;
p                 913 drivers/media/dvb-frontends/cx24123.c 	cx24123_set_inversion(state, p->inversion);
p                 914 drivers/media/dvb-frontends/cx24123.c 	cx24123_set_fec(state, p->fec_inner);
p                 915 drivers/media/dvb-frontends/cx24123.c 	cx24123_set_symbolrate(state, p->symbol_rate);
p                 936 drivers/media/dvb-frontends/cx24123.c 				struct dtv_frontend_properties *p)
p                 942 drivers/media/dvb-frontends/cx24123.c 	if (cx24123_get_inversion(state, &p->inversion) != 0) {
p                 946 drivers/media/dvb-frontends/cx24123.c 	if (cx24123_get_fec(state, &p->fec_inner) != 0) {
p                 950 drivers/media/dvb-frontends/cx24123.c 	p->frequency = state->currentfreq;
p                 951 drivers/media/dvb-frontends/cx24123.c 	p->symbol_rate = state->currentsymbolrate;
p                 174 drivers/media/dvb-frontends/cxd2820r_core.c 				 struct dtv_frontend_properties *p)
p                 188 drivers/media/dvb-frontends/cxd2820r_core.c 		ret = cxd2820r_get_frontend_t(fe, p);
p                 191 drivers/media/dvb-frontends/cxd2820r_core.c 		ret = cxd2820r_get_frontend_t2(fe, p);
p                 194 drivers/media/dvb-frontends/cxd2820r_core.c 		ret = cxd2820r_get_frontend_c(fe, p);
p                  81 drivers/media/dvb-frontends/cxd2820r_priv.h 			    struct dtv_frontend_properties *p);
p                  97 drivers/media/dvb-frontends/cxd2820r_priv.h 			    struct dtv_frontend_properties *p);
p                 113 drivers/media/dvb-frontends/cxd2820r_priv.h 			     struct dtv_frontend_properties *p);
p                 487 drivers/media/dvb-frontends/cxd2841er.c 				   struct dtv_frontend_properties *p)
p                 502 drivers/media/dvb-frontends/cxd2841er.c 				priv, p->symbol_rate / 1000);
p                 507 drivers/media/dvb-frontends/cxd2841er.c 					priv, p->bandwidth_hz);
p                 510 drivers/media/dvb-frontends/cxd2841er.c 					priv, p->bandwidth_hz);
p                 513 drivers/media/dvb-frontends/cxd2841er.c 					priv, p->bandwidth_hz);
p                 519 drivers/media/dvb-frontends/cxd2841er.c 					priv, p->bandwidth_hz);
p                1940 drivers/media/dvb-frontends/cxd2841er.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1945 drivers/media/dvb-frontends/cxd2841er.c 	switch (p->delivery_system) {
p                1967 drivers/media/dvb-frontends/cxd2841er.c 		p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                1968 drivers/media/dvb-frontends/cxd2841er.c 		p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                1973 drivers/media/dvb-frontends/cxd2841er.c 		p->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
p                1974 drivers/media/dvb-frontends/cxd2841er.c 		p->post_bit_error.stat[0].uvalue += bit_error;
p                1975 drivers/media/dvb-frontends/cxd2841er.c 		p->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
p                1976 drivers/media/dvb-frontends/cxd2841er.c 		p->post_bit_count.stat[0].uvalue += bit_count;
p                1978 drivers/media/dvb-frontends/cxd2841er.c 		p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                1979 drivers/media/dvb-frontends/cxd2841er.c 		p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                1985 drivers/media/dvb-frontends/cxd2841er.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1990 drivers/media/dvb-frontends/cxd2841er.c 	switch (p->delivery_system) {
p                1994 drivers/media/dvb-frontends/cxd2841er.c 							p->delivery_system);
p                1995 drivers/media/dvb-frontends/cxd2841er.c 		p->strength.stat[0].scale = FE_SCALE_DECIBEL;
p                1997 drivers/media/dvb-frontends/cxd2841er.c 		p->strength.stat[0].uvalue = strength * 366 / 100 - 89520;
p                2003 drivers/media/dvb-frontends/cxd2841er.c 							p->delivery_system);
p                2004 drivers/media/dvb-frontends/cxd2841er.c 		p->strength.stat[0].scale = FE_SCALE_DECIBEL;
p                2010 drivers/media/dvb-frontends/cxd2841er.c 		p->strength.stat[0].uvalue = strength * 4045 / 1000 - 85224;
p                2013 drivers/media/dvb-frontends/cxd2841er.c 		strength = cxd2841er_read_agc_gain_i(priv, p->delivery_system);
p                2014 drivers/media/dvb-frontends/cxd2841er.c 		p->strength.stat[0].scale = FE_SCALE_DECIBEL;
p                2019 drivers/media/dvb-frontends/cxd2841er.c 		p->strength.stat[0].uvalue = strength * 3775 / 1000 - 90185;
p                2024 drivers/media/dvb-frontends/cxd2841er.c 		p->strength.stat[0].scale = FE_SCALE_RELATIVE;
p                2025 drivers/media/dvb-frontends/cxd2841er.c 		p->strength.stat[0].uvalue = strength;
p                2028 drivers/media/dvb-frontends/cxd2841er.c 		p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                2037 drivers/media/dvb-frontends/cxd2841er.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                2041 drivers/media/dvb-frontends/cxd2841er.c 	switch (p->delivery_system) {
p                2058 drivers/media/dvb-frontends/cxd2841er.c 		ret = cxd2841er_dvbs_read_snr(priv, p->delivery_system, &tmp);
p                2062 drivers/media/dvb-frontends/cxd2841er.c 			__func__, p->delivery_system);
p                2063 drivers/media/dvb-frontends/cxd2841er.c 		p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                2071 drivers/media/dvb-frontends/cxd2841er.c 		p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
p                2072 drivers/media/dvb-frontends/cxd2841er.c 		p->cnr.stat[0].svalue = tmp;
p                2074 drivers/media/dvb-frontends/cxd2841er.c 		p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                2080 drivers/media/dvb-frontends/cxd2841er.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                2085 drivers/media/dvb-frontends/cxd2841er.c 	switch (p->delivery_system) {
p                2101 drivers/media/dvb-frontends/cxd2841er.c 		p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                2106 drivers/media/dvb-frontends/cxd2841er.c 	p->block_error.stat[0].scale = FE_SCALE_COUNTER;
p                2107 drivers/media/dvb-frontends/cxd2841er.c 	p->block_error.stat[0].uvalue = ucblocks;
p                3265 drivers/media/dvb-frontends/cxd2841er.c 				  struct dtv_frontend_properties *p)
p                3279 drivers/media/dvb-frontends/cxd2841er.c 		p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3287 drivers/media/dvb-frontends/cxd2841er.c 		p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3288 drivers/media/dvb-frontends/cxd2841er.c 		p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3289 drivers/media/dvb-frontends/cxd2841er.c 		p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3290 drivers/media/dvb-frontends/cxd2841er.c 		p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3300 drivers/media/dvb-frontends/cxd2841er.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                3301 drivers/media/dvb-frontends/cxd2841er.c 	u32 symbol_rate = p->symbol_rate/1000;
p                3305 drivers/media/dvb-frontends/cxd2841er.c 		(p->delivery_system == SYS_DVBS ? "DVB-S" : "DVB-S2"),
p                3306 drivers/media/dvb-frontends/cxd2841er.c 		 p->frequency, symbol_rate, priv->xtal);
p                3314 drivers/media/dvb-frontends/cxd2841er.c 			priv, p->delivery_system, symbol_rate);
p                3317 drivers/media/dvb-frontends/cxd2841er.c 		ret = cxd2841er_retune_active(priv, p);
p                3357 drivers/media/dvb-frontends/cxd2841er.c 	p->strength.stat[0].scale = FE_SCALE_RELATIVE;
p                3358 drivers/media/dvb-frontends/cxd2841er.c 	p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3359 drivers/media/dvb-frontends/cxd2841er.c 	p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3360 drivers/media/dvb-frontends/cxd2841er.c 	p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3361 drivers/media/dvb-frontends/cxd2841er.c 	p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3371 drivers/media/dvb-frontends/cxd2841er.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                3374 drivers/media/dvb-frontends/cxd2841er.c 		 __func__, p->delivery_system, p->bandwidth_hz);
p                3381 drivers/media/dvb-frontends/cxd2841er.c 	    priv->system != p->delivery_system) {
p                3383 drivers/media/dvb-frontends/cxd2841er.c 			 __func__, priv->system, p->delivery_system);
p                3387 drivers/media/dvb-frontends/cxd2841er.c 	if (p->delivery_system == SYS_DVBT) {
p                3392 drivers/media/dvb-frontends/cxd2841er.c 				priv, p->bandwidth_hz);
p                3395 drivers/media/dvb-frontends/cxd2841er.c 			ret = cxd2841er_retune_active(priv, p);
p                3402 drivers/media/dvb-frontends/cxd2841er.c 	} else if (p->delivery_system == SYS_DVBT2) {
p                3405 drivers/media/dvb-frontends/cxd2841er.c 			(int)(p->stream_id > 255), p->stream_id);
p                3410 drivers/media/dvb-frontends/cxd2841er.c 				p->bandwidth_hz);
p                3413 drivers/media/dvb-frontends/cxd2841er.c 			ret = cxd2841er_retune_active(priv, p);
p                3420 drivers/media/dvb-frontends/cxd2841er.c 	} else if (p->delivery_system == SYS_ISDBT) {
p                3425 drivers/media/dvb-frontends/cxd2841er.c 					priv, p->bandwidth_hz);
p                3428 drivers/media/dvb-frontends/cxd2841er.c 			ret = cxd2841er_retune_active(priv, p);
p                3435 drivers/media/dvb-frontends/cxd2841er.c 	} else if (p->delivery_system == SYS_DVBC_ANNEX_A ||
p                3436 drivers/media/dvb-frontends/cxd2841er.c 			p->delivery_system == SYS_DVBC_ANNEX_C) {
p                3439 drivers/media/dvb-frontends/cxd2841er.c 		if (p->bandwidth_hz != 6000000 &&
p                3440 drivers/media/dvb-frontends/cxd2841er.c 				p->bandwidth_hz != 7000000 &&
p                3441 drivers/media/dvb-frontends/cxd2841er.c 				p->bandwidth_hz != 8000000) {
p                3442 drivers/media/dvb-frontends/cxd2841er.c 			p->bandwidth_hz = 8000000;
p                3444 drivers/media/dvb-frontends/cxd2841er.c 					__func__, p->bandwidth_hz);
p                3450 drivers/media/dvb-frontends/cxd2841er.c 				priv, p->bandwidth_hz);
p                3453 drivers/media/dvb-frontends/cxd2841er.c 			ret = cxd2841er_retune_active(priv, p);
p                3463 drivers/media/dvb-frontends/cxd2841er.c 			__func__, p->delivery_system);
p                3502 drivers/media/dvb-frontends/cxd2841er.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                3514 drivers/media/dvb-frontends/cxd2841er.c 			p->frequency += carrier_offset;
p                3532 drivers/media/dvb-frontends/cxd2841er.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                3535 drivers/media/dvb-frontends/cxd2841er.c 			re_tune, p->bandwidth_hz);
p                3545 drivers/media/dvb-frontends/cxd2841er.c 						priv, p->bandwidth_hz,
p                3552 drivers/media/dvb-frontends/cxd2841er.c 					priv, p->bandwidth_hz,
p                3559 drivers/media/dvb-frontends/cxd2841er.c 					priv, p->bandwidth_hz,
p                3578 drivers/media/dvb-frontends/cxd2841er.c 			p->frequency += carrier_offset;
p                3757 drivers/media/dvb-frontends/cxd2841er.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                3759 drivers/media/dvb-frontends/cxd2841er.c 	p->strength.len = 1;
p                3760 drivers/media/dvb-frontends/cxd2841er.c 	p->strength.stat[0].scale = FE_SCALE_RELATIVE;
p                3761 drivers/media/dvb-frontends/cxd2841er.c 	p->cnr.len = 1;
p                3762 drivers/media/dvb-frontends/cxd2841er.c 	p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3763 drivers/media/dvb-frontends/cxd2841er.c 	p->block_error.len = 1;
p                3764 drivers/media/dvb-frontends/cxd2841er.c 	p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3765 drivers/media/dvb-frontends/cxd2841er.c 	p->post_bit_error.len = 1;
p                3766 drivers/media/dvb-frontends/cxd2841er.c 	p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3767 drivers/media/dvb-frontends/cxd2841er.c 	p->post_bit_count.len = 1;
p                3768 drivers/media/dvb-frontends/cxd2841er.c 	p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3802 drivers/media/dvb-frontends/cxd2841er.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                3805 drivers/media/dvb-frontends/cxd2841er.c 			__func__, p->bandwidth_hz);
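
Many of the cxd2841er.c entries are the DVBv5 statistics reset performed at init and on retune: every stat gets a single slot whose scale starts out "not available" until the demod locks. A small sketch of that initialization, using simplified mock types instead of the kernel's dtv_fe_stats:

	/* Sketch of the "reset all stats to NOT_AVAILABLE" pattern. */
	#include <stdio.h>

	enum scale { SCALE_NOT_AVAILABLE, SCALE_DECIBEL, SCALE_RELATIVE, SCALE_COUNTER };

	struct stat_slot { enum scale scale; long long value; };
	struct fe_stat   { int len; struct stat_slot stat[4]; };

	struct props {
		struct fe_stat strength, cnr, block_error, post_bit_error, post_bit_count;
	};

	static void reset_stat(struct fe_stat *s)
	{
		s->len = 1;                        /* one measurement slot */
		s->stat[0].scale = SCALE_NOT_AVAILABLE;
		s->stat[0].value = 0;
	}

	static void init_stats(struct props *p)
	{
		reset_stat(&p->strength);
		reset_stat(&p->cnr);
		reset_stat(&p->block_error);
		reset_stat(&p->post_bit_error);
		reset_stat(&p->post_bit_count);
	}

	int main(void)
	{
		struct props p;

		init_stats(&p);
		printf("cnr scale after init: %d\n", p.cnr.stat[0].scale);
		return 0;
	}
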
p                1660 drivers/media/dvb-frontends/dib9000.c 	u8 p[13] = { 0 };
p                1662 drivers/media/dvb-frontends/dib9000.c 	p[0] = type;
p                1663 drivers/media/dvb-frontends/dib9000.c 	p[1] = port;
p                1664 drivers/media/dvb-frontends/dib9000.c 	p[2] = msg[0].addr << 1;
p                1666 drivers/media/dvb-frontends/dib9000.c 	p[3] = (u8) scl & 0xff;	/* scl */
p                1667 drivers/media/dvb-frontends/dib9000.c 	p[4] = (u8) (scl >> 8);
p                1669 drivers/media/dvb-frontends/dib9000.c 	p[7] = 0;
p                1670 drivers/media/dvb-frontends/dib9000.c 	p[8] = 0;
p                1672 drivers/media/dvb-frontends/dib9000.c 	p[9] = (u8) (msg[0].len);
p                1673 drivers/media/dvb-frontends/dib9000.c 	p[10] = (u8) (msg[0].len >> 8);
p                1675 drivers/media/dvb-frontends/dib9000.c 		p[11] = (u8) (msg[1].len);
p                1676 drivers/media/dvb-frontends/dib9000.c 		p[12] = (u8) (msg[1].len >> 8);
p                1678 drivers/media/dvb-frontends/dib9000.c 		p[11] = 0;
p                1679 drivers/media/dvb-frontends/dib9000.c 		p[12] = 0;
p                1687 drivers/media/dvb-frontends/dib9000.c 	dib9000_risc_mem_write(state, FE_MM_W_COMPONENT_ACCESS, p);
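
The dib9000.c entries assemble a 13-byte I2C-access descriptor for the RISC firmware: fixed byte positions, 16-bit fields stored low byte first, second-transfer length zeroed when absent. The sketch below mirrors the listed assignments but replaces the firmware call with a hex dump; the struct is a simplified stand-in for struct i2c_msg:

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	struct i2c_msg_lite { uint8_t addr; uint16_t len; };   /* simplified stand-in */

	static void build_descriptor(uint8_t out[13], uint8_t type, uint8_t port,
				     uint16_t scl, const struct i2c_msg_lite *msg, int num)
	{
		memset(out, 0, 13);
		out[0] = type;
		out[1] = port;
		out[2] = (uint8_t)(msg[0].addr << 1);    /* 8-bit bus address */
		out[3] = (uint8_t)(scl & 0xff);          /* SCL divider, little-endian */
		out[4] = (uint8_t)(scl >> 8);
		out[9]  = (uint8_t)(msg[0].len);         /* first transfer length */
		out[10] = (uint8_t)(msg[0].len >> 8);
		if (num > 1) {                           /* optional second transfer */
			out[11] = (uint8_t)(msg[1].len);
			out[12] = (uint8_t)(msg[1].len >> 8);
		}
	}

	int main(void)
	{
		struct i2c_msg_lite msgs[2] = { { 0x50, 2 }, { 0x50, 300 } };
		uint8_t d[13];
		int i;

		build_descriptor(d, 1, 0, 340, msgs, 2);
		for (i = 0; i < 13; i++)
			printf("%02x ", d[i]);
		printf("\n");
		return 0;
	}
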
p                8868 drivers/media/dvb-frontends/drx39xyj/drxj.c 	struct dtv_frontend_properties *p = &state->frontend.dtv_property_cache;
p                8895 drivers/media/dvb-frontends/drx39xyj/drxj.c 				if (p->cnr.stat[0].svalue > 20800) {
p                8964 drivers/media/dvb-frontends/drx39xyj/drxj.c 				if (p->cnr.stat[0].svalue > 20800) {
p                9016 drivers/media/dvb-frontends/drx39xyj/drxj.c 	struct dtv_frontend_properties *p = &state->frontend.dtv_property_cache;
p                9041 drivers/media/dvb-frontends/drx39xyj/drxj.c 				if (p->cnr.stat[0].svalue > 26800) {
p                9459 drivers/media/dvb-frontends/drx39xyj/drxj.c 	struct dtv_frontend_properties *p = &state->frontend.dtv_property_cache;
p                9488 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                9618 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
p                9619 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
p                9620 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
p                9621 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
p                9622 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->block_error.stat[0].scale = FE_SCALE_COUNTER;
p                9623 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
p                9625 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->cnr.stat[0].svalue = ((u16) qam_sl_mer) * 100;
p                9627 drivers/media/dvb-frontends/drx39xyj/drxj.c 		p->pre_bit_error.stat[0].uvalue += qam_vd_ser;
p                9628 drivers/media/dvb-frontends/drx39xyj/drxj.c 		p->pre_bit_count.stat[0].uvalue += vd_bit_cnt * ((e > 2) ? 1 : 8) / 8;
p                9630 drivers/media/dvb-frontends/drx39xyj/drxj.c 		p->pre_bit_error.stat[0].uvalue += qam_pre_rs_ber;
p                9631 drivers/media/dvb-frontends/drx39xyj/drxj.c 		p->pre_bit_count.stat[0].uvalue += rs_bit_cnt >> e;
p                9634 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->post_bit_error.stat[0].uvalue += qam_post_rs_ber;
p                9635 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->post_bit_count.stat[0].uvalue += rs_bit_cnt >> e;
p                9637 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->block_error.stat[0].uvalue += pkt_errs;
p                9649 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                9650 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                9651 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                9652 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                9653 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                9654 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                10676 drivers/media/dvb-frontends/drx39xyj/drxj.c 	struct dtv_frontend_properties *p = &state->frontend.dtv_property_cache;
p                10685 drivers/media/dvb-frontends/drx39xyj/drxj.c 		p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                10687 drivers/media/dvb-frontends/drx39xyj/drxj.c 		p->strength.stat[0].scale = FE_SCALE_RELATIVE;
p                10688 drivers/media/dvb-frontends/drx39xyj/drxj.c 		p->strength.stat[0].uvalue = 65535UL *  strength/ 100;
p                10701 drivers/media/dvb-frontends/drx39xyj/drxj.c 			p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                10702 drivers/media/dvb-frontends/drx39xyj/drxj.c 			p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                10703 drivers/media/dvb-frontends/drx39xyj/drxj.c 			p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                10704 drivers/media/dvb-frontends/drx39xyj/drxj.c 			p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                10705 drivers/media/dvb-frontends/drx39xyj/drxj.c 			p->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                10706 drivers/media/dvb-frontends/drx39xyj/drxj.c 			p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                10707 drivers/media/dvb-frontends/drx39xyj/drxj.c 			p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                10712 drivers/media/dvb-frontends/drx39xyj/drxj.c 				p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                10714 drivers/media/dvb-frontends/drx39xyj/drxj.c 				p->block_error.stat[0].scale = FE_SCALE_COUNTER;
p                10715 drivers/media/dvb-frontends/drx39xyj/drxj.c 				p->block_error.stat[0].uvalue += err;
p                10716 drivers/media/dvb-frontends/drx39xyj/drxj.c 				p->block_count.stat[0].scale = FE_SCALE_COUNTER;
p                10717 drivers/media/dvb-frontends/drx39xyj/drxj.c 				p->block_count.stat[0].uvalue += pkt;
p                10724 drivers/media/dvb-frontends/drx39xyj/drxj.c 				p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                10726 drivers/media/dvb-frontends/drx39xyj/drxj.c 				p->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
p                10727 drivers/media/dvb-frontends/drx39xyj/drxj.c 				p->pre_bit_error.stat[0].uvalue += ber;
p                10728 drivers/media/dvb-frontends/drx39xyj/drxj.c 				p->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
p                10729 drivers/media/dvb-frontends/drx39xyj/drxj.c 				p->pre_bit_count.stat[0].uvalue += cnt;
p                10735 drivers/media/dvb-frontends/drx39xyj/drxj.c 				p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                10737 drivers/media/dvb-frontends/drx39xyj/drxj.c 				p->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
p                10738 drivers/media/dvb-frontends/drx39xyj/drxj.c 				p->post_bit_error.stat[0].uvalue += ber;
p                10739 drivers/media/dvb-frontends/drx39xyj/drxj.c 				p->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
p                10740 drivers/media/dvb-frontends/drx39xyj/drxj.c 				p->post_bit_count.stat[0].uvalue += cnt;
p                10745 drivers/media/dvb-frontends/drx39xyj/drxj.c 				p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                10747 drivers/media/dvb-frontends/drx39xyj/drxj.c 				p->cnr.stat[0].svalue = mer * 100;
p                10748 drivers/media/dvb-frontends/drx39xyj/drxj.c 				p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
p                12009 drivers/media/dvb-frontends/drx39xyj/drxj.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                12011 drivers/media/dvb-frontends/drx39xyj/drxj.c 	if (p->pre_bit_error.stat[0].scale == FE_SCALE_NOT_AVAILABLE) {
p                12016 drivers/media/dvb-frontends/drx39xyj/drxj.c 	if (!p->pre_bit_count.stat[0].uvalue) {
p                12017 drivers/media/dvb-frontends/drx39xyj/drxj.c 		if (!p->pre_bit_error.stat[0].uvalue)
p                12022 drivers/media/dvb-frontends/drx39xyj/drxj.c 		*ber = frac_times1e6(p->pre_bit_error.stat[0].uvalue,
p                12023 drivers/media/dvb-frontends/drx39xyj/drxj.c 				     p->pre_bit_count.stat[0].uvalue);
p                12031 drivers/media/dvb-frontends/drx39xyj/drxj.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                12033 drivers/media/dvb-frontends/drx39xyj/drxj.c 	if (p->strength.stat[0].scale == FE_SCALE_NOT_AVAILABLE) {
p                12038 drivers/media/dvb-frontends/drx39xyj/drxj.c 	*strength = p->strength.stat[0].uvalue;
p                12044 drivers/media/dvb-frontends/drx39xyj/drxj.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                12047 drivers/media/dvb-frontends/drx39xyj/drxj.c 	if (p->cnr.stat[0].scale == FE_SCALE_NOT_AVAILABLE) {
p                12052 drivers/media/dvb-frontends/drx39xyj/drxj.c 	tmp64 = p->cnr.stat[0].svalue;
p                12060 drivers/media/dvb-frontends/drx39xyj/drxj.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                12062 drivers/media/dvb-frontends/drx39xyj/drxj.c 	if (p->block_error.stat[0].scale == FE_SCALE_NOT_AVAILABLE) {
p                12067 drivers/media/dvb-frontends/drx39xyj/drxj.c 	*ucb = p->block_error.stat[0].uvalue;
p                12076 drivers/media/dvb-frontends/drx39xyj/drxj.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                12123 drivers/media/dvb-frontends/drx39xyj/drxj.c 	switch (p->delivery_system) {
p                12130 drivers/media/dvb-frontends/drx39xyj/drxj.c 		switch (p->modulation) {
p                12155 drivers/media/dvb-frontends/drx39xyj/drxj.c 	channel.frequency = p->frequency / 1000;
p                12169 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->strength.stat[0].scale = FE_SCALE_RELATIVE;
p                12282 drivers/media/dvb-frontends/drx39xyj/drxj.c 	struct dtv_frontend_properties *p;
p                12339 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p = &state->frontend.dtv_property_cache;
p                12340 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->strength.len = 1;
p                12341 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->pre_bit_count.len = 1;
p                12342 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->pre_bit_error.len = 1;
p                12343 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->post_bit_count.len = 1;
p                12344 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->post_bit_error.len = 1;
p                12345 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->block_count.len = 1;
p                12346 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->block_error.len = 1;
p                12347 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->cnr.len = 1;
p                12349 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->strength.stat[0].scale = FE_SCALE_RELATIVE;
p                12350 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                12351 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                12352 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                12353 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                12354 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                12355 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                12356 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
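
The drxj.c entries around line 12011 show the legacy read_ber ioctl being answered from the DVBv5 counters kept in the property cache: if the counters are valid, BER is errors divided by bits, scaled by 1e6. The sketch below re-implements frac_times1e6() in a simplified form; the fallback values for the "no bits counted yet" branch are illustrative guesses, not the driver's exact choices:

	#include <stdio.h>
	#include <stdint.h>

	enum scale { SCALE_NOT_AVAILABLE, SCALE_COUNTER };

	struct counter { enum scale scale; uint64_t uvalue; };

	static uint32_t frac_times1e6(uint64_t n, uint64_t d)
	{
		return (uint32_t)(n * 1000000ULL / d);   /* assumes n*1e6 fits in 64 bits */
	}

	static int read_ber(const struct counter *err, const struct counter *cnt,
			    uint32_t *ber)
	{
		if (err->scale == SCALE_NOT_AVAILABLE) {
			*ber = 0;
			return -11;                      /* stat not ready yet (EAGAIN-like) */
		}
		if (!cnt->uvalue) {
			*ber = err->uvalue ? 0xffffffff : 0;  /* fallback is illustrative */
			return 0;
		}
		*ber = frac_times1e6(err->uvalue, cnt->uvalue);
		return 0;
	}

	int main(void)
	{
		struct counter err = { SCALE_COUNTER, 120 };
		struct counter cnt = { SCALE_COUNTER, 48000000 };
		uint32_t ber;

		read_ber(&err, &cnt, &ber);
		printf("ber = %u (errors per 1e6 bits)\n", ber);
		return 0;
	}
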
p                1899 drivers/media/dvb-frontends/drxd_hard.c 	struct dtv_frontend_properties *p = &state->props;
p                1966 drivers/media/dvb-frontends/drxd_hard.c 		switch (p->transmission_mode) {
p                1994 drivers/media/dvb-frontends/drxd_hard.c 		switch (p->guard_interval) {
p                2014 drivers/media/dvb-frontends/drxd_hard.c 		switch (p->hierarchy) {
p                2139 drivers/media/dvb-frontends/drxd_hard.c 		switch (p->modulation) {
p                2261 drivers/media/dvb-frontends/drxd_hard.c 		switch (p->code_rate_HP) {
p                2301 drivers/media/dvb-frontends/drxd_hard.c 		switch (p->bandwidth_hz) {
p                2303 drivers/media/dvb-frontends/drxd_hard.c 			p->bandwidth_hz = 8000000;
p                2345 drivers/media/dvb-frontends/drxd_hard.c 			if ((p->transmission_mode == TRANSMISSION_MODE_2K) &&
p                2346 drivers/media/dvb-frontends/drxd_hard.c 			    (p->guard_interval == GUARD_INTERVAL_1_32)) {
p                2860 drivers/media/dvb-frontends/drxd_hard.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                2864 drivers/media/dvb-frontends/drxd_hard.c 	state->props = *p;
p                1450 drivers/media/dvb-frontends/drxk_hard.c 	const char *p;
p                1509 drivers/media/dvb-frontends/drxk_hard.c 			p = "SCU_RESULT_UNKCMD";
p                1512 drivers/media/dvb-frontends/drxk_hard.c 			p = "SCU_RESULT_UNKSTD";
p                1515 drivers/media/dvb-frontends/drxk_hard.c 			p = "SCU_RESULT_SIZE";
p                1518 drivers/media/dvb-frontends/drxk_hard.c 			p = "SCU_RESULT_INVPAR";
p                1522 drivers/media/dvb-frontends/drxk_hard.c 			p = errname;
p                1524 drivers/media/dvb-frontends/drxk_hard.c 		pr_err("%s while sending cmd 0x%04x with params:", p, cmd);
p                6335 drivers/media/dvb-frontends/drxk_hard.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                6336 drivers/media/dvb-frontends/drxk_hard.c 	u32 delsys  = p->delivery_system, old_delsys;
p                6361 drivers/media/dvb-frontends/drxk_hard.c 	state->props = *p;
p                6391 drivers/media/dvb-frontends/drxk_hard.c 	p->strength.stat[0].scale = FE_SCALE_RELATIVE;
p                6392 drivers/media/dvb-frontends/drxk_hard.c 	p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                6393 drivers/media/dvb-frontends/drxk_hard.c 	p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                6394 drivers/media/dvb-frontends/drxk_hard.c 	p->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                6395 drivers/media/dvb-frontends/drxk_hard.c 	p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                6396 drivers/media/dvb-frontends/drxk_hard.c 	p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                6397 drivers/media/dvb-frontends/drxk_hard.c 	p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                6398 drivers/media/dvb-frontends/drxk_hard.c 	p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                6705 drivers/media/dvb-frontends/drxk_hard.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                6714 drivers/media/dvb-frontends/drxk_hard.c 	switch (p->delivery_system) {
p                6763 drivers/media/dvb-frontends/drxk_hard.c 	struct dtv_frontend_properties *p;
p                6833 drivers/media/dvb-frontends/drxk_hard.c 	p = &state->frontend.dtv_property_cache;
p                6834 drivers/media/dvb-frontends/drxk_hard.c 	p->strength.len = 1;
p                6835 drivers/media/dvb-frontends/drxk_hard.c 	p->cnr.len = 1;
p                6836 drivers/media/dvb-frontends/drxk_hard.c 	p->block_error.len = 1;
p                6837 drivers/media/dvb-frontends/drxk_hard.c 	p->block_count.len = 1;
p                6838 drivers/media/dvb-frontends/drxk_hard.c 	p->pre_bit_error.len = 1;
p                6839 drivers/media/dvb-frontends/drxk_hard.c 	p->pre_bit_count.len = 1;
p                6840 drivers/media/dvb-frontends/drxk_hard.c 	p->post_bit_error.len = 1;
p                6841 drivers/media/dvb-frontends/drxk_hard.c 	p->post_bit_count.len = 1;
p                6843 drivers/media/dvb-frontends/drxk_hard.c 	p->strength.stat[0].scale = FE_SCALE_RELATIVE;
p                6844 drivers/media/dvb-frontends/drxk_hard.c 	p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                6845 drivers/media/dvb-frontends/drxk_hard.c 	p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                6846 drivers/media/dvb-frontends/drxk_hard.c 	p->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                6847 drivers/media/dvb-frontends/drxk_hard.c 	p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                6848 drivers/media/dvb-frontends/drxk_hard.c 	p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                6849 drivers/media/dvb-frontends/drxk_hard.c 	p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                6850 drivers/media/dvb-frontends/drxk_hard.c 	p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                  64 drivers/media/dvb-frontends/dvb_dummy_fe.c 				     struct dtv_frontend_properties *p)
p                 451 drivers/media/dvb-frontends/helene.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 454 drivers/media/dvb-frontends/helene.c 	if (p->delivery_system == SYS_DVBT) {
p                 455 drivers/media/dvb-frontends/helene.c 		if (p->bandwidth_hz <= 5000000)
p                 457 drivers/media/dvb-frontends/helene.c 		else if (p->bandwidth_hz <= 6000000)
p                 459 drivers/media/dvb-frontends/helene.c 		else if (p->bandwidth_hz <= 7000000)
p                 461 drivers/media/dvb-frontends/helene.c 		else if (p->bandwidth_hz <= 8000000)
p                 465 drivers/media/dvb-frontends/helene.c 			p->bandwidth_hz = 8000000;
p                 467 drivers/media/dvb-frontends/helene.c 	} else if (p->delivery_system == SYS_DVBT2) {
p                 468 drivers/media/dvb-frontends/helene.c 		if (p->bandwidth_hz <= 5000000)
p                 470 drivers/media/dvb-frontends/helene.c 		else if (p->bandwidth_hz <= 6000000)
p                 472 drivers/media/dvb-frontends/helene.c 		else if (p->bandwidth_hz <= 7000000)
p                 474 drivers/media/dvb-frontends/helene.c 		else if (p->bandwidth_hz <= 8000000)
p                 478 drivers/media/dvb-frontends/helene.c 			p->bandwidth_hz = 8000000;
p                 480 drivers/media/dvb-frontends/helene.c 	} else if (p->delivery_system == SYS_DVBS) {
p                 482 drivers/media/dvb-frontends/helene.c 	} else if (p->delivery_system == SYS_DVBS2) {
p                 484 drivers/media/dvb-frontends/helene.c 	} else if (p->delivery_system == SYS_ISDBS) {
p                 486 drivers/media/dvb-frontends/helene.c 	} else if (p->delivery_system == SYS_ISDBT) {
p                 487 drivers/media/dvb-frontends/helene.c 		if (p->bandwidth_hz <= 6000000)
p                 489 drivers/media/dvb-frontends/helene.c 		else if (p->bandwidth_hz <= 7000000)
p                 491 drivers/media/dvb-frontends/helene.c 		else if (p->bandwidth_hz <= 8000000)
p                 495 drivers/media/dvb-frontends/helene.c 			p->bandwidth_hz = 8000000;
p                 497 drivers/media/dvb-frontends/helene.c 	} else if (p->delivery_system == SYS_DVBC_ANNEX_A) {
p                 498 drivers/media/dvb-frontends/helene.c 		if (p->bandwidth_hz <= 6000000)
p                 500 drivers/media/dvb-frontends/helene.c 		else if (p->bandwidth_hz <= 8000000)
p                 505 drivers/media/dvb-frontends/helene.c 			__func__, (int)system, p->delivery_system,
p                 506 drivers/media/dvb-frontends/helene.c 			p->bandwidth_hz);
p                 515 drivers/media/dvb-frontends/helene.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 517 drivers/media/dvb-frontends/helene.c 	int frequencykHz = p->frequency;
p                 519 drivers/media/dvb-frontends/helene.c 	u32 symbol_rate = p->symbol_rate/1000;
p                 533 drivers/media/dvb-frontends/helene.c 	frequency = roundup(p->frequency / 1000, 1);
p                 665 drivers/media/dvb-frontends/helene.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 667 drivers/media/dvb-frontends/helene.c 	int frequencykHz = p->frequency / 1000;
p                 680 drivers/media/dvb-frontends/helene.c 	frequency = roundup(p->frequency / 1000, 25);
p                 831 drivers/media/dvb-frontends/helene.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 833 drivers/media/dvb-frontends/helene.c 	if (p->delivery_system == SYS_DVBT ||
p                 834 drivers/media/dvb-frontends/helene.c 	    p->delivery_system == SYS_DVBT2 ||
p                 835 drivers/media/dvb-frontends/helene.c 	    p->delivery_system == SYS_ISDBT ||
p                 836 drivers/media/dvb-frontends/helene.c 	    p->delivery_system == SYS_DVBC_ANNEX_A)
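
The helene.c entries map delivery system plus bandwidth_hz onto a tuner configuration, choosing the smallest supported bandwidth step that covers the request and clamping anything wider back to 8 MHz in the property cache. A minimal sketch of that selection; the mode names are invented for the example and do not match the driver's constants:

	#include <stdio.h>

	enum demo_mode { MODE_BW5, MODE_BW6, MODE_BW7, MODE_BW8 };

	static enum demo_mode pick_mode(unsigned int *bandwidth_hz)
	{
		if (*bandwidth_hz <= 5000000)
			return MODE_BW5;
		if (*bandwidth_hz <= 6000000)
			return MODE_BW6;
		if (*bandwidth_hz <= 7000000)
			return MODE_BW7;
		if (*bandwidth_hz <= 8000000)
			return MODE_BW8;
		*bandwidth_hz = 8000000;   /* out of range: clamp the cache like the driver */
		return MODE_BW8;
	}

	int main(void)
	{
		unsigned int bw = 10000000;
		enum demo_mode m = pick_mode(&bw);

		printf("mode=%d bandwidth=%u\n", m, bw);
		return 0;
	}
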
p                 167 drivers/media/dvb-frontends/horus3a.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 169 drivers/media/dvb-frontends/horus3a.c 	u32 frequency = p->frequency;
p                 170 drivers/media/dvb-frontends/horus3a.c 	u32 symbol_rate = p->symbol_rate/1000;
p                 244 drivers/media/dvb-frontends/horus3a.c 	if (p->delivery_system == SYS_DVBS) {
p                 266 drivers/media/dvb-frontends/horus3a.c 	} else if (p->delivery_system == SYS_DVBS2) {
p                 288 drivers/media/dvb-frontends/horus3a.c 			p->delivery_system);
p                 110 drivers/media/dvb-frontends/l64781.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 129 drivers/media/dvb-frontends/l64781.c 	switch (p->bandwidth_hz) {
p                 148 drivers/media/dvb-frontends/l64781.c 	if (p->inversion != INVERSION_ON &&
p                 149 drivers/media/dvb-frontends/l64781.c 	    p->inversion != INVERSION_OFF)
p                 152 drivers/media/dvb-frontends/l64781.c 	if (p->code_rate_HP != FEC_1_2 && p->code_rate_HP != FEC_2_3 &&
p                 153 drivers/media/dvb-frontends/l64781.c 	    p->code_rate_HP != FEC_3_4 && p->code_rate_HP != FEC_5_6 &&
p                 154 drivers/media/dvb-frontends/l64781.c 	    p->code_rate_HP != FEC_7_8)
p                 157 drivers/media/dvb-frontends/l64781.c 	if (p->hierarchy != HIERARCHY_NONE &&
p                 158 drivers/media/dvb-frontends/l64781.c 	    (p->code_rate_LP != FEC_1_2 && p->code_rate_LP != FEC_2_3 &&
p                 159 drivers/media/dvb-frontends/l64781.c 	     p->code_rate_LP != FEC_3_4 && p->code_rate_LP != FEC_5_6 &&
p                 160 drivers/media/dvb-frontends/l64781.c 	     p->code_rate_LP != FEC_7_8))
p                 163 drivers/media/dvb-frontends/l64781.c 	if (p->modulation != QPSK && p->modulation != QAM_16 &&
p                 164 drivers/media/dvb-frontends/l64781.c 	    p->modulation != QAM_64)
p                 167 drivers/media/dvb-frontends/l64781.c 	if (p->transmission_mode != TRANSMISSION_MODE_2K &&
p                 168 drivers/media/dvb-frontends/l64781.c 	    p->transmission_mode != TRANSMISSION_MODE_8K)
p                 171 drivers/media/dvb-frontends/l64781.c 	if ((int)p->guard_interval < GUARD_INTERVAL_1_32 ||
p                 172 drivers/media/dvb-frontends/l64781.c 	    p->guard_interval > GUARD_INTERVAL_1_4)
p                 175 drivers/media/dvb-frontends/l64781.c 	if ((int)p->hierarchy < HIERARCHY_NONE ||
p                 176 drivers/media/dvb-frontends/l64781.c 	    p->hierarchy > HIERARCHY_4)
p                 190 drivers/media/dvb-frontends/l64781.c 	spi_bias *= qam_tab[p->modulation];
p                 191 drivers/media/dvb-frontends/l64781.c 	spi_bias /= p->code_rate_HP + 1;
p                 192 drivers/media/dvb-frontends/l64781.c 	spi_bias /= (guard_tab[p->guard_interval] + 32);
p                 195 drivers/media/dvb-frontends/l64781.c 	spi_bias *= p->code_rate_HP;
p                 197 drivers/media/dvb-frontends/l64781.c 	val0x04 = (p->transmission_mode << 2) | p->guard_interval;
p                 198 drivers/media/dvb-frontends/l64781.c 	val0x05 = fec_tab[p->code_rate_HP];
p                 200 drivers/media/dvb-frontends/l64781.c 	if (p->hierarchy != HIERARCHY_NONE)
p                 201 drivers/media/dvb-frontends/l64781.c 		val0x05 |= (p->code_rate_LP - FEC_1_2) << 3;
p                 203 drivers/media/dvb-frontends/l64781.c 	val0x06 = (p->hierarchy << 2) | p->modulation;
p                 213 drivers/media/dvb-frontends/l64781.c 			 p->transmission_mode == TRANSMISSION_MODE_2K ? 1 : 3);
p                 221 drivers/media/dvb-frontends/l64781.c 		(p->inversion == INVERSION_ON ? 0x80 : 0x00));
p                 235 drivers/media/dvb-frontends/l64781.c 			struct dtv_frontend_properties *p)
p                 244 drivers/media/dvb-frontends/l64781.c 		p->guard_interval = GUARD_INTERVAL_1_32;
p                 247 drivers/media/dvb-frontends/l64781.c 		p->guard_interval = GUARD_INTERVAL_1_16;
p                 250 drivers/media/dvb-frontends/l64781.c 		p->guard_interval = GUARD_INTERVAL_1_8;
p                 253 drivers/media/dvb-frontends/l64781.c 		p->guard_interval = GUARD_INTERVAL_1_4;
p                 258 drivers/media/dvb-frontends/l64781.c 		p->transmission_mode = TRANSMISSION_MODE_2K;
p                 261 drivers/media/dvb-frontends/l64781.c 		p->transmission_mode = TRANSMISSION_MODE_8K;
p                 270 drivers/media/dvb-frontends/l64781.c 		p->code_rate_HP = FEC_1_2;
p                 273 drivers/media/dvb-frontends/l64781.c 		p->code_rate_HP = FEC_2_3;
p                 276 drivers/media/dvb-frontends/l64781.c 		p->code_rate_HP = FEC_3_4;
p                 279 drivers/media/dvb-frontends/l64781.c 		p->code_rate_HP = FEC_5_6;
p                 282 drivers/media/dvb-frontends/l64781.c 		p->code_rate_HP = FEC_7_8;
p                 289 drivers/media/dvb-frontends/l64781.c 		p->code_rate_LP = FEC_1_2;
p                 292 drivers/media/dvb-frontends/l64781.c 		p->code_rate_LP = FEC_2_3;
p                 295 drivers/media/dvb-frontends/l64781.c 		p->code_rate_LP = FEC_3_4;
p                 298 drivers/media/dvb-frontends/l64781.c 		p->code_rate_LP = FEC_5_6;
p                 301 drivers/media/dvb-frontends/l64781.c 		p->code_rate_LP = FEC_7_8;
p                 310 drivers/media/dvb-frontends/l64781.c 		p->modulation = QPSK;
p                 313 drivers/media/dvb-frontends/l64781.c 		p->modulation = QAM_16;
p                 316 drivers/media/dvb-frontends/l64781.c 		p->modulation = QAM_64;
p                 323 drivers/media/dvb-frontends/l64781.c 		p->hierarchy = HIERARCHY_NONE;
p                 326 drivers/media/dvb-frontends/l64781.c 		p->hierarchy = HIERARCHY_1;
p                 329 drivers/media/dvb-frontends/l64781.c 		p->hierarchy = HIERARCHY_2;
p                 332 drivers/media/dvb-frontends/l64781.c 		p->hierarchy = HIERARCHY_4;
p                 340 drivers/media/dvb-frontends/l64781.c 	p->inversion = (tmp & 0x80) ? INVERSION_ON : INVERSION_OFF;
p                 345 drivers/media/dvb-frontends/l64781.c 	p->frequency += tmp;
p                 258 drivers/media/dvb-frontends/lgdt3305.c 				   struct dtv_frontend_properties *p)
p                 271 drivers/media/dvb-frontends/lgdt3305.c 	switch (p->modulation) {
p                 290 drivers/media/dvb-frontends/lgdt3305.c 					 struct dtv_frontend_properties *p)
p                 294 drivers/media/dvb-frontends/lgdt3305.c 	switch (p->modulation) {
p                 313 drivers/media/dvb-frontends/lgdt3305.c 					 struct dtv_frontend_properties *p)
p                 317 drivers/media/dvb-frontends/lgdt3305.c 	switch (p->modulation) {
p                 340 drivers/media/dvb-frontends/lgdt3305.c 			       struct dtv_frontend_properties *p)
p                 344 drivers/media/dvb-frontends/lgdt3305.c 	switch (p->modulation) {
p                 390 drivers/media/dvb-frontends/lgdt3305.c 			      struct dtv_frontend_properties *p)
p                 394 drivers/media/dvb-frontends/lgdt3305.c 	switch (p->modulation) {
p                 424 drivers/media/dvb-frontends/lgdt3305.c 	return lgdt3305_rfagc_loop(state, p);
p                 428 drivers/media/dvb-frontends/lgdt3305.c 				      struct dtv_frontend_properties *p)
p                 432 drivers/media/dvb-frontends/lgdt3305.c 	switch (p->modulation) {
p                 465 drivers/media/dvb-frontends/lgdt3305.c 				       struct dtv_frontend_properties *p,
p                 472 drivers/media/dvb-frontends/lgdt3305.c 	switch (p->modulation) {
p                 489 drivers/media/dvb-frontends/lgdt3305.c 			   struct dtv_frontend_properties *p)
p                 495 drivers/media/dvb-frontends/lgdt3305.c 	switch (p->modulation) {
p                 509 drivers/media/dvb-frontends/lgdt3305.c 	switch (p->modulation) {
p                 671 drivers/media/dvb-frontends/lgdt3305.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 675 drivers/media/dvb-frontends/lgdt3305.c 	lg_dbg("(%d, %d)\n", p->frequency, p->modulation);
p                 683 drivers/media/dvb-frontends/lgdt3305.c 		state->current_frequency = p->frequency;
p                 686 drivers/media/dvb-frontends/lgdt3305.c 	ret = lgdt3305_set_modulation(state, p);
p                 690 drivers/media/dvb-frontends/lgdt3305.c 	ret = lgdt3305_passband_digital_agc(state, p);
p                 694 drivers/media/dvb-frontends/lgdt3305.c 	ret = lgdt3305_agc_setup(state, p);
p                 699 drivers/media/dvb-frontends/lgdt3305.c 	switch (p->modulation) {
p                 710 drivers/media/dvb-frontends/lgdt3305.c 		ret = lgdt3305_set_if(state, p);
p                 719 drivers/media/dvb-frontends/lgdt3305.c 	ret = lgdt3305_spectral_inversion(state, p,
p                 725 drivers/media/dvb-frontends/lgdt3305.c 	state->current_modulation = p->modulation;
p                 739 drivers/media/dvb-frontends/lgdt3305.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 743 drivers/media/dvb-frontends/lgdt3305.c 	lg_dbg("(%d, %d)\n", p->frequency, p->modulation);
p                 751 drivers/media/dvb-frontends/lgdt3305.c 		state->current_frequency = p->frequency;
p                 754 drivers/media/dvb-frontends/lgdt3305.c 	ret = lgdt3305_set_modulation(state, p);
p                 758 drivers/media/dvb-frontends/lgdt3305.c 	ret = lgdt3305_passband_digital_agc(state, p);
p                 761 drivers/media/dvb-frontends/lgdt3305.c 	ret = lgdt3305_set_agc_power_ref(state, p);
p                 764 drivers/media/dvb-frontends/lgdt3305.c 	ret = lgdt3305_agc_setup(state, p);
p                 776 drivers/media/dvb-frontends/lgdt3305.c 	ret = lgdt3305_set_if(state, p);
p                 779 drivers/media/dvb-frontends/lgdt3305.c 	ret = lgdt3305_spectral_inversion(state, p,
p                 785 drivers/media/dvb-frontends/lgdt3305.c 	ret = lgdt3305_set_filter_extension(state, p);
p                 789 drivers/media/dvb-frontends/lgdt3305.c 	state->current_modulation = p->modulation;
p                 802 drivers/media/dvb-frontends/lgdt3305.c 				 struct dtv_frontend_properties *p)
p                 808 drivers/media/dvb-frontends/lgdt3305.c 	p->modulation = state->current_modulation;
p                 809 drivers/media/dvb-frontends/lgdt3305.c 	p->frequency = state->current_frequency;
p                 641 drivers/media/dvb-frontends/lgdt3306a.c 				   struct dtv_frontend_properties *p)
p                 647 drivers/media/dvb-frontends/lgdt3306a.c 	switch (p->modulation) {
p                 654 drivers/media/dvb-frontends/lgdt3306a.c 		ret = lgdt3306a_set_qam(state, p->modulation);
p                 662 drivers/media/dvb-frontends/lgdt3306a.c 	state->current_modulation = p->modulation;
p                 671 drivers/media/dvb-frontends/lgdt3306a.c 			      struct dtv_frontend_properties *p)
p                 676 drivers/media/dvb-frontends/lgdt3306a.c 	switch (p->modulation) {
p                 715 drivers/media/dvb-frontends/lgdt3306a.c 				       struct dtv_frontend_properties *p,
p                 729 drivers/media/dvb-frontends/lgdt3306a.c 	switch (p->modulation) {
p                 748 drivers/media/dvb-frontends/lgdt3306a.c 			   struct dtv_frontend_properties *p)
p                 754 drivers/media/dvb-frontends/lgdt3306a.c 	switch (p->modulation) {
p                1006 drivers/media/dvb-frontends/lgdt3306a.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1010 drivers/media/dvb-frontends/lgdt3306a.c 	dbg_info("(%d, %d)\n", p->frequency, p->modulation);
p                1012 drivers/media/dvb-frontends/lgdt3306a.c 	if (state->current_frequency  == p->frequency &&
p                1013 drivers/media/dvb-frontends/lgdt3306a.c 	   state->current_modulation == p->modulation) {
p                1031 drivers/media/dvb-frontends/lgdt3306a.c 		state->current_frequency = p->frequency;
p                1035 drivers/media/dvb-frontends/lgdt3306a.c 	ret = lgdt3306a_set_modulation(state, p);
p                1039 drivers/media/dvb-frontends/lgdt3306a.c 	ret = lgdt3306a_agc_setup(state, p);
p                1043 drivers/media/dvb-frontends/lgdt3306a.c 	ret = lgdt3306a_set_if(state, p);
p                1047 drivers/media/dvb-frontends/lgdt3306a.c 	ret = lgdt3306a_spectral_inversion(state, p,
p                1073 drivers/media/dvb-frontends/lgdt3306a.c 	state->current_frequency = p->frequency;
p                1079 drivers/media/dvb-frontends/lgdt3306a.c 				  struct dtv_frontend_properties *p)
p                1086 drivers/media/dvb-frontends/lgdt3306a.c 	p->modulation = state->current_modulation;
p                1087 drivers/media/dvb-frontends/lgdt3306a.c 	p->frequency = state->current_frequency;
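
The lgdt3306a.c set_frontend entries begin by comparing the requested frequency and modulation against the values cached from the previous tune; the sketch below assumes (not shown in the listing) that the matching case skips the reprogramming sequence. Types are simplified stand-ins:

	#include <stdio.h>

	struct props { unsigned int frequency; int modulation; };
	struct state { unsigned int current_frequency; int current_modulation; int tunes; };

	static int set_frontend(struct state *s, const struct props *p)
	{
		if (s->current_frequency == p->frequency &&
		    s->current_modulation == p->modulation)
			return 0;                    /* nothing changed, keep the lock */

		/* ... a real driver programs modulation, AGC, IF, inversion here ... */
		s->current_frequency = p->frequency;
		s->current_modulation = p->modulation;
		s->tunes++;
		return 0;
	}

	int main(void)
	{
		struct state s = { 0, -1, 0 };
		struct props p = { 473000000, 7 /* arbitrary modulation id for the demo */ };

		set_frontend(&s, &p);
		set_frontend(&s, &p);                /* second call is a no-op */
		printf("hardware was reprogrammed %d time(s)\n", s.tunes);
		return 0;
	}
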
p                 182 drivers/media/dvb-frontends/lgdt330x.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 289 drivers/media/dvb-frontends/lgdt330x.c 	p->cnr.len = 1;
p                 290 drivers/media/dvb-frontends/lgdt330x.c 	p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 291 drivers/media/dvb-frontends/lgdt330x.c 	p->block_error.len = 1;
p                 292 drivers/media/dvb-frontends/lgdt330x.c 	p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 293 drivers/media/dvb-frontends/lgdt330x.c 	p->block_count.len = 1;
p                 294 drivers/media/dvb-frontends/lgdt330x.c 	p->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 311 drivers/media/dvb-frontends/lgdt330x.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 346 drivers/media/dvb-frontends/lgdt330x.c 	if (state->current_modulation != p->modulation) {
p                 347 drivers/media/dvb-frontends/lgdt330x.c 		switch (p->modulation) {
p                 401 drivers/media/dvb-frontends/lgdt330x.c 				 __func__, p->modulation);
p                 407 drivers/media/dvb-frontends/lgdt330x.c 				 __func__, p->modulation);
p                 421 drivers/media/dvb-frontends/lgdt330x.c 		state->current_modulation = p->modulation;
p                 436 drivers/media/dvb-frontends/lgdt330x.c 	state->current_frequency = p->frequency;
p                 443 drivers/media/dvb-frontends/lgdt330x.c 				 struct dtv_frontend_properties *p)
p                 447 drivers/media/dvb-frontends/lgdt330x.c 	p->frequency = state->current_frequency;
p                 622 drivers/media/dvb-frontends/lgdt330x.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 680 drivers/media/dvb-frontends/lgdt330x.c 		p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 681 drivers/media/dvb-frontends/lgdt330x.c 		p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 682 drivers/media/dvb-frontends/lgdt330x.c 		p->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 694 drivers/media/dvb-frontends/lgdt330x.c 		p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
p                 695 drivers/media/dvb-frontends/lgdt330x.c 		p->cnr.stat[0].svalue = (((u64)state->snr) * 1000) >> 24;
p                 697 drivers/media/dvb-frontends/lgdt330x.c 		p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 707 drivers/media/dvb-frontends/lgdt330x.c 		p->block_error.stat[0].uvalue += state->ucblocks;
p                 709 drivers/media/dvb-frontends/lgdt330x.c 		p->block_count.stat[0].uvalue += 10000;
p                 711 drivers/media/dvb-frontends/lgdt330x.c 		p->block_error.stat[0].scale = FE_SCALE_COUNTER;
p                 712 drivers/media/dvb-frontends/lgdt330x.c 		p->block_count.stat[0].scale = FE_SCALE_COUNTER;
p                 714 drivers/media/dvb-frontends/lgdt330x.c 		p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 715 drivers/media/dvb-frontends/lgdt330x.c 		p->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 725 drivers/media/dvb-frontends/lgdt330x.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 786 drivers/media/dvb-frontends/lgdt330x.c 		p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 787 drivers/media/dvb-frontends/lgdt330x.c 		p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 788 drivers/media/dvb-frontends/lgdt330x.c 		p->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 800 drivers/media/dvb-frontends/lgdt330x.c 		p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
p                 801 drivers/media/dvb-frontends/lgdt330x.c 		p->cnr.stat[0].svalue = (((u64)state->snr) * 1000) >> 24;
p                 803 drivers/media/dvb-frontends/lgdt330x.c 		p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 813 drivers/media/dvb-frontends/lgdt330x.c 		p->block_error.stat[0].uvalue += state->ucblocks;
p                 815 drivers/media/dvb-frontends/lgdt330x.c 		p->block_count.stat[0].uvalue += 10000;
p                 817 drivers/media/dvb-frontends/lgdt330x.c 		p->block_error.stat[0].scale = FE_SCALE_COUNTER;
p                 818 drivers/media/dvb-frontends/lgdt330x.c 		p->block_count.stat[0].scale = FE_SCALE_COUNTER;
p                 820 drivers/media/dvb-frontends/lgdt330x.c 		p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 821 drivers/media/dvb-frontends/lgdt330x.c 		p->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 304 drivers/media/dvb-frontends/lgs8gl5.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 309 drivers/media/dvb-frontends/lgs8gl5.c 	if (p->bandwidth_hz != 8000000)
p                 328 drivers/media/dvb-frontends/lgs8gl5.c 		     struct dtv_frontend_properties *p)
p                 334 drivers/media/dvb-frontends/lgs8gl5.c 	p->inversion = (inv & REG_INVERSION_ON) ? INVERSION_ON : INVERSION_OFF;
p                 336 drivers/media/dvb-frontends/lgs8gl5.c 	p->code_rate_HP = FEC_1_2;
p                 337 drivers/media/dvb-frontends/lgs8gl5.c 	p->code_rate_LP = FEC_7_8;
p                 338 drivers/media/dvb-frontends/lgs8gl5.c 	p->guard_interval = GUARD_INTERVAL_1_32;
p                 339 drivers/media/dvb-frontends/lgs8gl5.c 	p->transmission_mode = TRANSMISSION_MODE_2K;
p                 340 drivers/media/dvb-frontends/lgs8gl5.c 	p->modulation = QAM_64;
p                 341 drivers/media/dvb-frontends/lgs8gl5.c 	p->hierarchy = HIERARCHY_NONE;
p                 342 drivers/media/dvb-frontends/lgs8gl5.c 	p->bandwidth_hz = 8000000;
p                1605 drivers/media/dvb-frontends/mb86a16.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1608 drivers/media/dvb-frontends/mb86a16.c 	state->frequency = p->frequency / 1000;
p                1609 drivers/media/dvb-frontends/mb86a16.c 	state->srate = p->symbol_rate / 1000;
p                 537 drivers/media/dvb-frontends/mt312.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 547 drivers/media/dvb-frontends/mt312.c 	dprintk("%s: Freq %d\n", __func__, p->frequency);
p                 549 drivers/media/dvb-frontends/mt312.c 	if ((p->frequency < fe->ops.info.frequency_min_hz / kHz)
p                 550 drivers/media/dvb-frontends/mt312.c 	    || (p->frequency > fe->ops.info.frequency_max_hz / kHz))
p                 553 drivers/media/dvb-frontends/mt312.c 	if (((int)p->inversion < INVERSION_OFF)
p                 554 drivers/media/dvb-frontends/mt312.c 	    || (p->inversion > INVERSION_ON))
p                 557 drivers/media/dvb-frontends/mt312.c 	if ((p->symbol_rate < fe->ops.info.symbol_rate_min)
p                 558 drivers/media/dvb-frontends/mt312.c 	    || (p->symbol_rate > fe->ops.info.symbol_rate_max))
p                 561 drivers/media/dvb-frontends/mt312.c 	if (((int)p->fec_inner < FEC_NONE)
p                 562 drivers/media/dvb-frontends/mt312.c 	    || (p->fec_inner > FEC_AUTO))
p                 565 drivers/media/dvb-frontends/mt312.c 	if ((p->fec_inner == FEC_4_5)
p                 566 drivers/media/dvb-frontends/mt312.c 	    || (p->fec_inner == FEC_8_9))
p                 578 drivers/media/dvb-frontends/mt312.c 		if (p->symbol_rate >= 30000000) {
p                 613 drivers/media/dvb-frontends/mt312.c 	sr = mt312_div(p->symbol_rate * 4, 15625);
p                 620 drivers/media/dvb-frontends/mt312.c 	buf[2] = inv_tab[p->inversion] | fec_tab[p->fec_inner];
p                 625 drivers/media/dvb-frontends/mt312.c 	if (p->symbol_rate < 10000000)
p                 643 drivers/media/dvb-frontends/mt312.c 			      struct dtv_frontend_properties *p)
p                 648 drivers/media/dvb-frontends/mt312.c 	ret = mt312_get_inversion(state, &p->inversion);
p                 652 drivers/media/dvb-frontends/mt312.c 	ret = mt312_get_symbol_rate(state, &p->symbol_rate);
p                 656 drivers/media/dvb-frontends/mt312.c 	ret = mt312_get_code_rate(state, &p->fec_inner);
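
The mt312.c entries perform up-front validation of the property cache against the advertised frontend limits (frequency, inversion, symbol rate, FEC) and reject the FEC codes the demod cannot handle, before any register is written. A condensed sketch of that checking; the limit numbers and error value are examples, not the chip's real capabilities:

	#include <stdio.h>

	struct fe_info { unsigned int freq_min_khz, freq_max_khz, sr_min, sr_max; };
	struct props   { unsigned int frequency; unsigned int symbol_rate; int fec; };

	enum { FEC_NONE, FEC_1_2, FEC_2_3, FEC_3_4, FEC_4_5, FEC_5_6,
	       FEC_6_7, FEC_7_8, FEC_8_9, FEC_AUTO };

	static int validate(const struct fe_info *info, const struct props *p)
	{
		if (p->frequency < info->freq_min_khz || p->frequency > info->freq_max_khz)
			return -22;                          /* EINVAL-style rejection */
		if (p->symbol_rate < info->sr_min || p->symbol_rate > info->sr_max)
			return -22;
		if (p->fec < FEC_NONE || p->fec > FEC_AUTO)
			return -22;
		if (p->fec == FEC_4_5 || p->fec == FEC_8_9)  /* codes the demod lacks */
			return -22;
		return 0;
	}

	int main(void)
	{
		struct fe_info info = { 950000, 2150000, 1000000, 45000000 };
		struct props p = { 1550000, 27500000, FEC_8_9 };

		printf("validate() = %d\n", validate(&info, &p));
		return 0;
	}
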
p                 345 drivers/media/dvb-frontends/mxl5xx.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 348 drivers/media/dvb-frontends/mxl5xx.c 	p->strength.len = 1;
p                 349 drivers/media/dvb-frontends/mxl5xx.c 	p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 350 drivers/media/dvb-frontends/mxl5xx.c 	p->cnr.len = 1;
p                 351 drivers/media/dvb-frontends/mxl5xx.c 	p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 352 drivers/media/dvb-frontends/mxl5xx.c 	p->pre_bit_error.len = 1;
p                 353 drivers/media/dvb-frontends/mxl5xx.c 	p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 354 drivers/media/dvb-frontends/mxl5xx.c 	p->pre_bit_count.len = 1;
p                 355 drivers/media/dvb-frontends/mxl5xx.c 	p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 356 drivers/media/dvb-frontends/mxl5xx.c 	p->post_bit_error.len = 1;
p                 357 drivers/media/dvb-frontends/mxl5xx.c 	p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 358 drivers/media/dvb-frontends/mxl5xx.c 	p->post_bit_count.len = 1;
p                 359 drivers/media/dvb-frontends/mxl5xx.c 	p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 439 drivers/media/dvb-frontends/mxl5xx.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 446 drivers/media/dvb-frontends/mxl5xx.c 	if (p->frequency < 950000 || p->frequency > 2150000)
p                 448 drivers/media/dvb-frontends/mxl5xx.c 	if (p->symbol_rate < 1000000 || p->symbol_rate > 45000000)
p                 453 drivers/media/dvb-frontends/mxl5xx.c 	switch (p->delivery_system) {
p                 459 drivers/media/dvb-frontends/mxl5xx.c 		srange = p->symbol_rate / 1000000;
p                 472 drivers/media/dvb-frontends/mxl5xx.c 		cfg_scrambler(state, p->scrambling_sequence_index);
p                 479 drivers/media/dvb-frontends/mxl5xx.c 	demod_chan_cfg.frequency_in_hz = p->frequency * 1000;
p                 480 drivers/media/dvb-frontends/mxl5xx.c 	demod_chan_cfg.symbol_rate_in_hz = p->symbol_rate;
p                 505 drivers/media/dvb-frontends/mxl5xx.c 	struct mxl *p;
p                 511 drivers/media/dvb-frontends/mxl5xx.c 		list_for_each_entry(p, &state->base->mxls, mxl) {
p                 512 drivers/media/dvb-frontends/mxl5xx.c 			if (p->tuner_in_use == state->tuner)
p                 515 drivers/media/dvb-frontends/mxl5xx.c 		if (&p->mxl == &state->base->mxls)
p                 527 drivers/media/dvb-frontends/mxl5xx.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 537 drivers/media/dvb-frontends/mxl5xx.c 	p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
p                 538 drivers/media/dvb-frontends/mxl5xx.c 	p->cnr.stat[0].svalue = (s16)reg_data * 10;
p                 546 drivers/media/dvb-frontends/mxl5xx.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 558 drivers/media/dvb-frontends/mxl5xx.c 	switch (p->delivery_system) {
p                 561 drivers/media/dvb-frontends/mxl5xx.c 		p->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
p                 562 drivers/media/dvb-frontends/mxl5xx.c 		p->pre_bit_error.stat[0].uvalue = reg[2];
p                 563 drivers/media/dvb-frontends/mxl5xx.c 		p->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
p                 564 drivers/media/dvb-frontends/mxl5xx.c 		p->pre_bit_count.stat[0].uvalue = reg[3];
p                 576 drivers/media/dvb-frontends/mxl5xx.c 	switch (p->delivery_system) {
p                 579 drivers/media/dvb-frontends/mxl5xx.c 		p->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
p                 580 drivers/media/dvb-frontends/mxl5xx.c 		p->post_bit_error.stat[0].uvalue = reg[5];
p                 581 drivers/media/dvb-frontends/mxl5xx.c 		p->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
p                 582 drivers/media/dvb-frontends/mxl5xx.c 		p->post_bit_count.stat[0].uvalue = reg[6];
p                 585 drivers/media/dvb-frontends/mxl5xx.c 		p->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
p                 586 drivers/media/dvb-frontends/mxl5xx.c 		p->post_bit_error.stat[0].uvalue = reg[1];
p                 587 drivers/media/dvb-frontends/mxl5xx.c 		p->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
p                 588 drivers/media/dvb-frontends/mxl5xx.c 		p->post_bit_count.stat[0].uvalue = reg[2];
p                 602 drivers/media/dvb-frontends/mxl5xx.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 614 drivers/media/dvb-frontends/mxl5xx.c 	p->strength.stat[0].scale = FE_SCALE_DECIBEL;
p                 615 drivers/media/dvb-frontends/mxl5xx.c 	p->strength.stat[0].svalue = (s16) reg_data * 10; /* fix scale */
p                 623 drivers/media/dvb-frontends/mxl5xx.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 644 drivers/media/dvb-frontends/mxl5xx.c 		p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 649 drivers/media/dvb-frontends/mxl5xx.c 		p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 650 drivers/media/dvb-frontends/mxl5xx.c 		p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 651 drivers/media/dvb-frontends/mxl5xx.c 		p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 652 drivers/media/dvb-frontends/mxl5xx.c 		p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 690 drivers/media/dvb-frontends/mxl5xx.c 			struct dtv_frontend_properties *p)
p                 715 drivers/media/dvb-frontends/mxl5xx.c 	p->symbol_rate = reg_data[DMD_SYMBOL_RATE_ADDR];
p                 716 drivers/media/dvb-frontends/mxl5xx.c 	p->frequency = freq;
p                 726 drivers/media/dvb-frontends/mxl5xx.c 	p->fec_inner = conv_fec(reg_data[DMD_FEC_CODE_RATE_ADDR]);
p                 727 drivers/media/dvb-frontends/mxl5xx.c 	switch (p->delivery_system) {
p                 734 drivers/media/dvb-frontends/mxl5xx.c 			p->pilot = PILOT_OFF;
p                 737 drivers/media/dvb-frontends/mxl5xx.c 			p->pilot = PILOT_ON;
p                 747 drivers/media/dvb-frontends/mxl5xx.c 			p->modulation = QPSK;
p                 750 drivers/media/dvb-frontends/mxl5xx.c 			p->modulation = PSK_8;
p                 758 drivers/media/dvb-frontends/mxl5xx.c 			p->rolloff = ROLLOFF_20;
p                 761 drivers/media/dvb-frontends/mxl5xx.c 			p->rolloff = ROLLOFF_35;
p                 764 drivers/media/dvb-frontends/mxl5xx.c 			p->rolloff = ROLLOFF_25;
p                 809 drivers/media/dvb-frontends/mxl5xx.c 	struct mxl_base *p;
p                 811 drivers/media/dvb-frontends/mxl5xx.c 	list_for_each_entry(p, &mxllist, mxllist)
p                 812 drivers/media/dvb-frontends/mxl5xx.c 		if (p->i2c == i2c && p->adr == adr)
p                 813 drivers/media/dvb-frontends/mxl5xx.c 			return p;
p                 527 drivers/media/dvb-frontends/nxt200x.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 543 drivers/media/dvb-frontends/nxt200x.c 	switch (p->modulation) {
p                 573 drivers/media/dvb-frontends/nxt200x.c 	switch (p->modulation) {
p                 617 drivers/media/dvb-frontends/nxt200x.c 	switch (p->modulation) {
p                 711 drivers/media/dvb-frontends/nxt200x.c 	switch (p->modulation) {
p                 469 drivers/media/dvb-frontends/nxt6000.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 478 drivers/media/dvb-frontends/nxt6000.c 	result = nxt6000_set_bandwidth(state, p->bandwidth_hz);
p                 482 drivers/media/dvb-frontends/nxt6000.c 	result = nxt6000_set_guard_interval(state, p->guard_interval);
p                 486 drivers/media/dvb-frontends/nxt6000.c 	result = nxt6000_set_transmission_mode(state, p->transmission_mode);
p                 490 drivers/media/dvb-frontends/nxt6000.c 	result = nxt6000_set_inversion(state, p->inversion);
p                 296 drivers/media/dvb-frontends/or51132.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 305 drivers/media/dvb-frontends/or51132.c 	    modulation_fw_class(p->modulation)) {
p                 306 drivers/media/dvb-frontends/or51132.c 		switch (modulation_fw_class(p->modulation)) {
p                 323 drivers/media/dvb-frontends/or51132.c 			       p->modulation);
p                 343 drivers/media/dvb-frontends/or51132.c 	if (state->current_modulation != p->modulation) {
p                 344 drivers/media/dvb-frontends/or51132.c 		state->current_modulation = p->modulation;
p                 357 drivers/media/dvb-frontends/or51132.c 	state->current_frequency = p->frequency;
p                 362 drivers/media/dvb-frontends/or51132.c 				  struct dtv_frontend_properties *p)
p                 376 drivers/media/dvb-frontends/or51132.c 		p->modulation = VSB_8;
p                 379 drivers/media/dvb-frontends/or51132.c 		p->modulation = QAM_64;
p                 382 drivers/media/dvb-frontends/or51132.c 		p->modulation = QAM_256;
p                 393 drivers/media/dvb-frontends/or51132.c 	p->frequency = state->current_frequency;
p                 396 drivers/media/dvb-frontends/or51132.c 	p->inversion = INVERSION_AUTO;
p                 207 drivers/media/dvb-frontends/or51211.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 211 drivers/media/dvb-frontends/or51211.c 	if (state->current_frequency != p->frequency) {
p                 221 drivers/media/dvb-frontends/or51211.c 		state->current_frequency = p->frequency;
p                 624 drivers/media/dvb-frontends/s5h1409.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 627 drivers/media/dvb-frontends/s5h1409.c 	dprintk("%s(frequency=%d)\n", __func__, p->frequency);
p                 631 drivers/media/dvb-frontends/s5h1409.c 	state->current_frequency = p->frequency;
p                 633 drivers/media/dvb-frontends/s5h1409.c 	s5h1409_enable_modulation(fe, p->modulation);
p                 917 drivers/media/dvb-frontends/s5h1409.c 				struct dtv_frontend_properties *p)
p                 921 drivers/media/dvb-frontends/s5h1409.c 	p->frequency = state->current_frequency;
p                 922 drivers/media/dvb-frontends/s5h1409.c 	p->modulation = state->current_modulation;
p                 578 drivers/media/dvb-frontends/s5h1411.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 581 drivers/media/dvb-frontends/s5h1411.c 	dprintk("%s(frequency=%d)\n", __func__, p->frequency);
p                 585 drivers/media/dvb-frontends/s5h1411.c 	state->current_frequency = p->frequency;
p                 587 drivers/media/dvb-frontends/s5h1411.c 	s5h1411_enable_modulation(fe, p->modulation);
p                 832 drivers/media/dvb-frontends/s5h1411.c 				struct dtv_frontend_properties *p)
p                 836 drivers/media/dvb-frontends/s5h1411.c 	p->frequency = state->current_frequency;
p                 837 drivers/media/dvb-frontends/s5h1411.c 	p->modulation = state->current_modulation;
p                 465 drivers/media/dvb-frontends/s5h1420.c 				  struct dtv_frontend_properties *p)
p                 472 drivers/media/dvb-frontends/s5h1420.c 	val = ((u64) p->symbol_rate / 1000ULL) * (1ULL<<24);
p                 473 drivers/media/dvb-frontends/s5h1420.c 	if (p->symbol_rate < 29000000)
p                 536 drivers/media/dvb-frontends/s5h1420.c 				     struct dtv_frontend_properties *p)
p                 543 drivers/media/dvb-frontends/s5h1420.c 	if (p->inversion == INVERSION_OFF)
p                 545 drivers/media/dvb-frontends/s5h1420.c 	else if (p->inversion == INVERSION_ON)
p                 548 drivers/media/dvb-frontends/s5h1420.c 	if ((p->fec_inner == FEC_AUTO) || (p->inversion == INVERSION_AUTO)) {
p                 552 drivers/media/dvb-frontends/s5h1420.c 		switch (p->fec_inner) {
p                 630 drivers/media/dvb-frontends/s5h1420.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 639 drivers/media/dvb-frontends/s5h1420.c 	frequency_delta = p->frequency - state->tunedfreq;
p                 643 drivers/media/dvb-frontends/s5h1420.c 			(state->fec_inner == p->fec_inner) &&
p                 644 drivers/media/dvb-frontends/s5h1420.c 			(state->symbol_rate == p->symbol_rate)) {
p                 654 drivers/media/dvb-frontends/s5h1420.c 			s5h1420_setfreqoffset(state, p->frequency - tmp);
p                 667 drivers/media/dvb-frontends/s5h1420.c 	if (p->symbol_rate > 33000000)
p                 669 drivers/media/dvb-frontends/s5h1420.c 	else if (p->symbol_rate > 28500000)
p                 671 drivers/media/dvb-frontends/s5h1420.c 	else if (p->symbol_rate > 25000000)
p                 673 drivers/media/dvb-frontends/s5h1420.c 	else if (p->symbol_rate > 1900000)
p                 684 drivers/media/dvb-frontends/s5h1420.c 	if (p->symbol_rate > 29000000)
p                 697 drivers/media/dvb-frontends/s5h1420.c 	if (p->symbol_rate > 20000000)
p                 703 drivers/media/dvb-frontends/s5h1420.c 	if (p->symbol_rate >= 8000000)
p                 705 drivers/media/dvb-frontends/s5h1420.c 	else if (p->symbol_rate >= 4000000)
p                 730 drivers/media/dvb-frontends/s5h1420.c 	s5h1420_setsymbolrate(state, p);
p                 731 drivers/media/dvb-frontends/s5h1420.c 	s5h1420_setfec_inversion(state, p);
p                 736 drivers/media/dvb-frontends/s5h1420.c 	state->fec_inner = p->fec_inner;
p                 737 drivers/media/dvb-frontends/s5h1420.c 	state->symbol_rate = p->symbol_rate;
p                 739 drivers/media/dvb-frontends/s5h1420.c 	state->tunedfreq = p->frequency;
p                 746 drivers/media/dvb-frontends/s5h1420.c 				struct dtv_frontend_properties *p)
p                 750 drivers/media/dvb-frontends/s5h1420.c 	p->frequency = state->tunedfreq + s5h1420_getfreqoffset(state);
p                 751 drivers/media/dvb-frontends/s5h1420.c 	p->inversion = s5h1420_getinversion(state);
p                 752 drivers/media/dvb-frontends/s5h1420.c 	p->symbol_rate = s5h1420_getsymbolrate(state);
p                 753 drivers/media/dvb-frontends/s5h1420.c 	p->fec_inner = s5h1420_getfec(state);
p                 761 drivers/media/dvb-frontends/s5h1420.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 762 drivers/media/dvb-frontends/s5h1420.c 	if (p->symbol_rate > 20000000) {
p                 766 drivers/media/dvb-frontends/s5h1420.c 	} else if (p->symbol_rate > 12000000) {
p                 770 drivers/media/dvb-frontends/s5h1420.c 	} else if (p->symbol_rate > 8000000) {
p                 774 drivers/media/dvb-frontends/s5h1420.c 	} else if (p->symbol_rate > 4000000) {
p                 778 drivers/media/dvb-frontends/s5h1420.c 	} else if (p->symbol_rate > 2000000) {
p                 780 drivers/media/dvb-frontends/s5h1420.c 		fesettings->step_size = (p->symbol_rate / 8000);
p                 784 drivers/media/dvb-frontends/s5h1420.c 		fesettings->step_size = (p->symbol_rate / 8000);
p                 170 drivers/media/dvb-frontends/s5h1432.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 174 drivers/media/dvb-frontends/s5h1432.c 	if (p->frequency == state->current_frequency) {
p                 181 drivers/media/dvb-frontends/s5h1432.c 		switch (p->bandwidth_hz) {
p                 205 drivers/media/dvb-frontends/s5h1432.c 		switch (p->bandwidth_hz) {
p                 230 drivers/media/dvb-frontends/s5h1432.c 	state->current_frequency = p->frequency;
p                 259 drivers/media/dvb-frontends/s921.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 266 drivers/media/dvb-frontends/s921.c 	dprintk("frequency=%i\n", p->frequency);
p                 269 drivers/media/dvb-frontends/s921.c 		if (p->frequency < s921_bandselect[band].freq_low)
p                 280 drivers/media/dvb-frontends/s921.c 	offset = ((u64)p->frequency) * 258;
p                 411 drivers/media/dvb-frontends/s921.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 423 drivers/media/dvb-frontends/s921.c 	state->currentfreq = p->frequency;
p                 429 drivers/media/dvb-frontends/s921.c 			     struct dtv_frontend_properties *p)
p                 434 drivers/media/dvb-frontends/s921.c 	p->frequency = state->currentfreq;
p                 435 drivers/media/dvb-frontends/s921.c 	p->delivery_system = SYS_ISDBT;
p                 901 drivers/media/dvb-frontends/si2165.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 905 drivers/media/dvb-frontends/si2165.c 	u32 bw_hz = p->bandwidth_hz;
p                 970 drivers/media/dvb-frontends/si2165.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 971 drivers/media/dvb-frontends/si2165.c 	const u32 dvb_rate = p->symbol_rate;
p                 989 drivers/media/dvb-frontends/si2165.c 	switch (p->modulation) {
p                1036 drivers/media/dvb-frontends/si2165.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1037 drivers/media/dvb-frontends/si2165.c 	u32 delsys = p->delivery_system;
p                 158 drivers/media/dvb-frontends/sp8870.c static int configure_reg0xc05 (struct dtv_frontend_properties *p, u16 *reg0xc05)
p                 164 drivers/media/dvb-frontends/sp8870.c 	switch (p->modulation) {
p                 180 drivers/media/dvb-frontends/sp8870.c 	switch (p->hierarchy) {
p                 199 drivers/media/dvb-frontends/sp8870.c 	switch (p->code_rate_HP) {
p                 237 drivers/media/dvb-frontends/sp8870.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 242 drivers/media/dvb-frontends/sp8870.c 	if ((err = configure_reg0xc05(p, &reg0xc05)))
p                 267 drivers/media/dvb-frontends/sp8870.c 	if (p->bandwidth_hz == 6000000)
p                 269 drivers/media/dvb-frontends/sp8870.c 	else if (p->bandwidth_hz == 7000000)
p                 275 drivers/media/dvb-frontends/sp8870.c 	if (p->transmission_mode == TRANSMISSION_MODE_2K)
p                 453 drivers/media/dvb-frontends/sp8870.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 468 drivers/media/dvb-frontends/sp8870.c 	dprintk("%s: frequency = %i\n", __func__, p->frequency);
p                 212 drivers/media/dvb-frontends/sp887x.c static int configure_reg0xc05(struct dtv_frontend_properties *p, u16 *reg0xc05)
p                 218 drivers/media/dvb-frontends/sp887x.c 	switch (p->modulation) {
p                 234 drivers/media/dvb-frontends/sp887x.c 	switch (p->hierarchy) {
p                 253 drivers/media/dvb-frontends/sp887x.c 	switch (p->code_rate_HP) {
p                 306 drivers/media/dvb-frontends/sp887x.c 				    struct dtv_frontend_properties *p,
p                 311 drivers/media/dvb-frontends/sp887x.c 	int freq_offset = actual_freq - p->frequency;
p                 317 drivers/media/dvb-frontends/sp887x.c 	switch (p->bandwidth_hz) {
p                 330 drivers/media/dvb-frontends/sp887x.c 	if (p->inversion == INVERSION_ON)
p                 337 drivers/media/dvb-frontends/sp887x.c 	if (p->inversion == INVERSION_ON)
p                 351 drivers/media/dvb-frontends/sp887x.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 357 drivers/media/dvb-frontends/sp887x.c 	if (p->bandwidth_hz != 8000000 &&
p                 358 drivers/media/dvb-frontends/sp887x.c 	    p->bandwidth_hz != 7000000 &&
p                 359 drivers/media/dvb-frontends/sp887x.c 	    p->bandwidth_hz != 6000000)
p                 362 drivers/media/dvb-frontends/sp887x.c 	if ((err = configure_reg0xc05(p, &reg0xc05)))
p                 376 drivers/media/dvb-frontends/sp887x.c 		actual_freq = p->frequency;
p                 382 drivers/media/dvb-frontends/sp887x.c 	sp887x_correct_offsets(state, p, actual_freq);
p                 385 drivers/media/dvb-frontends/sp887x.c 	if (p->bandwidth_hz == 6000000)
p                 387 drivers/media/dvb-frontends/sp887x.c 	else if (p->bandwidth_hz == 7000000)
p                 395 drivers/media/dvb-frontends/sp887x.c 	if (p->transmission_mode == TRANSMISSION_MODE_2K)
p                 402 drivers/media/dvb-frontends/sp887x.c 	if (p->bandwidth_hz == 6000000)
p                 404 drivers/media/dvb-frontends/sp887x.c 	else if (p->bandwidth_hz == 7000000)
p                1554 drivers/media/dvb-frontends/stb0899_drv.c 				struct dtv_frontend_properties *p)
p                1560 drivers/media/dvb-frontends/stb0899_drv.c 	p->symbol_rate = internal->srate;
p                1561 drivers/media/dvb-frontends/stb0899_drv.c 	p->frequency = internal->freq;
p                  66 drivers/media/dvb-frontends/stb6000.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                  82 drivers/media/dvb-frontends/stb6000.c 	freq_mhz = p->frequency / 1000;
p                  83 drivers/media/dvb-frontends/stb6000.c 	bandwidth = p->symbol_rate / 1000000;
p                 329 drivers/media/dvb-frontends/stb6100.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 339 drivers/media/dvb-frontends/stb6100.c 		fe->ops.get_frontend(fe, p);
p                 341 drivers/media/dvb-frontends/stb6100.c 	srate = p->symbol_rate;
p                 400 drivers/media/dvb-frontends/stv0297.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 411 drivers/media/dvb-frontends/stv0297.c 	switch (p->modulation) {
p                 430 drivers/media/dvb-frontends/stv0297.c 	inversion = p->inversion;
p                 513 drivers/media/dvb-frontends/stv0297.c 	stv0297_set_qam(state, p->modulation);
p                 514 drivers/media/dvb-frontends/stv0297.c 	stv0297_set_symbolrate(state, p->symbol_rate / 1000);
p                 515 drivers/media/dvb-frontends/stv0297.c 	stv0297_set_sweeprate(state, sweeprate, p->symbol_rate / 1000);
p                 521 drivers/media/dvb-frontends/stv0297.c 	if (p->modulation == QAM_128 ||
p                 522 drivers/media/dvb-frontends/stv0297.c 		p->modulation == QAM_256)
p                 598 drivers/media/dvb-frontends/stv0297.c 	state->base_freq = p->frequency;
p                 607 drivers/media/dvb-frontends/stv0297.c 				struct dtv_frontend_properties *p)
p                 615 drivers/media/dvb-frontends/stv0297.c 	p->frequency = state->base_freq;
p                 616 drivers/media/dvb-frontends/stv0297.c 	p->inversion = (reg_83 & 0x08) ? INVERSION_ON : INVERSION_OFF;
p                 618 drivers/media/dvb-frontends/stv0297.c 		p->inversion = (p->inversion == INVERSION_ON) ? INVERSION_OFF : INVERSION_ON;
p                 619 drivers/media/dvb-frontends/stv0297.c 	p->symbol_rate = stv0297_get_symbolrate(state) * 1000;
p                 620 drivers/media/dvb-frontends/stv0297.c 	p->fec_inner = FEC_NONE;
p                 624 drivers/media/dvb-frontends/stv0297.c 		p->modulation = QAM_16;
p                 627 drivers/media/dvb-frontends/stv0297.c 		p->modulation = QAM_32;
p                 630 drivers/media/dvb-frontends/stv0297.c 		p->modulation = QAM_128;
p                 633 drivers/media/dvb-frontends/stv0297.c 		p->modulation = QAM_256;
p                 636 drivers/media/dvb-frontends/stv0297.c 		p->modulation = QAM_64;
p                 558 drivers/media/dvb-frontends/stv0299.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 567 drivers/media/dvb-frontends/stv0299.c 	if (p->inversion == INVERSION_OFF) invval = 0;
p                 568 drivers/media/dvb-frontends/stv0299.c 	else if (p->inversion == INVERSION_ON) invval = 1;
p                 581 drivers/media/dvb-frontends/stv0299.c 	stv0299_set_FEC(state, p->fec_inner);
p                 582 drivers/media/dvb-frontends/stv0299.c 	stv0299_set_symbolrate(fe, p->symbol_rate);
p                 586 drivers/media/dvb-frontends/stv0299.c 	state->tuner_frequency = p->frequency;
p                 587 drivers/media/dvb-frontends/stv0299.c 	state->fec_inner = p->fec_inner;
p                 588 drivers/media/dvb-frontends/stv0299.c 	state->symbol_rate = p->symbol_rate;
p                 594 drivers/media/dvb-frontends/stv0299.c 				struct dtv_frontend_properties *p)
p                 607 drivers/media/dvb-frontends/stv0299.c 	p->frequency += derot_freq;
p                 611 drivers/media/dvb-frontends/stv0299.c 	p->inversion = invval ? INVERSION_ON : INVERSION_OFF;
p                 613 drivers/media/dvb-frontends/stv0299.c 	p->fec_inner = stv0299_get_fec(state);
p                 614 drivers/media/dvb-frontends/stv0299.c 	p->symbol_rate = stv0299_get_symbolrate(state);
p                 645 drivers/media/dvb-frontends/stv0299.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 648 drivers/media/dvb-frontends/stv0299.c 	if (p->symbol_rate < 10000000) {
p                 649 drivers/media/dvb-frontends/stv0299.c 		fesettings->step_size = p->symbol_rate / 32000;
p                 652 drivers/media/dvb-frontends/stv0299.c 		fesettings->step_size = p->symbol_rate / 16000;
p                 653 drivers/media/dvb-frontends/stv0299.c 		fesettings->max_drift = p->symbol_rate / 2000;
p                 455 drivers/media/dvb-frontends/stv0367.c 	u32 m, n, p;
p                 468 drivers/media/dvb-frontends/stv0367.c 		p = (u32)stv0367_readbits(state, F367TER_PLL_PDIV);
p                 469 drivers/media/dvb-frontends/stv0367.c 		if (p > 5)
p                 470 drivers/media/dvb-frontends/stv0367.c 			p = 5;
p                 472 drivers/media/dvb-frontends/stv0367.c 		mclk_Hz = ((ExtClk_Hz / 2) * n) / (m * (1 << p));
p                 475 drivers/media/dvb-frontends/stv0367.c 				n, m, p, mclk_Hz, ExtClk_Hz);
p                 997 drivers/media/dvb-frontends/stv0367.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1011 drivers/media/dvb-frontends/stv0367.c 	ter_state->frequency = p->frequency;
p                1040 drivers/media/dvb-frontends/stv0367.c 	switch (p->inversion) {
p                1056 drivers/media/dvb-frontends/stv0367.c 						p->inversion);
p                1059 drivers/media/dvb-frontends/stv0367.c 						p->inversion);
p                1224 drivers/media/dvb-frontends/stv0367.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1243 drivers/media/dvb-frontends/stv0367.c 	switch (p->transmission_mode) {
p                1257 drivers/media/dvb-frontends/stv0367.c 	switch (p->guard_interval) {
p                1263 drivers/media/dvb-frontends/stv0367.c 		ter_state->guard = p->guard_interval;
p                1270 drivers/media/dvb-frontends/stv0367.c 	switch (p->bandwidth_hz) {
p                1284 drivers/media/dvb-frontends/stv0367.c 	switch (p->inversion) {
p                1301 drivers/media/dvb-frontends/stv0367.c 			if (p->inversion == INVERSION_AUTO)
p                1308 drivers/media/dvb-frontends/stv0367.c 				(p->inversion == INVERSION_AUTO) &&
p                1344 drivers/media/dvb-frontends/stv0367.c 				   struct dtv_frontend_properties *p)
p                1351 drivers/media/dvb-frontends/stv0367.c 	p->frequency = stv0367_get_tuner_freq(fe);
p                1352 drivers/media/dvb-frontends/stv0367.c 	if ((int)p->frequency < 0)
p                1353 drivers/media/dvb-frontends/stv0367.c 		p->frequency = -p->frequency;
p                1357 drivers/media/dvb-frontends/stv0367.c 		p->modulation = QPSK;
p                1359 drivers/media/dvb-frontends/stv0367.c 		p->modulation = QAM_16;
p                1361 drivers/media/dvb-frontends/stv0367.c 		p->modulation = QAM_64;
p                1363 drivers/media/dvb-frontends/stv0367.c 	p->inversion = stv0367_readbits(state, F367TER_INV_SPECTR);
p                1370 drivers/media/dvb-frontends/stv0367.c 		p->hierarchy = HIERARCHY_NONE;
p                1373 drivers/media/dvb-frontends/stv0367.c 		p->hierarchy = HIERARCHY_1;
p                1376 drivers/media/dvb-frontends/stv0367.c 		p->hierarchy = HIERARCHY_2;
p                1379 drivers/media/dvb-frontends/stv0367.c 		p->hierarchy = HIERARCHY_4;
p                1382 drivers/media/dvb-frontends/stv0367.c 		p->hierarchy = HIERARCHY_AUTO;
p                1394 drivers/media/dvb-frontends/stv0367.c 		p->code_rate_HP = FEC_1_2;
p                1397 drivers/media/dvb-frontends/stv0367.c 		p->code_rate_HP = FEC_2_3;
p                1400 drivers/media/dvb-frontends/stv0367.c 		p->code_rate_HP = FEC_3_4;
p                1403 drivers/media/dvb-frontends/stv0367.c 		p->code_rate_HP = FEC_5_6;
p                1406 drivers/media/dvb-frontends/stv0367.c 		p->code_rate_HP = FEC_7_8;
p                1409 drivers/media/dvb-frontends/stv0367.c 		p->code_rate_HP = FEC_AUTO;
p                1417 drivers/media/dvb-frontends/stv0367.c 		p->transmission_mode = TRANSMISSION_MODE_2K;
p                1423 drivers/media/dvb-frontends/stv0367.c 		p->transmission_mode = TRANSMISSION_MODE_8K;
p                1426 drivers/media/dvb-frontends/stv0367.c 		p->transmission_mode = TRANSMISSION_MODE_AUTO;
p                1429 drivers/media/dvb-frontends/stv0367.c 	p->guard_interval = stv0367_readbits(state, F367TER_SYR_GUARD);
p                2326 drivers/media/dvb-frontends/stv0367.c 					     struct dtv_frontend_properties *p)
p                2344 drivers/media/dvb-frontends/stv0367.c 	TRLTimeOut = 100000000 / p->symbol_rate;
p                2354 drivers/media/dvb-frontends/stv0367.c 	switch (p->modulation) {
p                2388 drivers/media/dvb-frontends/stv0367.c 					(p->symbol_rate / 1000);
p                2390 drivers/media/dvb-frontends/stv0367.c 	CRLTimeOut = (1000 * CRLTimeOut) / p->symbol_rate;
p                2420 drivers/media/dvb-frontends/stv0367.c 	if ((p->symbol_rate > 10800000) | (p->symbol_rate < 1800000)) {
p                2542 drivers/media/dvb-frontends/stv0367.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                2548 drivers/media/dvb-frontends/stv0367.c 					p->frequency, p->symbol_rate);
p                2552 drivers/media/dvb-frontends/stv0367.c 	switch (p->modulation) {
p                2586 drivers/media/dvb-frontends/stv0367.c 			p->symbol_rate,
p                2592 drivers/media/dvb-frontends/stv0367.c 			p->symbol_rate,
p                2595 drivers/media/dvb-frontends/stv0367.c 	cab_state->state = stv0367cab_algo(state, p);
p                2600 drivers/media/dvb-frontends/stv0367.c 				   struct dtv_frontend_properties *p)
p                2611 drivers/media/dvb-frontends/stv0367.c 	p->symbol_rate = stv0367cab_GetSymbolRate(state, cab_state->mclk);
p                2616 drivers/media/dvb-frontends/stv0367.c 		p->modulation = QAM_16;
p                2619 drivers/media/dvb-frontends/stv0367.c 		p->modulation = QAM_32;
p                2622 drivers/media/dvb-frontends/stv0367.c 		p->modulation = QAM_64;
p                2625 drivers/media/dvb-frontends/stv0367.c 		p->modulation = QAM_128;
p                2628 drivers/media/dvb-frontends/stv0367.c 		p->modulation = QAM_256;
p                2634 drivers/media/dvb-frontends/stv0367.c 	p->frequency = stv0367_get_tuner_freq(fe);
p                2636 drivers/media/dvb-frontends/stv0367.c 	dprintk("%s: tuner frequency = %d\n", __func__, p->frequency);
p                2639 drivers/media/dvb-frontends/stv0367.c 		p->frequency +=
p                2646 drivers/media/dvb-frontends/stv0367.c 		p->frequency += (ifkhz
p                2650 drivers/media/dvb-frontends/stv0367.c 		p->frequency += (ifkhz
p                3028 drivers/media/dvb-frontends/stv0367.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                3036 drivers/media/dvb-frontends/stv0367.c 		p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3040 drivers/media/dvb-frontends/stv0367.c 	p->strength.stat[0].scale = FE_SCALE_DECIBEL;
p                3041 drivers/media/dvb-frontends/stv0367.c 	p->strength.stat[0].uvalue = signalstrength;
p                3047 drivers/media/dvb-frontends/stv0367.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                3069 drivers/media/dvb-frontends/stv0367.c 		p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3073 drivers/media/dvb-frontends/stv0367.c 	p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
p                3074 drivers/media/dvb-frontends/stv0367.c 	p->cnr.stat[0].uvalue = snrval;
p                3080 drivers/media/dvb-frontends/stv0367.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                3091 drivers/media/dvb-frontends/stv0367.c 		p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3095 drivers/media/dvb-frontends/stv0367.c 	p->block_error.stat[0].scale = FE_SCALE_COUNTER;
p                3096 drivers/media/dvb-frontends/stv0367.c 	p->block_error.stat[0].uvalue = ucblocks;
p                3103 drivers/media/dvb-frontends/stv0367.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                3127 drivers/media/dvb-frontends/stv0367.c 		p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3133 drivers/media/dvb-frontends/stv0367.c 		p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3139 drivers/media/dvb-frontends/stv0367.c 				   struct dtv_frontend_properties *p)
p                3145 drivers/media/dvb-frontends/stv0367.c 		return stv0367ter_get_frontend(fe, p);
p                3147 drivers/media/dvb-frontends/stv0367.c 		return stv0367cab_get_frontend(fe, p);
p                3176 drivers/media/dvb-frontends/stv0367.c 	struct dtv_frontend_properties *p = &state->fe.dtv_property_cache;
p                3251 drivers/media/dvb-frontends/stv0367.c 	p->strength.len = 1;
p                3252 drivers/media/dvb-frontends/stv0367.c 	p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3253 drivers/media/dvb-frontends/stv0367.c 	p->cnr.len = 1;
p                3254 drivers/media/dvb-frontends/stv0367.c 	p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                3255 drivers/media/dvb-frontends/stv0367.c 	p->block_error.len = 1;
p                3256 drivers/media/dvb-frontends/stv0367.c 	p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                1852 drivers/media/dvb-frontends/stv0900_core.c 				struct dtv_frontend_properties *p)
p                1859 drivers/media/dvb-frontends/stv0900_core.c 	p->frequency = p_result.locked ? p_result.frequency : 0;
p                1860 drivers/media/dvb-frontends/stv0900_core.c 	p->symbol_rate = p_result.locked ? p_result.symbol_rate : 0;
p                 917 drivers/media/dvb-frontends/stv0910.c 			     struct dtv_frontend_properties *p)
p                 919 drivers/media/dvb-frontends/stv0910.c 	set_isi(state, p->stream_id);
p                 920 drivers/media/dvb-frontends/stv0910.c 	set_pls(state, p->scrambling_sequence_index);
p                 924 drivers/media/dvb-frontends/stv0910.c 			     struct dtv_frontend_properties *p)
p                 937 drivers/media/dvb-frontends/stv0910.c 	set_stream_modes(state, p);
p                1019 drivers/media/dvb-frontends/stv0910.c static int start(struct stv *state, struct dtv_frontend_properties *p)
p                1025 drivers/media/dvb-frontends/stv0910.c 	if (p->symbol_rate < 100000 || p->symbol_rate > 70000000)
p                1035 drivers/media/dvb-frontends/stv0910.c 	init_search_param(state, p);
p                1037 drivers/media/dvb-frontends/stv0910.c 	if (p->symbol_rate <= 1000000) { /* SR <=1Msps */
p                1040 drivers/media/dvb-frontends/stv0910.c 	} else if (p->symbol_rate <= 2000000) { /* 1Msps < SR <=2Msps */
p                1043 drivers/media/dvb-frontends/stv0910.c 	} else if (p->symbol_rate <= 5000000) { /* 2Msps< SR <=5Msps */
p                1046 drivers/media/dvb-frontends/stv0910.c 	} else if (p->symbol_rate <= 10000000) { /* 5Msps< SR <=10Msps */
p                1049 drivers/media/dvb-frontends/stv0910.c 	} else if (p->symbol_rate < 20000000) { /* 10Msps< SR <=20Msps */
p                1058 drivers/media/dvb-frontends/stv0910.c 	symb = muldiv32(p->symbol_rate, 65536, state->base->mclk);
p                1106 drivers/media/dvb-frontends/stv0910.c 	if (p->symbol_rate <= 5000000)
p                1289 drivers/media/dvb-frontends/stv0910.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1294 drivers/media/dvb-frontends/stv0910.c 	state->symbol_rate = p->symbol_rate;
p                1295 drivers/media/dvb-frontends/stv0910.c 	stat = start(state, p);
p                1323 drivers/media/dvb-frontends/stv0910.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1327 drivers/media/dvb-frontends/stv0910.c 		p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
p                1328 drivers/media/dvb-frontends/stv0910.c 		p->cnr.stat[0].svalue = 100 * snrval; /* fix scale */
p                1330 drivers/media/dvb-frontends/stv0910.c 		p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                1339 drivers/media/dvb-frontends/stv0910.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1344 drivers/media/dvb-frontends/stv0910.c 	p->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
p                1345 drivers/media/dvb-frontends/stv0910.c 	p->pre_bit_error.stat[0].uvalue = n;
p                1346 drivers/media/dvb-frontends/stv0910.c 	p->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
p                1347 drivers/media/dvb-frontends/stv0910.c 	p->pre_bit_count.stat[0].uvalue = d;
p                1355 drivers/media/dvb-frontends/stv0910.c 	struct dtv_frontend_properties *p = &state->fe.dtv_property_cache;
p                1375 drivers/media/dvb-frontends/stv0910.c 	p->strength.stat[0].scale = FE_SCALE_DECIBEL;
p                1376 drivers/media/dvb-frontends/stv0910.c 	p->strength.stat[0].svalue = (padc - agc);
p                1382 drivers/media/dvb-frontends/stv0910.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1402 drivers/media/dvb-frontends/stv0910.c 		p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                1403 drivers/media/dvb-frontends/stv0910.c 		p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                1404 drivers/media/dvb-frontends/stv0910.c 		p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                1405 drivers/media/dvb-frontends/stv0910.c 		p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                1537 drivers/media/dvb-frontends/stv0910.c 		p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                1543 drivers/media/dvb-frontends/stv0910.c 		p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                1544 drivers/media/dvb-frontends/stv0910.c 		p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                1551 drivers/media/dvb-frontends/stv0910.c 			struct dtv_frontend_properties *p)
p                1581 drivers/media/dvb-frontends/stv0910.c 		p->pilot = (tmp & 0x01) ? PILOT_ON : PILOT_OFF;
p                1582 drivers/media/dvb-frontends/stv0910.c 		p->modulation = modcod2mod[mc];
p                1583 drivers/media/dvb-frontends/stv0910.c 		p->fec_inner = modcod2fec[mc];
p                1588 drivers/media/dvb-frontends/stv0910.c 			p->fec_inner = FEC_1_2;
p                1591 drivers/media/dvb-frontends/stv0910.c 			p->fec_inner = FEC_2_3;
p                1594 drivers/media/dvb-frontends/stv0910.c 			p->fec_inner = FEC_3_4;
p                1597 drivers/media/dvb-frontends/stv0910.c 			p->fec_inner = FEC_5_6;
p                1600 drivers/media/dvb-frontends/stv0910.c 			p->fec_inner = FEC_7_8;
p                1603 drivers/media/dvb-frontends/stv0910.c 			p->fec_inner = FEC_NONE;
p                1606 drivers/media/dvb-frontends/stv0910.c 		p->rolloff = ROLLOFF_35;
p                1611 drivers/media/dvb-frontends/stv0910.c 		p->symbol_rate = symbolrate;
p                1754 drivers/media/dvb-frontends/stv0910.c 	struct stv_base *p;
p                1756 drivers/media/dvb-frontends/stv0910.c 	list_for_each_entry(p, &stvlist, stvlist)
p                1757 drivers/media/dvb-frontends/stv0910.c 		if (p->i2c == i2c && p->adr == adr)
p                1758 drivers/media/dvb-frontends/stv0910.c 			return p;
p                1764 drivers/media/dvb-frontends/stv0910.c 	struct dtv_frontend_properties *p = &state->fe.dtv_property_cache;
p                1766 drivers/media/dvb-frontends/stv0910.c 	p->strength.len = 1;
p                1767 drivers/media/dvb-frontends/stv0910.c 	p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                1768 drivers/media/dvb-frontends/stv0910.c 	p->cnr.len = 1;
p                1769 drivers/media/dvb-frontends/stv0910.c 	p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                1770 drivers/media/dvb-frontends/stv0910.c 	p->pre_bit_error.len = 1;
p                1771 drivers/media/dvb-frontends/stv0910.c 	p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                1772 drivers/media/dvb-frontends/stv0910.c 	p->pre_bit_count.len = 1;
p                1773 drivers/media/dvb-frontends/stv0910.c 	p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p                 252 drivers/media/dvb-frontends/stv6110.c 	u32 divider, ref, p, presc, i, result_freq, vco_freq;
p                 268 drivers/media/dvb-frontends/stv6110.c 		p = 1;
p                 271 drivers/media/dvb-frontends/stv6110.c 		p = 1;
p                 274 drivers/media/dvb-frontends/stv6110.c 		p = 0;
p                 277 drivers/media/dvb-frontends/stv6110.c 		p = 0;
p                 282 drivers/media/dvb-frontends/stv6110.c 	priv->regs[RSTV6110_TUNING2] |= (p << 4);
p                 288 drivers/media/dvb-frontends/stv6110.c 	p_val = (int)(1 << (p + 1)) * 10;/* P = 2 or P = 4 */
p                 299 drivers/media/dvb-frontends/stv6110.c 	ref = priv->mclk / ((1 << (r_div_opt + 1))  * (1 << (p + 1)));
p                 453 drivers/media/dvb-frontends/stv6111.c 	u32 p = 1, psel = 0, fvco, div, frac;
p                 462 drivers/media/dvb-frontends/stv6111.c 		p =  4;
p                 465 drivers/media/dvb-frontends/stv6111.c 		p =  2;
p                 468 drivers/media/dvb-frontends/stv6111.c 	fvco = frequency * p;
p                 522 drivers/media/dvb-frontends/stv6111.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 526 drivers/media/dvb-frontends/stv6111.c 	if (p->delivery_system != SYS_DVBS && p->delivery_system != SYS_DVBS2)
p                 529 drivers/media/dvb-frontends/stv6111.c 	freq = p->frequency * 1000;
p                 530 drivers/media/dvb-frontends/stv6111.c 	cutoff = 5000000 + muldiv32(p->symbol_rate, 135, 200);
p                 258 drivers/media/dvb-frontends/tc90522.c 		u32 p, p4;
p                 269 drivers/media/dvb-frontends/tc90522.c 		p = int_sqrt(cndat << 16);
p                 271 drivers/media/dvb-frontends/tc90522.c 		cn = div64_s64(-16346LL * p4 * p, 10) >> 35;
p                 273 drivers/media/dvb-frontends/tc90522.c 		cn -= (50259LL * cndat * p) >> 23;
p                 275 drivers/media/dvb-frontends/tc90522.c 		cn -= (89565LL * p) >> 11;
p                 416 drivers/media/dvb-frontends/tc90522.c 		u32 p, tmp;
p                 425 drivers/media/dvb-frontends/tc90522.c 		p = intlog10(5505024) - intlog10(cndat);
p                 426 drivers/media/dvb-frontends/tc90522.c 		p *= 10;
p                 429 drivers/media/dvb-frontends/tc90522.c 		cn += div64_s64(43827LL * p, 10) >> 24;
p                 430 drivers/media/dvb-frontends/tc90522.c 		tmp = p >> 8;
p                 432 drivers/media/dvb-frontends/tc90522.c 		tmp = p >> 13;
p                 434 drivers/media/dvb-frontends/tc90522.c 		tmp = p >> 18;
p                 641 drivers/media/dvb-frontends/tc90522.c 	u8 *p, *bufend;
p                 655 drivers/media/dvb-frontends/tc90522.c 	p = wbuf;
p                 663 drivers/media/dvb-frontends/tc90522.c 			if (p + 2 > bufend)
p                 665 drivers/media/dvb-frontends/tc90522.c 			p[0] = TC90522_I2C_THRU_REG;
p                 666 drivers/media/dvb-frontends/tc90522.c 			p[1] = msgs[i].addr << 1 | 0x01;
p                 667 drivers/media/dvb-frontends/tc90522.c 			new_msgs[j].buf = p;
p                 669 drivers/media/dvb-frontends/tc90522.c 			p += 2;
p                 678 drivers/media/dvb-frontends/tc90522.c 		if (p + msgs[i].len + 2 > bufend)
p                 680 drivers/media/dvb-frontends/tc90522.c 		p[0] = TC90522_I2C_THRU_REG;
p                 681 drivers/media/dvb-frontends/tc90522.c 		p[1] = msgs[i].addr << 1;
p                 682 drivers/media/dvb-frontends/tc90522.c 		memcpy(p + 2, msgs[i].buf, msgs[i].len);
p                 683 drivers/media/dvb-frontends/tc90522.c 		new_msgs[j].buf = p;
p                 685 drivers/media/dvb-frontends/tc90522.c 		p += new_msgs[j].len;
p                 378 drivers/media/dvb-frontends/tda10021.c 				 struct dtv_frontend_properties *p)
p                 391 drivers/media/dvb-frontends/tda10021.c 		       -((s32)p->symbol_rate * afc) >> 10);
p                 394 drivers/media/dvb-frontends/tda10021.c 	p->inversion = ((state->reg0 & 0x20) == 0x20) ^ (state->config->invert != 0) ? INVERSION_ON : INVERSION_OFF;
p                 395 drivers/media/dvb-frontends/tda10021.c 	p->modulation = ((state->reg0 >> 2) & 7) + QAM_16;
p                 397 drivers/media/dvb-frontends/tda10021.c 	p->fec_inner = FEC_NONE;
p                 398 drivers/media/dvb-frontends/tda10021.c 	p->frequency = ((p->frequency + 31250) / 62500) * 62500;
p                 401 drivers/media/dvb-frontends/tda10021.c 		p->frequency -= ((s32)p->symbol_rate * afc) >> 10;
p                 447 drivers/media/dvb-frontends/tda10023.c 				 struct dtv_frontend_properties *p)
p                 462 drivers/media/dvb-frontends/tda10023.c 		       -((s32)p->symbol_rate * afc) >> 10);
p                 465 drivers/media/dvb-frontends/tda10023.c 	p->inversion = (inv&0x20?0:1);
p                 466 drivers/media/dvb-frontends/tda10023.c 	p->modulation = ((state->reg0 >> 2) & 7) + QAM_16;
p                 468 drivers/media/dvb-frontends/tda10023.c 	p->fec_inner = FEC_NONE;
p                 469 drivers/media/dvb-frontends/tda10023.c 	p->frequency = ((p->frequency + 31250) / 62500) * 62500;
p                 472 drivers/media/dvb-frontends/tda10023.c 		p->frequency -= ((s32)p->symbol_rate * afc) >> 10;
p                 578 drivers/media/dvb-frontends/tda10048.c 	struct dtv_frontend_properties *p)
p                 589 drivers/media/dvb-frontends/tda10048.c 		p->modulation = QPSK;
p                 592 drivers/media/dvb-frontends/tda10048.c 		p->modulation = QAM_16;
p                 595 drivers/media/dvb-frontends/tda10048.c 		p->modulation = QAM_64;
p                 600 drivers/media/dvb-frontends/tda10048.c 		p->hierarchy = HIERARCHY_NONE;
p                 603 drivers/media/dvb-frontends/tda10048.c 		p->hierarchy = HIERARCHY_1;
p                 606 drivers/media/dvb-frontends/tda10048.c 		p->hierarchy = HIERARCHY_2;
p                 609 drivers/media/dvb-frontends/tda10048.c 		p->hierarchy = HIERARCHY_4;
p                 614 drivers/media/dvb-frontends/tda10048.c 		p->code_rate_HP = FEC_1_2;
p                 617 drivers/media/dvb-frontends/tda10048.c 		p->code_rate_HP = FEC_2_3;
p                 620 drivers/media/dvb-frontends/tda10048.c 		p->code_rate_HP = FEC_3_4;
p                 623 drivers/media/dvb-frontends/tda10048.c 		p->code_rate_HP = FEC_5_6;
p                 626 drivers/media/dvb-frontends/tda10048.c 		p->code_rate_HP = FEC_7_8;
p                 633 drivers/media/dvb-frontends/tda10048.c 		p->code_rate_LP = FEC_1_2;
p                 636 drivers/media/dvb-frontends/tda10048.c 		p->code_rate_LP = FEC_2_3;
p                 639 drivers/media/dvb-frontends/tda10048.c 		p->code_rate_LP = FEC_3_4;
p                 642 drivers/media/dvb-frontends/tda10048.c 		p->code_rate_LP = FEC_5_6;
p                 645 drivers/media/dvb-frontends/tda10048.c 		p->code_rate_LP = FEC_7_8;
p                 652 drivers/media/dvb-frontends/tda10048.c 		p->guard_interval = GUARD_INTERVAL_1_32;
p                 655 drivers/media/dvb-frontends/tda10048.c 		p->guard_interval = GUARD_INTERVAL_1_16;
p                 658 drivers/media/dvb-frontends/tda10048.c 		p->guard_interval =  GUARD_INTERVAL_1_8;
p                 661 drivers/media/dvb-frontends/tda10048.c 		p->guard_interval =  GUARD_INTERVAL_1_4;
p                 666 drivers/media/dvb-frontends/tda10048.c 		p->transmission_mode = TRANSMISSION_MODE_2K;
p                 669 drivers/media/dvb-frontends/tda10048.c 		p->transmission_mode = TRANSMISSION_MODE_8K;
p                 717 drivers/media/dvb-frontends/tda10048.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 720 drivers/media/dvb-frontends/tda10048.c 	dprintk(1, "%s(frequency=%d)\n", __func__, p->frequency);
p                 723 drivers/media/dvb-frontends/tda10048.c 	if (p->bandwidth_hz != state->bandwidth) {
p                 724 drivers/media/dvb-frontends/tda10048.c 		tda10048_set_if(fe, p->bandwidth_hz);
p                 725 drivers/media/dvb-frontends/tda10048.c 		tda10048_set_bandwidth(fe, p->bandwidth_hz);
p                1020 drivers/media/dvb-frontends/tda10048.c 				 struct dtv_frontend_properties *p)
p                1026 drivers/media/dvb-frontends/tda10048.c 	p->inversion = tda10048_readreg(state, TDA10048_CONF_C1_1)
p                1029 drivers/media/dvb-frontends/tda10048.c 	return tda10048_get_tps(state, p);
p                 658 drivers/media/dvb-frontends/tda10086.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 660 drivers/media/dvb-frontends/tda10086.c 	if (p->symbol_rate > 20000000) {
p                 664 drivers/media/dvb-frontends/tda10086.c 	} else if (p->symbol_rate > 12000000) {
p                 668 drivers/media/dvb-frontends/tda10086.c 	} else if (p->symbol_rate > 8000000) {
p                 672 drivers/media/dvb-frontends/tda10086.c 	} else if (p->symbol_rate > 4000000) {
p                 676 drivers/media/dvb-frontends/tda10086.c 	} else if (p->symbol_rate > 2000000) {
p                 678 drivers/media/dvb-frontends/tda10086.c 		fesettings->step_size = p->symbol_rate / 8000;
p                 682 drivers/media/dvb-frontends/tda10086.c 		fesettings->step_size =  p->symbol_rate / 8000;
p                 315 drivers/media/dvb-frontends/tda8083.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 323 drivers/media/dvb-frontends/tda8083.c 	tda8083_set_inversion (state, p->inversion);
p                 324 drivers/media/dvb-frontends/tda8083.c 	tda8083_set_fec(state, p->fec_inner);
p                 325 drivers/media/dvb-frontends/tda8083.c 	tda8083_set_symbolrate(state, p->symbol_rate);
p                 334 drivers/media/dvb-frontends/tda8083.c 				struct dtv_frontend_properties *p)
p                 340 drivers/media/dvb-frontends/tda8083.c 	p->inversion = (tda8083_readreg (state, 0x0e) & 0x80) ?
p                 342 drivers/media/dvb-frontends/tda8083.c 	p->fec_inner = tda8083_get_fec(state);
p                  62 drivers/media/dvb-frontends/tda826x.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                  73 drivers/media/dvb-frontends/tda826x.c 	div = (p->frequency + (1000-1)) / 1000;
p                  77 drivers/media/dvb-frontends/tda826x.c 	ksyms = p->symbol_rate / 1000;
p                  29 drivers/media/dvb-frontends/tdhd1.h 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                  35 drivers/media/dvb-frontends/tdhd1.h 	div = (p->frequency + 36166666) / 166666;
p                  41 drivers/media/dvb-frontends/tdhd1.h 	if (p->frequency >= 174000000 && p->frequency <= 230000000)
p                  43 drivers/media/dvb-frontends/tdhd1.h 	else if (p->frequency >= 470000000 && p->frequency <= 823000000)
p                  45 drivers/media/dvb-frontends/tdhd1.h 	else if (p->frequency > 823000000 && p->frequency <= 862000000)
p                 199 drivers/media/dvb-frontends/ves1820.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 206 drivers/media/dvb-frontends/ves1820.c 	int real_qam = p->modulation - QAM_16;
p                 216 drivers/media/dvb-frontends/ves1820.c 	ves1820_set_symbolrate(state, p->symbol_rate);
p                 224 drivers/media/dvb-frontends/ves1820.c 	ves1820_setup_reg0(state, reg0x00[real_qam], p->inversion);
p                 304 drivers/media/dvb-frontends/ves1820.c 				struct dtv_frontend_properties *p)
p                 315 drivers/media/dvb-frontends/ves1820.c 			"ves1820: [AFC (%d) %dHz]\n", afc, -((s32) p->symbol_rate * afc) >> 10);
p                 319 drivers/media/dvb-frontends/ves1820.c 		p->inversion = (state->reg0 & 0x20) ? INVERSION_ON : INVERSION_OFF;
p                 321 drivers/media/dvb-frontends/ves1820.c 		p->inversion = (!(state->reg0 & 0x20)) ? INVERSION_ON : INVERSION_OFF;
p                 324 drivers/media/dvb-frontends/ves1820.c 	p->modulation = ((state->reg0 >> 2) & 7) + QAM_16;
p                 326 drivers/media/dvb-frontends/ves1820.c 	p->fec_inner = FEC_NONE;
p                 328 drivers/media/dvb-frontends/ves1820.c 	p->frequency = ((p->frequency + 31250) / 62500) * 62500;
p                 330 drivers/media/dvb-frontends/ves1820.c 		p->frequency -= ((s32) p->symbol_rate * afc) >> 10;
p                 380 drivers/media/dvb-frontends/ves1x93.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 387 drivers/media/dvb-frontends/ves1x93.c 	ves1x93_set_inversion (state, p->inversion);
p                 388 drivers/media/dvb-frontends/ves1x93.c 	ves1x93_set_fec(state, p->fec_inner);
p                 389 drivers/media/dvb-frontends/ves1x93.c 	ves1x93_set_symbolrate(state, p->symbol_rate);
p                 390 drivers/media/dvb-frontends/ves1x93.c 	state->inversion = p->inversion;
p                 391 drivers/media/dvb-frontends/ves1x93.c 	state->frequency = p->frequency;
p                 397 drivers/media/dvb-frontends/ves1x93.c 				struct dtv_frontend_properties *p)
p                 403 drivers/media/dvb-frontends/ves1x93.c 	afc = (afc * (int)(p->symbol_rate/1000/8))/16;
p                 405 drivers/media/dvb-frontends/ves1x93.c 	p->frequency = state->frequency - afc;
p                 412 drivers/media/dvb-frontends/ves1x93.c 		p->inversion = (ves1x93_readreg (state, 0x0f) & 2) ?
p                 414 drivers/media/dvb-frontends/ves1x93.c 	p->fec_inner = ves1x93_get_fec(state);
p                 296 drivers/media/dvb-frontends/zl10036.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 299 drivers/media/dvb-frontends/zl10036.c 	u32 frequency = p->frequency;
p                 315 drivers/media/dvb-frontends/zl10036.c 	fbw = (27 * p->symbol_rate) / 32;
p                 342 drivers/media/dvb-frontends/zl10036.c 	ret = zl10036_set_frequency(state, p->frequency);
p                 334 drivers/media/firewire/firedtv-avc.c 			      struct dtv_frontend_properties *p)
p                 348 drivers/media/firewire/firedtv-avc.c 	c->operand[4] = (p->frequency >> 24) & 0xff;
p                 349 drivers/media/firewire/firedtv-avc.c 	c->operand[5] = (p->frequency >> 16) & 0xff;
p                 350 drivers/media/firewire/firedtv-avc.c 	c->operand[6] = (p->frequency >> 8) & 0xff;
p                 351 drivers/media/firewire/firedtv-avc.c 	c->operand[7] = p->frequency & 0xff;
p                 353 drivers/media/firewire/firedtv-avc.c 	c->operand[8] = ((p->symbol_rate / 1000) >> 8) & 0xff;
p                 354 drivers/media/firewire/firedtv-avc.c 	c->operand[9] = (p->symbol_rate / 1000) & 0xff;
p                 356 drivers/media/firewire/firedtv-avc.c 	switch (p->fec_inner) {
p                 415 drivers/media/firewire/firedtv-avc.c 			       struct dtv_frontend_properties *p)
p                 434 drivers/media/firewire/firedtv-avc.c 			| (p->fec_inner  != FEC_AUTO ? 1 << 1 : 0)
p                 435 drivers/media/firewire/firedtv-avc.c 			| (p->modulation != QAM_AUTO ? 1 << 0 : 0);
p                 446 drivers/media/firewire/firedtv-avc.c 	c->operand[11] = (((p->frequency / 4000) >> 16) & 0xff) | (2 << 6);
p                 447 drivers/media/firewire/firedtv-avc.c 	c->operand[12] = ((p->frequency / 4000) >> 8) & 0xff;
p                 448 drivers/media/firewire/firedtv-avc.c 	c->operand[13] = (p->frequency / 4000) & 0xff;
p                 449 drivers/media/firewire/firedtv-avc.c 	c->operand[14] = ((p->symbol_rate / 1000) >> 12) & 0xff;
p                 450 drivers/media/firewire/firedtv-avc.c 	c->operand[15] = ((p->symbol_rate / 1000) >> 4) & 0xff;
p                 451 drivers/media/firewire/firedtv-avc.c 	c->operand[16] = ((p->symbol_rate / 1000) << 4) & 0xf0;
p                 454 drivers/media/firewire/firedtv-avc.c 	switch (p->fec_inner) {
p                 466 drivers/media/firewire/firedtv-avc.c 	switch (p->modulation) {
p                 483 drivers/media/firewire/firedtv-avc.c 			       struct dtv_frontend_properties *p)
p                 499 drivers/media/firewire/firedtv-avc.c 	    | (p->bandwidth_hz != 0        ? 1 << 5 : 0)
p                 500 drivers/media/firewire/firedtv-avc.c 	    | (p->modulation  != QAM_AUTO              ? 1 << 4 : 0)
p                 501 drivers/media/firewire/firedtv-avc.c 	    | (p->hierarchy != HIERARCHY_AUTO ? 1 << 3 : 0)
p                 502 drivers/media/firewire/firedtv-avc.c 	    | (p->code_rate_HP   != FEC_AUTO              ? 1 << 2 : 0)
p                 503 drivers/media/firewire/firedtv-avc.c 	    | (p->code_rate_LP   != FEC_AUTO              ? 1 << 1 : 0)
p                 504 drivers/media/firewire/firedtv-avc.c 	    | (p->guard_interval != GUARD_INTERVAL_AUTO   ? 1 << 0 : 0);
p                 509 drivers/media/firewire/firedtv-avc.c 	    | (p->transmission_mode != TRANSMISSION_MODE_AUTO ? 1 << 6 : 0)
p                 514 drivers/media/firewire/firedtv-avc.c 	c->operand[8]  = (p->frequency / 10) >> 24;
p                 515 drivers/media/firewire/firedtv-avc.c 	c->operand[9]  = ((p->frequency / 10) >> 16) & 0xff;
p                 516 drivers/media/firewire/firedtv-avc.c 	c->operand[10] = ((p->frequency / 10) >>  8) & 0xff;
p                 517 drivers/media/firewire/firedtv-avc.c 	c->operand[11] = (p->frequency / 10) & 0xff;
p                 519 drivers/media/firewire/firedtv-avc.c 	switch (p->bandwidth_hz) {
p                 527 drivers/media/firewire/firedtv-avc.c 	switch (p->modulation) {
p                 534 drivers/media/firewire/firedtv-avc.c 	switch (p->hierarchy) {
p                 543 drivers/media/firewire/firedtv-avc.c 	switch (p->code_rate_HP) {
p                 552 drivers/media/firewire/firedtv-avc.c 	switch (p->code_rate_LP) {
p                 561 drivers/media/firewire/firedtv-avc.c 	switch (p->guard_interval) {
p                 570 drivers/media/firewire/firedtv-avc.c 	switch (p->transmission_mode) {
p                 584 drivers/media/firewire/firedtv-avc.c 		  struct dtv_frontend_properties *p)
p                 596 drivers/media/firewire/firedtv-avc.c 	case FIREDTV_DVB_S2: pos = avc_tuner_tuneqpsk(fdtv, p); break;
p                 597 drivers/media/firewire/firedtv-avc.c 	case FIREDTV_DVB_C: pos = avc_tuner_dsd_dvb_c(fdtv, p); break;
p                 598 drivers/media/firewire/firedtv-avc.c 	case FIREDTV_DVB_T: pos = avc_tuner_dsd_dvb_t(fdtv, p); break;
p                1364 drivers/media/firewire/firedtv-avc.c #define set_opcr_p2p_connections(p, v)	set_opcr((p), (v), 0x3f, 24)
p                1365 drivers/media/firewire/firedtv-avc.c #define set_opcr_channel(p, v)		set_opcr((p), (v), 0x3f, 16)
p                1366 drivers/media/firewire/firedtv-avc.c #define set_opcr_data_rate(p, v)	set_opcr((p), (v), 0x3, 14)
p                1367 drivers/media/firewire/firedtv-avc.c #define set_opcr_overhead_id(p, v)	set_opcr((p), (v), 0xf, 10)
p                 142 drivers/media/firewire/firedtv-fe.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 145 drivers/media/firewire/firedtv-fe.c 	return avc_tuner_dsd(fdtv, p);
p                  87 drivers/media/firewire/firedtv-fw.c 	struct fw_iso_packet p;
p                  89 drivers/media/firewire/firedtv-fw.c 	p.payload_length = MAX_PACKET_SIZE;
p                  90 drivers/media/firewire/firedtv-fw.c 	p.interrupt = !(++ctx->interrupt_packet & (IRQ_INTERVAL - 1));
p                  91 drivers/media/firewire/firedtv-fw.c 	p.skip = 0;
p                  92 drivers/media/firewire/firedtv-fw.c 	p.header_length = ISO_HEADER_SIZE;
p                  94 drivers/media/firewire/firedtv-fw.c 	return fw_iso_context_queue(ctx->context, &p, &ctx->buffer,
p                 105 drivers/media/firewire/firedtv-fw.c 	char *p, *p_end;
p                 114 drivers/media/firewire/firedtv-fw.c 		p = ctx->pages[i / PACKETS_PER_PAGE]
p                 116 drivers/media/firewire/firedtv-fw.c 		p_end = p + length;
p                 118 drivers/media/firewire/firedtv-fw.c 		for (p += CIP_HEADER_SIZE + MPEG2_TS_HEADER_SIZE; p < p_end;
p                 119 drivers/media/firewire/firedtv-fw.c 		     p += MPEG2_TS_SOURCE_PACKET_SIZE)
p                 120 drivers/media/firewire/firedtv-fw.c 			dvb_dmx_swfilter_packets(&fdtv->demux, p, 1);
p                 175 drivers/media/i2c/cx25840/cx25840-core.c 				   struct v4l2_subdev_io_pin_config *p)
p                 187 drivers/media/i2c/cx25840/cx25840-core.c 		strength = p[i].strength;
p                 191 drivers/media/i2c/cx25840/cx25840-core.c 		switch (p[i].pin) {
p                 193 drivers/media/i2c/cx25840/cx25840-core.c 			if (p[i].function != CX23885_PAD_IRQ_N) {
p                 198 drivers/media/i2c/cx25840/cx25840-core.c 				if (p[i].flags &
p                 205 drivers/media/i2c/cx25840/cx25840-core.c 				if (p[i].flags &
p                 214 drivers/media/i2c/cx25840/cx25840-core.c 			if (p[i].function != CX23885_PAD_GPIO19) {
p                 222 drivers/media/i2c/cx25840/cx25840-core.c 				if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_SET_VALUE)) {
p                 224 drivers/media/i2c/cx25840/cx25840-core.c 					gpio_data |= ((p[i].value & 0x1) << 0);
p                 231 drivers/media/i2c/cx25840/cx25840-core.c 			if (p[i].function != CX23885_PAD_GPIO20) {
p                 234 drivers/media/i2c/cx25840/cx25840-core.c 				if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_DISABLE))
p                 243 drivers/media/i2c/cx25840/cx25840-core.c 				if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_SET_VALUE)) {
p                 245 drivers/media/i2c/cx25840/cx25840-core.c 					gpio_data |= ((p[i].value & 0x1) << 1);
p                 252 drivers/media/i2c/cx25840/cx25840-core.c 			if (p[i].function != CX23885_PAD_GPIO21) {
p                 261 drivers/media/i2c/cx25840/cx25840-core.c 				if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_SET_VALUE)) {
p                 263 drivers/media/i2c/cx25840/cx25840-core.c 					gpio_data |= ((p[i].value & 0x1) << 2);
p                 270 drivers/media/i2c/cx25840/cx25840-core.c 			if (p[i].function != CX23885_PAD_GPIO22) {
p                 279 drivers/media/i2c/cx25840/cx25840-core.c 				if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_SET_VALUE)) {
p                 281 drivers/media/i2c/cx25840/cx25840-core.c 					gpio_data |= ((p[i].value & 0x1) << 3);
p                 288 drivers/media/i2c/cx25840/cx25840-core.c 			if (p[i].function != CX23885_PAD_GPIO23) {
p                 297 drivers/media/i2c/cx25840/cx25840-core.c 				if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_SET_VALUE)) {
p                 299 drivers/media/i2c/cx25840/cx25840-core.c 					gpio_data |= ((p[i].value & 0x1) << 4);
p                 386 drivers/media/i2c/cx25840/cx25840-core.c 				   struct v4l2_subdev_io_pin_config *p)
p                 401 drivers/media/i2c/cx25840/cx25840-core.c 		u8 strength = p[i].strength;
p                 408 drivers/media/i2c/cx25840/cx25840-core.c 				(unsigned int)p[i].pin,
p                 414 drivers/media/i2c/cx25840/cx25840-core.c 		switch (p[i].pin) {
p                 416 drivers/media/i2c/cx25840/cx25840-core.c 			if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_DISABLE))
p                 423 drivers/media/i2c/cx25840/cx25840-core.c 							      p[i].function);
p                 426 drivers/media/i2c/cx25840/cx25840-core.c 					   p[i].function,
p                 428 drivers/media/i2c/cx25840/cx25840-core.c 					   p[i].flags &
p                 445 drivers/media/i2c/cx25840/cx25840-core.c 			if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_DISABLE))
p                 452 drivers/media/i2c/cx25840/cx25840-core.c 							      p[i].function);
p                 455 drivers/media/i2c/cx25840/cx25840-core.c 					   p[i].function,
p                 457 drivers/media/i2c/cx25840/cx25840-core.c 					   p[i].flags &
p                 474 drivers/media/i2c/cx25840/cx25840-core.c 			if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_DISABLE))
p                 479 drivers/media/i2c/cx25840/cx25840-core.c 			switch (p[i].function) {
p                 500 drivers/media/i2c/cx25840/cx25840-core.c 								p[i].function)
p                 508 drivers/media/i2c/cx25840/cx25840-core.c 				(unsigned int)p[i].pin);
p                1910 drivers/media/i2c/cx25840/cx25840-core.c 	char *p;
p                1914 drivers/media/i2c/cx25840/cx25840-core.c 		p = "mono";
p                1917 drivers/media/i2c/cx25840/cx25840-core.c 		p = "stereo";
p                1920 drivers/media/i2c/cx25840/cx25840-core.c 		p = "dual";
p                1923 drivers/media/i2c/cx25840/cx25840-core.c 		p = "tri";
p                1926 drivers/media/i2c/cx25840/cx25840-core.c 		p = "mono with SAP";
p                1929 drivers/media/i2c/cx25840/cx25840-core.c 		p = "stereo with SAP";
p                1932 drivers/media/i2c/cx25840/cx25840-core.c 		p = "dual with SAP";
p                1935 drivers/media/i2c/cx25840/cx25840-core.c 		p = "tri with SAP";
p                1938 drivers/media/i2c/cx25840/cx25840-core.c 		p = "forced mode";
p                1941 drivers/media/i2c/cx25840/cx25840-core.c 		p = "not defined";
p                1943 drivers/media/i2c/cx25840/cx25840-core.c 	v4l_info(client, "Detected audio mode:       %s\n", p);
p                1947 drivers/media/i2c/cx25840/cx25840-core.c 		p = "not defined";
p                1950 drivers/media/i2c/cx25840/cx25840-core.c 		p = "EIAJ";
p                1953 drivers/media/i2c/cx25840/cx25840-core.c 		p = "A2-M";
p                1956 drivers/media/i2c/cx25840/cx25840-core.c 		p = "A2-BG";
p                1959 drivers/media/i2c/cx25840/cx25840-core.c 		p = "A2-DK1";
p                1962 drivers/media/i2c/cx25840/cx25840-core.c 		p = "A2-DK2";
p                1965 drivers/media/i2c/cx25840/cx25840-core.c 		p = "A2-DK3";
p                1968 drivers/media/i2c/cx25840/cx25840-core.c 		p = "A1 (6.0 MHz FM Mono)";
p                1971 drivers/media/i2c/cx25840/cx25840-core.c 		p = "AM-L";
p                1974 drivers/media/i2c/cx25840/cx25840-core.c 		p = "NICAM-BG";
p                1977 drivers/media/i2c/cx25840/cx25840-core.c 		p = "NICAM-DK";
p                1980 drivers/media/i2c/cx25840/cx25840-core.c 		p = "NICAM-I";
p                1983 drivers/media/i2c/cx25840/cx25840-core.c 		p = "NICAM-L";
p                1986 drivers/media/i2c/cx25840/cx25840-core.c 		p = "BTSC/EIAJ/A2-M Mono (4.5 MHz FMMono)";
p                1989 drivers/media/i2c/cx25840/cx25840-core.c 		p = "IF FM Radio";
p                1992 drivers/media/i2c/cx25840/cx25840-core.c 		p = "BTSC";
p                1995 drivers/media/i2c/cx25840/cx25840-core.c 		p = "high-deviation FM";
p                1998 drivers/media/i2c/cx25840/cx25840-core.c 		p = "very high-deviation FM";
p                2001 drivers/media/i2c/cx25840/cx25840-core.c 		p = "unknown audio standard";
p                2004 drivers/media/i2c/cx25840/cx25840-core.c 		p = "forced audio standard";
p                2007 drivers/media/i2c/cx25840/cx25840-core.c 		p = "no detected audio standard";
p                2010 drivers/media/i2c/cx25840/cx25840-core.c 		p = "not defined";
p                2012 drivers/media/i2c/cx25840/cx25840-core.c 	v4l_info(client, "Detected audio standard:   %s\n", p);
p                2019 drivers/media/i2c/cx25840/cx25840-core.c 		p = "undefined";
p                2022 drivers/media/i2c/cx25840/cx25840-core.c 		p = "BTSC";
p                2025 drivers/media/i2c/cx25840/cx25840-core.c 		p = "EIAJ";
p                2028 drivers/media/i2c/cx25840/cx25840-core.c 		p = "A2-M";
p                2031 drivers/media/i2c/cx25840/cx25840-core.c 		p = "A2-BG";
p                2034 drivers/media/i2c/cx25840/cx25840-core.c 		p = "A2-DK1";
p                2037 drivers/media/i2c/cx25840/cx25840-core.c 		p = "A2-DK2";
p                2040 drivers/media/i2c/cx25840/cx25840-core.c 		p = "A2-DK3";
p                2043 drivers/media/i2c/cx25840/cx25840-core.c 		p = "A1 (6.0 MHz FM Mono)";
p                2046 drivers/media/i2c/cx25840/cx25840-core.c 		p = "AM-L";
p                2049 drivers/media/i2c/cx25840/cx25840-core.c 		p = "NICAM-BG";
p                2052 drivers/media/i2c/cx25840/cx25840-core.c 		p = "NICAM-DK";
p                2055 drivers/media/i2c/cx25840/cx25840-core.c 		p = "NICAM-I";
p                2058 drivers/media/i2c/cx25840/cx25840-core.c 		p = "NICAM-L";
p                2061 drivers/media/i2c/cx25840/cx25840-core.c 		p = "FM radio";
p                2064 drivers/media/i2c/cx25840/cx25840-core.c 		p = "automatic detection";
p                2067 drivers/media/i2c/cx25840/cx25840-core.c 		p = "undefined";
p                2069 drivers/media/i2c/cx25840/cx25840-core.c 	v4l_info(client, "Configured audio standard: %s\n", p);
p                2074 drivers/media/i2c/cx25840/cx25840-core.c 			p = "MONO1 (LANGUAGE A/Mono L+R channel for BTSC, EIAJ, A2)";
p                2077 drivers/media/i2c/cx25840/cx25840-core.c 			p = "MONO2 (LANGUAGE B)";
p                2080 drivers/media/i2c/cx25840/cx25840-core.c 			p = "MONO3 (STEREO forced MONO)";
p                2083 drivers/media/i2c/cx25840/cx25840-core.c 			p = "MONO4 (NICAM ANALOG-Language C/Analog Fallback)";
p                2086 drivers/media/i2c/cx25840/cx25840-core.c 			p = "STEREO";
p                2089 drivers/media/i2c/cx25840/cx25840-core.c 			p = "DUAL1 (AB)";
p                2092 drivers/media/i2c/cx25840/cx25840-core.c 			p = "DUAL2 (AC) (FM)";
p                2095 drivers/media/i2c/cx25840/cx25840-core.c 			p = "DUAL3 (BC) (FM)";
p                2098 drivers/media/i2c/cx25840/cx25840-core.c 			p = "DUAL4 (AC) (AM)";
p                2101 drivers/media/i2c/cx25840/cx25840-core.c 			p = "DUAL5 (BC) (AM)";
p                2104 drivers/media/i2c/cx25840/cx25840-core.c 			p = "SAP";
p                2107 drivers/media/i2c/cx25840/cx25840-core.c 			p = "undefined";
p                2109 drivers/media/i2c/cx25840/cx25840-core.c 		v4l_info(client, "Configured audio mode:     %s\n", p);
p                2113 drivers/media/i2c/cx25840/cx25840-core.c 			p = "BG";
p                2116 drivers/media/i2c/cx25840/cx25840-core.c 			p = "DK1";
p                2119 drivers/media/i2c/cx25840/cx25840-core.c 			p = "DK2";
p                2122 drivers/media/i2c/cx25840/cx25840-core.c 			p = "DK3";
p                2125 drivers/media/i2c/cx25840/cx25840-core.c 			p = "I";
p                2128 drivers/media/i2c/cx25840/cx25840-core.c 			p = "L";
p                2131 drivers/media/i2c/cx25840/cx25840-core.c 			p = "BTSC";
p                2134 drivers/media/i2c/cx25840/cx25840-core.c 			p = "EIAJ";
p                2137 drivers/media/i2c/cx25840/cx25840-core.c 			p = "A2-M";
p                2140 drivers/media/i2c/cx25840/cx25840-core.c 			p = "FM Radio";
p                2143 drivers/media/i2c/cx25840/cx25840-core.c 			p = "automatic standard and mode detection";
p                2146 drivers/media/i2c/cx25840/cx25840-core.c 			p = "undefined";
p                2148 drivers/media/i2c/cx25840/cx25840-core.c 		v4l_info(client, "Configured audio system:   %s\n", p);
p                2160 drivers/media/i2c/cx25840/cx25840-core.c 		p = "mono/language A";
p                2163 drivers/media/i2c/cx25840/cx25840-core.c 		p = "language B";
p                2166 drivers/media/i2c/cx25840/cx25840-core.c 		p = "language C";
p                2169 drivers/media/i2c/cx25840/cx25840-core.c 		p = "analog fallback";
p                2172 drivers/media/i2c/cx25840/cx25840-core.c 		p = "stereo";
p                2175 drivers/media/i2c/cx25840/cx25840-core.c 		p = "language AC";
p                2178 drivers/media/i2c/cx25840/cx25840-core.c 		p = "language BC";
p                2181 drivers/media/i2c/cx25840/cx25840-core.c 		p = "language AB";
p                2184 drivers/media/i2c/cx25840/cx25840-core.c 		p = "undefined";
p                2186 drivers/media/i2c/cx25840/cx25840-core.c 	v4l_info(client, "Preferred audio mode:      %s\n", p);
p                2191 drivers/media/i2c/cx25840/cx25840-core.c 			p = "system DK";
p                2194 drivers/media/i2c/cx25840/cx25840-core.c 			p = "system L";
p                2197 drivers/media/i2c/cx25840/cx25840-core.c 			p = "autodetect";
p                2200 drivers/media/i2c/cx25840/cx25840-core.c 			p = "undefined";
p                2202 drivers/media/i2c/cx25840/cx25840-core.c 		v4l_info(client, "Selected 65 MHz format:    %s\n", p);
p                2206 drivers/media/i2c/cx25840/cx25840-core.c 			p = "chroma";
p                2209 drivers/media/i2c/cx25840/cx25840-core.c 			p = "BTSC";
p                2212 drivers/media/i2c/cx25840/cx25840-core.c 			p = "EIAJ";
p                2215 drivers/media/i2c/cx25840/cx25840-core.c 			p = "A2-M";
p                2218 drivers/media/i2c/cx25840/cx25840-core.c 			p = "autodetect";
p                2221 drivers/media/i2c/cx25840/cx25840-core.c 			p = "undefined";
p                2223 drivers/media/i2c/cx25840/cx25840-core.c 		v4l_info(client, "Selected 45 MHz format:    %s\n", p);
p                 655 drivers/media/i2c/cx25840/cx25840-ir.c 	union cx25840_ir_fifo_rec *p;
p                 677 drivers/media/i2c/cx25840/cx25840-ir.c 	for (p = (union cx25840_ir_fifo_rec *) buf, i = 0; i < n; p++, i++) {
p                 679 drivers/media/i2c/cx25840/cx25840-ir.c 		if ((p->hw_fifo_data & FIFO_RXTX_RTO) == FIFO_RXTX_RTO) {
p                 684 drivers/media/i2c/cx25840/cx25840-ir.c 			u = (p->hw_fifo_data & FIFO_RXTX_LVL) ? 1 : 0;
p                 691 drivers/media/i2c/cx25840/cx25840-ir.c 				  (u16) (p->hw_fifo_data & FIFO_RXTX), divider);
p                 695 drivers/media/i2c/cx25840/cx25840-ir.c 		p->ir_core_data = (struct ir_raw_event)
p                 707 drivers/media/i2c/cx25840/cx25840-ir.c 				      struct v4l2_subdev_ir_parameters *p)
p                 715 drivers/media/i2c/cx25840/cx25840-ir.c 	memcpy(p, &ir_state->rx_params,
p                 747 drivers/media/i2c/cx25840/cx25840-ir.c 				      struct v4l2_subdev_ir_parameters *p)
p                 757 drivers/media/i2c/cx25840/cx25840-ir.c 	if (p->shutdown)
p                 760 drivers/media/i2c/cx25840/cx25840-ir.c 	if (p->mode != V4L2_SUBDEV_IR_MODE_PULSE_WIDTH)
p                 768 drivers/media/i2c/cx25840/cx25840-ir.c 	o->shutdown = p->shutdown;
p                 770 drivers/media/i2c/cx25840/cx25840-ir.c 	p->mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH;
p                 771 drivers/media/i2c/cx25840/cx25840-ir.c 	o->mode = p->mode;
p                 773 drivers/media/i2c/cx25840/cx25840-ir.c 	p->bytes_per_data_element = sizeof(union cx25840_ir_fifo_rec);
p                 774 drivers/media/i2c/cx25840/cx25840-ir.c 	o->bytes_per_data_element = p->bytes_per_data_element;
p                 780 drivers/media/i2c/cx25840/cx25840-ir.c 	control_rx_demodulation_enable(c, p->modulation);
p                 781 drivers/media/i2c/cx25840/cx25840-ir.c 	o->modulation = p->modulation;
p                 783 drivers/media/i2c/cx25840/cx25840-ir.c 	if (p->modulation) {
p                 784 drivers/media/i2c/cx25840/cx25840-ir.c 		p->carrier_freq = rxclk_rx_s_carrier(c, p->carrier_freq,
p                 787 drivers/media/i2c/cx25840/cx25840-ir.c 		o->carrier_freq = p->carrier_freq;
p                 789 drivers/media/i2c/cx25840/cx25840-ir.c 		p->duty_cycle = 50;
p                 790 drivers/media/i2c/cx25840/cx25840-ir.c 		o->duty_cycle = p->duty_cycle;
p                 792 drivers/media/i2c/cx25840/cx25840-ir.c 		control_rx_s_carrier_window(c, p->carrier_freq,
p                 793 drivers/media/i2c/cx25840/cx25840-ir.c 					    &p->carrier_range_lower,
p                 794 drivers/media/i2c/cx25840/cx25840-ir.c 					    &p->carrier_range_upper);
p                 795 drivers/media/i2c/cx25840/cx25840-ir.c 		o->carrier_range_lower = p->carrier_range_lower;
p                 796 drivers/media/i2c/cx25840/cx25840-ir.c 		o->carrier_range_upper = p->carrier_range_upper;
p                 798 drivers/media/i2c/cx25840/cx25840-ir.c 		p->max_pulse_width =
p                 801 drivers/media/i2c/cx25840/cx25840-ir.c 		p->max_pulse_width =
p                 802 drivers/media/i2c/cx25840/cx25840-ir.c 			    rxclk_rx_s_max_pulse_width(c, p->max_pulse_width,
p                 805 drivers/media/i2c/cx25840/cx25840-ir.c 	o->max_pulse_width = p->max_pulse_width;
p                 808 drivers/media/i2c/cx25840/cx25840-ir.c 	p->noise_filter_min_width =
p                 809 drivers/media/i2c/cx25840/cx25840-ir.c 			    filter_rx_s_min_width(c, p->noise_filter_min_width);
p                 810 drivers/media/i2c/cx25840/cx25840-ir.c 	o->noise_filter_min_width = p->noise_filter_min_width;
p                 812 drivers/media/i2c/cx25840/cx25840-ir.c 	p->resolution = clock_divider_to_resolution(rxclk_divider);
p                 813 drivers/media/i2c/cx25840/cx25840-ir.c 	o->resolution = p->resolution;
p                 820 drivers/media/i2c/cx25840/cx25840-ir.c 	o->invert_level = p->invert_level;
p                 821 drivers/media/i2c/cx25840/cx25840-ir.c 	atomic_set(&ir_state->rx_invert, p->invert_level);
p                 823 drivers/media/i2c/cx25840/cx25840-ir.c 	o->interrupt_enable = p->interrupt_enable;
p                 824 drivers/media/i2c/cx25840/cx25840-ir.c 	o->enable = p->enable;
p                 825 drivers/media/i2c/cx25840/cx25840-ir.c 	if (p->enable) {
p                 831 drivers/media/i2c/cx25840/cx25840-ir.c 		if (p->interrupt_enable)
p                 833 drivers/media/i2c/cx25840/cx25840-ir.c 		control_rx_enable(c, p->enable);
p                 897 drivers/media/i2c/cx25840/cx25840-ir.c 				      struct v4l2_subdev_ir_parameters *p)
p                 905 drivers/media/i2c/cx25840/cx25840-ir.c 	memcpy(p, &ir_state->tx_params,
p                 935 drivers/media/i2c/cx25840/cx25840-ir.c 				      struct v4l2_subdev_ir_parameters *p)
p                 945 drivers/media/i2c/cx25840/cx25840-ir.c 	if (p->shutdown)
p                 948 drivers/media/i2c/cx25840/cx25840-ir.c 	if (p->mode != V4L2_SUBDEV_IR_MODE_PULSE_WIDTH)
p                 955 drivers/media/i2c/cx25840/cx25840-ir.c 	o->shutdown = p->shutdown;
p                 957 drivers/media/i2c/cx25840/cx25840-ir.c 	p->mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH;
p                 958 drivers/media/i2c/cx25840/cx25840-ir.c 	o->mode = p->mode;
p                 960 drivers/media/i2c/cx25840/cx25840-ir.c 	p->bytes_per_data_element = sizeof(union cx25840_ir_fifo_rec);
p                 961 drivers/media/i2c/cx25840/cx25840-ir.c 	o->bytes_per_data_element = p->bytes_per_data_element;
p                 967 drivers/media/i2c/cx25840/cx25840-ir.c 	control_tx_modulation_enable(c, p->modulation);
p                 968 drivers/media/i2c/cx25840/cx25840-ir.c 	o->modulation = p->modulation;
p                 970 drivers/media/i2c/cx25840/cx25840-ir.c 	if (p->modulation) {
p                 971 drivers/media/i2c/cx25840/cx25840-ir.c 		p->carrier_freq = txclk_tx_s_carrier(c, p->carrier_freq,
p                 973 drivers/media/i2c/cx25840/cx25840-ir.c 		o->carrier_freq = p->carrier_freq;
p                 975 drivers/media/i2c/cx25840/cx25840-ir.c 		p->duty_cycle = cduty_tx_s_duty_cycle(c, p->duty_cycle);
p                 976 drivers/media/i2c/cx25840/cx25840-ir.c 		o->duty_cycle = p->duty_cycle;
p                 978 drivers/media/i2c/cx25840/cx25840-ir.c 		p->max_pulse_width =
p                 981 drivers/media/i2c/cx25840/cx25840-ir.c 		p->max_pulse_width =
p                 982 drivers/media/i2c/cx25840/cx25840-ir.c 			    txclk_tx_s_max_pulse_width(c, p->max_pulse_width,
p                 985 drivers/media/i2c/cx25840/cx25840-ir.c 	o->max_pulse_width = p->max_pulse_width;
p                 988 drivers/media/i2c/cx25840/cx25840-ir.c 	p->resolution = clock_divider_to_resolution(txclk_divider);
p                 989 drivers/media/i2c/cx25840/cx25840-ir.c 	o->resolution = p->resolution;
p                 994 drivers/media/i2c/cx25840/cx25840-ir.c 	control_tx_polarity_invert(c, p->invert_carrier_sense);
p                 995 drivers/media/i2c/cx25840/cx25840-ir.c 	o->invert_carrier_sense = p->invert_carrier_sense;
p                1003 drivers/media/i2c/cx25840/cx25840-ir.c 	o->invert_level = p->invert_level;
p                1005 drivers/media/i2c/cx25840/cx25840-ir.c 	o->interrupt_enable = p->interrupt_enable;
p                1006 drivers/media/i2c/cx25840/cx25840-ir.c 	o->enable = p->enable;
p                1007 drivers/media/i2c/cx25840/cx25840-ir.c 	if (p->enable) {
p                1009 drivers/media/i2c/cx25840/cx25840-ir.c 		if (p->interrupt_enable)
p                1011 drivers/media/i2c/cx25840/cx25840-ir.c 		control_tx_enable(c, p->enable);
p                  22 drivers/media/i2c/cx25840/cx25840-vbi.c static int decode_vps(u8 * dst, u8 * p)
p                  63 drivers/media/i2c/cx25840/cx25840-vbi.c 		err |= biphase_tbl[p[i]] | biphase_tbl[p[i + 1]];
p                  64 drivers/media/i2c/cx25840/cx25840-vbi.c 		c = (biphase_tbl[p[i + 1]] & 0xf) |
p                  65 drivers/media/i2c/cx25840/cx25840-vbi.c 		    ((biphase_tbl[p[i]] & 0xf) << 4);
p                 219 drivers/media/i2c/cx25840/cx25840-vbi.c 	u8 *p = vbi->p;
p                 222 drivers/media/i2c/cx25840/cx25840-vbi.c 	if (p[0] || p[1] != 0xff || p[2] != 0xff ||
p                 223 drivers/media/i2c/cx25840/cx25840-vbi.c 			(p[3] != 0x55 && p[3] != 0x91)) {
p                 228 drivers/media/i2c/cx25840/cx25840-vbi.c 	p += 4;
p                 229 drivers/media/i2c/cx25840/cx25840-vbi.c 	id1 = p[-1];
p                 230 drivers/media/i2c/cx25840/cx25840-vbi.c 	id2 = p[0] & 0xf;
p                 231 drivers/media/i2c/cx25840/cx25840-vbi.c 	l = p[2] & 0x3f;
p                 233 drivers/media/i2c/cx25840/cx25840-vbi.c 	p += 4;
p                 244 drivers/media/i2c/cx25840/cx25840-vbi.c 		err = !odd_parity(p[0]) || !odd_parity(p[1]);
p                 248 drivers/media/i2c/cx25840/cx25840-vbi.c 		if (decode_vps(p, p) != 0)
p                 260 drivers/media/i2c/cx25840/cx25840-vbi.c 	vbi->p = p;
p                 377 drivers/media/i2c/ir-kbd-i2c.c 	u8 buf[5], *p;
p                 379 drivers/media/i2c/ir-kbd-i2c.c 	p = &code_block->length;
p                 380 drivers/media/i2c/ir-kbd-i2c.c 	for (i = 0; p < code_block->csum; i++)
p                 381 drivers/media/i2c/ir-kbd-i2c.c 		code_block->csum[i & 1] ^= *p++;
p                 383 drivers/media/i2c/ir-kbd-i2c.c 	p = &code_block->length;
p                 392 drivers/media/i2c/ir-kbd-i2c.c 			buf[1 + j] = p[i + j];
p                 500 drivers/media/i2c/ir-kbd-i2c.c 	int rep, i, l, p = 0, s, c = 0;
p                 533 drivers/media/i2c/ir-kbd-i2c.c 			codes[c++] = (p << 4) | s;
p                 535 drivers/media/i2c/ir-kbd-i2c.c 			p = find_slot(code_block->pulse,
p                 537 drivers/media/i2c/ir-kbd-i2c.c 			if (p == -1) {
p                 553 drivers/media/i2c/ir-kbd-i2c.c 	codes[c++] = (p << 4) | s;
p                 563 drivers/media/i2c/msp3400-driver.c 	const char *p;
p                 573 drivers/media/i2c/msp3400-driver.c 		case MSP_MODE_AM_DETECT: p = "AM (for carrier detect)"; break;
p                 574 drivers/media/i2c/msp3400-driver.c 		case MSP_MODE_FM_RADIO: p = "FM Radio"; break;
p                 575 drivers/media/i2c/msp3400-driver.c 		case MSP_MODE_FM_TERRA: p = "Terrestrial FM-mono/stereo"; break;
p                 576 drivers/media/i2c/msp3400-driver.c 		case MSP_MODE_FM_SAT: p = "Satellite FM-mono"; break;
p                 577 drivers/media/i2c/msp3400-driver.c 		case MSP_MODE_FM_NICAM1: p = "NICAM/FM (B/G, D/K)"; break;
p                 578 drivers/media/i2c/msp3400-driver.c 		case MSP_MODE_FM_NICAM2: p = "NICAM/FM (I)"; break;
p                 579 drivers/media/i2c/msp3400-driver.c 		case MSP_MODE_AM_NICAM: p = "NICAM/AM (L)"; break;
p                 580 drivers/media/i2c/msp3400-driver.c 		case MSP_MODE_BTSC: p = "BTSC"; break;
p                 581 drivers/media/i2c/msp3400-driver.c 		case MSP_MODE_EXTERN: p = "External input"; break;
p                 582 drivers/media/i2c/msp3400-driver.c 		default: p = "unknown"; break;
p                 585 drivers/media/i2c/msp3400-driver.c 		dev_info(&client->dev, "Mode:     %s\n", p);
p                 587 drivers/media/i2c/msp3400-driver.c 		dev_info(&client->dev, "Mode:     %s (%s%s)\n", p,
p                 592 drivers/media/i2c/msp3400-driver.c 			dev_info(&client->dev, "Mode:     %s\n", p);
p                 860 drivers/media/i2c/s5k5baf.c 	struct v4l2_rect *p, r;
p                 864 drivers/media/i2c/s5k5baf.c 	p = &state->crop_sink;
p                 865 drivers/media/i2c/s5k5baf.c 	s5k5baf_write_seq(state, REG_G_PREVREQ_IN_WIDTH, p->width, p->height,
p                 866 drivers/media/i2c/s5k5baf.c 			  p->left, p->top);
p                 906 drivers/media/i2c/s5k5baf.c 	p = &state->crop_source;
p                 907 drivers/media/i2c/s5k5baf.c 	s5k5baf_write_seq(state, REG_P_OUT_WIDTH(0), p->width, p->height);
p                 676 drivers/media/i2c/saa7115.c static int saa711x_decode_vps(u8 *dst, u8 *p)
p                 716 drivers/media/i2c/saa7115.c 		err |= biphase_tbl[p[i]] | biphase_tbl[p[i + 1]];
p                 717 drivers/media/i2c/saa7115.c 		c = (biphase_tbl[p[i + 1]] & 0xf) | ((biphase_tbl[p[i]] & 0xf) << 4);
p                 723 drivers/media/i2c/saa7115.c static int saa711x_decode_wss(u8 *p)
p                 733 drivers/media/i2c/saa7115.c 		int b1 = wss_bits[p[i] & 7];
p                 734 drivers/media/i2c/saa7115.c 		int b2 = wss_bits[(p[i] >> 3) & 7];
p                1196 drivers/media/i2c/saa7115.c 	u8 *p = vbi->p;
p                1201 drivers/media/i2c/saa7115.c 	id1 = p[2];
p                1202 drivers/media/i2c/saa7115.c 	id2 = p[3];
p                1208 drivers/media/i2c/saa7115.c 	p += 4;
p                1209 drivers/media/i2c/saa7115.c 	vbi->p = p;
p                1221 drivers/media/i2c/saa7115.c 	if (!memcmp(p, vbi_no_data_pattern, sizeof(vbi_no_data_pattern)))
p                1230 drivers/media/i2c/saa7115.c 		if (!saa711x_odd_parity(p[0]) || !saa711x_odd_parity(p[1]))
p                1235 drivers/media/i2c/saa7115.c 		wss = saa711x_decode_wss(p);
p                1238 drivers/media/i2c/saa7115.c 		p[0] = wss & 0xff;
p                1239 drivers/media/i2c/saa7115.c 		p[1] = wss >> 8;
p                1243 drivers/media/i2c/saa7115.c 		if (saa711x_decode_vps(p, p) != 0)
p                1238 drivers/media/i2c/saa717x.c 	char *p = "";
p                1261 drivers/media/i2c/saa717x.c 		p = "saa7173";
p                1263 drivers/media/i2c/saa717x.c 		p = "saa7174A";
p                1265 drivers/media/i2c/saa717x.c 		p = "saa7174HL";
p                1267 drivers/media/i2c/saa717x.c 		p = "saa7171";
p                1268 drivers/media/i2c/saa717x.c 	v4l2_info(sd, "%s found @ 0x%x (%s)\n", p,
p                 976 drivers/media/i2c/smiapp/smiapp-core.c 	u32 i, s, p, np, v;
p                 980 drivers/media/i2c/smiapp/smiapp-core.c 	for (p = 0; p < np; p++) {
p                 983 drivers/media/i2c/smiapp/smiapp-core.c 			SMIAPP_REG_U8_DATA_TRANSFER_IF_1_PAGE_SELECT, p);
p                 161 drivers/media/mc/mc-device.c 		unsigned int p;
p                 163 drivers/media/mc/mc-device.c 		for (p = 0; p < entity->num_pads; p++) {
p                 167 drivers/media/mc/mc-device.c 			media_device_kpad_to_upad(&entity->pads[p], &pad);
p                 168 drivers/media/mc/mc-device.c 			if (copy_to_user(&links->pads[p], &pad, sizeof(pad)))
p                 152 drivers/media/mmc/siano/smssdio.c 					 cb->p,
p                 160 drivers/media/mmc/siano/smssdio.c 		hdr = cb->p;
p                 173 drivers/media/mmc/siano/smssdio.c 		hdr = cb->p;
p                 183 drivers/media/mmc/siano/smssdio.c 		buffer = cb->p + (hdr->msg_length - size);
p                 232 drivers/media/mmc/siano/smssdio.c 	smsendian_handle_rx_message((struct sms_msg_data *) cb->p);
p                2643 drivers/media/pci/bt8xx/bttv-driver.c 				struct v4l2_requestbuffers *p)
p                2646 drivers/media/pci/bt8xx/bttv-driver.c 	return videobuf_reqbufs(bttv_queue(fh), p);
p                1580 drivers/media/pci/bt8xx/dst.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1584 drivers/media/pci/bt8xx/dst.c 	if (p != NULL) {
p                1585 drivers/media/pci/bt8xx/dst.c 		retval = dst_set_freq(state, p->frequency);
p                1588 drivers/media/pci/bt8xx/dst.c 		dprintk(3, "Set Frequency=[%d]\n", p->frequency);
p                1592 drivers/media/pci/bt8xx/dst.c 				dst_set_inversion(state, p->inversion);
p                1593 drivers/media/pci/bt8xx/dst.c 			dst_set_fec(state, p->fec_inner);
p                1594 drivers/media/pci/bt8xx/dst.c 			dst_set_symbolrate(state, p->symbol_rate);
p                1596 drivers/media/pci/bt8xx/dst.c 			dprintk(3, "Set Symbolrate=[%d]\n", p->symbol_rate);
p                1599 drivers/media/pci/bt8xx/dst.c 			dst_set_bandwidth(state, p->bandwidth_hz);
p                1601 drivers/media/pci/bt8xx/dst.c 			dst_set_fec(state, p->fec_inner);
p                1602 drivers/media/pci/bt8xx/dst.c 			dst_set_symbolrate(state, p->symbol_rate);
p                1603 drivers/media/pci/bt8xx/dst.c 			dst_set_modulation(state, p->modulation);
p                1618 drivers/media/pci/bt8xx/dst.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1621 drivers/media/pci/bt8xx/dst.c 		dst_set_freq(state, p->frequency);
p                1622 drivers/media/pci/bt8xx/dst.c 		dprintk(3, "Set Frequency=[%d]\n", p->frequency);
p                1626 drivers/media/pci/bt8xx/dst.c 				dst_set_inversion(state, p->inversion);
p                1627 drivers/media/pci/bt8xx/dst.c 			dst_set_fec(state, p->fec_inner);
p                1628 drivers/media/pci/bt8xx/dst.c 			dst_set_symbolrate(state, p->symbol_rate);
p                1630 drivers/media/pci/bt8xx/dst.c 			dprintk(3, "Set Symbolrate=[%d]\n", p->symbol_rate);
p                1633 drivers/media/pci/bt8xx/dst.c 			dst_set_bandwidth(state, p->bandwidth_hz);
p                1635 drivers/media/pci/bt8xx/dst.c 			dst_set_fec(state, p->fec_inner);
p                1636 drivers/media/pci/bt8xx/dst.c 			dst_set_symbolrate(state, p->symbol_rate);
p                1637 drivers/media/pci/bt8xx/dst.c 			dst_set_modulation(state, p->modulation);
p                1655 drivers/media/pci/bt8xx/dst.c 			    struct dtv_frontend_properties *p)
p                1659 drivers/media/pci/bt8xx/dst.c 	p->frequency = state->decode_freq;
p                1662 drivers/media/pci/bt8xx/dst.c 			p->inversion = state->inversion;
p                1663 drivers/media/pci/bt8xx/dst.c 		p->symbol_rate = state->symbol_rate;
p                1664 drivers/media/pci/bt8xx/dst.c 		p->fec_inner = dst_get_fec(state);
p                1666 drivers/media/pci/bt8xx/dst.c 		p->bandwidth_hz = state->bandwidth;
p                1668 drivers/media/pci/bt8xx/dst.c 		p->symbol_rate = state->symbol_rate;
p                1669 drivers/media/pci/bt8xx/dst.c 		p->fec_inner = dst_get_fec(state);
p                1670 drivers/media/pci/bt8xx/dst.c 		p->modulation = dst_get_modulation(state);
p                 188 drivers/media/pci/cobalt/cobalt-alsa-pcm.c 	unsigned char *p = vb2_plane_vaddr(vb, 0);
p                 196 drivers/media/pci/cobalt/cobalt-alsa-pcm.c 			pr_cont("%02x", p[i]);
p                 110 drivers/media/pci/cobalt/cobalt-v4l2.c 	struct list_head *p;
p                 113 drivers/media/pci/cobalt/cobalt-v4l2.c 	list_for_each(p, &s->bufs) {
p                 114 drivers/media/pci/cobalt/cobalt-v4l2.c 		cb = list_entry(p, struct cobalt_buffer, list);
p                 351 drivers/media/pci/cobalt/cobalt-v4l2.c 	struct list_head *p;
p                 370 drivers/media/pci/cobalt/cobalt-v4l2.c 	list_for_each(p, &s->bufs) {
p                 371 drivers/media/pci/cobalt/cobalt-v4l2.c 		cb = list_entry(p, struct cobalt_buffer, list);
p                 397 drivers/media/pci/cobalt/cobalt-v4l2.c 	struct list_head *p, *safe;
p                 404 drivers/media/pci/cobalt/cobalt-v4l2.c 	list_for_each_safe(p, safe, &s->bufs) {
p                 405 drivers/media/pci/cobalt/cobalt-v4l2.c 		cb = list_entry(p, struct cobalt_buffer, list);
p                1077 drivers/media/pci/cx18/cx18-av-core.c 	char *p;
p                1080 drivers/media/pci/cx18/cx18-av-core.c 	case 0x00: p = "mono"; break;
p                1081 drivers/media/pci/cx18/cx18-av-core.c 	case 0x01: p = "stereo"; break;
p                1082 drivers/media/pci/cx18/cx18-av-core.c 	case 0x02: p = "dual"; break;
p                1083 drivers/media/pci/cx18/cx18-av-core.c 	case 0x04: p = "tri"; break;
p                1084 drivers/media/pci/cx18/cx18-av-core.c 	case 0x10: p = "mono with SAP"; break;
p                1085 drivers/media/pci/cx18/cx18-av-core.c 	case 0x11: p = "stereo with SAP"; break;
p                1086 drivers/media/pci/cx18/cx18-av-core.c 	case 0x12: p = "dual with SAP"; break;
p                1087 drivers/media/pci/cx18/cx18-av-core.c 	case 0x14: p = "tri with SAP"; break;
p                1088 drivers/media/pci/cx18/cx18-av-core.c 	case 0xfe: p = "forced mode"; break;
p                1089 drivers/media/pci/cx18/cx18-av-core.c 	default: p = "not defined"; break;
p                1091 drivers/media/pci/cx18/cx18-av-core.c 	CX18_INFO_DEV(sd, "Detected audio mode:       %s\n", p);
p                1094 drivers/media/pci/cx18/cx18-av-core.c 	case 0x00: p = "not defined"; break;
p                1095 drivers/media/pci/cx18/cx18-av-core.c 	case 0x01: p = "EIAJ"; break;
p                1096 drivers/media/pci/cx18/cx18-av-core.c 	case 0x02: p = "A2-M"; break;
p                1097 drivers/media/pci/cx18/cx18-av-core.c 	case 0x03: p = "A2-BG"; break;
p                1098 drivers/media/pci/cx18/cx18-av-core.c 	case 0x04: p = "A2-DK1"; break;
p                1099 drivers/media/pci/cx18/cx18-av-core.c 	case 0x05: p = "A2-DK2"; break;
p                1100 drivers/media/pci/cx18/cx18-av-core.c 	case 0x06: p = "A2-DK3"; break;
p                1101 drivers/media/pci/cx18/cx18-av-core.c 	case 0x07: p = "A1 (6.0 MHz FM Mono)"; break;
p                1102 drivers/media/pci/cx18/cx18-av-core.c 	case 0x08: p = "AM-L"; break;
p                1103 drivers/media/pci/cx18/cx18-av-core.c 	case 0x09: p = "NICAM-BG"; break;
p                1104 drivers/media/pci/cx18/cx18-av-core.c 	case 0x0a: p = "NICAM-DK"; break;
p                1105 drivers/media/pci/cx18/cx18-av-core.c 	case 0x0b: p = "NICAM-I"; break;
p                1106 drivers/media/pci/cx18/cx18-av-core.c 	case 0x0c: p = "NICAM-L"; break;
p                1107 drivers/media/pci/cx18/cx18-av-core.c 	case 0x0d: p = "BTSC/EIAJ/A2-M Mono (4.5 MHz FMMono)"; break;
p                1108 drivers/media/pci/cx18/cx18-av-core.c 	case 0x0e: p = "IF FM Radio"; break;
p                1109 drivers/media/pci/cx18/cx18-av-core.c 	case 0x0f: p = "BTSC"; break;
p                1110 drivers/media/pci/cx18/cx18-av-core.c 	case 0x10: p = "detected chrominance"; break;
p                1111 drivers/media/pci/cx18/cx18-av-core.c 	case 0xfd: p = "unknown audio standard"; break;
p                1112 drivers/media/pci/cx18/cx18-av-core.c 	case 0xfe: p = "forced audio standard"; break;
p                1113 drivers/media/pci/cx18/cx18-av-core.c 	case 0xff: p = "no detected audio standard"; break;
p                1114 drivers/media/pci/cx18/cx18-av-core.c 	default: p = "not defined"; break;
p                1116 drivers/media/pci/cx18/cx18-av-core.c 	CX18_INFO_DEV(sd, "Detected audio standard:   %s\n", p);
p                1123 drivers/media/pci/cx18/cx18-av-core.c 	case 0x00: p = "undefined"; break;
p                1124 drivers/media/pci/cx18/cx18-av-core.c 	case 0x01: p = "BTSC"; break;
p                1125 drivers/media/pci/cx18/cx18-av-core.c 	case 0x02: p = "EIAJ"; break;
p                1126 drivers/media/pci/cx18/cx18-av-core.c 	case 0x03: p = "A2-M"; break;
p                1127 drivers/media/pci/cx18/cx18-av-core.c 	case 0x04: p = "A2-BG"; break;
p                1128 drivers/media/pci/cx18/cx18-av-core.c 	case 0x05: p = "A2-DK1"; break;
p                1129 drivers/media/pci/cx18/cx18-av-core.c 	case 0x06: p = "A2-DK2"; break;
p                1130 drivers/media/pci/cx18/cx18-av-core.c 	case 0x07: p = "A2-DK3"; break;
p                1131 drivers/media/pci/cx18/cx18-av-core.c 	case 0x08: p = "A1 (6.0 MHz FM Mono)"; break;
p                1132 drivers/media/pci/cx18/cx18-av-core.c 	case 0x09: p = "AM-L"; break;
p                1133 drivers/media/pci/cx18/cx18-av-core.c 	case 0x0a: p = "NICAM-BG"; break;
p                1134 drivers/media/pci/cx18/cx18-av-core.c 	case 0x0b: p = "NICAM-DK"; break;
p                1135 drivers/media/pci/cx18/cx18-av-core.c 	case 0x0c: p = "NICAM-I"; break;
p                1136 drivers/media/pci/cx18/cx18-av-core.c 	case 0x0d: p = "NICAM-L"; break;
p                1137 drivers/media/pci/cx18/cx18-av-core.c 	case 0x0e: p = "FM radio"; break;
p                1138 drivers/media/pci/cx18/cx18-av-core.c 	case 0x0f: p = "automatic detection"; break;
p                1139 drivers/media/pci/cx18/cx18-av-core.c 	default: p = "undefined"; break;
p                1141 drivers/media/pci/cx18/cx18-av-core.c 	CX18_INFO_DEV(sd, "Configured audio standard: %s\n", p);
p                1145 drivers/media/pci/cx18/cx18-av-core.c 		case 0x00: p = "MONO1 (LANGUAGE A/Mono L+R channel for BTSC, EIAJ, A2)"; break;
p                1146 drivers/media/pci/cx18/cx18-av-core.c 		case 0x01: p = "MONO2 (LANGUAGE B)"; break;
p                1147 drivers/media/pci/cx18/cx18-av-core.c 		case 0x02: p = "MONO3 (STEREO forced MONO)"; break;
p                1148 drivers/media/pci/cx18/cx18-av-core.c 		case 0x03: p = "MONO4 (NICAM ANALOG-Language C/Analog Fallback)"; break;
p                1149 drivers/media/pci/cx18/cx18-av-core.c 		case 0x04: p = "STEREO"; break;
p                1150 drivers/media/pci/cx18/cx18-av-core.c 		case 0x05: p = "DUAL1 (AC)"; break;
p                1151 drivers/media/pci/cx18/cx18-av-core.c 		case 0x06: p = "DUAL2 (BC)"; break;
p                1152 drivers/media/pci/cx18/cx18-av-core.c 		case 0x07: p = "DUAL3 (AB)"; break;
p                1153 drivers/media/pci/cx18/cx18-av-core.c 		default: p = "undefined";
p                1155 drivers/media/pci/cx18/cx18-av-core.c 		CX18_INFO_DEV(sd, "Configured audio mode:     %s\n", p);
p                1158 drivers/media/pci/cx18/cx18-av-core.c 		case 0x00: p = "BG"; break;
p                1159 drivers/media/pci/cx18/cx18-av-core.c 		case 0x01: p = "DK1"; break;
p                1160 drivers/media/pci/cx18/cx18-av-core.c 		case 0x02: p = "DK2"; break;
p                1161 drivers/media/pci/cx18/cx18-av-core.c 		case 0x03: p = "DK3"; break;
p                1162 drivers/media/pci/cx18/cx18-av-core.c 		case 0x04: p = "I"; break;
p                1163 drivers/media/pci/cx18/cx18-av-core.c 		case 0x05: p = "L"; break;
p                1164 drivers/media/pci/cx18/cx18-av-core.c 		case 0x06: p = "BTSC"; break;
p                1165 drivers/media/pci/cx18/cx18-av-core.c 		case 0x07: p = "EIAJ"; break;
p                1166 drivers/media/pci/cx18/cx18-av-core.c 		case 0x08: p = "A2-M"; break;
p                1167 drivers/media/pci/cx18/cx18-av-core.c 		case 0x09: p = "FM Radio (4.5 MHz)"; break;
p                1168 drivers/media/pci/cx18/cx18-av-core.c 		case 0x0a: p = "FM Radio (5.5 MHz)"; break;
p                1169 drivers/media/pci/cx18/cx18-av-core.c 		case 0x0b: p = "S-Video"; break;
p                1170 drivers/media/pci/cx18/cx18-av-core.c 		case 0x0f: p = "automatic standard and mode detection"; break;
p                1171 drivers/media/pci/cx18/cx18-av-core.c 		default: p = "undefined"; break;
p                1173 drivers/media/pci/cx18/cx18-av-core.c 		CX18_INFO_DEV(sd, "Configured audio system:   %s\n", p);
p                1183 drivers/media/pci/cx18/cx18-av-core.c 	case 0: p = "mono/language A"; break;
p                1184 drivers/media/pci/cx18/cx18-av-core.c 	case 1: p = "language B"; break;
p                1185 drivers/media/pci/cx18/cx18-av-core.c 	case 2: p = "language C"; break;
p                1186 drivers/media/pci/cx18/cx18-av-core.c 	case 3: p = "analog fallback"; break;
p                1187 drivers/media/pci/cx18/cx18-av-core.c 	case 4: p = "stereo"; break;
p                1188 drivers/media/pci/cx18/cx18-av-core.c 	case 5: p = "language AC"; break;
p                1189 drivers/media/pci/cx18/cx18-av-core.c 	case 6: p = "language BC"; break;
p                1190 drivers/media/pci/cx18/cx18-av-core.c 	case 7: p = "language AB"; break;
p                1191 drivers/media/pci/cx18/cx18-av-core.c 	default: p = "undefined"; break;
p                1193 drivers/media/pci/cx18/cx18-av-core.c 	CX18_INFO_DEV(sd, "Preferred audio mode:      %s\n", p);
p                1197 drivers/media/pci/cx18/cx18-av-core.c 		case 0: p = "system DK"; break;
p                1198 drivers/media/pci/cx18/cx18-av-core.c 		case 1: p = "system L"; break;
p                1200 drivers/media/pci/cx18/cx18-av-core.c 		CX18_INFO_DEV(sd, "Selected 65 MHz format:    %s\n", p);
p                1203 drivers/media/pci/cx18/cx18-av-core.c 		case 0: p = "Chroma"; break;
p                1204 drivers/media/pci/cx18/cx18-av-core.c 		case 1: p = "BTSC"; break;
p                1205 drivers/media/pci/cx18/cx18-av-core.c 		case 2: p = "EIAJ"; break;
p                1206 drivers/media/pci/cx18/cx18-av-core.c 		case 3: p = "A2-M"; break;
p                1207 drivers/media/pci/cx18/cx18-av-core.c 		case 4: p = "autodetect"; break;
p                1208 drivers/media/pci/cx18/cx18-av-core.c 		default: p = "undefined"; break;
p                1210 drivers/media/pci/cx18/cx18-av-core.c 		CX18_INFO_DEV(sd, "Selected 45 MHz format:    %s\n", p);
p                  68 drivers/media/pci/cx18/cx18-av-vbi.c static int decode_vps(u8 *dst, u8 *p)
p                 109 drivers/media/pci/cx18/cx18-av-vbi.c 		err |= biphase_tbl[p[i]] | biphase_tbl[p[i + 1]];
p                 110 drivers/media/pci/cx18/cx18-av-vbi.c 		c = (biphase_tbl[p[i + 1]] & 0xf) |
p                 111 drivers/media/pci/cx18/cx18-av-vbi.c 		    ((biphase_tbl[p[i]] & 0xf) << 4);
p                 250 drivers/media/pci/cx18/cx18-av-vbi.c 	struct vbi_anc_data *anc = (struct vbi_anc_data *)vbi->p;
p                 251 drivers/media/pci/cx18/cx18-av-vbi.c 	u8 *p;
p                 269 drivers/media/pci/cx18/cx18-av-vbi.c 	p = anc->payload;
p                 281 drivers/media/pci/cx18/cx18-av-vbi.c 		err = !odd_parity(p[0]) || !odd_parity(p[1]);
p                 285 drivers/media/pci/cx18/cx18-av-vbi.c 		if (decode_vps(p, p) != 0)
p                 297 drivers/media/pci/cx18/cx18-av-vbi.c 	vbi->p = p;
p                 290 drivers/media/pci/cx18/cx18-fileops.c 		const char *p = start + 1;
p                 295 drivers/media/pci/cx18/cx18-fileops.c 		while (start + len > p) {
p                 297 drivers/media/pci/cx18/cx18-fileops.c 			q = memchr(p, 0, start + len - p);
p                 300 drivers/media/pci/cx18/cx18-fileops.c 			p = q + 1;
p                 323 drivers/media/pci/cx18/cx18-fileops.c 					p = q + 9; /* Skip this video PES hdr */
p                 207 drivers/media/pci/cx18/cx18-gpio.c 	const struct cx18_gpio_i2c_slave_reset *p;
p                 209 drivers/media/pci/cx18/cx18-gpio.c 	p = &cx->card->gpio_i2c_slave_reset;
p                 212 drivers/media/pci/cx18/cx18-gpio.c 		gpio_reset_seq(cx, p->active_lo_mask, p->active_hi_mask,
p                 213 drivers/media/pci/cx18/cx18-gpio.c 			       p->msecs_asserted, p->msecs_recovery);
p                 230 drivers/media/pci/cx18/cx18-gpio.c 		gpio_reset_seq(cx, p->ir_reset_mask, 0,
p                 231 drivers/media/pci/cx18/cx18-gpio.c 			       p->msecs_asserted, p->msecs_recovery);
p                  94 drivers/media/pci/cx18/cx18-mailbox.c 	char *p;
p                  97 drivers/media/pci/cx18/cx18-mailbox.c 	for (i = 0, p = buf; i < n; i++, p += 11) {
p                  99 drivers/media/pci/cx18/cx18-mailbox.c 		snprintf(p, 12, " %#010x", data[i]);
p                 101 drivers/media/pci/cx18/cx18-mailbox.c 	*p = '\0';
p                 153 drivers/media/pci/cx18/cx18-mailbox.c 	u8 *p;
p                 168 drivers/media/pci/cx18/cx18-mailbox.c 	p = videobuf_to_vmalloc(&vb_buf->vb);
p                 169 drivers/media/pci/cx18/cx18-mailbox.c 	if (!p)
p                 178 drivers/media/pci/cx18/cx18-mailbox.c 			memcpy(p + offset, buf->buf, buf->bytesused);
p                 325 drivers/media/pci/cx18/cx18-mailbox.c 	char *p;
p                 329 drivers/media/pci/cx18/cx18-mailbox.c 	p = strchr(str, '.');
p                 330 drivers/media/pci/cx18/cx18-mailbox.c 	if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags) && p && p > str)
p                 331 drivers/media/pci/cx18/cx18-mailbox.c 		CX18_INFO("FW version: %s\n", p - 1);
p                 100 drivers/media/pci/cx18/cx18-vbi.c 	u8 *p;
p                 107 drivers/media/pci/cx18/cx18-vbi.c 		p = buf + i * line_size;
p                 110 drivers/media/pci/cx18/cx18-vbi.c 		if (p[0] != 0xff || p[1] || p[2] ||
p                 111 drivers/media/pci/cx18/cx18-vbi.c 		    (p[3] != raw_vbi_sav_rp[0] &&
p                 112 drivers/media/pci/cx18/cx18-vbi.c 		     p[3] != raw_vbi_sav_rp[1]))
p                 116 drivers/media/pci/cx18/cx18-vbi.c 			memcpy(q, p + 4, line_size - 4 - hdr_size);
p                 118 drivers/media/pci/cx18/cx18-vbi.c 			p += line_size - hdr_size - 1;
p                 119 drivers/media/pci/cx18/cx18-vbi.c 			memset(q, (int) *p, hdr_size);
p                 121 drivers/media/pci/cx18/cx18-vbi.c 			memcpy(q, p + 4, line_size - 4);
p                 155 drivers/media/pci/cx18/cx18-vbi.c 		u8 *p = buf + i * line_size;
p                 158 drivers/media/pci/cx18/cx18-vbi.c 		if (p[0] != 0xff || p[1] || p[2] ||
p                 159 drivers/media/pci/cx18/cx18-vbi.c 		    (p[3] != sliced_vbi_eav_rp[0] &&
p                 160 drivers/media/pci/cx18/cx18-vbi.c 		     p[3] != sliced_vbi_eav_rp[1]))
p                 162 drivers/media/pci/cx18/cx18-vbi.c 		vbi.p = p + 4;
p                 168 drivers/media/pci/cx18/cx18-vbi.c 			memcpy(cx->vbi.sliced_data[line].data, vbi.p, 42);
p                 187 drivers/media/pci/cx18/cx18-vbi.c 	u8 *p = (u8 *) buf->buf;
p                 203 drivers/media/pci/cx18/cx18-vbi.c 		     compress_raw_buf(cx, p, size, sizeof(struct vbi_data_hdr));
p                 209 drivers/media/pci/cx18/cx18-vbi.c 		p += size - 4;
p                 210 drivers/media/pci/cx18/cx18-vbi.c 		memcpy(p, &cx->vbi.frame, 4);
p                 220 drivers/media/pci/cx18/cx18-vbi.c 	lines = compress_sliced_buf(cx, p, size, sizeof(struct vbi_data_hdr));
p                 230 drivers/media/pci/cx18/cx18-vbi.c 	memcpy(p, &cx->vbi.sliced_data[0], size);
p                 736 drivers/media/pci/cx23885/cx23885-dvb.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 742 drivers/media/pci/cx23885/cx23885-dvb.c 		switch (p->modulation) {
p                 657 drivers/media/pci/cx23885/cx23888-ir.c 	union cx23888_ir_fifo_rec *p;
p                 672 drivers/media/pci/cx23885/cx23888-ir.c 	for (p = (union cx23888_ir_fifo_rec *) buf, i = 0; i < n; p++, i++) {
p                 674 drivers/media/pci/cx23885/cx23888-ir.c 		if ((p->hw_fifo_data & FIFO_RXTX_RTO) == FIFO_RXTX_RTO) {
p                 679 drivers/media/pci/cx23885/cx23888-ir.c 			u = (p->hw_fifo_data & FIFO_RXTX_LVL) ? 1 : 0;
p                 686 drivers/media/pci/cx23885/cx23888-ir.c 				  (u16) (p->hw_fifo_data & FIFO_RXTX), divider);
p                 690 drivers/media/pci/cx23885/cx23888-ir.c 		p->ir_core_data = (struct ir_raw_event)
p                 702 drivers/media/pci/cx23885/cx23888-ir.c 				      struct v4l2_subdev_ir_parameters *p)
p                 706 drivers/media/pci/cx23885/cx23888-ir.c 	memcpy(p, &state->rx_params, sizeof(struct v4l2_subdev_ir_parameters));
p                 733 drivers/media/pci/cx23885/cx23888-ir.c 				      struct v4l2_subdev_ir_parameters *p)
p                 740 drivers/media/pci/cx23885/cx23888-ir.c 	if (p->shutdown)
p                 743 drivers/media/pci/cx23885/cx23888-ir.c 	if (p->mode != V4L2_SUBDEV_IR_MODE_PULSE_WIDTH)
p                 748 drivers/media/pci/cx23885/cx23888-ir.c 	o->shutdown = p->shutdown;
p                 750 drivers/media/pci/cx23885/cx23888-ir.c 	o->mode = p->mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH;
p                 752 drivers/media/pci/cx23885/cx23888-ir.c 	o->bytes_per_data_element = p->bytes_per_data_element
p                 759 drivers/media/pci/cx23885/cx23888-ir.c 	control_rx_demodulation_enable(dev, p->modulation);
p                 760 drivers/media/pci/cx23885/cx23888-ir.c 	o->modulation = p->modulation;
p                 762 drivers/media/pci/cx23885/cx23888-ir.c 	if (p->modulation) {
p                 763 drivers/media/pci/cx23885/cx23888-ir.c 		p->carrier_freq = rxclk_rx_s_carrier(dev, p->carrier_freq,
p                 766 drivers/media/pci/cx23885/cx23888-ir.c 		o->carrier_freq = p->carrier_freq;
p                 768 drivers/media/pci/cx23885/cx23888-ir.c 		o->duty_cycle = p->duty_cycle = 50;
p                 770 drivers/media/pci/cx23885/cx23888-ir.c 		control_rx_s_carrier_window(dev, p->carrier_freq,
p                 771 drivers/media/pci/cx23885/cx23888-ir.c 					    &p->carrier_range_lower,
p                 772 drivers/media/pci/cx23885/cx23888-ir.c 					    &p->carrier_range_upper);
p                 773 drivers/media/pci/cx23885/cx23888-ir.c 		o->carrier_range_lower = p->carrier_range_lower;
p                 774 drivers/media/pci/cx23885/cx23888-ir.c 		o->carrier_range_upper = p->carrier_range_upper;
p                 776 drivers/media/pci/cx23885/cx23888-ir.c 		p->max_pulse_width =
p                 779 drivers/media/pci/cx23885/cx23888-ir.c 		p->max_pulse_width =
p                 780 drivers/media/pci/cx23885/cx23888-ir.c 			    rxclk_rx_s_max_pulse_width(dev, p->max_pulse_width,
p                 783 drivers/media/pci/cx23885/cx23888-ir.c 	o->max_pulse_width = p->max_pulse_width;
p                 786 drivers/media/pci/cx23885/cx23888-ir.c 	p->noise_filter_min_width =
p                 787 drivers/media/pci/cx23885/cx23888-ir.c 			  filter_rx_s_min_width(dev, p->noise_filter_min_width);
p                 788 drivers/media/pci/cx23885/cx23888-ir.c 	o->noise_filter_min_width = p->noise_filter_min_width;
p                 790 drivers/media/pci/cx23885/cx23888-ir.c 	p->resolution = clock_divider_to_resolution(rxclk_divider);
p                 791 drivers/media/pci/cx23885/cx23888-ir.c 	o->resolution = p->resolution;
p                 798 drivers/media/pci/cx23885/cx23888-ir.c 	o->invert_level = p->invert_level;
p                 799 drivers/media/pci/cx23885/cx23888-ir.c 	atomic_set(&state->rx_invert, p->invert_level);
p                 801 drivers/media/pci/cx23885/cx23888-ir.c 	o->interrupt_enable = p->interrupt_enable;
p                 802 drivers/media/pci/cx23885/cx23888-ir.c 	o->enable = p->enable;
p                 803 drivers/media/pci/cx23885/cx23888-ir.c 	if (p->enable) {
p                 810 drivers/media/pci/cx23885/cx23888-ir.c 		if (p->interrupt_enable)
p                 812 drivers/media/pci/cx23885/cx23888-ir.c 		control_rx_enable(dev, p->enable);
p                 832 drivers/media/pci/cx23885/cx23888-ir.c 				      struct v4l2_subdev_ir_parameters *p)
p                 836 drivers/media/pci/cx23885/cx23888-ir.c 	memcpy(p, &state->tx_params, sizeof(struct v4l2_subdev_ir_parameters));
p                 861 drivers/media/pci/cx23885/cx23888-ir.c 				      struct v4l2_subdev_ir_parameters *p)
p                 868 drivers/media/pci/cx23885/cx23888-ir.c 	if (p->shutdown)
p                 871 drivers/media/pci/cx23885/cx23888-ir.c 	if (p->mode != V4L2_SUBDEV_IR_MODE_PULSE_WIDTH)
p                 876 drivers/media/pci/cx23885/cx23888-ir.c 	o->shutdown = p->shutdown;
p                 878 drivers/media/pci/cx23885/cx23888-ir.c 	o->mode = p->mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH;
p                 880 drivers/media/pci/cx23885/cx23888-ir.c 	o->bytes_per_data_element = p->bytes_per_data_element
p                 887 drivers/media/pci/cx23885/cx23888-ir.c 	control_tx_modulation_enable(dev, p->modulation);
p                 888 drivers/media/pci/cx23885/cx23888-ir.c 	o->modulation = p->modulation;
p                 890 drivers/media/pci/cx23885/cx23888-ir.c 	if (p->modulation) {
p                 891 drivers/media/pci/cx23885/cx23888-ir.c 		p->carrier_freq = txclk_tx_s_carrier(dev, p->carrier_freq,
p                 893 drivers/media/pci/cx23885/cx23888-ir.c 		o->carrier_freq = p->carrier_freq;
p                 895 drivers/media/pci/cx23885/cx23888-ir.c 		p->duty_cycle = cduty_tx_s_duty_cycle(dev, p->duty_cycle);
p                 896 drivers/media/pci/cx23885/cx23888-ir.c 		o->duty_cycle = p->duty_cycle;
p                 898 drivers/media/pci/cx23885/cx23888-ir.c 		p->max_pulse_width =
p                 901 drivers/media/pci/cx23885/cx23888-ir.c 		p->max_pulse_width =
p                 902 drivers/media/pci/cx23885/cx23888-ir.c 			    txclk_tx_s_max_pulse_width(dev, p->max_pulse_width,
p                 905 drivers/media/pci/cx23885/cx23888-ir.c 	o->max_pulse_width = p->max_pulse_width;
p                 908 drivers/media/pci/cx23885/cx23888-ir.c 	p->resolution = clock_divider_to_resolution(txclk_divider);
p                 909 drivers/media/pci/cx23885/cx23888-ir.c 	o->resolution = p->resolution;
p                 914 drivers/media/pci/cx23885/cx23888-ir.c 	control_tx_polarity_invert(dev, p->invert_carrier_sense);
p                 915 drivers/media/pci/cx23885/cx23888-ir.c 	o->invert_carrier_sense = p->invert_carrier_sense;
p                 917 drivers/media/pci/cx23885/cx23888-ir.c 	control_tx_level_invert(dev, p->invert_level);
p                 918 drivers/media/pci/cx23885/cx23888-ir.c 	o->invert_level = p->invert_level;
p                 920 drivers/media/pci/cx23885/cx23888-ir.c 	o->interrupt_enable = p->interrupt_enable;
p                 921 drivers/media/pci/cx23885/cx23888-ir.c 	o->enable = p->enable;
p                 922 drivers/media/pci/cx23885/cx23888-ir.c 	if (p->enable) {
p                 923 drivers/media/pci/cx23885/cx23888-ir.c 		if (p->interrupt_enable)
p                 925 drivers/media/pci/cx23885/cx23888-ir.c 		control_tx_enable(dev, p->enable);
p                 714 drivers/media/pci/cx88/cx88-alsa.c 	.tlv.p = snd_cx88_db_scale,
p                 812 drivers/media/pci/cx88/cx88-tvaudio.c 	static const char * const p[] = { "no pilot", "pilot c1",
p                 822 drivers/media/pci/cx88/cx88-tvaudio.c 			reg, m[mode], p[pilot],
p                 227 drivers/media/pci/ddbridge/ddbridge-core.c static int ddb_redirect(u32 i, u32 p)
p                 231 drivers/media/pci/ddbridge/ddbridge-core.c 	struct ddb *pdev = ddbs[(p >> 4) & 0x3f];
p                 239 drivers/media/pci/ddbridge/ddbridge-core.c 	port = &pdev->port[p & 0x0f];
p                2367 drivers/media/pci/ddbridge/ddbridge-core.c 	u32 i, l, p;
p                2372 drivers/media/pci/ddbridge/ddbridge-core.c 	for (p = l = 0; l < DDB_MAX_LINK; l++) {
p                2379 drivers/media/pci/ddbridge/ddbridge-core.c 		for (i = 0; i < info->port_num; i++, p++) {
p                2380 drivers/media/pci/ddbridge/ddbridge-core.c 			port = &dev->port[p];
p                2384 drivers/media/pci/ddbridge/ddbridge-core.c 			port->pnr = p;
p                2396 drivers/media/pci/ddbridge/ddbridge-core.c 			port->dvb[0].adap = &dev->adap[2 * p];
p                2397 drivers/media/pci/ddbridge/ddbridge-core.c 			port->dvb[1].adap = &dev->adap[2 * p + 1];
p                2399 drivers/media/pci/ddbridge/ddbridge-core.c 			if (port->class == DDB_PORT_NONE && i && p &&
p                2400 drivers/media/pci/ddbridge/ddbridge-core.c 			    dev->port[p - 1].type == DDB_CI_EXTERNAL_XO2) {
p                2404 drivers/media/pci/ddbridge/ddbridge-core.c 				port->i2c = dev->port[p - 1].i2c;
p                2444 drivers/media/pci/ddbridge/ddbridge-core.c 				ddb_input_init(port, 2 * i, 0, 2 * p);
p                2445 drivers/media/pci/ddbridge/ddbridge-core.c 				ddb_input_init(port, 2 * i + 1, 1, 2 * p + 1);
p                2452 drivers/media/pci/ddbridge/ddbridge-core.c 	dev->port_num = p;
p                2969 drivers/media/pci/ddbridge/ddbridge-core.c 	unsigned int i, p;
p                2972 drivers/media/pci/ddbridge/ddbridge-core.c 	if (sscanf(buf, "%x %x\n", &i, &p) != 2)
p                2974 drivers/media/pci/ddbridge/ddbridge-core.c 	res = ddb_redirect(i, p);
p                2977 drivers/media/pci/ddbridge/ddbridge-core.c 	dev_info(device, "redirect: %02x, %02x\n", i, p);
p                 112 drivers/media/pci/ddbridge/ddbridge-mci.c 	struct mci_base *p;
p                 114 drivers/media/pci/ddbridge/ddbridge-mci.c 	list_for_each_entry(p, &mci_list, mci_list)
p                 115 drivers/media/pci/ddbridge/ddbridge-mci.c 		if (p->key == key)
p                 116 drivers/media/pci/ddbridge/ddbridge-mci.c 			return p;
p                  84 drivers/media/pci/ddbridge/ddbridge-sx8.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                  86 drivers/media/pci/ddbridge/ddbridge-sx8.c 	p->cnr.len = 1;
p                  87 drivers/media/pci/ddbridge/ddbridge-sx8.c 	p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
p                  88 drivers/media/pci/ddbridge/ddbridge-sx8.c 	p->cnr.stat[0].svalue =
p                  97 drivers/media/pci/ddbridge/ddbridge-sx8.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 103 drivers/media/pci/ddbridge/ddbridge-sx8.c 	p->strength.len = 1;
p                 104 drivers/media/pci/ddbridge/ddbridge-sx8.c 	p->strength.stat[0].scale = FE_SCALE_DECIBEL;
p                 105 drivers/media/pci/ddbridge/ddbridge-sx8.c 	p->strength.stat[0].svalue = str;
p                 188 drivers/media/pci/ddbridge/ddbridge-sx8.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 196 drivers/media/pci/ddbridge/ddbridge-sx8.c 	if (p->symbol_rate >= (MCLK / 2))
p                 218 drivers/media/pci/ddbridge/ddbridge-sx8.c 		if (p->symbol_rate >= MCLK / 2) {
p                 240 drivers/media/pci/ddbridge/ddbridge-sx8.c 		while (p->symbol_rate * bits_per_symbol > free_ldpc_bitrate)
p                 253 drivers/media/pci/ddbridge/ddbridge-sx8.c 		i = (p->symbol_rate > (MCLK / 2)) ? 3 : 7;
p                 263 drivers/media/pci/ddbridge/ddbridge-sx8.c 	sx8_base->used_ldpc_bitrate[state->mci.nr] = p->symbol_rate
p                 284 drivers/media/pci/ddbridge/ddbridge-sx8.c 	if (p->stream_id != NO_STREAM_ID_FILTER && p->stream_id != 0x80000000)
p                 292 drivers/media/pci/ddbridge/ddbridge-sx8.c 	cmd.dvbs2_search.frequency = p->frequency * 1000;
p                 293 drivers/media/pci/ddbridge/ddbridge-sx8.c 	cmd.dvbs2_search.symbol_rate = p->symbol_rate;
p                 295 drivers/media/pci/ddbridge/ddbridge-sx8.c 		p->scrambling_sequence_index | 0x80000000;
p                 297 drivers/media/pci/ddbridge/ddbridge-sx8.c 		(p->stream_id != NO_STREAM_ID_FILTER) ? p->stream_id : 0;
p                 301 drivers/media/pci/ddbridge/ddbridge-sx8.c 	if (p->stream_id == 0x80000000)
p                 315 drivers/media/pci/ddbridge/ddbridge-sx8.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 347 drivers/media/pci/ddbridge/ddbridge-sx8.c 	cmd.sx8_start_iq.frequency = p->frequency * 1000;
p                 348 drivers/media/pci/ddbridge/ddbridge-sx8.c 	cmd.sx8_start_iq.symbol_rate = p->symbol_rate;
p                 362 drivers/media/pci/ddbridge/ddbridge-sx8.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 368 drivers/media/pci/ddbridge/ddbridge-sx8.c 	isi = p->stream_id;
p                 377 drivers/media/pci/ddbridge/ddbridge-sx8.c 		switch (p->modulation) {
p                 292 drivers/media/pci/dt3155/dt3155.c static int dt3155_querycap(struct file *filp, void *p,
p                 304 drivers/media/pci/dt3155/dt3155.c 				   void *p, struct v4l2_fmtdesc *f)
p                 312 drivers/media/pci/dt3155/dt3155.c static int dt3155_fmt_vid_cap(struct file *filp, void *p, struct v4l2_format *f)
p                 326 drivers/media/pci/dt3155/dt3155.c static int dt3155_g_std(struct file *filp, void *p, v4l2_std_id *norm)
p                 334 drivers/media/pci/dt3155/dt3155.c static int dt3155_s_std(struct file *filp, void *p, v4l2_std_id norm)
p                 355 drivers/media/pci/dt3155/dt3155.c static int dt3155_enum_input(struct file *filp, void *p,
p                 371 drivers/media/pci/dt3155/dt3155.c static int dt3155_g_input(struct file *filp, void *p, unsigned int *i)
p                 379 drivers/media/pci/dt3155/dt3155.c static int dt3155_s_input(struct file *filp, void *p, unsigned int i)
p                 296 drivers/media/pci/ivtv/ivtv-fileops.c 		const char *p = start + 1;
p                 301 drivers/media/pci/ivtv/ivtv-fileops.c 		while (start + len > p && (q = memchr(p, 0, start + len - p))) {
p                 302 drivers/media/pci/ivtv/ivtv-fileops.c 			p = q + 1;
p                 314 drivers/media/pci/ivtv/ivtv-fileops.c 					p = q + 9;
p                 250 drivers/media/pci/ivtv/ivtv-irq.c 	struct list_head *p;
p                 257 drivers/media/pci/ivtv/ivtv-irq.c 	list_for_each(p, &s->q_dma.list) {
p                 258 drivers/media/pci/ivtv/ivtv-irq.c 		buf = list_entry(p, struct ivtv_buffer, list);
p                 361 drivers/media/pci/ivtv/ivtv-mailbox.c 	volatile u32 __iomem *p = mbdata->mbox[mb].data;
p                 363 drivers/media/pci/ivtv/ivtv-mailbox.c 	for (i = 0; i < argc; i++, p++)
p                 364 drivers/media/pci/ivtv/ivtv-mailbox.c 		data[i] = readl(p);
p                 234 drivers/media/pci/ivtv/ivtv-vbi.c static int ivtv_convert_ivtv_vbi(struct ivtv *itv, u8 *p)
p                 240 drivers/media/pci/ivtv/ivtv-vbi.c 	if (!memcmp(p, "itv0", 4)) {
p                 241 drivers/media/pci/ivtv/ivtv-vbi.c 		memcpy(linemask, p + 4, 8);
p                 242 drivers/media/pci/ivtv/ivtv-vbi.c 		p += 12;
p                 243 drivers/media/pci/ivtv/ivtv-vbi.c 	} else if (!memcmp(p, "ITV0", 4)) {
p                 246 drivers/media/pci/ivtv/ivtv-vbi.c 		p += 4;
p                 258 drivers/media/pci/ivtv/ivtv-vbi.c 		id2 = *p & 0xf;
p                 265 drivers/media/pci/ivtv/ivtv-vbi.c 			err = !odd_parity(p[1]) || !odd_parity(p[2]);
p                 282 drivers/media/pci/ivtv/ivtv-vbi.c 			memcpy(itv->vbi.sliced_dec_data[line].data, p + 1, 42);
p                 285 drivers/media/pci/ivtv/ivtv-vbi.c 		p += 43;
p                 306 drivers/media/pci/ivtv/ivtv-vbi.c 	u8 *p;
p                 310 drivers/media/pci/ivtv/ivtv-vbi.c 		p = buf + i * line_size;
p                 313 drivers/media/pci/ivtv/ivtv-vbi.c 		if (p[0] != 0xff || p[1] || p[2] || (p[3] != sav1 && p[3] != sav2)) {
p                 316 drivers/media/pci/ivtv/ivtv-vbi.c 		memcpy(q, p + 4, line_size - 4);
p                 343 drivers/media/pci/ivtv/ivtv-vbi.c 		u8 *p = buf + i * line_size;
p                 346 drivers/media/pci/ivtv/ivtv-vbi.c 		if (p[0] != 0xff || p[1] || p[2] || p[3] != sav) {
p                 349 drivers/media/pci/ivtv/ivtv-vbi.c 		vbi.p = p + 4;
p                 356 drivers/media/pci/ivtv/ivtv-vbi.c 			memcpy(itv->vbi.sliced_data[line].data, vbi.p, 42);
p                 366 drivers/media/pci/ivtv/ivtv-vbi.c 	u8 *p = (u8 *) buf->buf;
p                 376 drivers/media/pci/ivtv/ivtv-vbi.c 		type = p[3];
p                 378 drivers/media/pci/ivtv/ivtv-vbi.c 		size = buf->bytesused = compress_raw_buf(itv, p, size);
p                 384 drivers/media/pci/ivtv/ivtv-vbi.c 			p += size - 4;
p                 385 drivers/media/pci/ivtv/ivtv-vbi.c 			memcpy(p, &itv->vbi.frame, 4);
p                 398 drivers/media/pci/ivtv/ivtv-vbi.c 		lines = compress_sliced_buf(itv, 0, p, size / 2,
p                 403 drivers/media/pci/ivtv/ivtv-vbi.c 		lines = compress_sliced_buf(itv, lines, p + size / 2 - 32, size / 2 + 32,
p                 413 drivers/media/pci/ivtv/ivtv-vbi.c 		memcpy(p, &itv->vbi.sliced_data[0], size);
p                 435 drivers/media/pci/ivtv/ivtv-vbi.c 			p += 4 - offset;
p                 439 drivers/media/pci/ivtv/ivtv-vbi.c 		       swab32s((u32 *)(p + y));
p                 442 drivers/media/pci/ivtv/ivtv-vbi.c 		cnt = ivtv_convert_ivtv_vbi(itv, p + offset);
p                 370 drivers/media/pci/ivtv/ivtvfb.c 	unsigned long p = *ppos;
p                 389 drivers/media/pci/ivtv/ivtvfb.c 	if (p > total_size)
p                 397 drivers/media/pci/ivtv/ivtvfb.c 	if (count + p > total_size) {
p                 400 drivers/media/pci/ivtv/ivtvfb.c 		count = total_size - p;
p                 403 drivers/media/pci/ivtv/ivtvfb.c 	dst = (void __force *) (info->screen_base + p);
p                 426 drivers/media/pci/ivtv/ivtvfb.c 		       p + lead + dma_offset, (void __user *)buf, dma_size);
p                1232 drivers/media/pci/ivtv/ivtvfb.c static int __init ivtvfb_callback_init(struct device *dev, void *p)
p                1241 drivers/media/pci/ivtv/ivtvfb.c 			(*(int *)p)++;
p                1247 drivers/media/pci/ivtv/ivtvfb.c static int ivtvfb_callback_cleanup(struct device *dev, void *p)
p                  76 drivers/media/pci/mantis/mantis_vp1033.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                  86 drivers/media/pci/mantis/mantis_vp1033.c 	div = p->frequency / 250;
p                  93 drivers/media/pci/mantis/mantis_vp1033.c 	if (p->frequency < 1531000)
p                  58 drivers/media/pci/mantis/mantis_vp2033.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                  69 drivers/media/pci/mantis/mantis_vp2033.c 	u32 div = (p->frequency + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL;
p                  74 drivers/media/pci/mantis/mantis_vp2033.c 	buf[3] = (p->frequency < 150000000 ? 0x01 :
p                  75 drivers/media/pci/mantis/mantis_vp2033.c 		  p->frequency < 445000000 ? 0x02 : 0x04);
p                  40 drivers/media/pci/mantis/mantis_vp2040.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                  51 drivers/media/pci/mantis/mantis_vp2040.c 	u32 div = (p->frequency + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL;
p                  56 drivers/media/pci/mantis/mantis_vp2040.c 	buf[3] = (p->frequency < 150000000 ? 0x01 :
p                  57 drivers/media/pci/mantis/mantis_vp2040.c 		  p->frequency < 445000000 ? 0x02 : 0x04);
p                 864 drivers/media/pci/meye/meye.c static int meyeioc_g_params(struct meye_params *p)
p                 866 drivers/media/pci/meye/meye.c 	*p = meye.params;
p                 537 drivers/media/pci/netup_unidvb/netup_unidvb_core.c 	u8 *p = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
p                 540 drivers/media/pci/netup_unidvb/netup_unidvb_core.c 	if (p == NULL) {
p                 545 drivers/media/pci/netup_unidvb/netup_unidvb_core.c 	p += buf->size;
p                 550 drivers/media/pci/netup_unidvb/netup_unidvb_core.c 		memcpy_fromio(p, (u8 __iomem *)(dma->addr_virt + dma->data_offset), copy_bytes);
p                 551 drivers/media/pci/netup_unidvb/netup_unidvb_core.c 		p += copy_bytes;
p                 563 drivers/media/pci/netup_unidvb/netup_unidvb_core.c 		memcpy_fromio(p, (u8 __iomem *)(dma->addr_virt + dma->data_offset), copy_bytes);
p                 120 drivers/media/pci/ngene/ngene-dvb.c static void swap_buffer(u32 *p, u32 len)
p                 123 drivers/media/pci/ngene/ngene-dvb.c 		*p = swab32(*p);
p                 124 drivers/media/pci/ngene/ngene-dvb.c 		p++;
p                 436 drivers/media/pci/pluto2/pluto2.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 448 drivers/media/pci/pluto2/pluto2.c 	div = divide(p->frequency * 3, 500000) + 217;
p                 452 drivers/media/pci/pluto2/pluto2.c 	if (p->frequency < 611000000)
p                 454 drivers/media/pci/pluto2/pluto2.c 	else if (p->frequency < 811000000)
p                 462 drivers/media/pci/pluto2/pluto2.c 	if (p->frequency < 350000000)
p                 467 drivers/media/pci/pluto2/pluto2.c 	if (p->bandwidth_hz == 8000000)
p                  84 drivers/media/pci/pt3/pt3_dma.c 		u8 *p;
p                  86 drivers/media/pci/pt3/pt3_dma.c 		p = &adap->buffer[adap->buf_idx].data[adap->buf_ofs];
p                  90 drivers/media/pci/pt3/pt3_dma.c 			dvb_dmx_swfilter_packets(&adap->demux, p,
p                  95 drivers/media/pci/pt3/pt3_dma.c 			dvb_dmx_swfilter_packets(&adap->demux, p,
p                  98 drivers/media/pci/pt3/pt3_dma.c 		*p = PT3_BUF_CANARY;
p                 108 drivers/media/pci/pt3/pt3_dma.c 	u8 *p;
p                 112 drivers/media/pci/pt3/pt3_dma.c 	p = adap->buffer[0].data;
p                 115 drivers/media/pci/pt3/pt3_dma.c 		p[ofs] = PT3_BUF_CANARY;
p                 120 drivers/media/pci/pt3/pt3_dma.c 			p = adap->buffer[idx].data;
p                 148 drivers/media/pci/pt3/pt3_dma.c 	void *p;
p                 159 drivers/media/pci/pt3/pt3_dma.c 		p = dma_alloc_coherent(&pt3->pdev->dev, DATA_BUF_SZ,
p                 161 drivers/media/pci/pt3/pt3_dma.c 		if (p == NULL)
p                 163 drivers/media/pci/pt3/pt3_dma.c 		adap->buffer[i].data = p;
p                 174 drivers/media/pci/pt3/pt3_dma.c 		p = dma_alloc_coherent(&pt3->pdev->dev, PAGE_SIZE,
p                 176 drivers/media/pci/pt3/pt3_dma.c 		if (p == NULL)
p                 179 drivers/media/pci/pt3/pt3_dma.c 		adap->desc_buf[i].descs = p;
p                 197 drivers/media/pci/pt3/pt3_i2c.c 	void __iomem *p;
p                 217 drivers/media/pci/pt3/pt3_i2c.c 	p = pt3->regs[1] + PT3_I2C_BASE;
p                 220 drivers/media/pci/pt3/pt3_i2c.c 			memcpy_fromio(msgs[i].buf, p, msgs[i].len);
p                 221 drivers/media/pci/pt3/pt3_i2c.c 			p += msgs[i].len;
p                 240 drivers/media/pci/saa7134/saa7134-core.c 	unsigned int  i, p;
p                 246 drivers/media/pci/saa7134/saa7134-core.c 		for (p = 0; p * 4096 < list->length; p++, ptr++)
p                 248 drivers/media/pci/saa7134/saa7134-core.c 						list->offset + p * 4096);
p                 763 drivers/media/pci/saa7134/saa7134-core.c 	unsigned int i,p;
p                 784 drivers/media/pci/saa7134/saa7134-core.c 		for (p = 0; saa7134_pci_tbl[p].driver_data; p++) {
p                 785 drivers/media/pci/saa7134/saa7134-core.c 			if (saa7134_pci_tbl[p].driver_data != i)
p                 788 drivers/media/pci/saa7134/saa7134-core.c 			       saa7134_pci_tbl[p].subvendor,
p                 789 drivers/media/pci/saa7134/saa7134-core.c 			       saa7134_pci_tbl[p].subdevice);
p                 572 drivers/media/pci/saa7164/saa7164-api.c 	struct tmComResTunerStandardAuto p;
p                 579 drivers/media/pci/saa7164/saa7164-api.c 		p.mode = TU_STANDARD_AUTO;
p                 581 drivers/media/pci/saa7164/saa7164-api.c 		p.mode = TU_STANDARD_MANUAL;
p                 583 drivers/media/pci/saa7164/saa7164-api.c 		TU_STANDARD_AUTO_CONTROL, sizeof(p), &p);
p                 732 drivers/media/pci/saa7164/saa7164-api.c 	struct saa7164_port *p = NULL;
p                 748 drivers/media/pci/saa7164/saa7164-api.c 			p = &dev->ports[SAA7164_PORT_ENC1];
p                 750 drivers/media/pci/saa7164/saa7164-api.c 			p = &dev->ports[SAA7164_PORT_ENC2];
p                 755 drivers/media/pci/saa7164/saa7164-api.c 			p = &dev->ports[SAA7164_PORT_ENC1];
p                 757 drivers/media/pci/saa7164/saa7164-api.c 			p = &dev->ports[SAA7164_PORT_ENC2];
p                 761 drivers/media/pci/saa7164/saa7164-api.c 	if (p)
p                 762 drivers/media/pci/saa7164/saa7164-api.c 		ret = saa7164_api_configure_dif(p, std);
p                  88 drivers/media/pci/saa7164/saa7164-core.c 	u8 *p = (u8 *)buf->cpu;
p                  93 drivers/media/pci/saa7164/saa7164-core.c 		if ((*(p + i + 0) != 0x00) || (*(p + i + 1) != 0x00) ||
p                  94 drivers/media/pci/saa7164/saa7164-core.c 			(*(p + i + 2) != 0x01) || (*(p + i + 3) != 0xBA)) {
p                  98 drivers/media/pci/saa7164/saa7164-core.c 				       p + 1, 32, false);
p                 254 drivers/media/pci/saa7164/saa7164-core.c 	u8 *p;
p                 277 drivers/media/pci/saa7164/saa7164-core.c 				p = (u8 *)buf->cpu;
p                 278 drivers/media/pci/saa7164/saa7164-core.c 				if ((*(p + buf->actual_size + 0) != 0xff) ||
p                 279 drivers/media/pci/saa7164/saa7164-core.c 					(*(p + buf->actual_size + 1) != 0xff) ||
p                 280 drivers/media/pci/saa7164/saa7164-core.c 					(*(p + buf->actual_size + 2) != 0xff) ||
p                 281 drivers/media/pci/saa7164/saa7164-core.c 					(*(p + buf->actual_size + 3) != 0xff) ||
p                 282 drivers/media/pci/saa7164/saa7164-core.c 					(*(p + buf->actual_size + 0x10) != 0xff) ||
p                 283 drivers/media/pci/saa7164/saa7164-core.c 					(*(p + buf->actual_size + 0x11) != 0xff) ||
p                 284 drivers/media/pci/saa7164/saa7164-core.c 					(*(p + buf->actual_size + 0x12) != 0xff) ||
p                 285 drivers/media/pci/saa7164/saa7164-core.c 					(*(p + buf->actual_size + 0x13) != 0xff)) {
p                 290 drivers/media/pci/saa7164/saa7164-core.c 				       p + buf->actual_size - 32, 64, false);
p                 195 drivers/media/pci/saa7164/saa7164-dvb.c 	struct list_head *p, *q;
p                 206 drivers/media/pci/saa7164/saa7164-dvb.c 	list_for_each_safe(p, q, &port->dmaqueue.list) {
p                 207 drivers/media/pci/saa7164/saa7164-dvb.c 		buf = list_entry(p, struct saa7164_buffer, list);
p                  61 drivers/media/pci/saa7164/saa7164-encoder.c 	struct list_head *c, *n, *p, *q, *l, *v;
p                  77 drivers/media/pci/saa7164/saa7164-encoder.c 	list_for_each_safe(p, q, &port->list_buf_used.list) {
p                  78 drivers/media/pci/saa7164/saa7164-encoder.c 		ubuf = list_entry(p, struct saa7164_user_buffer, list);
p                  79 drivers/media/pci/saa7164/saa7164-encoder.c 		list_del(p);
p                 797 drivers/media/pci/saa7164/saa7164-encoder.c 	u8 *p;
p                 843 drivers/media/pci/saa7164/saa7164-encoder.c 		p = ubuf->data + ubuf->pos;
p                 849 drivers/media/pci/saa7164/saa7164-encoder.c 		if (copy_to_user(buffer, p, cnt)) {
p                  30 drivers/media/pci/saa7164/saa7164-vbi.c 	struct list_head *c, *n, *p, *q, *l, *v;
p                  46 drivers/media/pci/saa7164/saa7164-vbi.c 	list_for_each_safe(p, q, &port->list_buf_used.list) {
p                  47 drivers/media/pci/saa7164/saa7164-vbi.c 		ubuf = list_entry(p, struct saa7164_user_buffer, list);
p                  48 drivers/media/pci/saa7164/saa7164-vbi.c 		list_del(p);
p                 501 drivers/media/pci/saa7164/saa7164-vbi.c 	u8 *p;
p                 547 drivers/media/pci/saa7164/saa7164-vbi.c 		p = ubuf->data + ubuf->pos;
p                 553 drivers/media/pci/saa7164/saa7164-vbi.c 		if (copy_to_user(buffer, p, cnt)) {
p                 164 drivers/media/pci/solo6x10/solo6x10-core.c 	u16 *p = (u16 *)buf;
p                 182 drivers/media/pci/solo6x10/solo6x10-core.c 		solo_eeprom_write(solo_dev, i, cpu_to_be16(p[i]));
p                 194 drivers/media/pci/solo6x10/solo6x10-core.c 	u16 *p = (u16 *)buf;
p                 199 drivers/media/pci/solo6x10/solo6x10-core.c 		p[i] = be16_to_cpu(solo_eeprom_read(solo_dev, i));
p                 197 drivers/media/pci/solo6x10/solo6x10-v4l2.c 		void *p = vb2_plane_vaddr(vb, 0);
p                 201 drivers/media/pci/solo6x10/solo6x10-v4l2.c 			((u8 *)p)[i] = 0x80;
p                 202 drivers/media/pci/solo6x10/solo6x10-v4l2.c 			((u8 *)p)[i + 1] = 0x00;
p                1538 drivers/media/pci/ttpci/av7110.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1543 drivers/media/pci/ttpci/av7110.c 	u32 div = (p->frequency + 479500) / 125;
p                1545 drivers/media/pci/ttpci/av7110.c 	if (p->frequency > 2000000)
p                1547 drivers/media/pci/ttpci/av7110.c 	else if (p->frequency > 1800000)
p                1549 drivers/media/pci/ttpci/av7110.c 	else if (p->frequency > 1600000)
p                1551 drivers/media/pci/ttpci/av7110.c 	else if (p->frequency > 1200000)
p                1553 drivers/media/pci/ttpci/av7110.c 	else if (p->frequency >= 1100000)
p                1581 drivers/media/pci/ttpci/av7110.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1587 drivers/media/pci/ttpci/av7110.c 	div = (p->frequency + 35937500 + 31250) / 62500;
p                1592 drivers/media/pci/ttpci/av7110.c 	data[3] = (p->frequency < 174000000 ? 0x88 : p->frequency < 470000000 ? 0x84 : 0x81);
p                1613 drivers/media/pci/ttpci/av7110.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1619 drivers/media/pci/ttpci/av7110.c 	div = p->frequency / 125;
p                1640 drivers/media/pci/ttpci/av7110.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1643 drivers/media/pci/ttpci/av7110.c 	u32 f = p->frequency;
p                1672 drivers/media/pci/ttpci/av7110.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1678 drivers/media/pci/ttpci/av7110.c 	div = (p->frequency + 36200000) / 166666;
p                1680 drivers/media/pci/ttpci/av7110.c 	if (p->frequency <= 782000000)
p                1810 drivers/media/pci/ttpci/av7110.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1818 drivers/media/pci/ttpci/av7110.c 	div = (p->frequency + 36150000 + 31250) / 62500;
p                1824 drivers/media/pci/ttpci/av7110.c 	if (p->frequency < 45000000)
p                1826 drivers/media/pci/ttpci/av7110.c 	else if (p->frequency < 137000000)
p                1828 drivers/media/pci/ttpci/av7110.c 	else if (p->frequency < 403000000)
p                1830 drivers/media/pci/ttpci/av7110.c 	else if (p->frequency < 860000000)
p                1866 drivers/media/pci/ttpci/av7110.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1873 drivers/media/pci/ttpci/av7110.c 	div = (36125000 + p->frequency) / 166666;
p                1877 drivers/media/pci/ttpci/av7110.c 	if (p->frequency < 175000000)
p                1879 drivers/media/pci/ttpci/av7110.c 	else if (p->frequency < 390000000)
p                1881 drivers/media/pci/ttpci/av7110.c 	else if (p->frequency < 470000000)
p                1883 drivers/media/pci/ttpci/av7110.c 	else if (p->frequency < 750000000)
p                1888 drivers/media/pci/ttpci/av7110.c 	if (p->frequency < 175000000)
p                1890 drivers/media/pci/ttpci/av7110.c 	else if (p->frequency < 470000000)
p                 354 drivers/media/pci/ttpci/av7110_av.c 	u8 *p;
p                 362 drivers/media/pci/ttpci/av7110_av.c 		p = buf + i;
p                 363 drivers/media/pci/ttpci/av7110_av.c 		if (p[0] || p[1] || p[2] != 0x01 || p[3] != 0xb3)
p                 365 drivers/media/pci/ttpci/av7110_av.c 		p += 4;
p                 366 drivers/media/pci/ttpci/av7110_av.c 		hsize = ((p[1] &0xF0) >> 4) | (p[0] << 4);
p                 367 drivers/media/pci/ttpci/av7110_av.c 		vsize = ((p[1] &0x0F) << 8) | (p[2]);
p                 368 drivers/media/pci/ttpci/av7110_av.c 		sw = (p[3] & 0x0F);
p                 564 drivers/media/pci/ttpci/av7110_av.c void av7110_p2t_init(struct av7110_p2t *p, struct dvb_demux_feed *feed)
p                 566 drivers/media/pci/ttpci/av7110_av.c 	memset(p->pes, 0, TS_SIZE);
p                 567 drivers/media/pci/ttpci/av7110_av.c 	p->counter = 0;
p                 568 drivers/media/pci/ttpci/av7110_av.c 	p->pos = 0;
p                 569 drivers/media/pci/ttpci/av7110_av.c 	p->frags = 0;
p                 571 drivers/media/pci/ttpci/av7110_av.c 		p->feed = feed;
p                 574 drivers/media/pci/ttpci/av7110_av.c static void clear_p2t(struct av7110_p2t *p)
p                 576 drivers/media/pci/ttpci/av7110_av.c 	memset(p->pes, 0, TS_SIZE);
p                 578 drivers/media/pci/ttpci/av7110_av.c 	p->pos = 0;
p                 579 drivers/media/pci/ttpci/av7110_av.c 	p->frags = 0;
p                 631 drivers/media/pci/ttpci/av7110_av.c void av7110_p2t_write(u8 const *buf, long int length, u16 pid, struct av7110_p2t *p)
p                 638 drivers/media/pci/ttpci/av7110_av.c 	if (p->frags){
p                 640 drivers/media/pci/ttpci/av7110_av.c 		switch(p->frags) {
p                 669 drivers/media/pci/ttpci/av7110_av.c 				p->pes[0] = 0x00;
p                 670 drivers/media/pci/ttpci/av7110_av.c 				p->pes[1] = 0x00;
p                 671 drivers/media/pci/ttpci/av7110_av.c 				p->pes[2] = 0x01;
p                 672 drivers/media/pci/ttpci/av7110_av.c 				p->pes[3] = buf[c];
p                 673 drivers/media/pci/ttpci/av7110_av.c 				p->pos = 4;
p                 674 drivers/media/pci/ttpci/av7110_av.c 				memcpy(p->pes + p->pos, buf + c, (TS_SIZE - 4) - p->pos);
p                 675 drivers/media/pci/ttpci/av7110_av.c 				c += (TS_SIZE - 4) - p->pos;
p                 676 drivers/media/pci/ttpci/av7110_av.c 				p_to_t(p->pes, (TS_SIZE - 4), pid, &p->counter, p->feed);
p                 677 drivers/media/pci/ttpci/av7110_av.c 				clear_p2t(p);
p                 685 drivers/media/pci/ttpci/av7110_av.c 		p->frags = 0;
p                 688 drivers/media/pci/ttpci/av7110_av.c 	if (p->pos) {
p                 689 drivers/media/pci/ttpci/av7110_av.c 		c2 = find_pes_header(buf + c, length - c, &p->frags);
p                 690 drivers/media/pci/ttpci/av7110_av.c 		if (c2 >= 0 && c2 < (TS_SIZE - 4) - p->pos)
p                 693 drivers/media/pci/ttpci/av7110_av.c 			l = (TS_SIZE - 4) - p->pos;
p                 694 drivers/media/pci/ttpci/av7110_av.c 		memcpy(p->pes + p->pos, buf, l);
p                 696 drivers/media/pci/ttpci/av7110_av.c 		p->pos += l;
p                 697 drivers/media/pci/ttpci/av7110_av.c 		p_to_t(p->pes, p->pos, pid, &p->counter, p->feed);
p                 698 drivers/media/pci/ttpci/av7110_av.c 		clear_p2t(p);
p                 703 drivers/media/pci/ttpci/av7110_av.c 		c2 = find_pes_header(buf + c + add, length - c - add, &p->frags);
p                 707 drivers/media/pci/ttpci/av7110_av.c 				p_to_t(buf + c, c2 - c, pid, &p->counter, p->feed);
p                 709 drivers/media/pci/ttpci/av7110_av.c 				clear_p2t(p);
p                 717 drivers/media/pci/ttpci/av7110_av.c 			p_to_t(buf + c, l, pid, &p->counter, p->feed);
p                 718 drivers/media/pci/ttpci/av7110_av.c 			memcpy(p->pes, buf + c + l, rest);
p                 719 drivers/media/pci/ttpci/av7110_av.c 			p->pos = rest;
p                  23 drivers/media/pci/ttpci/av7110_av.h extern void av7110_p2t_init(struct av7110_p2t *p, struct dvb_demux_feed *feed);
p                  24 drivers/media/pci/ttpci/av7110_av.h extern void av7110_p2t_write(u8 const *buf, long int length, u16 pid, struct av7110_p2t *p);
p                  76 drivers/media/pci/ttpci/av7110_ca.c 	struct dvb_ringbuffer *tab[] = { cirbuf, ciwbuf, NULL }, **p;
p                  79 drivers/media/pci/ttpci/av7110_ca.c 	for (p = tab; *p; p++) {
p                  82 drivers/media/pci/ttpci/av7110_ca.c 			while (p-- != tab) {
p                  83 drivers/media/pci/ttpci/av7110_ca.c 				vfree(p[0]->data);
p                  84 drivers/media/pci/ttpci/av7110_ca.c 				p[0]->data = NULL;
p                  88 drivers/media/pci/ttpci/av7110_ca.c 		dvb_ringbuffer_init(*p, data, size);
p                   8 drivers/media/pci/ttpci/av7110_ipack.c void av7110_ipack_reset(struct ipack *p)
p                  10 drivers/media/pci/ttpci/av7110_ipack.c 	p->found = 0;
p                  11 drivers/media/pci/ttpci/av7110_ipack.c 	p->cid = 0;
p                  12 drivers/media/pci/ttpci/av7110_ipack.c 	p->plength = 0;
p                  13 drivers/media/pci/ttpci/av7110_ipack.c 	p->flag1 = 0;
p                  14 drivers/media/pci/ttpci/av7110_ipack.c 	p->flag2 = 0;
p                  15 drivers/media/pci/ttpci/av7110_ipack.c 	p->hlength = 0;
p                  16 drivers/media/pci/ttpci/av7110_ipack.c 	p->mpeg = 0;
p                  17 drivers/media/pci/ttpci/av7110_ipack.c 	p->check = 0;
p                  18 drivers/media/pci/ttpci/av7110_ipack.c 	p->which = 0;
p                  19 drivers/media/pci/ttpci/av7110_ipack.c 	p->done = 0;
p                  20 drivers/media/pci/ttpci/av7110_ipack.c 	p->count = 0;
p                  24 drivers/media/pci/ttpci/av7110_ipack.c int av7110_ipack_init(struct ipack *p, int size,
p                  27 drivers/media/pci/ttpci/av7110_ipack.c 	if (!(p->buf = vmalloc(size))) {
p                  31 drivers/media/pci/ttpci/av7110_ipack.c 	p->size = size;
p                  32 drivers/media/pci/ttpci/av7110_ipack.c 	p->func = func;
p                  33 drivers/media/pci/ttpci/av7110_ipack.c 	p->repack_subids = 0;
p                  34 drivers/media/pci/ttpci/av7110_ipack.c 	av7110_ipack_reset(p);
p                  39 drivers/media/pci/ttpci/av7110_ipack.c void av7110_ipack_free(struct ipack *p)
p                  41 drivers/media/pci/ttpci/av7110_ipack.c 	vfree(p->buf);
p                  45 drivers/media/pci/ttpci/av7110_ipack.c static void send_ipack(struct ipack *p)
p                  54 drivers/media/pci/ttpci/av7110_ipack.c 	switch (p->mpeg) {
p                  56 drivers/media/pci/ttpci/av7110_ipack.c 		if (p->count < 10)
p                  58 drivers/media/pci/ttpci/av7110_ipack.c 		p->buf[3] = p->cid;
p                  59 drivers/media/pci/ttpci/av7110_ipack.c 		p->buf[4] = (u8)(((p->count - 6) & 0xff00) >> 8);
p                  60 drivers/media/pci/ttpci/av7110_ipack.c 		p->buf[5] = (u8)((p->count - 6) & 0x00ff);
p                  61 drivers/media/pci/ttpci/av7110_ipack.c 		if (p->repack_subids && p->cid == PRIVATE_STREAM1) {
p                  62 drivers/media/pci/ttpci/av7110_ipack.c 			off = 9 + p->buf[8];
p                  63 drivers/media/pci/ttpci/av7110_ipack.c 			streamid = p->buf[off];
p                  66 drivers/media/pci/ttpci/av7110_ipack.c 				ac3_off = ((p->buf[off + 2] << 8)|
p                  67 drivers/media/pci/ttpci/av7110_ipack.c 					   p->buf[off + 3]);
p                  68 drivers/media/pci/ttpci/av7110_ipack.c 				if (ac3_off < p->count)
p                  69 drivers/media/pci/ttpci/av7110_ipack.c 					f = dvb_filter_get_ac3info(p->buf + off + 3 + ac3_off,
p                  70 drivers/media/pci/ttpci/av7110_ipack.c 								   p->count - ac3_off, &ai, 0);
p                  72 drivers/media/pci/ttpci/av7110_ipack.c 					nframes = (p->count - off - 3 - ac3_off) /
p                  74 drivers/media/pci/ttpci/av7110_ipack.c 					p->buf[off + 2] = (ac3_off >> 8) & 0xff;
p                  75 drivers/media/pci/ttpci/av7110_ipack.c 					p->buf[off + 3] = (ac3_off) & 0xff;
p                  76 drivers/media/pci/ttpci/av7110_ipack.c 					p->buf[off + 1] = nframes;
p                  77 drivers/media/pci/ttpci/av7110_ipack.c 					ac3_off +=  nframes * ai.framesize - p->count;
p                  81 drivers/media/pci/ttpci/av7110_ipack.c 		p->func(p->buf, p->count, p->data);
p                  83 drivers/media/pci/ttpci/av7110_ipack.c 		p->buf[6] = 0x80;
p                  84 drivers/media/pci/ttpci/av7110_ipack.c 		p->buf[7] = 0x00;
p                  85 drivers/media/pci/ttpci/av7110_ipack.c 		p->buf[8] = 0x00;
p                  86 drivers/media/pci/ttpci/av7110_ipack.c 		p->count = 9;
p                  87 drivers/media/pci/ttpci/av7110_ipack.c 		if (p->repack_subids && p->cid == PRIVATE_STREAM1
p                  89 drivers/media/pci/ttpci/av7110_ipack.c 			p->count += 4;
p                  90 drivers/media/pci/ttpci/av7110_ipack.c 			p->buf[9] = streamid;
p                  91 drivers/media/pci/ttpci/av7110_ipack.c 			p->buf[10] = (ac3_off >> 8) & 0xff;
p                  92 drivers/media/pci/ttpci/av7110_ipack.c 			p->buf[11] = (ac3_off) & 0xff;
p                  93 drivers/media/pci/ttpci/av7110_ipack.c 			p->buf[12] = 0;
p                  98 drivers/media/pci/ttpci/av7110_ipack.c 		if (p->count < 8)
p                 100 drivers/media/pci/ttpci/av7110_ipack.c 		p->buf[3] = p->cid;
p                 101 drivers/media/pci/ttpci/av7110_ipack.c 		p->buf[4] = (u8)(((p->count - 6) & 0xff00) >> 8);
p                 102 drivers/media/pci/ttpci/av7110_ipack.c 		p->buf[5] = (u8)((p->count - 6) & 0x00ff);
p                 103 drivers/media/pci/ttpci/av7110_ipack.c 		p->func(p->buf, p->count, p->data);
p                 105 drivers/media/pci/ttpci/av7110_ipack.c 		p->buf[6] = 0x0f;
p                 106 drivers/media/pci/ttpci/av7110_ipack.c 		p->count = 7;
p                 112 drivers/media/pci/ttpci/av7110_ipack.c void av7110_ipack_flush(struct ipack *p)
p                 114 drivers/media/pci/ttpci/av7110_ipack.c 	if (p->plength != MMAX_PLENGTH - 6 || p->found <= 6)
p                 116 drivers/media/pci/ttpci/av7110_ipack.c 	p->plength = p->found - 6;
p                 117 drivers/media/pci/ttpci/av7110_ipack.c 	p->found = 0;
p                 118 drivers/media/pci/ttpci/av7110_ipack.c 	send_ipack(p);
p                 119 drivers/media/pci/ttpci/av7110_ipack.c 	av7110_ipack_reset(p);
p                 123 drivers/media/pci/ttpci/av7110_ipack.c static void write_ipack(struct ipack *p, const u8 *data, int count)
p                 127 drivers/media/pci/ttpci/av7110_ipack.c 	if (p->count < 6) {
p                 128 drivers/media/pci/ttpci/av7110_ipack.c 		memcpy(p->buf, headr, 3);
p                 129 drivers/media/pci/ttpci/av7110_ipack.c 		p->count = 6;
p                 132 drivers/media/pci/ttpci/av7110_ipack.c 	if (p->count + count < p->size){
p                 133 drivers/media/pci/ttpci/av7110_ipack.c 		memcpy(p->buf+p->count, data, count);
p                 134 drivers/media/pci/ttpci/av7110_ipack.c 		p->count += count;
p                 136 drivers/media/pci/ttpci/av7110_ipack.c 		int rest = p->size - p->count;
p                 137 drivers/media/pci/ttpci/av7110_ipack.c 		memcpy(p->buf+p->count, data, rest);
p                 138 drivers/media/pci/ttpci/av7110_ipack.c 		p->count += rest;
p                 139 drivers/media/pci/ttpci/av7110_ipack.c 		send_ipack(p);
p                 141 drivers/media/pci/ttpci/av7110_ipack.c 			write_ipack(p, data + rest, count - rest);
p                 146 drivers/media/pci/ttpci/av7110_ipack.c int av7110_ipack_instant_repack (const u8 *buf, int count, struct ipack *p)
p                 151 drivers/media/pci/ttpci/av7110_ipack.c 	while (c < count && (p->mpeg == 0 ||
p                 152 drivers/media/pci/ttpci/av7110_ipack.c 			     (p->mpeg == 1 && p->found < 7) ||
p                 153 drivers/media/pci/ttpci/av7110_ipack.c 			     (p->mpeg == 2 && p->found < 9))
p                 154 drivers/media/pci/ttpci/av7110_ipack.c 	       &&  (p->found < 5 || !p->done)) {
p                 155 drivers/media/pci/ttpci/av7110_ipack.c 		switch (p->found) {
p                 159 drivers/media/pci/ttpci/av7110_ipack.c 				p->found++;
p                 161 drivers/media/pci/ttpci/av7110_ipack.c 				p->found = 0;
p                 166 drivers/media/pci/ttpci/av7110_ipack.c 				p->found++;
p                 168 drivers/media/pci/ttpci/av7110_ipack.c 				p->found = 2;
p                 170 drivers/media/pci/ttpci/av7110_ipack.c 				p->found = 0;
p                 174 drivers/media/pci/ttpci/av7110_ipack.c 			p->cid = 0;
p                 184 drivers/media/pci/ttpci/av7110_ipack.c 				p->done = 1;
p                 189 drivers/media/pci/ttpci/av7110_ipack.c 				p->found++;
p                 190 drivers/media/pci/ttpci/av7110_ipack.c 				p->cid = buf[c];
p                 194 drivers/media/pci/ttpci/av7110_ipack.c 				p->found = 0;
p                 201 drivers/media/pci/ttpci/av7110_ipack.c 				p->plen[0] = buf[c];
p                 203 drivers/media/pci/ttpci/av7110_ipack.c 				p->plen[1] = buf[c];
p                 205 drivers/media/pci/ttpci/av7110_ipack.c 				p->found += 2;
p                 206 drivers/media/pci/ttpci/av7110_ipack.c 				p->plength = (p->plen[0] << 8) | p->plen[1];
p                 208 drivers/media/pci/ttpci/av7110_ipack.c 				p->plen[0] = buf[c];
p                 209 drivers/media/pci/ttpci/av7110_ipack.c 				p->found++;
p                 214 drivers/media/pci/ttpci/av7110_ipack.c 			p->plen[1] = buf[c];
p                 216 drivers/media/pci/ttpci/av7110_ipack.c 			p->found++;
p                 217 drivers/media/pci/ttpci/av7110_ipack.c 			p->plength = (p->plen[0] << 8) | p->plen[1];
p                 220 drivers/media/pci/ttpci/av7110_ipack.c 			if (!p->done) {
p                 221 drivers/media/pci/ttpci/av7110_ipack.c 				p->flag1 = buf[c];
p                 223 drivers/media/pci/ttpci/av7110_ipack.c 				p->found++;
p                 224 drivers/media/pci/ttpci/av7110_ipack.c 				if ((p->flag1 & 0xc0) == 0x80)
p                 225 drivers/media/pci/ttpci/av7110_ipack.c 					p->mpeg = 2;
p                 227 drivers/media/pci/ttpci/av7110_ipack.c 					p->hlength = 0;
p                 228 drivers/media/pci/ttpci/av7110_ipack.c 					p->which = 0;
p                 229 drivers/media/pci/ttpci/av7110_ipack.c 					p->mpeg = 1;
p                 230 drivers/media/pci/ttpci/av7110_ipack.c 					p->flag2 = 0;
p                 236 drivers/media/pci/ttpci/av7110_ipack.c 			if (!p->done && p->mpeg == 2) {
p                 237 drivers/media/pci/ttpci/av7110_ipack.c 				p->flag2 = buf[c];
p                 239 drivers/media/pci/ttpci/av7110_ipack.c 				p->found++;
p                 244 drivers/media/pci/ttpci/av7110_ipack.c 			if (!p->done && p->mpeg == 2) {
p                 245 drivers/media/pci/ttpci/av7110_ipack.c 				p->hlength = buf[c];
p                 247 drivers/media/pci/ttpci/av7110_ipack.c 				p->found++;
p                 256 drivers/media/pci/ttpci/av7110_ipack.c 	if (!p->plength)
p                 257 drivers/media/pci/ttpci/av7110_ipack.c 		p->plength = MMAX_PLENGTH - 6;
p                 259 drivers/media/pci/ttpci/av7110_ipack.c 	if (p->done || ((p->mpeg == 2 && p->found >= 9) ||
p                 260 drivers/media/pci/ttpci/av7110_ipack.c 			(p->mpeg == 1 && p->found >= 7))) {
p                 261 drivers/media/pci/ttpci/av7110_ipack.c 		switch (p->cid) {
p                 265 drivers/media/pci/ttpci/av7110_ipack.c 			if (p->mpeg == 2 && p->found == 9) {
p                 266 drivers/media/pci/ttpci/av7110_ipack.c 				write_ipack(p, &p->flag1, 1);
p                 267 drivers/media/pci/ttpci/av7110_ipack.c 				write_ipack(p, &p->flag2, 1);
p                 268 drivers/media/pci/ttpci/av7110_ipack.c 				write_ipack(p, &p->hlength, 1);
p                 271 drivers/media/pci/ttpci/av7110_ipack.c 			if (p->mpeg == 1 && p->found == 7)
p                 272 drivers/media/pci/ttpci/av7110_ipack.c 				write_ipack(p, &p->flag1, 1);
p                 274 drivers/media/pci/ttpci/av7110_ipack.c 			if (p->mpeg == 2 && (p->flag2 & PTS_ONLY) &&
p                 275 drivers/media/pci/ttpci/av7110_ipack.c 			    p->found < 14) {
p                 276 drivers/media/pci/ttpci/av7110_ipack.c 				while (c < count && p->found < 14) {
p                 277 drivers/media/pci/ttpci/av7110_ipack.c 					p->pts[p->found - 9] = buf[c];
p                 278 drivers/media/pci/ttpci/av7110_ipack.c 					write_ipack(p, buf + c, 1);
p                 280 drivers/media/pci/ttpci/av7110_ipack.c 					p->found++;
p                 286 drivers/media/pci/ttpci/av7110_ipack.c 			if (p->mpeg == 1 && p->which < 2000) {
p                 288 drivers/media/pci/ttpci/av7110_ipack.c 				if (p->found == 7) {
p                 289 drivers/media/pci/ttpci/av7110_ipack.c 					p->check = p->flag1;
p                 290 drivers/media/pci/ttpci/av7110_ipack.c 					p->hlength = 1;
p                 293 drivers/media/pci/ttpci/av7110_ipack.c 				while (!p->which && c < count &&
p                 294 drivers/media/pci/ttpci/av7110_ipack.c 				       p->check == 0xff){
p                 295 drivers/media/pci/ttpci/av7110_ipack.c 					p->check = buf[c];
p                 296 drivers/media/pci/ttpci/av7110_ipack.c 					write_ipack(p, buf + c, 1);
p                 298 drivers/media/pci/ttpci/av7110_ipack.c 					p->found++;
p                 299 drivers/media/pci/ttpci/av7110_ipack.c 					p->hlength++;
p                 305 drivers/media/pci/ttpci/av7110_ipack.c 				if ((p->check & 0xc0) == 0x40 && !p->which) {
p                 306 drivers/media/pci/ttpci/av7110_ipack.c 					p->check = buf[c];
p                 307 drivers/media/pci/ttpci/av7110_ipack.c 					write_ipack(p, buf + c, 1);
p                 309 drivers/media/pci/ttpci/av7110_ipack.c 					p->found++;
p                 310 drivers/media/pci/ttpci/av7110_ipack.c 					p->hlength++;
p                 312 drivers/media/pci/ttpci/av7110_ipack.c 					p->which = 1;
p                 315 drivers/media/pci/ttpci/av7110_ipack.c 					p->check = buf[c];
p                 316 drivers/media/pci/ttpci/av7110_ipack.c 					write_ipack(p, buf + c, 1);
p                 318 drivers/media/pci/ttpci/av7110_ipack.c 					p->found++;
p                 319 drivers/media/pci/ttpci/av7110_ipack.c 					p->hlength++;
p                 320 drivers/media/pci/ttpci/av7110_ipack.c 					p->which = 2;
p                 325 drivers/media/pci/ttpci/av7110_ipack.c 				if (p->which == 1) {
p                 326 drivers/media/pci/ttpci/av7110_ipack.c 					p->check = buf[c];
p                 327 drivers/media/pci/ttpci/av7110_ipack.c 					write_ipack(p, buf + c, 1);
p                 329 drivers/media/pci/ttpci/av7110_ipack.c 					p->found++;
p                 330 drivers/media/pci/ttpci/av7110_ipack.c 					p->hlength++;
p                 331 drivers/media/pci/ttpci/av7110_ipack.c 					p->which = 2;
p                 336 drivers/media/pci/ttpci/av7110_ipack.c 				if ((p->check & 0x30) && p->check != 0xff) {
p                 337 drivers/media/pci/ttpci/av7110_ipack.c 					p->flag2 = (p->check & 0xf0) << 2;
p                 338 drivers/media/pci/ttpci/av7110_ipack.c 					p->pts[0] = p->check;
p                 339 drivers/media/pci/ttpci/av7110_ipack.c 					p->which = 3;
p                 344 drivers/media/pci/ttpci/av7110_ipack.c 				if (p->which > 2){
p                 345 drivers/media/pci/ttpci/av7110_ipack.c 					if ((p->flag2 & PTS_DTS_FLAGS) == PTS_ONLY) {
p                 346 drivers/media/pci/ttpci/av7110_ipack.c 						while (c < count && p->which < 7) {
p                 347 drivers/media/pci/ttpci/av7110_ipack.c 							p->pts[p->which - 2] = buf[c];
p                 348 drivers/media/pci/ttpci/av7110_ipack.c 							write_ipack(p, buf + c, 1);
p                 350 drivers/media/pci/ttpci/av7110_ipack.c 							p->found++;
p                 351 drivers/media/pci/ttpci/av7110_ipack.c 							p->which++;
p                 352 drivers/media/pci/ttpci/av7110_ipack.c 							p->hlength++;
p                 356 drivers/media/pci/ttpci/av7110_ipack.c 					} else if ((p->flag2 & PTS_DTS_FLAGS) == PTS_DTS) {
p                 357 drivers/media/pci/ttpci/av7110_ipack.c 						while (c < count && p->which < 12) {
p                 358 drivers/media/pci/ttpci/av7110_ipack.c 							if (p->which < 7)
p                 359 drivers/media/pci/ttpci/av7110_ipack.c 								p->pts[p->which - 2] = buf[c];
p                 360 drivers/media/pci/ttpci/av7110_ipack.c 							write_ipack(p, buf + c, 1);
p                 362 drivers/media/pci/ttpci/av7110_ipack.c 							p->found++;
p                 363 drivers/media/pci/ttpci/av7110_ipack.c 							p->which++;
p                 364 drivers/media/pci/ttpci/av7110_ipack.c 							p->hlength++;
p                 369 drivers/media/pci/ttpci/av7110_ipack.c 					p->which = 2000;
p                 374 drivers/media/pci/ttpci/av7110_ipack.c 			while (c < count && p->found < p->plength + 6) {
p                 376 drivers/media/pci/ttpci/av7110_ipack.c 				if (l + p->found > p->plength + 6)
p                 377 drivers/media/pci/ttpci/av7110_ipack.c 					l = p->plength + 6 - p->found;
p                 378 drivers/media/pci/ttpci/av7110_ipack.c 				write_ipack(p, buf + c, l);
p                 379 drivers/media/pci/ttpci/av7110_ipack.c 				p->found += l;
p                 386 drivers/media/pci/ttpci/av7110_ipack.c 		if (p->done) {
p                 387 drivers/media/pci/ttpci/av7110_ipack.c 			if (p->found + count - c < p->plength + 6) {
p                 388 drivers/media/pci/ttpci/av7110_ipack.c 				p->found += count - c;
p                 391 drivers/media/pci/ttpci/av7110_ipack.c 				c += p->plength + 6 - p->found;
p                 392 drivers/media/pci/ttpci/av7110_ipack.c 				p->found = p->plength + 6;
p                 396 drivers/media/pci/ttpci/av7110_ipack.c 		if (p->plength && p->found == p->plength + 6) {
p                 397 drivers/media/pci/ttpci/av7110_ipack.c 			send_ipack(p);
p                 398 drivers/media/pci/ttpci/av7110_ipack.c 			av7110_ipack_reset(p);
p                 400 drivers/media/pci/ttpci/av7110_ipack.c 				av7110_ipack_instant_repack(buf + c, count - c, p);
p                   5 drivers/media/pci/ttpci/av7110_ipack.h extern int av7110_ipack_init(struct ipack *p, int size,
p                   8 drivers/media/pci/ttpci/av7110_ipack.h extern void av7110_ipack_reset(struct ipack *p);
p                   9 drivers/media/pci/ttpci/av7110_ipack.h extern int  av7110_ipack_instant_repack(const u8 *buf, int count, struct ipack *p);
p                  10 drivers/media/pci/ttpci/av7110_ipack.h extern void av7110_ipack_free(struct ipack * p);
p                  11 drivers/media/pci/ttpci/av7110_ipack.h extern void av7110_ipack_flush(struct ipack *p);
p                 649 drivers/media/pci/ttpci/budget-ci.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 655 drivers/media/pci/ttpci/budget-ci.c 	if ((p->frequency < 950000) || (p->frequency > 2150000))
p                 658 drivers/media/pci/ttpci/budget-ci.c 	div = (p->frequency + (500 - 1)) / 500;	/* round correctly */
p                 664 drivers/media/pci/ttpci/budget-ci.c 	if (p->symbol_rate < 4000000)
p                 667 drivers/media/pci/ttpci/budget-ci.c 	if (p->frequency < 1250000)
p                 669 drivers/media/pci/ttpci/budget-ci.c 	else if (p->frequency < 1550000)
p                 671 drivers/media/pci/ttpci/budget-ci.c 	else if (p->frequency < 2050000)
p                 673 drivers/media/pci/ttpci/budget-ci.c 	else if (p->frequency < 2150000)
p                 730 drivers/media/pci/ttpci/budget-ci.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 738 drivers/media/pci/ttpci/budget-ci.c 	tuner_frequency = p->frequency + 36130000;
p                 763 drivers/media/pci/ttpci/budget-ci.c 	if (p->frequency < 49000000)
p                 765 drivers/media/pci/ttpci/budget-ci.c 	else if (p->frequency < 159000000)
p                 767 drivers/media/pci/ttpci/budget-ci.c 	else if (p->frequency < 444000000)
p                 769 drivers/media/pci/ttpci/budget-ci.c 	else if (p->frequency < 861000000)
p                 775 drivers/media/pci/ttpci/budget-ci.c 	switch (p->bandwidth_hz) {
p                 797 drivers/media/pci/ttpci/budget-ci.c 	tuner_frequency = (((p->frequency / 1000) * 6) + 217280) / 1000;
p                 846 drivers/media/pci/ttpci/budget-ci.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 857 drivers/media/pci/ttpci/budget-ci.c 	tuner_frequency = p->frequency + 36125000;
p                 894 drivers/media/pci/ttpci/budget-ci.c 	tuner_frequency = (p->frequency + 36125000 + (62500/2)) / 62500;
p                 252 drivers/media/pci/ttpci/budget-patch.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 257 drivers/media/pci/ttpci/budget-patch.c 	u32 div = (p->frequency + 479500) / 125;
p                 259 drivers/media/pci/ttpci/budget-patch.c 	if (p->frequency > 2000000)
p                 261 drivers/media/pci/ttpci/budget-patch.c 	else if (p->frequency > 1800000)
p                 263 drivers/media/pci/ttpci/budget-patch.c 	else if (p->frequency > 1600000)
p                 265 drivers/media/pci/ttpci/budget-patch.c 	else if (p->frequency > 1200000)
p                 267 drivers/media/pci/ttpci/budget-patch.c 	else if (p->frequency >= 1100000)
p                 294 drivers/media/pci/ttpci/budget-patch.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 300 drivers/media/pci/ttpci/budget-patch.c 	div = p->frequency / 125;
p                 194 drivers/media/pci/tw68/tw68-risc.c 	u32 p;
p                 196 drivers/media/pci/tw68/tw68-risc.c 	p = RISC_OP(risc);
p                 197 drivers/media/pci/tw68/tw68-risc.c 	if (!(risc & 0x80000000) || !instr[p].name) {
p                 202 drivers/media/pci/tw68/tw68-risc.c 		risc, instr[p].name, (risc >> 27) & 1);
p                 203 drivers/media/pci/tw68/tw68-risc.c 	if (instr[p].has_data_type)
p                 205 drivers/media/pci/tw68/tw68-risc.c 	if (instr[p].has_byte_info)
p                 208 drivers/media/pci/tw68/tw68-risc.c 	if (instr[p].has_addr)
p                 451 drivers/media/platform/coda/coda-bit.c 	u32 *p = ctx->parabuf.vaddr;
p                 454 drivers/media/platform/coda/coda-bit.c 		p[index] = value;
p                 456 drivers/media/platform/coda/coda-bit.c 		p[index ^ 1] = value;
p                2586 drivers/media/platform/coda/coda-common.c 	u16 *p;
p                2605 drivers/media/platform/coda/coda-common.c 	p = (u16 *)dev->codebuf.vaddr;
p                2609 drivers/media/platform/coda/coda-common.c 				CODA_DOWN_DATA_SET(p[i ^ 1]);
p                2615 drivers/media/platform/coda/coda-common.c 				CODA_DOWN_DATA_SET(p[round_down(i, 4) +
p                  49 drivers/media/platform/coda/coda-h264.c int coda_h264_filler_nal(int size, char *p)
p                  54 drivers/media/platform/coda/coda-h264.c 	p[0] = 0x00;
p                  55 drivers/media/platform/coda/coda-h264.c 	p[1] = 0x00;
p                  56 drivers/media/platform/coda/coda-h264.c 	p[2] = 0x00;
p                  57 drivers/media/platform/coda/coda-h264.c 	p[3] = 0x01;
p                  58 drivers/media/platform/coda/coda-h264.c 	p[4] = 0x0c;
p                  59 drivers/media/platform/coda/coda-h264.c 	memset(p + 5, 0xff, size - 6);
p                  61 drivers/media/platform/coda/coda-h264.c 	p[size - 1] = 0x80;
p                  66 drivers/media/platform/coda/coda-h264.c int coda_h264_padding(int size, char *p)
p                  76 drivers/media/platform/coda/coda-h264.c 	coda_h264_filler_nal(nal_size, p);
p                 346 drivers/media/platform/coda/coda.h int coda_h264_filler_nal(int size, char *p);
p                 347 drivers/media/platform/coda/coda.h int coda_h264_padding(int size, char *p);
p                1321 drivers/media/platform/davinci/vpfe_capture.c 		     struct v4l2_buffer *p)
p                1328 drivers/media/platform/davinci/vpfe_capture.c 	if (V4L2_BUF_TYPE_VIDEO_CAPTURE != p->type) {
p                1341 drivers/media/platform/davinci/vpfe_capture.c 	return videobuf_qbuf(&vpfe_dev->buffer_queue, p);
p                 180 drivers/media/platform/exynos4-is/fimc-capture.c 	struct fimc_pipeline *p = to_fimc_pipeline(cap->ve.pipe);
p                 181 drivers/media/platform/exynos4-is/fimc-capture.c 	struct v4l2_subdev *csis = p->subdevs[IDX_CSIS];
p                 774 drivers/media/platform/exynos4-is/fimc-capture.c 	struct fimc_pipeline *p = to_fimc_pipeline(fimc->vid_cap.ve.pipe);
p                 775 drivers/media/platform/exynos4-is/fimc-capture.c 	struct v4l2_subdev *sd = p->subdevs[IDX_SENSOR];
p                1111 drivers/media/platform/exynos4-is/fimc-capture.c 	struct fimc_pipeline *p = to_fimc_pipeline(vc->ve.pipe);
p                1124 drivers/media/platform/exynos4-is/fimc-capture.c 			struct media_pad *p = &sd->entity.pads[i];
p                1126 drivers/media/platform/exynos4-is/fimc-capture.c 			if (p->flags & MEDIA_PAD_FL_SINK) {
p                1127 drivers/media/platform/exynos4-is/fimc-capture.c 				sink_pad = p;
p                1164 drivers/media/platform/exynos4-is/fimc-capture.c 		if (sd == p->subdevs[IDX_SENSOR] &&
p                  64 drivers/media/platform/exynos4-is/media-dev.c static void fimc_pipeline_prepare(struct fimc_pipeline *p,
p                  73 drivers/media/platform/exynos4-is/media-dev.c 		p->subdevs[i] = NULL;
p                  97 drivers/media/platform/exynos4-is/media-dev.c 			p->subdevs[IDX_SENSOR] = sd;
p                 100 drivers/media/platform/exynos4-is/media-dev.c 			p->subdevs[IDX_CSIS] = sd;
p                 103 drivers/media/platform/exynos4-is/media-dev.c 			p->subdevs[IDX_FLITE] = sd;
p                 106 drivers/media/platform/exynos4-is/media-dev.c 			p->subdevs[IDX_FIMC] = sd;
p                 109 drivers/media/platform/exynos4-is/media-dev.c 			p->subdevs[IDX_IS_ISP] = sd;
p                 119 drivers/media/platform/exynos4-is/media-dev.c 	if (sensor && p->subdevs[IDX_FIMC])
p                 120 drivers/media/platform/exynos4-is/media-dev.c 		__setup_sensor_notification(fmd, sensor, p->subdevs[IDX_FIMC]);
p                 156 drivers/media/platform/exynos4-is/media-dev.c static int fimc_pipeline_s_power(struct fimc_pipeline *p, bool on)
p                 164 drivers/media/platform/exynos4-is/media-dev.c 	if (p->subdevs[IDX_SENSOR] == NULL)
p                 170 drivers/media/platform/exynos4-is/media-dev.c 		ret = __subdev_set_power(p->subdevs[idx], on);
p                 180 drivers/media/platform/exynos4-is/media-dev.c 		__subdev_set_power(p->subdevs[idx], !on);
p                 196 drivers/media/platform/exynos4-is/media-dev.c 	struct fimc_pipeline *p = to_fimc_pipeline(ep);
p                 200 drivers/media/platform/exynos4-is/media-dev.c 	if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) {
p                 206 drivers/media/platform/exynos4-is/media-dev.c 	ret = fimc_pipeline_s_power(p, 1);
p                 210 drivers/media/platform/exynos4-is/media-dev.c 	if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP])
p                 229 drivers/media/platform/exynos4-is/media-dev.c 	struct fimc_pipeline *p = to_fimc_pipeline(ep);
p                 232 drivers/media/platform/exynos4-is/media-dev.c 	if (WARN_ON(p == NULL || me == NULL))
p                 236 drivers/media/platform/exynos4-is/media-dev.c 		fimc_pipeline_prepare(p, me);
p                 238 drivers/media/platform/exynos4-is/media-dev.c 	sd = p->subdevs[IDX_SENSOR];
p                 259 drivers/media/platform/exynos4-is/media-dev.c 	struct fimc_pipeline *p = to_fimc_pipeline(ep);
p                 260 drivers/media/platform/exynos4-is/media-dev.c 	struct v4l2_subdev *sd = p ? p->subdevs[IDX_SENSOR] : NULL;
p                 269 drivers/media/platform/exynos4-is/media-dev.c 	ret = fimc_pipeline_s_power(p, 0);
p                 274 drivers/media/platform/exynos4-is/media-dev.c 	if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP])
p                 291 drivers/media/platform/exynos4-is/media-dev.c 	struct fimc_pipeline *p = to_fimc_pipeline(ep);
p                 292 drivers/media/platform/exynos4-is/media-dev.c 	struct fimc_md *fmd = entity_to_fimc_mdev(&p->subdevs[IDX_CSIS]->entity);
p                 296 drivers/media/platform/exynos4-is/media-dev.c 	if (p->subdevs[IDX_SENSOR] == NULL) {
p                 306 drivers/media/platform/exynos4-is/media-dev.c 		if (p->subdevs[IDX_FIMC])
p                 308 drivers/media/platform/exynos4-is/media-dev.c 		else if (p->subdevs[IDX_IS_ISP])
p                 310 drivers/media/platform/exynos4-is/media-dev.c 		else if (p->subdevs[IDX_FLITE])
p                 319 drivers/media/platform/exynos4-is/media-dev.c 		fimc_pipeline_prepare(p, &p->subdevs[sd_id]->entity);
p                 321 drivers/media/platform/exynos4-is/media-dev.c 		if (p->subdevs[IDX_SENSOR] == NULL)
p                 333 drivers/media/platform/exynos4-is/media-dev.c 		ret = v4l2_subdev_call(p->subdevs[idx], video, s_stream, on);
p                 341 drivers/media/platform/exynos4-is/media-dev.c 	fimc_pipeline_s_power(p, !on);
p                 344 drivers/media/platform/exynos4-is/media-dev.c 		v4l2_subdev_call(p->subdevs[idx], video, s_stream, !on);
p                 359 drivers/media/platform/exynos4-is/media-dev.c 	struct fimc_pipeline *p;
p                 361 drivers/media/platform/exynos4-is/media-dev.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 362 drivers/media/platform/exynos4-is/media-dev.c 	if (!p)
p                 365 drivers/media/platform/exynos4-is/media-dev.c 	list_add_tail(&p->list, &fmd->pipelines);
p                 367 drivers/media/platform/exynos4-is/media-dev.c 	p->ep.ops = &fimc_pipeline_ops;
p                 368 drivers/media/platform/exynos4-is/media-dev.c 	return &p->ep;
p                 374 drivers/media/platform/exynos4-is/media-dev.c 		struct fimc_pipeline *p;
p                 376 drivers/media/platform/exynos4-is/media-dev.c 		p = list_entry(fmd->pipelines.next, typeof(*p), list);
p                 377 drivers/media/platform/exynos4-is/media-dev.c 		list_del(&p->list);
p                 378 drivers/media/platform/exynos4-is/media-dev.c 		kfree(p);
p                1108 drivers/media/platform/exynos4-is/media-dev.c 	struct fimc_pipeline *p;
p                1117 drivers/media/platform/exynos4-is/media-dev.c 	p = to_fimc_pipeline(ve->pipe);
p                1122 drivers/media/platform/exynos4-is/media-dev.c 	if (!enable && p->subdevs[IDX_SENSOR] == NULL)
p                1131 drivers/media/platform/exynos4-is/media-dev.c 		memset(p->subdevs, 0, sizeof(p->subdevs));
p                 199 drivers/media/platform/exynos4-is/media-dev.h 	struct fimc_pipeline *p = to_fimc_pipeline(ep);
p                 201 drivers/media/platform/exynos4-is/media-dev.h 	if (!p || index >= IDX_MAX)
p                 204 drivers/media/platform/exynos4-is/media-dev.h 		return p->subdevs[index];
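The exynos4-is media-dev.c entries above show the pipeline lifecycle: a pipeline object is allocated with kzalloc(), linked onto the media device's pipelines list, and later torn down by walking that list and freeing each entry, while the media-dev.h helper adds a bounds-checked accessor for p->subdevs[]. A minimal userspace sketch of the same allocate-track-free pattern, using a plain singly linked list instead of the kernel's list_head (all names here are illustrative, not the driver's):

#include <stdlib.h>
#include <stdio.h>

#define IDX_MAX 5

struct pipeline {
	void *subdevs[IDX_MAX];     /* placeholder for the per-role sub-devices */
	struct pipeline *next;      /* stand-in for the kernel's list_head linkage */
};

struct media_dev {
	struct pipeline *pipelines; /* head of all pipelines created for this device */
};

/* Allocate a zeroed pipeline and track it on the device list. */
static struct pipeline *pipeline_create(struct media_dev *md)
{
	struct pipeline *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->next = md->pipelines;
	md->pipelines = p;
	return p;
}

/* Bounds-checked accessor, mirroring the media-dev.h helper indexed above. */
static void *pipeline_subdev(struct pipeline *p, unsigned int index)
{
	if (!p || index >= IDX_MAX)
		return NULL;
	return p->subdevs[index];
}

/* Free every pipeline still on the list, as the driver does on cleanup. */
static void pipelines_free(struct media_dev *md)
{
	while (md->pipelines) {
		struct pipeline *p = md->pipelines;

		md->pipelines = p->next;
		free(p);
	}
}

int main(void)
{
	struct media_dev md = { 0 };
	struct pipeline *p = pipeline_create(&md);

	printf("subdev[0] = %p\n", pipeline_subdev(p, 0));
	pipelines_free(&md);
	return 0;
}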
p                 833 drivers/media/platform/fsl-viu.c 				struct v4l2_requestbuffers *p)
p                 837 drivers/media/platform/fsl-viu.c 	return videobuf_reqbufs(&fh->vb_vidq, p);
p                 841 drivers/media/platform/fsl-viu.c 					struct v4l2_buffer *p)
p                 845 drivers/media/platform/fsl-viu.c 	return videobuf_querybuf(&fh->vb_vidq, p);
p                 848 drivers/media/platform/fsl-viu.c static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
p                 852 drivers/media/platform/fsl-viu.c 	return videobuf_qbuf(&fh->vb_vidq, p);
p                 855 drivers/media/platform/fsl-viu.c static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
p                 859 drivers/media/platform/fsl-viu.c 	return videobuf_dqbuf(&fh->vb_vidq, p,
p                  85 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c 	struct mtk_enc_params *p = &ctx->enc_params;
p                  92 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c 		p->bitrate = ctrl->val;
p                  98 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c 		p->num_b_frame = ctrl->val;
p                 103 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c 		p->rc_frame = ctrl->val;
p                 108 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c 		p->h264_max_qp = ctrl->val;
p                 113 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c 		p->seq_hdr_mode = ctrl->val;
p                 118 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c 		p->rc_mb = ctrl->val;
p                 123 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c 		p->h264_profile = ctrl->val;
p                 128 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c 		p->h264_level = ctrl->val;
p                 133 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c 		p->intra_period = ctrl->val;
p                 139 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c 		p->gop_size = ctrl->val;
p                 144 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c 		p->force_intra = 1;
p                 254 drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c 	u32 *p = &inst->vsi->dec_table[VP8_DEC_TABLE_OFFSET];
p                 259 drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c 			val = *p++;
p                 270 drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c 	u32 *p = &inst->vsi->dec_table[VP8_DEC_TABLE_OFFSET];
p                 276 drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c 		*p++ = readl(hwd + VP8_BSDSET);
p                 277 drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c 		*p++ = readl(hwd + VP8_BSDSET);
p                 278 drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c 		*p++ = readl(hwd + VP8_BSDSET) & 0xFFFFFF;
p                 448 drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c 	unsigned char *p = buf;
p                 455 drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c 	memcpy(p, h264_filler_marker, ARRAY_SIZE(h264_filler_marker));
p                 457 drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c 	p += H264_FILLER_MARKER_SIZE;
p                 458 drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c 	memset(p, 0xff, size);
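The venc_h264_if.c lines show a filler payload being assembled: a marker is copied to the start of the buffer and the remainder is padded with 0xff. A self-contained sketch of that marker-plus-padding pattern; the marker bytes and sizes below are placeholders, not the driver's actual h264_filler_marker values:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Placeholder marker; the real driver defines its own filler marker bytes. */
static const uint8_t filler_marker[] = { 0x00, 0x00, 0x00, 0x01 };
#define FILLER_MARKER_SIZE sizeof(filler_marker)

/* Write the marker, then pad the rest of the buffer with 0xff bytes. */
static void fill_filler(uint8_t *buf, size_t total)
{
	uint8_t *p = buf;

	if (total < FILLER_MARKER_SIZE)
		return;
	memcpy(p, filler_marker, FILLER_MARKER_SIZE);
	p += FILLER_MARKER_SIZE;
	memset(p, 0xff, total - FILLER_MARKER_SIZE);
}

int main(void)
{
	uint8_t buf[16];

	fill_filler(buf, sizeof(buf));
	for (size_t i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}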
p                 515 drivers/media/platform/qcom/camss/camss-vfe-4-1.c 	u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
p                 525 drivers/media/platform/qcom/camss/camss-vfe-4-1.c 			if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16)
p                 684 drivers/media/platform/qcom/camss/camss-vfe-4-1.c 	u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
p                 726 drivers/media/platform/qcom/camss/camss-vfe-4-1.c 	if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21)
p                 739 drivers/media/platform/qcom/camss/camss-vfe-4-1.c 	u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
p                 760 drivers/media/platform/qcom/camss/camss-vfe-4-1.c 	if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) {
p                 561 drivers/media/platform/qcom/camss/camss-vfe-4-7.c 	u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
p                 564 drivers/media/platform/qcom/camss/camss-vfe-4-7.c 	switch (p) {
p                 585 drivers/media/platform/qcom/camss/camss-vfe-4-7.c 		if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16)
p                 607 drivers/media/platform/qcom/camss/camss-vfe-4-7.c 		if (p == V4L2_PIX_FMT_YUYV || p == V4L2_PIX_FMT_YVYU)
p                 630 drivers/media/platform/qcom/camss/camss-vfe-4-7.c 	u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
p                 633 drivers/media/platform/qcom/camss/camss-vfe-4-7.c 	if (p != V4L2_PIX_FMT_YUYV && p != V4L2_PIX_FMT_YVYU &&
p                 634 drivers/media/platform/qcom/camss/camss-vfe-4-7.c 			p != V4L2_PIX_FMT_VYUY && p != V4L2_PIX_FMT_UYVY)
p                 646 drivers/media/platform/qcom/camss/camss-vfe-4-7.c 	if (p == V4L2_PIX_FMT_UYVY || p == V4L2_PIX_FMT_YUYV)
p                 787 drivers/media/platform/qcom/camss/camss-vfe-4-7.c 	u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
p                 829 drivers/media/platform/qcom/camss/camss-vfe-4-7.c 	if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21)
p                 842 drivers/media/platform/qcom/camss/camss-vfe-4-7.c 	u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
p                 863 drivers/media/platform/qcom/camss/camss-vfe-4-7.c 	if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) {
p                 580 drivers/media/platform/qcom/camss/camss-video.c 	struct v4l2_plane_pix_format *p;
p                 591 drivers/media/platform/qcom/camss/camss-video.c 			p = &pix_mp->plane_fmt[i];
p                 592 drivers/media/platform/qcom/camss/camss-video.c 			bytesperline[i] = clamp_t(u32, p->bytesperline,
p                 594 drivers/media/platform/qcom/camss/camss-video.c 			sizeimage[i] = clamp_t(u32, p->sizeimage,
p                 635 drivers/media/platform/qcom/camss/camss-video.c 			p = &pix_mp->plane_fmt[i];
p                 636 drivers/media/platform/qcom/camss/camss-video.c 			p->bytesperline = clamp_t(u32, p->bytesperline,
p                 638 drivers/media/platform/qcom/camss/camss-video.c 			p->sizeimage = clamp_t(u32, p->sizeimage,
p                 639 drivers/media/platform/qcom/camss/camss-video.c 					       p->bytesperline,
p                 640 drivers/media/platform/qcom/camss/camss-video.c 					       p->bytesperline * 4096);
p                 641 drivers/media/platform/qcom/camss/camss-video.c 			lines = p->sizeimage / p->bytesperline;
p                 643 drivers/media/platform/qcom/camss/camss-video.c 			if (p->bytesperline < bytesperline[i])
p                 644 drivers/media/platform/qcom/camss/camss-video.c 				p->bytesperline = ALIGN(bytesperline[i], 8);
p                 646 drivers/media/platform/qcom/camss/camss-video.c 			if (p->sizeimage < p->bytesperline * lines)
p                 647 drivers/media/platform/qcom/camss/camss-video.c 				p->sizeimage = p->bytesperline * lines;
p                 649 drivers/media/platform/qcom/camss/camss-video.c 			if (p->sizeimage < sizeimage[i])
p                 650 drivers/media/platform/qcom/camss/camss-video.c 				p->sizeimage = sizeimage[i];
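The camss-video.c lines clamp the user-supplied bytesperline and sizeimage for each plane, then round bytesperline up to an 8-byte alignment when it falls below the driver-computed minimum and raise sizeimage accordingly. A small sketch of that clamping logic; the numeric bounds and the minimums passed in are made up here, since the real values come from the subdevice format:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint32_t)(a) - 1))

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

struct plane_fmt {
	uint32_t bytesperline;
	uint32_t sizeimage;
};

/* min_bpl/min_size are illustrative stand-ins for the driver-computed minimums. */
static void try_plane_fmt(struct plane_fmt *p, uint32_t min_bpl, uint32_t min_size)
{
	uint32_t lines;

	p->bytesperline = clamp_u32(p->bytesperline, 1, 65528);	/* bounds illustrative */
	p->sizeimage = clamp_u32(p->sizeimage, p->bytesperline,
				 p->bytesperline * 4096);
	lines = p->sizeimage / p->bytesperline;

	if (p->bytesperline < min_bpl)
		p->bytesperline = ALIGN_UP(min_bpl, 8);
	if (p->sizeimage < p->bytesperline * lines)
		p->sizeimage = p->bytesperline * lines;
	if (p->sizeimage < min_size)
		p->sizeimage = min_size;
}

int main(void)
{
	struct plane_fmt pf = { .bytesperline = 100, .sizeimage = 100 };

	try_plane_fmt(&pf, 1280, 1280 * 720);
	printf("bytesperline=%u sizeimage=%u\n", pf.bytesperline, pf.sizeimage);
	return 0;
}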
p                 963 drivers/media/platform/qcom/venus/hfi_venus.c 	void *p;
p                 968 drivers/media/platform/qcom/venus/hfi_venus.c 	p = memchr(sfr->data, '\0', sfr->buf_size);
p                 973 drivers/media/platform/qcom/venus/hfi_venus.c 	if (!p)
p                  27 drivers/media/platform/qcom/venus/venc_ctrls.c 	u32 b, p, ratio;
p                  40 drivers/media/platform/qcom/venus/venc_ctrls.c 	b = p = half;
p                  42 drivers/media/platform/qcom/venus/venc_ctrls.c 	for (; b <= gop_size - 1; b++, p--) {
p                  43 drivers/media/platform/qcom/venus/venc_ctrls.c 		if (b % p)
p                  46 drivers/media/platform/qcom/venus/venc_ctrls.c 		ratio = b / p;
p                  60 drivers/media/platform/qcom/venus/venc_ctrls.c 	if (b + p + 1 != gop_size)
p                  64 drivers/media/platform/qcom/venus/venc_ctrls.c 	*pf = p;
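The venc_ctrls.c lines search for a way to split a GOP into B and P frames: starting from half of the non-I frames, b counts up while p counts down, candidates where b is not a whole multiple of p are skipped, and the result is only accepted when b + p + 1 adds back up to the GOP size. A standalone sketch of that search; the driver additionally matches the ratio b / p against the requested number of consecutive B frames, which is not reproduced here:

#include <stdint.h>
#include <stdio.h>

/*
 * Split (gop_size - 1) non-I frames into b B-frames and p P-frames such
 * that b is a whole multiple of p. Returns 0 on success. Sketch only:
 * the real driver also checks b / p against the consecutive-B control.
 */
static int split_gop(uint32_t gop_size, uint32_t *bf, uint32_t *pf)
{
	uint32_t half, b, p;
	int found = 0;

	if (gop_size < 3)
		return -1;

	half = (gop_size - 1) / 2;
	b = p = half;

	/* Walk b up and p down until b divides evenly by p. */
	for (; b <= gop_size - 1 && p > 0; b++, p--) {
		if (b % p)
			continue;
		found = 1;
		break;
	}

	/* The split must still account for every frame in the GOP plus the I frame. */
	if (!found || b + p + 1 != gop_size)
		return -1;

	*bf = b;
	*pf = p;
	return 0;
}

int main(void)
{
	uint32_t b, p;

	if (!split_gop(31, &b, &p))
		printf("gop=31 -> b=%u p=%u\n", b, p);
	return 0;
}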
p                1257 drivers/media/platform/rcar_drif.c static struct device_node *rcar_drif_bond_enabled(struct platform_device *p)
p                1261 drivers/media/platform/rcar_drif.c 	np = of_parse_phandle(p->dev.of_node, "renesas,bonding", 0);
p                 545 drivers/media/platform/rcar_jpu.c static void put_qtbl(u8 *p, const u8 *qtbl)
p                 550 drivers/media/platform/rcar_jpu.c 		p[i] = *(qtbl + zigzag[i]);
p                 553 drivers/media/platform/rcar_jpu.c static void put_htbl(u8 *p, const u8 *htbl, unsigned int len)
p                 559 drivers/media/platform/rcar_jpu.c 			p[i + j] = htbl[i + 3 - j];
p                 562 drivers/media/platform/rcar_jpu.c static void jpu_generate_hdr(unsigned short quality, unsigned char *p)
p                 564 drivers/media/platform/rcar_jpu.c 	put_qtbl(p + JPU_JPEG_QTBL_LUM_OFFSET, (const u8 *)qtbl_lum[quality]);
p                 565 drivers/media/platform/rcar_jpu.c 	put_qtbl(p + JPU_JPEG_QTBL_CHR_OFFSET, (const u8 *)qtbl_chr[quality]);
p                 567 drivers/media/platform/rcar_jpu.c 	put_htbl(p + JPU_JPEG_HDCTBL_LUM_OFFSET, (const u8 *)hdctbl_lum,
p                 569 drivers/media/platform/rcar_jpu.c 	put_htbl(p + JPU_JPEG_HACTBL_LUM_OFFSET, (const u8 *)hactbl_lum,
p                 572 drivers/media/platform/rcar_jpu.c 	put_htbl(p + JPU_JPEG_HDCTBL_CHR_OFFSET, (const u8 *)hdctbl_chr,
p                 574 drivers/media/platform/rcar_jpu.c 	put_htbl(p + JPU_JPEG_HACTBL_CHR_OFFSET, (const u8 *)hactbl_chr,
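The rcar_jpu.c lines build the fixed JPEG header: put_qtbl() writes a quantization table reordered through a zigzag index table, and put_htbl() copies a Huffman table four bytes at a time with the bytes reversed inside each group. A self-contained sketch of both helpers, using a tiny 4x4 zigzag table for illustration instead of the hardware's 8x8 one:

#include <stdint.h>
#include <stdio.h>

/* Illustrative 4x4 zigzag scan order; the JPEG unit uses the 8x8 version. */
static const uint8_t zigzag4[16] = {
	 0,  1,  4,  8,
	 5,  2,  3,  6,
	 9, 12, 13, 10,
	 7, 11, 14, 15,
};

/* Write qtbl[] into p[] in zigzag order, as put_qtbl() does above. */
static void put_qtbl(uint8_t *p, const uint8_t *qtbl, unsigned int n)
{
	for (unsigned int i = 0; i < n; i++)
		p[i] = qtbl[zigzag4[i]];
}

/* Copy htbl[] into p[] reversing byte order within each 4-byte group (len % 4 == 0). */
static void put_htbl(uint8_t *p, const uint8_t *htbl, unsigned int len)
{
	for (unsigned int i = 0; i < len; i += 4)
		for (unsigned int j = 0; j < 4; j++)
			p[i + j] = htbl[i + 3 - j];
}

int main(void)
{
	uint8_t src[16], dst[16];

	for (unsigned int i = 0; i < 16; i++)
		src[i] = (uint8_t)i;

	put_qtbl(dst, src, 16);
	printf("qtbl[2]=%u (source index 4 via zigzag)\n", dst[2]);

	put_htbl(dst, src, 16);
	printf("htbl[0..3]=%u %u %u %u (expect 3 2 1 0)\n",
	       dst[0], dst[1], dst[2], dst[3]);
	return 0;
}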
p                 120 drivers/media/platform/rockchip/rga/rga-buf.c 	unsigned int address, len, i, p;
p                 135 drivers/media/platform/rockchip/rga/rga-buf.c 		for (p = 0; p < len; p++) {
p                 137 drivers/media/platform/rockchip/rga/rga-buf.c 					  ((dma_addr_t)p << PAGE_SHIFT);
p                 139 drivers/media/platform/rockchip/rga/rga-buf.c 			pages[mapped_size + p] = phys;
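The rga-buf.c lines fill an MMU page list: for each contiguous chunk the physical base address is advanced one page at a time and every page address is stored into the next free slot of the table. A hedged sketch of that loop with ordinary integers standing in for dma_addr_t and a PAGE_SHIFT of 12 assumed:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

typedef uint64_t dma_addr_t;	/* stand-in for the kernel type */

/*
 * Fill pages[] with the address of every page in a contiguous chunk that
 * starts at 'address' and spans 'len' pages, beginning at slot 'mapped'.
 * Returns the new number of mapped slots.
 */
static unsigned int fill_page_table(dma_addr_t *pages, unsigned int mapped,
				    dma_addr_t address, unsigned int len)
{
	for (unsigned int p = 0; p < len; p++) {
		dma_addr_t phys = address + ((dma_addr_t)p << PAGE_SHIFT);

		pages[mapped + p] = phys;
	}
	return mapped + len;
}

int main(void)
{
	dma_addr_t pages[8];
	unsigned int mapped = 0;

	mapped = fill_page_table(pages, mapped, 0x10000000, 4);
	mapped = fill_page_table(pages, mapped, 0x20000000, 2);

	for (unsigned int i = 0; i < mapped; i++)
		printf("pages[%u] = 0x%llx\n", i, (unsigned long long)pages[i]);
	return 0;
}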
p                1145 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 	struct s5p_mfc_enc_params *p = &ctx->enc_params;
p                1149 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 	if (p->seq_hdr_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) {
p                1793 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 	struct s5p_mfc_enc_params *p = &ctx->enc_params;
p                1798 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->gop_size = ctrl->val;
p                1801 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->slice_mode = ctrl->val;
p                1804 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->slice_mb = ctrl->val;
p                1807 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->slice_bit = ctrl->val * 8;
p                1810 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->intra_refresh_mb = ctrl->val;
p                1813 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->pad = ctrl->val;
p                1816 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->pad_luma = (ctrl->val >> 16) & 0xff;
p                1817 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->pad_cb = (ctrl->val >> 8) & 0xff;
p                1818 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->pad_cr = (ctrl->val >> 0) & 0xff;
p                1821 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->rc_frame = ctrl->val;
p                1824 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->rc_bitrate = ctrl->val;
p                1827 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->rc_reaction_coeff = ctrl->val;
p                1837 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->vbv_size = ctrl->val;
p                1840 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->mv_h_range = ctrl->val;
p                1843 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->mv_v_range = ctrl->val;
p                1846 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.cpb_size = ctrl->val;
p                1849 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->seq_hdr_mode = ctrl->val;
p                1852 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->frame_skip_mode = ctrl->val;
p                1855 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->fixed_target_bit = ctrl->val;
p                1858 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->num_b_frame = ctrl->val;
p                1863 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 			p->codec.h264.profile =
p                1867 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 			p->codec.h264.profile =
p                1871 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 			p->codec.h264.profile =
p                1876 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 				p->codec.h264.profile =
p                1886 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.level_v4l2 = ctrl->val;
p                1887 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.level = h264_level(ctrl->val);
p                1888 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		if (p->codec.h264.level < 0) {
p                1890 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 			ret = p->codec.h264.level;
p                1894 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.mpeg4.level_v4l2 = ctrl->val;
p                1895 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.mpeg4.level = mpeg4_level(ctrl->val);
p                1896 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		if (p->codec.mpeg4.level < 0) {
p                1898 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 			ret = p->codec.mpeg4.level;
p                1902 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.loop_filter_mode = ctrl->val;
p                1905 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.loop_filter_alpha = ctrl->val;
p                1908 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.loop_filter_beta = ctrl->val;
p                1911 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.entropy_mode = ctrl->val;
p                1914 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.num_ref_pic_4p = ctrl->val;
p                1917 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264._8x8_transform = ctrl->val;
p                1920 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->rc_mb = ctrl->val;
p                1923 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.rc_frame_qp = ctrl->val;
p                1926 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.rc_min_qp = ctrl->val;
p                1929 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.rc_max_qp = ctrl->val;
p                1932 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.rc_p_frame_qp = ctrl->val;
p                1935 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.rc_b_frame_qp = ctrl->val;
p                1939 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.mpeg4.rc_frame_qp = ctrl->val;
p                1943 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.mpeg4.rc_min_qp = ctrl->val;
p                1947 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.mpeg4.rc_max_qp = ctrl->val;
p                1951 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.mpeg4.rc_p_frame_qp = ctrl->val;
p                1955 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.mpeg4.rc_b_frame_qp = ctrl->val;
p                1958 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.rc_mb_dark = ctrl->val;
p                1961 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.rc_mb_smooth = ctrl->val;
p                1964 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.rc_mb_static = ctrl->val;
p                1967 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.rc_mb_activity = ctrl->val;
p                1970 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.vui_sar = ctrl->val;
p                1973 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.vui_sar_idc = vui_sar_idc(ctrl->val);
p                1976 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.vui_ext_sar_width = ctrl->val;
p                1979 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.vui_ext_sar_height = ctrl->val;
p                1982 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.open_gop = !ctrl->val;
p                1985 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.h264.open_gop_size = ctrl->val;
p                1990 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 			p->codec.mpeg4.profile =
p                1994 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 			p->codec.mpeg4.profile =
p                2002 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.mpeg4.quarter_pixel = ctrl->val;
p                2005 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.vp8.num_partitions = ctrl->val;
p                2008 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.vp8.imd_4x4 = ctrl->val;
p                2011 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.vp8.num_ref = ctrl->val;
p                2014 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.vp8.filter_level = ctrl->val;
p                2017 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.vp8.filter_sharpness = ctrl->val;
p                2020 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.vp8.golden_frame_ref_period = ctrl->val;
p                2023 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.vp8.golden_frame_sel = ctrl->val;
p                2026 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.vp8.rc_min_qp = ctrl->val;
p                2029 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.vp8.rc_max_qp = ctrl->val;
p                2032 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.vp8.rc_frame_qp = ctrl->val;
p                2035 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.vp8.rc_p_frame_qp = ctrl->val;
p                2038 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.vp8.profile = ctrl->val;
p                2041 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.rc_frame_qp = ctrl->val;
p                2044 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.rc_p_frame_qp = ctrl->val;
p                2047 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.rc_b_frame_qp = ctrl->val;
p                2050 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.rc_framerate = ctrl->val;
p                2053 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.rc_min_qp = ctrl->val;
p                2055 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 						 p->codec.hevc.rc_max_qp);
p                2058 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.rc_max_qp = ctrl->val;
p                2059 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		__enc_update_hevc_qp_ctrls_range(ctx, p->codec.hevc.rc_min_qp,
p                2063 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.level_v4l2 = ctrl->val;
p                2064 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.level = hevc_level(ctrl->val);
p                2069 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 			p->codec.hevc.profile =
p                2073 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 			p->codec.hevc.profile =
p                2081 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.tier = ctrl->val;
p                2084 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.max_partition_depth = ctrl->val;
p                2087 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.num_refs_for_p = ctrl->val;
p                2090 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.refreshtype = ctrl->val;
p                2093 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.const_intra_period_enable = ctrl->val;
p                2096 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.lossless_cu_enable = ctrl->val;
p                2099 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.wavefront_enable = ctrl->val;
p                2102 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.loopfilter = ctrl->val;
p                2105 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.hier_qp_enable = ctrl->val;
p                2108 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.hier_qp_type = ctrl->val;
p                2111 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.num_hier_layer = ctrl->val;
p                2114 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.hier_qp_layer[0] = ctrl->val;
p                2117 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.hier_qp_layer[1] = ctrl->val;
p                2120 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.hier_qp_layer[2] = ctrl->val;
p                2123 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.hier_qp_layer[3] = ctrl->val;
p                2126 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.hier_qp_layer[4] = ctrl->val;
p                2129 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.hier_qp_layer[5] = ctrl->val;
p                2132 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.hier_qp_layer[6] = ctrl->val;
p                2135 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.hier_bit_layer[0] = ctrl->val;
p                2138 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.hier_bit_layer[1] = ctrl->val;
p                2141 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.hier_bit_layer[2] = ctrl->val;
p                2144 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.hier_bit_layer[3] = ctrl->val;
p                2147 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.hier_bit_layer[4] = ctrl->val;
p                2150 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.hier_bit_layer[5] = ctrl->val;
p                2153 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.hier_bit_layer[6] = ctrl->val;
p                2156 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.general_pb_enable = ctrl->val;
p                2159 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.temporal_id_enable = ctrl->val;
p                2162 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.strong_intra_smooth = ctrl->val;
p                2165 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.intra_pu_split_disable = ctrl->val;
p                2168 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.tmv_prediction_disable = !ctrl->val;
p                2171 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.max_num_merge_mv = ctrl->val;
p                2174 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.encoding_nostartcode_enable = ctrl->val;
p                2177 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.refreshperiod = ctrl->val;
p                2180 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.lf_beta_offset_div2 = ctrl->val;
p                2183 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.lf_tc_offset_div2 = ctrl->val;
p                2186 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.size_of_length_field = ctrl->val;
p                2189 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c 		p->codec.hevc.prepend_sps_pps_to_idr = ctrl->val;
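The long run of s5p_mfc_enc.c lines above is the driver's control handler: a single switch over the V4L2 control ID copies ctrl->val into the matching field of the per-context encoder parameter struct, sometimes with a small transformation (multiplying by 8, splitting a packed value into bytes, inverting a flag). A compact sketch of that dispatch shape with invented control IDs rather than the real V4L2 ones:

#include <stdint.h>
#include <stdio.h>

/* Invented control IDs purely for illustration. */
enum ctrl_id {
	CTRL_GOP_SIZE,
	CTRL_SLICE_BYTES,
	CTRL_PADDING_YUV,	/* packed as 0x00LLCBCR */
	CTRL_CLOSED_GOP,
};

struct enc_params {
	uint32_t gop_size;
	uint32_t slice_bit;
	uint8_t pad_luma, pad_cb, pad_cr;
	int open_gop;
};

static int set_ctrl(struct enc_params *p, enum ctrl_id id, uint32_t val)
{
	switch (id) {
	case CTRL_GOP_SIZE:
		p->gop_size = val;
		break;
	case CTRL_SLICE_BYTES:
		p->slice_bit = val * 8;	/* stored in bits, like slice_bit above */
		break;
	case CTRL_PADDING_YUV:
		p->pad_luma = (val >> 16) & 0xff;
		p->pad_cb = (val >> 8) & 0xff;
		p->pad_cr = (val >> 0) & 0xff;
		break;
	case CTRL_CLOSED_GOP:
		p->open_gop = !val;	/* control says "closed GOP", field stores "open GOP" */
		break;
	default:
		return -1;
	}
	return 0;
}

int main(void)
{
	struct enc_params p = { 0 };

	set_ctrl(&p, CTRL_PADDING_YUV, 0x00102030);
	printf("pad: luma=%u cb=%u cr=%u\n", p.pad_luma, p.pad_cb, p.pad_cr);
	return 0;
}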
p                 677 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	struct s5p_mfc_enc_params *p = &ctx->enc_params;
p                 689 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	reg |= p->gop_size;
p                 694 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	mfc_write(dev, p->slice_mode, S5P_FIMV_ENC_MSLICE_CTRL);
p                 695 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB) {
p                 696 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 		mfc_write(dev, p->slice_mb, S5P_FIMV_ENC_MSLICE_MB);
p                 697 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	} else if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES) {
p                 698 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 		mfc_write(dev, p->slice_bit, S5P_FIMV_ENC_MSLICE_BIT);
p                 704 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	mfc_write(dev, p->intra_refresh_mb, S5P_FIMV_ENC_CIR_CTRL);
p                 712 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	if (p->pad) {
p                 717 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 		reg |= (p->pad_cr << 16);
p                 720 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 		reg |= (p->pad_cb << 8);
p                 723 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 		reg |= (p->pad_luma);
p                 733 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	reg |= (p->rc_frame << 9);
p                 736 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	if (p->rc_frame)
p                 737 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 		mfc_write(dev, p->rc_bitrate,
p                 742 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	if (p->rc_frame)
p                 743 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 		mfc_write(dev, p->rc_reaction_coeff, S5P_FIMV_ENC_RC_RPARA);
p                 747 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	shm |= (p->seq_hdr_mode << 3);
p                 750 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	shm |= (p->frame_skip_mode << 1);
p                 753 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	s5p_mfc_write_info_v5(ctx, p->fixed_target_bit, RC_CONTROL_CONFIG);
p                 760 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	struct s5p_mfc_enc_params *p = &ctx->enc_params;
p                 761 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	struct s5p_mfc_h264_enc_params *p_264 = &p->codec.h264;
p                 770 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	reg |= (p->num_b_frame << 16);
p                 826 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	reg |= (p->rc_mb << 8);
p                 832 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	if (p->rc_frame && p->rc_framerate_denom)
p                 833 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 		mfc_write(dev, p->rc_framerate_num * 1000
p                 834 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 			/ p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
p                 847 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	if (p->rc_mb) {
p                 863 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	if (!p->rc_frame && !p->rc_mb) {
p                 905 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	if (p->frame_skip_mode ==
p                 917 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	struct s5p_mfc_enc_params *p = &ctx->enc_params;
p                 918 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4;
p                 928 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	reg |= (p->num_b_frame << 16);
p                 942 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	if (!p->rc_frame) {
p                 950 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	if (p->rc_frame) {
p                 951 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 		if (p->rc_framerate_denom > 0) {
p                 952 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 			framerate = p->rc_framerate_num * 1000 /
p                 953 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 						p->rc_framerate_denom;
p                 959 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 			shm |= ((p->rc_framerate_num & 0x7FFF) << 16);
p                 960 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 			shm |= (p->rc_framerate_denom & 0xFFFF);
p                 984 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	if (p->frame_skip_mode ==
p                 987 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 		shm |= (p->vbv_size << 16);
p                 996 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	struct s5p_mfc_enc_params *p = &ctx->enc_params;
p                 997 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4;
p                1003 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	if (!p->rc_frame) {
p                1010 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	if (p->rc_frame && p->rc_framerate_denom)
p                1011 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 		mfc_write(dev, p->rc_framerate_num * 1000
p                1012 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 			/ p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
p                1033 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 	if (p->frame_skip_mode ==
p                1036 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c 		shm |= (p->vbv_size << 16);
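Several of the s5p_mfc_opr_v5.c lines program the rate-control frame rate only when frame-level RC is enabled, scaling the numerator by 1000 before dividing by the denominator so the register carries the frame rate in 1/1000 fps units. A tiny sketch of that guarded computation, with the register write replaced by a print:

#include <stdint.h>
#include <stdio.h>

struct enc_params {
	int rc_frame;			/* frame-level rate control enabled */
	uint32_t rc_framerate_num;
	uint32_t rc_framerate_denom;
};

/* Compute the value the driver would write: frame rate in milli-fps. */
static void program_framerate(const struct enc_params *p)
{
	if (p->rc_frame && p->rc_framerate_denom) {
		uint32_t reg = p->rc_framerate_num * 1000 / p->rc_framerate_denom;

		printf("RC_FRAME_RATE <- %u\n", reg);	/* mfc_write() in the driver */
	}
}

int main(void)
{
	struct enc_params p = {
		.rc_frame = 1,
		.rc_framerate_num = 30000,
		.rc_framerate_denom = 1001,	/* 29.97 fps */
	};

	program_framerate(&p);	/* prints 29970 */
	return 0;
}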
p                 753 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	struct s5p_mfc_enc_params *p = &ctx->enc_params;
p                 772 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	reg |= p->gop_size & 0xFFFF;
p                 777 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	ctx->slice_mode = p->slice_mode;
p                 779 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB) {
p                 782 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		ctx->slice_size.mb = p->slice_mb;
p                 783 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	} else if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES) {
p                 786 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		ctx->slice_size.bits = p->slice_bit;
p                 795 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	writel(p->intra_refresh_mb, mfc_regs->e_ir_size);
p                 797 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (p->intra_refresh_mb == 0)
p                 840 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (p->pad) {
p                 845 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		reg |= ((p->pad_cr & 0xFF) << 16);
p                 847 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		reg |= ((p->pad_cb & 0xFF) << 8);
p                 849 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		reg |= p->pad_luma & 0xFF;
p                 856 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	reg |= ((p->rc_frame & 0x1) << 9);
p                 860 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (p->rc_frame)
p                 861 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		writel(p->rc_bitrate,
p                 867 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (p->rc_frame) {
p                 868 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		if (p->rc_reaction_coeff < TIGHT_CBR_MAX) /* tight CBR */
p                 877 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	reg |= ((p->seq_hdr_mode & 0x1) << 2);
p                 881 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	reg |= (p->frame_skip_mode & 0x3);
p                 890 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	reg = (p->mv_h_range & S5P_FIMV_E_MV_RANGE_V6_MASK);
p                 893 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	reg = (p->mv_v_range & S5P_FIMV_E_MV_RANGE_V6_MASK);
p                 918 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	struct s5p_mfc_enc_params *p = &ctx->enc_params;
p                 919 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	struct s5p_mfc_h264_enc_params *p_h264 = &p->codec.h264;
p                 930 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	reg |= ((p->num_b_frame & 0x3) << 16);
p                 945 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	reg |= ((p->rc_mb & 0x1) << 8);
p                 963 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (!p->rc_frame && !p->rc_mb) {
p                 972 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (p->rc_frame && p->rc_framerate_num && p->rc_framerate_denom) {
p                 974 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		reg |= ((p->rc_framerate_num & 0xFFFF) << 16);
p                 975 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		reg |= p->rc_framerate_denom & 0xFFFF;
p                 980 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (p->frame_skip_mode ==
p                 985 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		if (p->rc_frame)
p                 986 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 			writel(p->vbv_delay, mfc_regs->e_vbv_init_delay);
p                1049 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (p->rc_mb) {
p                1199 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	struct s5p_mfc_enc_params *p = &ctx->enc_params;
p                1200 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4;
p                1210 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	reg |= ((p->num_b_frame & 0x3) << 16);
p                1225 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	reg |= ((p->rc_mb & 0x1) << 8);
p                1243 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (!p->rc_frame && !p->rc_mb) {
p                1252 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (p->rc_frame && p->rc_framerate_num && p->rc_framerate_denom) {
p                1254 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		reg |= ((p->rc_framerate_num & 0xFFFF) << 16);
p                1255 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		reg |= p->rc_framerate_denom & 0xFFFF;
p                1260 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (p->frame_skip_mode ==
p                1262 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		writel(p->vbv_size & 0xFFFF, mfc_regs->e_vbv_buffer_size);
p                1264 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		if (p->rc_frame)
p                1265 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 			writel(p->vbv_delay, mfc_regs->e_vbv_init_delay);
p                1281 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	struct s5p_mfc_enc_params *p = &ctx->enc_params;
p                1282 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4;
p                1299 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	reg |= ((p->rc_mb & 0x1) << 8);
p                1317 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (!p->rc_frame && !p->rc_mb) {
p                1326 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (p->rc_frame && p->rc_framerate_num && p->rc_framerate_denom) {
p                1328 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		reg |= ((p->rc_framerate_num & 0xFFFF) << 16);
p                1329 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		reg |= p->rc_framerate_denom & 0xFFFF;
p                1334 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (p->frame_skip_mode ==
p                1336 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		writel(p->vbv_size & 0xFFFF, mfc_regs->e_vbv_buffer_size);
p                1338 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		if (p->rc_frame)
p                1339 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 			writel(p->vbv_delay, mfc_regs->e_vbv_init_delay);
p                1351 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	struct s5p_mfc_enc_params *p = &ctx->enc_params;
p                1352 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	struct s5p_mfc_vp8_enc_params *p_vp8 = &p->codec.vp8;
p                1363 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	reg |= ((p->num_b_frame & 0x3) << 16);
p                1374 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	reg |= ((p->rc_mb & 0x1) << 8);
p                1378 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (p->rc_frame && p->rc_framerate_num && p->rc_framerate_denom) {
p                1380 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		reg |= ((p->rc_framerate_num & 0xFFFF) << 16);
p                1381 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		reg |= p->rc_framerate_denom & 0xFFFF;
p                1392 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (!p->rc_frame && !p->rc_mb) {
p                1406 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (p->frame_skip_mode ==
p                1408 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		writel(p->vbv_size & 0xFFFF, mfc_regs->e_vbv_buffer_size);
p                1410 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		if (p->rc_frame)
p                1411 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 			writel(p->vbv_delay, mfc_regs->e_vbv_init_delay);
p                1444 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	struct s5p_mfc_enc_params *p = &ctx->enc_params;
p                1445 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	struct s5p_mfc_hevc_enc_params *p_hevc = &p->codec.hevc;
p                1457 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	reg |= (p->num_b_frame << 16);
p                1541 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 		if (p->rc_frame) {
p                1553 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	reg |= (p->rc_mb << 8);
p                1561 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (p->rc_frame) {
p                1581 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	if (!p->rc_frame && !p->rc_mb) {
p                1740 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	struct s5p_mfc_enc_params *p = &ctx->enc_params;
p                1741 drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c 	struct s5p_mfc_h264_enc_params *p_h264 = &p->codec.h264;
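The newer s5p_mfc_opr_v6.c code instead packs the frame-rate numerator and denominator into one register, 16 bits each, and the same masked-shift packing recurs for padding bytes and motion-vector ranges. A short sketch of packing and unpacking such a register:

#include <stdint.h>
#include <stdio.h>

/* Pack numerator (high 16 bits) and denominator (low 16 bits) into one word. */
static uint32_t pack_framerate(uint32_t num, uint32_t denom)
{
	uint32_t reg = 0;

	reg |= (num & 0xFFFF) << 16;
	reg |= denom & 0xFFFF;
	return reg;
}

int main(void)
{
	uint32_t reg = pack_framerate(30000, 1001);

	printf("reg=0x%08x num=%u denom=%u\n",
	       reg, reg >> 16, reg & 0xFFFF);
	return 0;
}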
p                  33 drivers/media/platform/sti/delta/delta-mjpeg-dec.c static char *ipc_open_param_str(struct jpeg_video_decode_init_params_t *p,
p                  38 drivers/media/platform/sti/delta/delta-mjpeg-dec.c 	if (!p)
p                  45 drivers/media/platform/sti/delta/delta-mjpeg-dec.c 		      p->circular_buffer_begin_addr_p,
p                  46 drivers/media/platform/sti/delta/delta-mjpeg-dec.c 		      p->circular_buffer_end_addr_p);
p                  51 drivers/media/platform/sti/delta/delta-mjpeg-dec.c static char *ipc_decode_param_str(struct jpeg_decode_params_t *p,
p                  56 drivers/media/platform/sti/delta/delta-mjpeg-dec.c 	if (!p)
p                  70 drivers/media/platform/sti/delta/delta-mjpeg-dec.c 		      p->picture_start_addr_p,
p                  71 drivers/media/platform/sti/delta/delta-mjpeg-dec.c 		      p->picture_end_addr_p,
p                  72 drivers/media/platform/sti/delta/delta-mjpeg-dec.c 		      p->decoding_mode,
p                  73 drivers/media/platform/sti/delta/delta-mjpeg-dec.c 		      p->display_buffer_addr.display_decimated_luma_p,
p                  74 drivers/media/platform/sti/delta/delta-mjpeg-dec.c 		      p->display_buffer_addr.display_decimated_chroma_p,
p                  75 drivers/media/platform/sti/delta/delta-mjpeg-dec.c 		      p->main_aux_enable, p->additional_flags,
p                  76 drivers/media/platform/sti/delta/delta-mjpeg-dec.c 		      p->field_flag,
p                  77 drivers/media/platform/sti/delta/delta-mjpeg-dec.c 		      p->is_jpeg_image);
p                1384 drivers/media/platform/stm32/stm32-dcmi.c 		       struct v4l2_streamparm *p)
p                1388 drivers/media/platform/stm32/stm32-dcmi.c 	return v4l2_g_parm_cap(video_devdata(file), dcmi->entity.source, p);
p                1392 drivers/media/platform/stm32/stm32-dcmi.c 		       struct v4l2_streamparm *p)
p                1396 drivers/media/platform/stm32/stm32-dcmi.c 	return v4l2_s_parm_cap(video_devdata(file), dcmi->entity.source, p);
p                  25 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c vb2_v4l2_to_csi_buffer(const struct vb2_v4l2_buffer *p)
p                  27 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c 	return container_of(p, struct sun4i_csi_buffer, vb);
p                  31 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c vb2_to_csi_buffer(const struct vb2_buffer *p)
p                  33 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c 	return vb2_v4l2_to_csi_buffer(to_vb2_v4l2_buffer(p));
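The sun4i_dma.c helpers above recover the driver's buffer wrapper from an embedded vb2 buffer with container_of(). A self-contained sketch of that idiom, with stand-in struct definitions instead of the real videobuf2 types:

#include <stddef.h>
#include <stdio.h>

/* Classic container_of: from a member pointer back to the enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vb2_buffer { int index; };		/* stand-in for the real type */

struct csi_buffer {
	struct vb2_buffer vb;			/* embedded member */
	int dma_done;				/* driver-private state */
};

static struct csi_buffer *vb2_to_csi_buffer(struct vb2_buffer *p)
{
	return container_of(p, struct csi_buffer, vb);
}

int main(void)
{
	struct csi_buffer buf = { .vb = { .index = 3 }, .dma_done = 1 };
	struct vb2_buffer *vb = &buf.vb;

	printf("recovered dma_done=%d\n", vb2_to_csi_buffer(vb)->dma_done);
	return 0;
}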
p                 756 drivers/media/platform/vicodec/codec-fwht.c 		u8 *p;
p                 766 drivers/media/platform/vicodec/codec-fwht.c 			for (i = 0, p = input; i < width; i++, p += input_step)
p                 767 drivers/media/platform/vicodec/codec-fwht.c 				*out++ = (*p == 0xff) ? 0xfe : *p;
p                 356 drivers/media/platform/vicodec/vicodec-core.c 	u8 *p = *pp;
p                 364 drivers/media/platform/vicodec/vicodec-core.c 		for (; p < *pp + sz; p++) {
p                 367 drivers/media/platform/vicodec/vicodec-core.c 			p = memchr(p, magic[ctx->comp_magic_cnt],
p                 368 drivers/media/platform/vicodec/vicodec-core.c 				   *pp + sz - p);
p                 369 drivers/media/platform/vicodec/vicodec-core.c 			if (!p) {
p                 371 drivers/media/platform/vicodec/vicodec-core.c 				p = *pp + sz;
p                 375 drivers/media/platform/vicodec/vicodec-core.c 			if (*pp + sz - p < copy)
p                 376 drivers/media/platform/vicodec/vicodec-core.c 				copy = *pp + sz - p;
p                 378 drivers/media/platform/vicodec/vicodec-core.c 			memcpy(header + ctx->comp_magic_cnt, p, copy);
p                 381 drivers/media/platform/vicodec/vicodec-core.c 				p += copy;
p                 388 drivers/media/platform/vicodec/vicodec-core.c 			*pp = p;
p                 397 drivers/media/platform/vicodec/vicodec-core.c 		if (*pp + sz - p < copy)
p                 398 drivers/media/platform/vicodec/vicodec-core.c 			copy = *pp + sz - p;
p                 400 drivers/media/platform/vicodec/vicodec-core.c 		memcpy(header + ctx->header_size, p, copy);
p                 401 drivers/media/platform/vicodec/vicodec-core.c 		p += copy;
p                 404 drivers/media/platform/vicodec/vicodec-core.c 	*pp = p;
p                 578 drivers/media/platform/vicodec/vicodec-core.c 	u8 *p;
p                 603 drivers/media/platform/vicodec/vicodec-core.c 	p = p_src + ctx->cur_buf_offset;
p                 608 drivers/media/platform/vicodec/vicodec-core.c 		state = get_next_header(ctx, &p, p_src + sz - p);
p                 631 drivers/media/platform/vicodec/vicodec-core.c 		if (copy > p_src + sz - p)
p                 632 drivers/media/platform/vicodec/vicodec-core.c 			copy = p_src + sz - p;
p                 635 drivers/media/platform/vicodec/vicodec-core.c 		       p, copy);
p                 636 drivers/media/platform/vicodec/vicodec-core.c 		p += copy;
p                 645 drivers/media/platform/vicodec/vicodec-core.c 	ctx->cur_buf_offset = p - p_src;
p                 651 drivers/media/platform/vicodec/vicodec-core.c 		struct fwht_cframe_hdr *p_hdr = (struct fwht_cframe_hdr *)p;
p                 655 drivers/media/platform/vicodec/vicodec-core.c 		if (!memcmp(p, magic, sizeof(magic)))
p                1472 drivers/media/platform/vicodec/vicodec-core.c 	u8 *p = p_src;
p                1529 drivers/media/platform/vicodec/vicodec-core.c 			get_next_header(ctx, &p, p_src + sz - p);
p                1542 drivers/media/platform/vicodec/vicodec-core.c 			p = p - sizeof(struct fwht_cframe_hdr) + 1;
p                1543 drivers/media/platform/vicodec/vicodec-core.c 			if (p < p_src)
p                1544 drivers/media/platform/vicodec/vicodec-core.c 				p = p_src;
p                1551 drivers/media/platform/vicodec/vicodec-core.c 	ctx->cur_buf_offset = p - p_src;
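The vicodec-core.c lines implement a resumable header scan: memchr() jumps to the next candidate byte of a magic marker, partially matched bytes are copied into a header buffer, and the caller's pointer is advanced so the scan can continue in the next chunk (comp_magic_cnt tracks the partial match). A simplified, self-contained sketch of locating a multi-byte magic sequence within a single buffer, without the resume-across-buffers state; the magic bytes below are placeholders:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static const uint8_t magic[] = { 'O', 'O', 'O', 'O', 'F', 'W', 'H', 'T' };

/*
 * Find the magic sequence in buf[0..len). Returns its offset, or -1 if not
 * found. memchr() skips quickly to each candidate first byte, the same trick
 * the driver uses to avoid scanning byte by byte.
 */
static long find_magic(const uint8_t *buf, size_t len)
{
	const uint8_t *p = buf;
	const uint8_t *end = buf + len;

	while (p < end) {
		p = memchr(p, magic[0], end - p);
		if (!p)
			return -1;
		if ((size_t)(end - p) >= sizeof(magic) &&
		    !memcmp(p, magic, sizeof(magic)))
			return p - buf;
		p++;
	}
	return -1;
}

int main(void)
{
	uint8_t buf[64] = { 0 };

	memcpy(buf + 20, magic, sizeof(magic));
	printf("magic at offset %ld\n", find_magic(buf, sizeof(buf)));
	return 0;
}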
p                 451 drivers/media/platform/vim2m.c 	u8 *p_in, *p_line, *p_in_x[2], *p, *p_out;
p                 501 drivers/media/platform/vim2m.c 			p = p_in + (y * bytesperline);
p                 503 drivers/media/platform/vim2m.c 				p += bytesperline - (q_data_in->fmt->depth >> 3);
p                 505 drivers/media/platform/vim2m.c 			copy_line(q_data_out, p, p_out,
p                 920 drivers/media/platform/vivid/vivid-core.c 		char *p = dev->query_dv_timings_qmenu_strings + i * 32;
p                 923 drivers/media/platform/vivid/vivid-core.c 		dev->query_dv_timings_qmenu[i] = p;
p                 927 drivers/media/platform/vivid/vivid-core.c 		snprintf(p, 32, "%ux%u%s%u",
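The vivid-core.c lines carve one large character array into fixed 32-byte slots, point a menu-string table at each slot, and snprintf() a timing description into it. A minimal sketch of that fixed-stride string table; the example timings and format string are invented, as the driver derives its strings from its DV timings list:

#include <stdio.h>

#define SLOT_LEN 32
#define NUM_SLOTS 4

int main(void)
{
	static char storage[NUM_SLOTS * SLOT_LEN];
	const char *menu[NUM_SLOTS];
	const unsigned int w[NUM_SLOTS] = { 1280, 1920, 1920, 3840 };
	const unsigned int h[NUM_SLOTS] = { 720, 1080, 1080, 2160 };
	const unsigned int fps[NUM_SLOTS] = { 60, 50, 60, 30 };

	for (unsigned int i = 0; i < NUM_SLOTS; i++) {
		char *p = storage + i * SLOT_LEN;	/* slot i of the big buffer */

		menu[i] = p;
		snprintf(p, SLOT_LEN, "%ux%up%u", w[i], h[i], fps[i]);
	}

	for (unsigned int i = 0; i < NUM_SLOTS; i++)
		printf("%s\n", menu[i]);
	return 0;
}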
p                  63 drivers/media/platform/vivid/vivid-kthread-cap.c 		const u8 *p = dev->bitmap_out;
p                  68 drivers/media/platform/vivid/vivid-kthread-cap.c 		if (!(p[stride * win_y + win_x / 8] & (1 << (win_x & 7))))
p                 222 drivers/media/platform/vivid/vivid-kthread-cap.c 			 unsigned p, unsigned bpl[TPG_MAX_PLANES], unsigned h)
p                 227 drivers/media/platform/vivid/vivid-kthread-cap.c 	if (p == 0 || tpg_g_buffers(tpg) > 1)
p                 228 drivers/media/platform/vivid/vivid-kthread-cap.c 		return vb2_plane_vaddr(&buf->vb.vb2_buf, p);
p                 230 drivers/media/platform/vivid/vivid-kthread-cap.c 	for (i = 0; i < p; i++)
p                 235 drivers/media/platform/vivid/vivid-kthread-cap.c static noinline_for_stack int vivid_copy_buffer(struct vivid_dev *dev, unsigned p,
p                 241 drivers/media/platform/vivid/vivid-kthread-cap.c 	unsigned vdiv = dev->fmt_out->vdownsampling[p];
p                 242 drivers/media/platform/vivid/vivid-kthread-cap.c 	unsigned twopixsize = tpg_g_twopixelsize(tpg, p);
p                 243 drivers/media/platform/vivid/vivid-kthread-cap.c 	unsigned img_width = tpg_hdiv(tpg, p, dev->compose_cap.width);
p                 245 drivers/media/platform/vivid/vivid-kthread-cap.c 	unsigned stride_cap = tpg->bytesperline[p];
p                 246 drivers/media/platform/vivid/vivid-kthread-cap.c 	unsigned stride_out = dev->bytesperline_out[p];
p                 262 drivers/media/platform/vivid/vivid-kthread-cap.c 	unsigned vid_cap_left = tpg_hdiv(tpg, p, dev->loop_vid_cap.left);
p                 277 drivers/media/platform/vivid/vivid-kthread-cap.c 	voutbuf = plane_vaddr(tpg, vid_out_buf, p,
p                 279 drivers/media/platform/vivid/vivid-kthread-cap.c 	if (p < dev->fmt_out->buffers)
p                 280 drivers/media/platform/vivid/vivid-kthread-cap.c 		voutbuf += vid_out_buf->vb.vb2_buf.planes[p].data_offset;
p                 281 drivers/media/platform/vivid/vivid-kthread-cap.c 	voutbuf += tpg_hdiv(tpg, p, dev->loop_vid_out.left) +
p                 283 drivers/media/platform/vivid/vivid-kthread-cap.c 	vcapbuf += tpg_hdiv(tpg, p, dev->compose_cap.left) +
p                 292 drivers/media/platform/vivid/vivid-kthread-cap.c 			memcpy(vcapbuf, tpg->black_line[p], img_width);
p                 307 drivers/media/platform/vivid/vivid-kthread-cap.c 	vid_cap_right = tpg_hdiv(tpg, p, dev->loop_vid_cap.left + dev->loop_vid_cap.width);
p                 323 drivers/media/platform/vivid/vivid-kthread-cap.c 			memcpy(vcapbuf, tpg->black_line[p], img_width);
p                 329 drivers/media/platform/vivid/vivid-kthread-cap.c 			memcpy(vcapbuf, tpg->black_line[p], vid_cap_left);
p                 333 drivers/media/platform/vivid/vivid-kthread-cap.c 			memcpy(vcapbuf + vid_cap_right, tpg->black_line[p],
p                 339 drivers/media/platform/vivid/vivid-kthread-cap.c 			       tpg_hdiv(tpg, p, dev->loop_vid_cap.width));
p                 344 drivers/media/platform/vivid/vivid-kthread-cap.c 			       tpg_hdiv(tpg, p, dev->loop_vid_cap.width));
p                 349 drivers/media/platform/vivid/vivid-kthread-cap.c 				tpg_hdiv(tpg, p, dev->loop_vid_out.width),
p                 350 drivers/media/platform/vivid/vivid-kthread-cap.c 				tpg_hdiv(tpg, p, dev->loop_vid_cap.width),
p                 351 drivers/media/platform/vivid/vivid-kthread-cap.c 				tpg_g_twopixelsize(tpg, p));
p                 364 drivers/media/platform/vivid/vivid-kthread-cap.c 				tpg_g_twopixelsize(tpg, p));
p                 375 drivers/media/platform/vivid/vivid-kthread-cap.c 					tpg_g_twopixelsize(tpg, p));
p                 379 drivers/media/platform/vivid/vivid-kthread-cap.c 		       tpg_hdiv(tpg, p, dev->loop_vid_cap.width));
p                 401 drivers/media/platform/vivid/vivid-kthread-cap.c 		memcpy(vcapbuf, tpg->contrast_line[p], img_width);
p                 412 drivers/media/platform/vivid/vivid-kthread-cap.c 	unsigned p;
p                 451 drivers/media/platform/vivid/vivid-kthread-cap.c 	for (p = 0; p < tpg_g_planes(tpg); p++) {
p                 452 drivers/media/platform/vivid/vivid-kthread-cap.c 		void *vbuf = plane_vaddr(tpg, buf, p,
p                 460 drivers/media/platform/vivid/vivid-kthread-cap.c 		if (p < tpg_g_buffers(tpg) && dev->fmt_cap->data_offset[p]) {
p                 461 drivers/media/platform/vivid/vivid-kthread-cap.c 			memset(vbuf, dev->fmt_cap->data_offset[p] & 0xff,
p                 462 drivers/media/platform/vivid/vivid-kthread-cap.c 			       dev->fmt_cap->data_offset[p]);
p                 463 drivers/media/platform/vivid/vivid-kthread-cap.c 			vbuf += dev->fmt_cap->data_offset[p];
p                 465 drivers/media/platform/vivid/vivid-kthread-cap.c 		tpg_calc_text_basep(tpg, basep, p, vbuf);
p                 466 drivers/media/platform/vivid/vivid-kthread-cap.c 		if (!is_loop || vivid_copy_buffer(dev, p, vbuf, buf))
p                 468 drivers/media/platform/vivid/vivid-kthread-cap.c 					p, vbuf);
p                 566 drivers/media/platform/vivid/vivid-kthread-cap.c 		const u8 *p = dev->bitmap_cap;
p                 569 drivers/media/platform/vivid/vivid-kthread-cap.c 		if (!(p[stride * win_y + win_x / 8] & (1 << (win_x & 7))))
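Both vivid-kthread-cap.c snippets test a clipping bitmap: the byte at stride * y + x / 8 is fetched and bit x & 7 within it decides whether the pixel lies inside the overlay window. A short sketch of that row-major, one-bit-per-pixel lookup:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Test pixel (x, y) in a bitmap with 'stride' bytes per row, 1 bit per pixel. */
static int bitmap_test(const uint8_t *bitmap, unsigned int stride,
		       unsigned int x, unsigned int y)
{
	return !!(bitmap[stride * y + x / 8] & (1 << (x & 7)));
}

int main(void)
{
	uint8_t bitmap[4 * 4];			/* 32x4 pixel bitmap, 4 bytes per row */

	memset(bitmap, 0, sizeof(bitmap));
	bitmap[4 * 2 + 10 / 8] = 1 << (10 & 7);	/* set pixel (10, 2) */

	printf("(10,2)=%d (11,2)=%d\n",
	       bitmap_test(bitmap, 4, 10, 2),
	       bitmap_test(bitmap, 4, 11, 2));
	return 0;
}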
p                  50 drivers/media/platform/vivid/vivid-osd.c 	void *p = dev->video_vbase;
p                  58 drivers/media/platform/vivid/vivid-osd.c 		u16 *d = p;
p                  62 drivers/media/platform/vivid/vivid-osd.c 		p += dev->display_byte_stride;
p                  89 drivers/media/platform/vivid/vivid-vid-cap.c 	unsigned p;
p                 115 drivers/media/platform/vivid/vivid-vid-cap.c 		for (p = 0; p < buffers; p++) {
p                 116 drivers/media/platform/vivid/vivid-vid-cap.c 			if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h +
p                 117 drivers/media/platform/vivid/vivid-vid-cap.c 						dev->fmt_cap->data_offset[p])
p                 121 drivers/media/platform/vivid/vivid-vid-cap.c 		for (p = 0; p < buffers; p++)
p                 122 drivers/media/platform/vivid/vivid-vid-cap.c 			sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) /
p                 123 drivers/media/platform/vivid/vivid-vid-cap.c 					dev->fmt_cap->vdownsampling[p] +
p                 124 drivers/media/platform/vivid/vivid-vid-cap.c 					dev->fmt_cap->data_offset[p];
p                 133 drivers/media/platform/vivid/vivid-vid-cap.c 	for (p = 0; p < buffers; p++)
p                 134 drivers/media/platform/vivid/vivid-vid-cap.c 		dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);
p                 144 drivers/media/platform/vivid/vivid-vid-cap.c 	unsigned p;
p                 159 drivers/media/platform/vivid/vivid-vid-cap.c 	for (p = 0; p < buffers; p++) {
p                 160 drivers/media/platform/vivid/vivid-vid-cap.c 		size = (tpg_g_line_width(&dev->tpg, p) *
p                 162 drivers/media/platform/vivid/vivid-vid-cap.c 			dev->fmt_cap->vdownsampling[p] +
p                 163 drivers/media/platform/vivid/vivid-vid-cap.c 			dev->fmt_cap->data_offset[p];
p                 165 drivers/media/platform/vivid/vivid-vid-cap.c 		if (vb2_plane_size(vb, p) < size) {
p                 167 drivers/media/platform/vivid/vivid-vid-cap.c 					__func__, p, vb2_plane_size(vb, p), size);
p                 171 drivers/media/platform/vivid/vivid-vid-cap.c 		vb2_set_plane_payload(vb, p, size);
p                 172 drivers/media/platform/vivid/vivid-vid-cap.c 		vb->planes[p].data_offset = dev->fmt_cap->data_offset[p];
p                 528 drivers/media/platform/vivid/vivid-vid-cap.c 	unsigned p;
p                 542 drivers/media/platform/vivid/vivid-vid-cap.c 	for (p = 0; p < mp->num_planes; p++) {
p                 543 drivers/media/platform/vivid/vivid-vid-cap.c 		mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
p                 544 drivers/media/platform/vivid/vivid-vid-cap.c 		mp->plane_fmt[p].sizeimage =
p                 545 drivers/media/platform/vivid/vivid-vid-cap.c 			(tpg_g_line_width(&dev->tpg, p) * mp->height) /
p                 546 drivers/media/platform/vivid/vivid-vid-cap.c 			dev->fmt_cap->vdownsampling[p] +
p                 547 drivers/media/platform/vivid/vivid-vid-cap.c 			dev->fmt_cap->data_offset[p];
p                 562 drivers/media/platform/vivid/vivid-vid-cap.c 	unsigned p;
p                 615 drivers/media/platform/vivid/vivid-vid-cap.c 	for (p = 0; p < fmt->buffers; p++) {
p                 617 drivers/media/platform/vivid/vivid-vid-cap.c 		bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
p                 619 drivers/media/platform/vivid/vivid-vid-cap.c 		max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;
p                 621 drivers/media/platform/vivid/vivid-vid-cap.c 		if (pfmt[p].bytesperline > max_bpl)
p                 622 drivers/media/platform/vivid/vivid-vid-cap.c 			pfmt[p].bytesperline = max_bpl;
p                 623 drivers/media/platform/vivid/vivid-vid-cap.c 		if (pfmt[p].bytesperline < bytesperline)
p                 624 drivers/media/platform/vivid/vivid-vid-cap.c 			pfmt[p].bytesperline = bytesperline;
p                 626 drivers/media/platform/vivid/vivid-vid-cap.c 		pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
p                 627 drivers/media/platform/vivid/vivid-vid-cap.c 				fmt->vdownsampling[p] + fmt->data_offset[p];
p                 629 drivers/media/platform/vivid/vivid-vid-cap.c 		memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
p                 631 drivers/media/platform/vivid/vivid-vid-cap.c 	for (p = fmt->buffers; p < fmt->planes; p++)
p                 633 drivers/media/platform/vivid/vivid-vid-cap.c 			(fmt->bit_depth[p] / fmt->vdownsampling[p])) /
p                 657 drivers/media/platform/vivid/vivid-vid-cap.c 	unsigned p;
p                 761 drivers/media/platform/vivid/vivid-vid-cap.c 	for (p = 0; p < tpg_g_buffers(&dev->tpg); p++)
p                 762 drivers/media/platform/vivid/vivid-vid-cap.c 		tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline);
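The vivid-vid-cap.c lines compute each plane's buffer size from the line width, the plane's vertical downsampling factor, and a per-plane data offset: size = line_width * height / vdownsampling + data_offset, with bytesperline derived as width * bit_depth / 8. A small sketch of that per-plane arithmetic for a hypothetical NV12-like two-plane layout (the numbers are illustrative, not vivid's format tables):

#include <stdio.h>

#define MAX_PLANES 2

struct fmt {
	unsigned int bit_depth[MAX_PLANES];	/* bits per pixel on each plane */
	unsigned int vdownsampling[MAX_PLANES];	/* vertical subsampling per plane */
	unsigned int data_offset[MAX_PLANES];	/* bytes reserved before the image */
};

int main(void)
{
	/* NV12-like: full-height 8-bit luma plane, half-height 8-bit chroma plane. */
	const struct fmt f = {
		.bit_depth = { 8, 8 },
		.vdownsampling = { 1, 2 },
		.data_offset = { 0, 0 },
	};
	const unsigned int width = 1280, height = 720;

	for (unsigned int p = 0; p < MAX_PLANES; p++) {
		unsigned int bytesperline = (width * f.bit_depth[p]) >> 3;
		unsigned int sizeimage = bytesperline * height /
					 f.vdownsampling[p] + f.data_offset[p];

		printf("plane %u: bytesperline=%u sizeimage=%u\n",
		       p, bytesperline, sizeimage);
	}
	return 0;
}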
p                  32 drivers/media/platform/vivid/vivid-vid-out.c 	unsigned p;
p                  34 drivers/media/platform/vivid/vivid-vid-out.c 	for (p = vfmt->buffers; p < vfmt->planes; p++)
p                  35 drivers/media/platform/vivid/vivid-vid-out.c 		size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p] +
p                  36 drivers/media/platform/vivid/vivid-vid-out.c 			vfmt->data_offset[p];
p                  65 drivers/media/platform/vivid/vivid-vid-out.c 		for (p = 1; p < planes; p++) {
p                  66 drivers/media/platform/vivid/vivid-vid-out.c 			if (sizes[p] < dev->bytesperline_out[p] * h +
p                  67 drivers/media/platform/vivid/vivid-vid-out.c 				       vfmt->data_offset[p])
p                  71 drivers/media/platform/vivid/vivid-vid-out.c 		for (p = 0; p < planes; p++)
p                  72 drivers/media/platform/vivid/vivid-vid-out.c 			sizes[p] = p ? dev->bytesperline_out[p] * h +
p                  73 drivers/media/platform/vivid/vivid-vid-out.c 				       vfmt->data_offset[p] : size;
p                  82 drivers/media/platform/vivid/vivid-vid-out.c 	for (p = 0; p < planes; p++)
p                  83 drivers/media/platform/vivid/vivid-vid-out.c 		dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);
p                 109 drivers/media/platform/vivid/vivid-vid-out.c 	unsigned p;
p                 111 drivers/media/platform/vivid/vivid-vid-out.c 	for (p = vfmt->buffers; p < vfmt->planes; p++)
p                 112 drivers/media/platform/vivid/vivid-vid-out.c 		size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p];
p                 128 drivers/media/platform/vivid/vivid-vid-out.c 	for (p = 0; p < planes; p++) {
p                 129 drivers/media/platform/vivid/vivid-vid-out.c 		if (p)
p                 130 drivers/media/platform/vivid/vivid-vid-out.c 			size = dev->bytesperline_out[p] * h;
p                 131 drivers/media/platform/vivid/vivid-vid-out.c 		size += vb->planes[p].data_offset;
p                 133 drivers/media/platform/vivid/vivid-vid-out.c 		if (vb2_get_plane_payload(vb, p) < size) {
p                 135 drivers/media/platform/vivid/vivid-vid-out.c 					__func__, p, vb2_get_plane_payload(vb, p), size);
p                 220 drivers/media/platform/vivid/vivid-vid-out.c 	unsigned size, p;
p                 276 drivers/media/platform/vivid/vivid-vid-out.c 	for (p = 0; p < dev->fmt_out->planes; p++)
p                 277 drivers/media/platform/vivid/vivid-vid-out.c 		dev->bytesperline_out[p] =
p                 278 drivers/media/platform/vivid/vivid-vid-out.c 			(dev->sink_rect.width * dev->fmt_out->bit_depth[p]) / 8;
p                 323 drivers/media/platform/vivid/vivid-vid-out.c 	unsigned p;
p                 334 drivers/media/platform/vivid/vivid-vid-out.c 	for (p = 0; p < mp->num_planes; p++) {
p                 335 drivers/media/platform/vivid/vivid-vid-out.c 		mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p];
p                 336 drivers/media/platform/vivid/vivid-vid-out.c 		mp->plane_fmt[p].sizeimage =
p                 337 drivers/media/platform/vivid/vivid-vid-out.c 			mp->plane_fmt[p].bytesperline * mp->height +
p                 338 drivers/media/platform/vivid/vivid-vid-out.c 			fmt->data_offset[p];
p                 340 drivers/media/platform/vivid/vivid-vid-out.c 	for (p = fmt->buffers; p < fmt->planes; p++) {
p                 341 drivers/media/platform/vivid/vivid-vid-out.c 		unsigned stride = dev->bytesperline_out[p];
p                 344 drivers/media/platform/vivid/vivid-vid-out.c 			(stride * mp->height) / fmt->vdownsampling[p];
p                 360 drivers/media/platform/vivid/vivid-vid-out.c 	unsigned p;
p                 404 drivers/media/platform/vivid/vivid-vid-out.c 	for (p = 0; p < fmt->buffers; p++) {
p                 406 drivers/media/platform/vivid/vivid-vid-out.c 		bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
p                 408 drivers/media/platform/vivid/vivid-vid-out.c 		max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;
p                 410 drivers/media/platform/vivid/vivid-vid-out.c 		if (pfmt[p].bytesperline > max_bpl)
p                 411 drivers/media/platform/vivid/vivid-vid-out.c 			pfmt[p].bytesperline = max_bpl;
p                 412 drivers/media/platform/vivid/vivid-vid-out.c 		if (pfmt[p].bytesperline < bytesperline)
p                 413 drivers/media/platform/vivid/vivid-vid-out.c 			pfmt[p].bytesperline = bytesperline;
p                 415 drivers/media/platform/vivid/vivid-vid-out.c 		pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
p                 416 drivers/media/platform/vivid/vivid-vid-out.c 				fmt->vdownsampling[p] + fmt->data_offset[p];
p                 418 drivers/media/platform/vivid/vivid-vid-out.c 		memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
p                 420 drivers/media/platform/vivid/vivid-vid-out.c 	for (p = fmt->buffers; p < fmt->planes; p++)
p                 422 drivers/media/platform/vivid/vivid-vid-out.c 			(fmt->bit_depth[p] / fmt->vdownsampling[p])) /
p                 457 drivers/media/platform/vivid/vivid-vid-out.c 	unsigned p;
p                 553 drivers/media/platform/vivid/vivid-vid-out.c 	for (p = 0; p < mp->num_planes; p++)
p                 554 drivers/media/platform/vivid/vivid-vid-out.c 		dev->bytesperline_out[p] = mp->plane_fmt[p].bytesperline;
p                 555 drivers/media/platform/vivid/vivid-vid-out.c 	for (p = dev->fmt_out->buffers; p < dev->fmt_out->planes; p++)
p                 556 drivers/media/platform/vivid/vivid-vid-out.c 		dev->bytesperline_out[p] =
p                 557 drivers/media/platform/vivid/vivid-vid-out.c 			(dev->bytesperline_out[0] * dev->fmt_out->bit_depth[p]) /
p                 210 drivers/media/radio/radio-miropcm20.c static bool sanitize(char *p, int size)
p                 216 drivers/media/radio/radio-miropcm20.c 		if (p[i] < 32) {
p                 217 drivers/media/radio/radio-miropcm20.c 			p[i] = ' ';
p                 143 drivers/media/radio/radio-tea5764.c 	u16 *p = (u16 *) &radio->regs;
p                 155 drivers/media/radio/radio-tea5764.c 		p[i] = __be16_to_cpu((__force __be16)p[i]);
p                  85 drivers/media/radio/radio-terratec.c 	int p;
p                  96 drivers/media/radio/radio-terratec.c 	p = 10;
p                 106 drivers/media/radio/radio-terratec.c 		p--;
p                  78 drivers/media/radio/si4713/radio-platform-si4713.c static int radio_si4713_g_modulator(struct file *file, void *p,
p                  85 drivers/media/radio/si4713/radio-platform-si4713.c static int radio_si4713_s_modulator(struct file *file, void *p,
p                  92 drivers/media/radio/si4713/radio-platform-si4713.c static int radio_si4713_g_frequency(struct file *file, void *p,
p                  99 drivers/media/radio/si4713/radio-platform-si4713.c static int radio_si4713_s_frequency(struct file *file, void *p,
p                 106 drivers/media/radio/si4713/radio-platform-si4713.c static long radio_si4713_default(struct file *file, void *p,
p                  75 drivers/media/radio/si4713/si4713.c #define get_status_bit(p, b, m)	(((p) & (m)) >> (b))
p                  76 drivers/media/radio/si4713/si4713.c #define set_bits(p, v, b, m)	(((p) & ~(m)) | ((v) << (b)))
p                  89 drivers/media/radio/si4713/si4713.c #define set_mute(p)	((p & 1) | ((p & 1) << 1));
p                 907 drivers/media/radio/si4713/si4713.c 	u8 p = 0, a = 0, n = 0;
p                 909 drivers/media/radio/si4713/si4713.c 	rval = si4713_tx_tune_status(sdev, 0x00, &f, &p, &a, &n);
p                1306 drivers/media/radio/si4713/si4713.c 	u32 p;
p                1326 drivers/media/radio/si4713/si4713.c 						SI4713_TX_COMPONENT_ENABLE, &p);
p                1330 drivers/media/radio/si4713/si4713.c 		p = set_bits(p, stereo, 1, 1 << 1);
p                1331 drivers/media/radio/si4713/si4713.c 		p = set_bits(p, rds, 2, 1 << 2);
p                1334 drivers/media/radio/si4713/si4713.c 						SI4713_TX_COMPONENT_ENABLE, p);
p                1356 drivers/media/radio/si4713/si4713.c 		u8 p, a, n;
p                1358 drivers/media/radio/si4713/si4713.c 		rval = si4713_tx_tune_status(sdev, 0x00, &freq, &p, &a, &n);
p                  11 drivers/media/rc/bpf-lirc.c #define lirc_rcu_dereference(p)						\
p                  12 drivers/media/rc/bpf-lirc.c 	rcu_dereference_protected(p, lockdep_is_held(&ir_raw_handler_lock))
p                 339 drivers/media/rc/iguanair.c 	unsigned int i, size, p, periods;
p                 348 drivers/media/rc/iguanair.c 			p = min(periods, 127u);
p                 353 drivers/media/rc/iguanair.c 			ir->packet->payload[size++] = p | ((i & 1) ? 0x80 : 0);
p                 354 drivers/media/rc/iguanair.c 			periods -= p;
p                  52 drivers/media/rc/mtk-cir.c #define MTK_IR_END(v, p)	  ((v) == MTK_MAX_SAMPLES && (p) == 0)
p                 165 drivers/media/tuners/fc0011.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 169 drivers/media/tuners/fc0011.c 	u32 freq = p->frequency / 1000;
p                 170 drivers/media/tuners/fc0011.c 	u32 bandwidth = p->bandwidth_hz / 1000;
p                 433 drivers/media/tuners/fc0011.c 	priv->frequency = p->frequency;
p                 434 drivers/media/tuners/fc0011.c 	priv->bandwidth = p->bandwidth_hz;
p                 122 drivers/media/tuners/fc0012.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 123 drivers/media/tuners/fc0012.c 	u32 freq = p->frequency / 1000;
p                 124 drivers/media/tuners/fc0012.c 	u32 delsys = p->delivery_system;
p                 237 drivers/media/tuners/fc0012.c 		switch (p->bandwidth_hz) {
p                 305 drivers/media/tuners/fc0012.c 	priv->frequency = p->frequency;
p                 306 drivers/media/tuners/fc0012.c 	priv->bandwidth = p->bandwidth_hz;
p                 213 drivers/media/tuners/fc0013.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 214 drivers/media/tuners/fc0013.c 	u32 freq = p->frequency / 1000;
p                 215 drivers/media/tuners/fc0013.c 	u32 delsys = p->delivery_system;
p                 390 drivers/media/tuners/fc0013.c 		switch (p->bandwidth_hz) {
p                 464 drivers/media/tuners/fc0013.c 	priv->frequency = p->frequency;
p                 465 drivers/media/tuners/fc0013.c 	priv->bandwidth = p->bandwidth_hz;
p                 423 drivers/media/tuners/r820t.c 	u8 *p = &priv->buf[1];
p                 427 drivers/media/tuners/r820t.c 	rc = tuner_i2c_xfer_send_recv(&priv->i2c_props, priv->buf, 1, p, len);
p                 430 drivers/media/tuners/r820t.c 			   __func__, rc, reg, len, len, p);
p                 438 drivers/media/tuners/r820t.c 		val[i] = bitrev8(p[i]);
p                2175 drivers/media/tuners/r820t.c 				 struct analog_parameters *p)
p                2184 drivers/media/tuners/r820t.c 	if (!p->std)
p                2185 drivers/media/tuners/r820t.c 		p->std = V4L2_STD_MN;
p                2187 drivers/media/tuners/r820t.c 	if ((p->std == V4L2_STD_PAL_M) || (p->std == V4L2_STD_NTSC))
p                2196 drivers/media/tuners/r820t.c 	rc = generic_set_freq(fe, 62500l * p->frequency, bw,
p                2197 drivers/media/tuners/r820t.c 			      V4L2_TUNER_ANALOG_TV, p->std, SYS_UNDEFINED);
p                 305 drivers/media/tuners/tuner-xc2028.c 	const unsigned char   *p, *endp;
p                 312 drivers/media/tuners/tuner-xc2028.c 	p = fw->data;
p                 313 drivers/media/tuners/tuner-xc2028.c 	endp = p + fw->size;
p                 321 drivers/media/tuners/tuner-xc2028.c 	memcpy(name, p, sizeof(name) - 1);
p                 323 drivers/media/tuners/tuner-xc2028.c 	p += sizeof(name) - 1;
p                 325 drivers/media/tuners/tuner-xc2028.c 	priv->firm_version = get_unaligned_le16(p);
p                 326 drivers/media/tuners/tuner-xc2028.c 	p += 2;
p                 328 drivers/media/tuners/tuner-xc2028.c 	n_array = get_unaligned_le16(p);
p                 329 drivers/media/tuners/tuner-xc2028.c 	p += 2;
p                 344 drivers/media/tuners/tuner-xc2028.c 	while (p < endp) {
p                 356 drivers/media/tuners/tuner-xc2028.c 		if (endp - p < sizeof(type) + sizeof(id) + sizeof(size))
p                 359 drivers/media/tuners/tuner-xc2028.c 		type = get_unaligned_le32(p);
p                 360 drivers/media/tuners/tuner-xc2028.c 		p += sizeof(type);
p                 362 drivers/media/tuners/tuner-xc2028.c 		id = get_unaligned_le64(p);
p                 363 drivers/media/tuners/tuner-xc2028.c 		p += sizeof(id);
p                 366 drivers/media/tuners/tuner-xc2028.c 			int_freq = get_unaligned_le16(p);
p                 367 drivers/media/tuners/tuner-xc2028.c 			p += sizeof(int_freq);
p                 368 drivers/media/tuners/tuner-xc2028.c 			if (endp - p < sizeof(size))
p                 372 drivers/media/tuners/tuner-xc2028.c 		size = get_unaligned_le32(p);
p                 373 drivers/media/tuners/tuner-xc2028.c 		p += sizeof(size);
p                 375 drivers/media/tuners/tuner-xc2028.c 		if (!size || size > endp - p) {
p                 380 drivers/media/tuners/tuner-xc2028.c 			       type, (unsigned long long)id, (endp - p), size);
p                 384 drivers/media/tuners/tuner-xc2028.c 		priv->firm[n].ptr = kmemdup(p, size, GFP_KERNEL);
p                 402 drivers/media/tuners/tuner-xc2028.c 		p += size;
p                 547 drivers/media/tuners/tuner-xc2028.c 	unsigned char      *p, *endp, buf[MAX_XFER_SIZE];
p                 563 drivers/media/tuners/tuner-xc2028.c 	p = priv->firm[pos].ptr;
p                 564 drivers/media/tuners/tuner-xc2028.c 	endp = p + priv->firm[pos].size;
p                 566 drivers/media/tuners/tuner-xc2028.c 	while (p < endp) {
p                 570 drivers/media/tuners/tuner-xc2028.c 		if (p + sizeof(size) > endp) {
p                 575 drivers/media/tuners/tuner-xc2028.c 		size = le16_to_cpu(*(__le16 *) p);
p                 576 drivers/media/tuners/tuner-xc2028.c 		p += sizeof(size);
p                 586 drivers/media/tuners/tuner-xc2028.c 					   (*p) & 0x7f);
p                 597 drivers/media/tuners/tuner-xc2028.c 						  (*p) & 0x7f);
p                 616 drivers/media/tuners/tuner-xc2028.c 		if ((size + p > endp)) {
p                 618 drivers/media/tuners/tuner-xc2028.c 				   size, (endp - p));
p                 622 drivers/media/tuners/tuner-xc2028.c 		buf[0] = *p;
p                 623 drivers/media/tuners/tuner-xc2028.c 		p++;
p                 631 drivers/media/tuners/tuner-xc2028.c 			memcpy(buf + 1, p, len);
p                 639 drivers/media/tuners/tuner-xc2028.c 			p += len;
p                 658 drivers/media/tuners/tuner-xc2028.c 	unsigned char	   *p;
p                 676 drivers/media/tuners/tuner-xc2028.c 	p = priv->firm[pos].ptr;
p                 681 drivers/media/tuners/tuner-xc2028.c 		p += 12 * scode;
p                 686 drivers/media/tuners/tuner-xc2028.c 		    le16_to_cpu(*(__le16 *)(p + 14 * scode)) != 12)
p                 688 drivers/media/tuners/tuner-xc2028.c 		p += 14 * scode + 2;
p                 704 drivers/media/tuners/tuner-xc2028.c 	rc = i2c_send(priv, p, 12);
p                1158 drivers/media/tuners/tuner-xc2028.c 			      struct analog_parameters *p)
p                1165 drivers/media/tuners/tuner-xc2028.c 	if (p->mode == V4L2_TUNER_RADIO) {
p                1169 drivers/media/tuners/tuner-xc2028.c 		return generic_set_freq(fe, (625l * p->frequency) / 10,
p                1174 drivers/media/tuners/tuner-xc2028.c 	if (!p->std)
p                1175 drivers/media/tuners/tuner-xc2028.c 		p->std = V4L2_STD_MN;
p                1178 drivers/media/tuners/tuner-xc2028.c 	if (!(p->std & V4L2_STD_MN))
p                1182 drivers/media/tuners/tuner-xc2028.c 	p->std |= parse_audio_std_option();
p                1184 drivers/media/tuners/tuner-xc2028.c 	return generic_set_freq(fe, 62500l * p->frequency,
p                1185 drivers/media/tuners/tuner-xc2028.c 				V4L2_TUNER_ANALOG_TV, type, p->std, 0);
p                1386 drivers/media/tuners/tuner-xc2028.c 	struct xc2028_ctrl *p    = priv_cfg;
p                1396 drivers/media/tuners/tuner-xc2028.c 	memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
p                1402 drivers/media/tuners/tuner-xc2028.c 	if (!firmware_name[0] && p->fname &&
p                1403 drivers/media/tuners/tuner-xc2028.c 	    priv->fname && strcmp(p->fname, priv->fname))
p                1411 drivers/media/tuners/tuner-xc2028.c 			priv->fname = kstrdup(p->fname, GFP_KERNEL);
p                 687 drivers/media/tuners/xc4000.c 	unsigned char      *p;
p                 693 drivers/media/tuners/xc4000.c 	p = priv->firm[pos].ptr;
p                 698 drivers/media/tuners/xc4000.c 	rc = xc_load_i2c_sequence(fe, p);
p                 709 drivers/media/tuners/xc4000.c 	const unsigned char   *p, *endp;
p                 745 drivers/media/tuners/xc4000.c 	p = fw->data;
p                 746 drivers/media/tuners/xc4000.c 	endp = p + fw->size;
p                 754 drivers/media/tuners/xc4000.c 	memcpy(name, p, sizeof(name) - 1);
p                 756 drivers/media/tuners/xc4000.c 	p += sizeof(name) - 1;
p                 758 drivers/media/tuners/xc4000.c 	priv->firm_version = get_unaligned_le16(p);
p                 759 drivers/media/tuners/xc4000.c 	p += 2;
p                 761 drivers/media/tuners/xc4000.c 	n_array = get_unaligned_le16(p);
p                 762 drivers/media/tuners/xc4000.c 	p += 2;
p                 777 drivers/media/tuners/xc4000.c 	while (p < endp) {
p                 789 drivers/media/tuners/xc4000.c 		if (endp - p < sizeof(type) + sizeof(id) + sizeof(size))
p                 792 drivers/media/tuners/xc4000.c 		type = get_unaligned_le32(p);
p                 793 drivers/media/tuners/xc4000.c 		p += sizeof(type);
p                 795 drivers/media/tuners/xc4000.c 		id = get_unaligned_le64(p);
p                 796 drivers/media/tuners/xc4000.c 		p += sizeof(id);
p                 799 drivers/media/tuners/xc4000.c 			int_freq = get_unaligned_le16(p);
p                 800 drivers/media/tuners/xc4000.c 			p += sizeof(int_freq);
p                 801 drivers/media/tuners/xc4000.c 			if (endp - p < sizeof(size))
p                 805 drivers/media/tuners/xc4000.c 		size = get_unaligned_le32(p);
p                 806 drivers/media/tuners/xc4000.c 		p += sizeof(size);
p                 808 drivers/media/tuners/xc4000.c 		if (!size || size > endp - p) {
p                 811 drivers/media/tuners/xc4000.c 			       endp - p, size);
p                 815 drivers/media/tuners/xc4000.c 		priv->firm[n].ptr = kmemdup(p, size, GFP_KERNEL);
p                 834 drivers/media/tuners/xc4000.c 		p += size;
p                 863 drivers/media/tuners/xc4000.c 	unsigned char	*p;
p                 883 drivers/media/tuners/xc4000.c 	p = priv->firm[pos].ptr;
p                 887 drivers/media/tuners/xc4000.c 	p += 12 * scode;
p                 898 drivers/media/tuners/xc4000.c 	memcpy(&scode_buf[1], p, 12);
p                1324 drivers/media/tuners/xc5000.c 	struct xc5000_config *p = priv_cfg;
p                1328 drivers/media/tuners/xc5000.c 	if (p->if_khz)
p                1329 drivers/media/tuners/xc5000.c 		priv->if_khz = p->if_khz;
p                1331 drivers/media/tuners/xc5000.c 	if (p->radio_input)
p                1332 drivers/media/tuners/xc5000.c 		priv->radio_input = p->radio_input;
p                1334 drivers/media/tuners/xc5000.c 	if (p->output_amp)
p                1335 drivers/media/tuners/xc5000.c 		priv->output_amp = p->output_amp;
p                 312 drivers/media/usb/au0828/au0828-video.c 			      unsigned char *p,
p                 325 drivers/media/usb/au0828/au0828-video.c 	startread = p;
p                 416 drivers/media/usb/au0828/au0828-video.c 			      unsigned char *p,
p                 434 drivers/media/usb/au0828/au0828-video.c 	if (p == NULL) {
p                 448 drivers/media/usb/au0828/au0828-video.c 	startread = p;
p                 499 drivers/media/usb/au0828/au0828-video.c 	unsigned char *p;
p                 543 drivers/media/usb/au0828/au0828-video.c 		p = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
p                 544 drivers/media/usb/au0828/au0828-video.c 		fbyte = p[0];
p                 546 drivers/media/usb/au0828/au0828-video.c 		p += 4;
p                 550 drivers/media/usb/au0828/au0828-video.c 			p += 4;
p                 612 drivers/media/usb/au0828/au0828-video.c 				au0828_copy_vbi(dev, vbi_dma_q, vbi_buf, p,
p                 616 drivers/media/usb/au0828/au0828-video.c 			p += lencopy;
p                 621 drivers/media/usb/au0828/au0828-video.c 			au0828_copy_video(dev, dma_q, buf, p, outp, len);
p                 485 drivers/media/usb/cpia2/cpia2_v4l.c static int cpia2_g_parm(struct file *file, void *fh, struct v4l2_streamparm *p)
p                 488 drivers/media/usb/cpia2/cpia2_v4l.c 	struct v4l2_captureparm *cap = &p->parm.capture;
p                 491 drivers/media/usb/cpia2/cpia2_v4l.c 	if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
p                 504 drivers/media/usb/cpia2/cpia2_v4l.c static int cpia2_s_parm(struct file *file, void *fh, struct v4l2_streamparm *p)
p                 507 drivers/media/usb/cpia2/cpia2_v4l.c 	struct v4l2_captureparm *cap = &p->parm.capture;
p                 513 drivers/media/usb/cpia2/cpia2_v4l.c 	ret = cpia2_g_parm(file, fh, p);
p                1640 drivers/media/usb/cx231xx/cx231xx-417.c 				struct v4l2_requestbuffers *p)
p                1644 drivers/media/usb/cx231xx/cx231xx-417.c 	return videobuf_reqbufs(&fh->vidq, p);
p                1648 drivers/media/usb/cx231xx/cx231xx-417.c 				struct v4l2_buffer *p)
p                1652 drivers/media/usb/cx231xx/cx231xx-417.c 	return videobuf_querybuf(&fh->vidq, p);
p                1656 drivers/media/usb/cx231xx/cx231xx-417.c 				struct v4l2_buffer *p)
p                1660 drivers/media/usb/cx231xx/cx231xx-417.c 	return videobuf_qbuf(&fh->vidq, p);
p                  87 drivers/media/usb/dvb-usb-v2/lmedm04.c #define debug_data_snipet(level, name, p) \
p                  88 drivers/media/usb/dvb-usb-v2/lmedm04.c 	 deb_info(level, name" (%8phN)", p);
p                 586 drivers/media/usb/dvb-usb-v2/lmedm04.c static u8 check_sum(u8 *p, u8 len)
p                 590 drivers/media/usb/dvb-usb-v2/lmedm04.c 		sum += *p++;
p                 503 drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c 				       struct dtv_frontend_properties *p)
p                 509 drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c 	p->inversion = /* FIXME */ ? INVERSION_ON : INVERSION_OFF;
p                 512 drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c 		fe->ops.tuner_ops.get_bandwidth(fe, &p->bandwidth_hz);
p                 514 drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c 		fe->ops.tuner_ops.get_frequency(fe, &p->frequency);
p                 515 drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c 	mxl1x1sf_demod_get_tps_code_rate(state, &p->code_rate_HP);
p                 516 drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c 	mxl1x1sf_demod_get_tps_code_rate(state, &p->code_rate_LP);
p                 517 drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c 	mxl1x1sf_demod_get_tps_modulation(state, &p->modulation);
p                 519 drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c 					      &p->transmission_mode);
p                 521 drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c 					      &p->guard_interval);
p                 523 drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c 					 &p->hierarchy);
p                 483 drivers/media/usb/dvb-usb/cxusb.c 	struct usb_data_stream_properties *p = &d->props.adapter[0].fe[0].stream;
p                 485 drivers/media/usb/dvb-usb/cxusb.c 	const int junk_len = p->u.bulk.buffersize;
p                 495 drivers/media/usb/dvb-usb/cxusb.c 				 usb_rcvbulkpipe(d->udev, p->endpoint),
p                1169 drivers/media/usb/dvb-usb/cxusb.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1174 drivers/media/usb/dvb-usb/cxusb.c 	u8 band = BAND_OF_FREQUENCY(p->frequency / 1000);
p                 843 drivers/media/usb/dvb-usb/dib0700_devices.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 848 drivers/media/usb/dvb-usb/dib0700_devices.c 	u8 band = BAND_OF_FREQUENCY(p->frequency/1000);
p                 861 drivers/media/usb/dvb-usb/dib0700_devices.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 866 drivers/media/usb/dvb-usb/dib0700_devices.c 	u8 band = BAND_OF_FREQUENCY(p->frequency/1000);
p                 986 drivers/media/usb/dvb-usb/dib0700_devices.c 	struct usb_device_descriptor *p = &adap->dev->udev->descriptor;
p                 992 drivers/media/usb/dvb-usb/dib0700_devices.c 	if (p->idVendor  == cpu_to_le16(USB_VID_PINNACLE) &&
p                 993 drivers/media/usb/dvb-usb/dib0700_devices.c 	    p->idProduct == cpu_to_le16(USB_PID_PINNACLE_PCTV72E))
p                1044 drivers/media/usb/dvb-usb/dib0700_devices.c 	struct usb_device_descriptor *p = &adap->dev->udev->descriptor;
p                1050 drivers/media/usb/dvb-usb/dib0700_devices.c 	if (p->idVendor  == cpu_to_le16(USB_VID_PINNACLE) &&
p                1051 drivers/media/usb/dvb-usb/dib0700_devices.c 	    p->idProduct == cpu_to_le16(USB_PID_PINNACLE_PCTV72E))
p                1285 drivers/media/usb/dvb-usb/dib0700_devices.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1290 drivers/media/usb/dvb-usb/dib0700_devices.c 	u8 band = BAND_OF_FREQUENCY(p->frequency/1000);
p                1787 drivers/media/usb/dvb-usb/dw2102.c 	u8 *b, *p;
p                1806 drivers/media/usb/dvb-usb/dw2102.c 	p = kmalloc(fw->size, GFP_KERNEL);
p                1812 drivers/media/usb/dvb-usb/dw2102.c 	if (p != NULL) {
p                1813 drivers/media/usb/dvb-usb/dw2102.c 		memcpy(p, fw->data, fw->size);
p                1815 drivers/media/usb/dvb-usb/dw2102.c 			b = (u8 *) p + i;
p                1891 drivers/media/usb/dvb-usb/dw2102.c 		kfree(p);
p                 447 drivers/media/usb/dvb-usb/opera1.c 	u8 *b, *p;
p                 457 drivers/media/usb/dvb-usb/opera1.c 		p = kmalloc(fw->size, GFP_KERNEL);
p                 459 drivers/media/usb/dvb-usb/opera1.c 		if (p != NULL && testval != 0x67) {
p                 462 drivers/media/usb/dvb-usb/opera1.c 			memcpy(p, fw->data, fw->size);
p                 470 drivers/media/usb/dvb-usb/opera1.c 				b = (u8 *) p + i;
p                 490 drivers/media/usb/dvb-usb/opera1.c 	kfree(p);
p                 695 drivers/media/usb/em28xx/em28xx-audio.c 	tmp.tlv.p = em28xx_db_scale,
p                1605 drivers/media/usb/em28xx/em28xx-video.c 			 struct v4l2_streamparm *p)
p                1612 drivers/media/usb/em28xx/em28xx-video.c 	if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
p                1613 drivers/media/usb/em28xx/em28xx-video.c 	    p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
p                1616 drivers/media/usb/em28xx/em28xx-video.c 	p->parm.capture.readbuffers = EM28XX_MIN_BUF;
p                1617 drivers/media/usb/em28xx/em28xx-video.c 	p->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
p                1622 drivers/media/usb/em28xx/em28xx-video.c 			p->parm.capture.timeperframe = ival.interval;
p                1625 drivers/media/usb/em28xx/em28xx-video.c 					    &p->parm.capture.timeperframe);
p                1632 drivers/media/usb/em28xx/em28xx-video.c 			 struct v4l2_streamparm *p)
p                1637 drivers/media/usb/em28xx/em28xx-video.c 		p->parm.capture.timeperframe
p                1644 drivers/media/usb/em28xx/em28xx-video.c 	if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
p                1645 drivers/media/usb/em28xx/em28xx-video.c 	    p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
p                1648 drivers/media/usb/em28xx/em28xx-video.c 	memset(&p->parm, 0, sizeof(p->parm));
p                1649 drivers/media/usb/em28xx/em28xx-video.c 	p->parm.capture.readbuffers = EM28XX_MIN_BUF;
p                1650 drivers/media/usb/em28xx/em28xx-video.c 	p->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
p                1654 drivers/media/usb/em28xx/em28xx-video.c 		p->parm.capture.timeperframe = ival.interval;
p                  52 drivers/media/usb/go7007/go7007-fw.c 	unsigned char *p; /* destination */
p                  64 drivers/media/usb/go7007/go7007-fw.c 		*name.p = name.a >> 24; \
p                  65 drivers/media/usb/go7007/go7007-fw.c 		++name.p; \
p                 292 drivers/media/usb/go7007/go7007-fw.c 	int i, p = 0;
p                 294 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0xff;
p                 295 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0xd8;
p                 296 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0xff;
p                 297 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0xdb;
p                 298 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0;
p                 299 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 2 + 65;
p                 300 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0;
p                 301 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = default_intra_quant_table[0];
p                 304 drivers/media/usb/go7007/go7007-fw.c 		buf[p++] = (default_intra_quant_table[zz[i]] * q) >> 3;
p                 305 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0xff;
p                 306 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0xc0;
p                 307 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0;
p                 308 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 17;
p                 309 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 8;
p                 310 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = go->height >> 8;
p                 311 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = go->height & 0xff;
p                 312 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = go->width >> 8;
p                 313 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = go->width & 0xff;
p                 314 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 3;
p                 315 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 1;
p                 316 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0x22;
p                 317 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0;
p                 318 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 2;
p                 319 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0x11;
p                 320 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0;
p                 321 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 3;
p                 322 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0x11;
p                 323 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0;
p                 324 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0xff;
p                 325 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0xc4;
p                 326 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 418 >> 8;
p                 327 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 418 & 0xff;
p                 328 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0x00;
p                 329 drivers/media/usb/go7007/go7007-fw.c 	memcpy(buf + p, bits_dc_luminance + 1, 16);
p                 330 drivers/media/usb/go7007/go7007-fw.c 	p += 16;
p                 331 drivers/media/usb/go7007/go7007-fw.c 	memcpy(buf + p, val_dc_luminance, sizeof(val_dc_luminance));
p                 332 drivers/media/usb/go7007/go7007-fw.c 	p += sizeof(val_dc_luminance);
p                 333 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0x01;
p                 334 drivers/media/usb/go7007/go7007-fw.c 	memcpy(buf + p, bits_dc_chrominance + 1, 16);
p                 335 drivers/media/usb/go7007/go7007-fw.c 	p += 16;
p                 336 drivers/media/usb/go7007/go7007-fw.c 	memcpy(buf + p, val_dc_chrominance, sizeof(val_dc_chrominance));
p                 337 drivers/media/usb/go7007/go7007-fw.c 	p += sizeof(val_dc_chrominance);
p                 338 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0x10;
p                 339 drivers/media/usb/go7007/go7007-fw.c 	memcpy(buf + p, bits_ac_luminance + 1, 16);
p                 340 drivers/media/usb/go7007/go7007-fw.c 	p += 16;
p                 341 drivers/media/usb/go7007/go7007-fw.c 	memcpy(buf + p, val_ac_luminance, sizeof(val_ac_luminance));
p                 342 drivers/media/usb/go7007/go7007-fw.c 	p += sizeof(val_ac_luminance);
p                 343 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0x11;
p                 344 drivers/media/usb/go7007/go7007-fw.c 	memcpy(buf + p, bits_ac_chrominance + 1, 16);
p                 345 drivers/media/usb/go7007/go7007-fw.c 	p += 16;
p                 346 drivers/media/usb/go7007/go7007-fw.c 	memcpy(buf + p, val_ac_chrominance, sizeof(val_ac_chrominance));
p                 347 drivers/media/usb/go7007/go7007-fw.c 	p += sizeof(val_ac_chrominance);
p                 348 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0xff;
p                 349 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0xda;
p                 350 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0;
p                 351 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 12;
p                 352 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 3;
p                 353 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 1;
p                 354 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0x00;
p                 355 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 2;
p                 356 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0x11;
p                 357 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 3;
p                 358 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0x11;
p                 359 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0;
p                 360 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 63;
p                 361 drivers/media/usb/go7007/go7007-fw.c 	buf[p++] = 0;
p                 362 drivers/media/usb/go7007/go7007-fw.c 	return p;
p                 163 drivers/media/usb/gspca/m5602/m5602_core.c 	u8 *p;
p                 179 drivers/media/usb/gspca/m5602/m5602_core.c 	p = buf + 16;
p                 183 drivers/media/usb/gspca/m5602/m5602_core.c 		memcpy(p, sensor_urb_skeleton + 16, 4);
p                 184 drivers/media/usb/gspca/m5602/m5602_core.c 		p[3] = i2c_data[i];
p                 185 drivers/media/usb/gspca/m5602/m5602_core.c 		p += 4;
p                 191 drivers/media/usb/gspca/m5602/m5602_core.c 	memcpy(p, sensor_urb_skeleton + 20, 4);
p                 194 drivers/media/usb/gspca/m5602/m5602_core.c 	p[3] = 0x10 + len;
p                 356 drivers/media/usb/gspca/mars.c 	int p;
p                 362 drivers/media/usb/gspca/mars.c 	for (p = 0; p < len - 6; p++) {
p                 363 drivers/media/usb/gspca/mars.c 		if (data[0 + p] == 0xff
p                 364 drivers/media/usb/gspca/mars.c 		    && data[1 + p] == 0xff
p                 365 drivers/media/usb/gspca/mars.c 		    && data[2 + p] == 0x00
p                 366 drivers/media/usb/gspca/mars.c 		    && data[3 + p] == 0xff
p                 367 drivers/media/usb/gspca/mars.c 		    && data[4 + p] == 0x96) {
p                 368 drivers/media/usb/gspca/mars.c 			if (data[5 + p] == 0x64
p                 369 drivers/media/usb/gspca/mars.c 			    || data[5 + p] == 0x65
p                 370 drivers/media/usb/gspca/mars.c 			    || data[5 + p] == 0x66
p                 371 drivers/media/usb/gspca/mars.c 			    || data[5 + p] == 0x67) {
p                 373 drivers/media/usb/gspca/mars.c 					  p, len);
p                 375 drivers/media/usb/gspca/mars.c 						data, p);
p                 380 drivers/media/usb/gspca/mars.c 				data += p + 16;
p                 381 drivers/media/usb/gspca/mars.c 				len -= p + 16;
p                1441 drivers/media/usb/gspca/ov534_9.c 		const char *p;
p                1466 drivers/media/usb/gspca/ov534_9.c 		p = video_device_node_name(&gspca_dev->vdev);
p                1467 drivers/media/usb/gspca/ov534_9.c 		l = strlen(p) - 1;
p                1468 drivers/media/usb/gspca/ov534_9.c 		if (p[l] == '0')
p                 352 drivers/media/usb/gspca/t613.c 	u8 *p, *tmpbuf;
p                 355 drivers/media/usb/gspca/t613.c 		p = tmpbuf = gspca_dev->usb_buf;
p                 357 drivers/media/usb/gspca/t613.c 		p = tmpbuf = kmalloc_array(len, 2, GFP_KERNEL);
p                 365 drivers/media/usb/gspca/t613.c 		*p++ = reg++;
p                 366 drivers/media/usb/gspca/t613.c 		*p++ = *buffer++;
p                 381 drivers/media/usb/gspca/t613.c 	const u8 *p;
p                 417 drivers/media/usb/gspca/t613.c 	p = sensor_init;
p                 418 drivers/media/usb/gspca/t613.c 	while (*p != 0) {
p                 419 drivers/media/usb/gspca/t613.c 		val[1] = *p++;
p                 420 drivers/media/usb/gspca/t613.c 		val[3] = *p++;
p                 421 drivers/media/usb/gspca/t613.c 		if (*p == 0)
p                1011 drivers/media/usb/gspca/topro.c 			const struct cmd *p, int l)
p                1014 drivers/media/usb/gspca/topro.c 		reg_w(gspca_dev, p->reg, p->val);
p                1015 drivers/media/usb/gspca/topro.c 		p++;
p                1038 drivers/media/usb/gspca/topro.c 			const struct cmd *p, int l)
p                1041 drivers/media/usb/gspca/topro.c 		i2c_w(gspca_dev, p->reg, p->val);
p                1042 drivers/media/usb/gspca/topro.c 		p++;
p                 228 drivers/media/usb/gspca/touptek.c 		const struct cmd *p, int l)
p                 231 drivers/media/usb/gspca/touptek.c 		reg_w(gspca_dev, p->value, p->index);
p                 232 drivers/media/usb/gspca/touptek.c 		p++;
p                 100 drivers/media/usb/hdpvr/hdpvr-video.c 	struct list_head *p;
p                 104 drivers/media/usb/hdpvr/hdpvr-video.c 	for (p = q->next; p != q;) {
p                 105 drivers/media/usb/hdpvr/hdpvr-video.c 		buf = list_entry(p, struct hdpvr_buffer, buff_list);
p                 111 drivers/media/usb/hdpvr/hdpvr-video.c 		tmp = p->next;
p                 112 drivers/media/usb/hdpvr/hdpvr-video.c 		list_del(p);
p                 114 drivers/media/usb/hdpvr/hdpvr-video.c 		p = tmp;
p                1980 drivers/media/usb/pvrusb2/pvrusb2-hdw.c 	const unsigned char *p;
p                2000 drivers/media/usb/pvrusb2/pvrusb2-hdw.c 	if (!i2ccnt && ((p = (mid < ARRAY_SIZE(module_i2c_addresses)) ?
p                2003 drivers/media/usb/pvrusb2/pvrusb2-hdw.c 		i2ccnt = pvr2_copy_i2c_addr_list(i2caddr, p,
p                4815 drivers/media/usb/pvrusb2/pvrusb2-hdw.c 	const char *p;
p                4822 drivers/media/usb/pvrusb2/pvrusb2-hdw.c 		p = NULL;
p                4823 drivers/media/usb/pvrusb2/pvrusb2-hdw.c 		if (id < ARRAY_SIZE(module_names)) p = module_names[id];
p                4824 drivers/media/usb/pvrusb2/pvrusb2-hdw.c 		if (p) {
p                4825 drivers/media/usb/pvrusb2/pvrusb2-hdw.c 			ccnt = scnprintf(buf + tcnt, acnt - tcnt, "  %s:", p);
p                 110 drivers/media/usb/pvrusb2/pvrusb2-std.c 	const struct std_name *p;
p                 112 drivers/media/usb/pvrusb2/pvrusb2-std.c 		p = arrPtr + idx;
p                 113 drivers/media/usb/pvrusb2/pvrusb2-std.c 		if (strlen(p->name) != bufSize) continue;
p                 114 drivers/media/usb/pvrusb2/pvrusb2-std.c 		if (!memcmp(bufPtr,p->name,bufSize)) return p;
p                  70 drivers/media/usb/pwc/pwc-dec23.c 	unsigned char *p;
p                  76 drivers/media/usb/pwc/pwc-dec23.c 		p = pdec->table_bitpowermask[bit];
p                  81 drivers/media/usb/pwc/pwc-dec23.c 			*p++ = val;
p                  98 drivers/media/usb/siano/smsusb.c 		struct sms_msg_hdr *phdr = (struct sms_msg_hdr *)surb->cb->p;
p                 166 drivers/media/usb/siano/smsusb.c 		surb->cb->p,
p                 220 drivers/media/usb/stk1160/stk1160-video.c 	u8 *p;
p                 241 drivers/media/usb/stk1160/stk1160-video.c 		p = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
p                 256 drivers/media/usb/stk1160/stk1160-video.c 		if (p[0] == 0xc0) {
p                 277 drivers/media/usb/stk1160/stk1160-video.c 		if (p[0] == 0xc0 || p[0] == 0x80) {
p                 282 drivers/media/usb/stk1160/stk1160-video.c 			dev->isoc_ctl.buf->odd = *p & 0x40;
p                 287 drivers/media/usb/stk1160/stk1160-video.c 		stk1160_copy_video(dev, p, len);
p                 215 drivers/media/usb/tm6000/tm6000-i2c.c 	unsigned char *p = dev->eedata;
p                 223 drivers/media/usb/tm6000/tm6000-i2c.c 		*p = i;
p                 224 drivers/media/usb/tm6000/tm6000-i2c.c 		rc = tm6000_i2c_recv_regs(dev, 0xa0, i, p, 1);
p                 226 drivers/media/usb/tm6000/tm6000-i2c.c 			if (p == dev->eedata)
p                 236 drivers/media/usb/tm6000/tm6000-i2c.c 		p++;
p                 374 drivers/media/usb/tm6000/tm6000-video.c 	char *p;
p                 392 drivers/media/usb/tm6000/tm6000-video.c 			p = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
p                 395 drivers/media/usb/tm6000/tm6000-video.c 					rc = copy_multiplexed(p, len, urb);
p                 399 drivers/media/usb/tm6000/tm6000-video.c 					copy_streams(p, len, urb);
p                 967 drivers/media/usb/tm6000/tm6000-video.c 			   struct v4l2_requestbuffers *p)
p                 971 drivers/media/usb/tm6000/tm6000-video.c 	return videobuf_reqbufs(&fh->vb_vidq, p);
p                 975 drivers/media/usb/tm6000/tm6000-video.c 			    struct v4l2_buffer *p)
p                 979 drivers/media/usb/tm6000/tm6000-video.c 	return videobuf_querybuf(&fh->vb_vidq, p);
p                 982 drivers/media/usb/tm6000/tm6000-video.c static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
p                 986 drivers/media/usb/tm6000/tm6000-video.c 	return videobuf_qbuf(&fh->vb_vidq, p);
p                 989 drivers/media/usb/tm6000/tm6000-video.c static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
p                 993 drivers/media/usb/tm6000/tm6000-video.c 	return videobuf_dqbuf(&fh->vb_vidq, p,
p                1009 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1015 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	div = (p->frequency + 36166667) / 166667;
p                1020 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	data[3] = p->frequency < 592000000 ? 0x40 : 0x80;
p                1064 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1072 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	tuner_frequency = p->frequency + 36130000;
p                1086 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	if (p->frequency < 49000000)
p                1088 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	else if (p->frequency < 159000000)
p                1090 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	else if (p->frequency < 444000000)
p                1092 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	else if (p->frequency < 861000000)
p                1097 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	switch (p->bandwidth_hz) {
p                1119 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	tuner_frequency = (((p->frequency / 1000) * 6) + 217280) / 1000;
p                1271 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1277 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	if ((p->frequency < 950000) || (p->frequency > 2150000))
p                1280 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	div = (p->frequency + (125 - 1)) / 125;	/* round correctly */
p                1286 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	if (p->frequency > 1530000)
p                1315 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1321 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	div = p->frequency / 125;
p                1343 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1349 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	div = (p->frequency + 35937500 + 31250) / 62500;
p                1354 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	data[3] = (p->frequency < 174000000 ? 0x88 : p->frequency < 470000000 ? 0x84 : 0x81);
p                1388 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                1399 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	tuner_frequency = p->frequency;
p                1417 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c 	tuner_frequency = ((p->frequency + 36125000) / 62500);
p                  78 drivers/media/usb/ttusb-dec/ttusbdecfe.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                  86 drivers/media/usb/ttusb-dec/ttusbdecfe.c 	__be32 freq = htonl(p->frequency / 1000);
p                 105 drivers/media/usb/ttusb-dec/ttusbdecfe.c 	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p                 123 drivers/media/usb/ttusb-dec/ttusbdecfe.c 	freq = htonl(p->frequency +
p                 126 drivers/media/usb/ttusb-dec/ttusbdecfe.c 	sym_rate = htonl(p->symbol_rate);
p                 725 drivers/media/usb/uvc/uvc_driver.c 	unsigned int size, i, n, p;
p                 803 drivers/media/usb/uvc/uvc_driver.c 	p = buflen >= 4 ? buffer[3] : 0;
p                 806 drivers/media/usb/uvc/uvc_driver.c 	if (buflen < size + p*n) {
p                 813 drivers/media/usb/uvc/uvc_driver.c 	streaming->header.bNumFormats = p;
p                 826 drivers/media/usb/uvc/uvc_driver.c 	streaming->header.bmaControls = kmemdup(&buffer[size], p * n,
p                1002 drivers/media/usb/uvc/uvc_driver.c 	unsigned int n, p;
p                1036 drivers/media/usb/uvc/uvc_driver.c 		p = buflen >= 22 ? buffer[21] : 0;
p                1037 drivers/media/usb/uvc/uvc_driver.c 		n = buflen >= 25 + p ? buffer[22+p] : 0;
p                1039 drivers/media/usb/uvc/uvc_driver.c 		if (buflen < 25 + p + 2*n) {
p                1047 drivers/media/usb/uvc/uvc_driver.c 					p + 1, 2*n);
p                1053 drivers/media/usb/uvc/uvc_driver.c 		memcpy(unit->baSourceID, &buffer[22], p);
p                1054 drivers/media/usb/uvc/uvc_driver.c 		unit->extension.bControlSize = buffer[22+p];
p                1058 drivers/media/usb/uvc/uvc_driver.c 		memcpy(unit->extension.bmControls, &buffer[23+p], 2*n);
p                1060 drivers/media/usb/uvc/uvc_driver.c 		if (buffer[24+p+2*n] != 0)
p                1061 drivers/media/usb/uvc/uvc_driver.c 			usb_string(udev, buffer[24+p+2*n], unit->name,
p                1081 drivers/media/usb/uvc/uvc_driver.c 	unsigned int i, n, p, len;
p                1142 drivers/media/usb/uvc/uvc_driver.c 		p = 0;
p                1151 drivers/media/usb/uvc/uvc_driver.c 			p = buflen >= 10 + n ? buffer[9+n] : 0;
p                1155 drivers/media/usb/uvc/uvc_driver.c 		if (buflen < len + n + p) {
p                1163 drivers/media/usb/uvc/uvc_driver.c 					1, n + p);
p                1181 drivers/media/usb/uvc/uvc_driver.c 			term->media.bTransportModeSize = p;
p                1185 drivers/media/usb/uvc/uvc_driver.c 			memcpy(term->media.bmTransportModes, &buffer[10+n], p);
p                1238 drivers/media/usb/uvc/uvc_driver.c 		p = buflen >= 5 ? buffer[4] : 0;
p                1240 drivers/media/usb/uvc/uvc_driver.c 		if (buflen < 5 || buflen < 6 + p) {
p                1247 drivers/media/usb/uvc/uvc_driver.c 		unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, 0);
p                1251 drivers/media/usb/uvc/uvc_driver.c 		memcpy(unit->baSourceID, &buffer[5], p);
p                1253 drivers/media/usb/uvc/uvc_driver.c 		if (buffer[5+p] != 0)
p                1254 drivers/media/usb/uvc/uvc_driver.c 			usb_string(udev, buffer[5+p], unit->name,
p                1264 drivers/media/usb/uvc/uvc_driver.c 		p = dev->uvc_version >= 0x0110 ? 10 : 9;
p                1266 drivers/media/usb/uvc/uvc_driver.c 		if (buflen < p + n) {
p                1296 drivers/media/usb/uvc/uvc_driver.c 		p = buflen >= 22 ? buffer[21] : 0;
p                1297 drivers/media/usb/uvc/uvc_driver.c 		n = buflen >= 24 + p ? buffer[22+p] : 0;
p                1299 drivers/media/usb/uvc/uvc_driver.c 		if (buflen < 24 + p + n) {
p                1306 drivers/media/usb/uvc/uvc_driver.c 		unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, n);
p                1312 drivers/media/usb/uvc/uvc_driver.c 		memcpy(unit->baSourceID, &buffer[22], p);
p                1313 drivers/media/usb/uvc/uvc_driver.c 		unit->extension.bControlSize = buffer[22+p];
p                1315 drivers/media/usb/uvc/uvc_driver.c 		memcpy(unit->extension.bmControls, &buffer[23+p], n);
p                1317 drivers/media/usb/uvc/uvc_driver.c 		if (buffer[23+p+n] != 0)
p                1318 drivers/media/usb/uvc/uvc_driver.c 			usb_string(udev, buffer[23+p+n], unit->name,
p                1685 drivers/media/usb/uvc/uvc_driver.c 	char *p = buffer;
p                1693 drivers/media/usb/uvc/uvc_driver.c 			p += sprintf(p, ",");
p                1695 drivers/media/usb/uvc/uvc_driver.c 			p += sprintf(p, "...");
p                1698 drivers/media/usb/uvc/uvc_driver.c 		p += sprintf(p, "%u", term->id);
p                1701 drivers/media/usb/uvc/uvc_driver.c 	return p - buffer;
p                1707 drivers/media/usb/uvc/uvc_driver.c 	char *p = buffer;
p                1709 drivers/media/usb/uvc/uvc_driver.c 	p += uvc_print_terms(&chain->entities, UVC_TERM_INPUT, p);
p                1710 drivers/media/usb/uvc/uvc_driver.c 	p += sprintf(p, " -> ");
p                1711 drivers/media/usb/uvc/uvc_driver.c 	uvc_print_terms(&chain->entities, UVC_TERM_OUTPUT, p);
p                1891 drivers/media/usb/uvc/uvc_driver.c 	struct list_head *p, *n;
p                1903 drivers/media/usb/uvc/uvc_driver.c 	list_for_each_safe(p, n, &dev->chains) {
p                1905 drivers/media/usb/uvc/uvc_driver.c 		chain = list_entry(p, struct uvc_video_chain, list);
p                1909 drivers/media/usb/uvc/uvc_driver.c 	list_for_each_safe(p, n, &dev->entities) {
p                1911 drivers/media/usb/uvc/uvc_driver.c 		entity = list_entry(p, struct uvc_entity, list);
p                1918 drivers/media/usb/uvc/uvc_driver.c 	list_for_each_safe(p, n, &dev->streams) {
p                1920 drivers/media/usb/uvc/uvc_driver.c 		streaming = list_entry(p, struct uvc_streaming, list);
p                1316 drivers/media/usb/uvc/uvc_v4l2.c 	struct uvc_xu_control_mapping32 *p = (void *)kp;
p                1320 drivers/media/usb/uvc/uvc_v4l2.c 	if (copy_from_user(p, up, sizeof(*p)))
p                1323 drivers/media/usb/uvc/uvc_v4l2.c 	count = p->menu_count;
p                1324 drivers/media/usb/uvc/uvc_v4l2.c 	info = p->menu_info;
p                 908 drivers/media/usb/zr364xx/zr364xx.c 			  struct v4l2_requestbuffers *p)
p                 914 drivers/media/usb/zr364xx/zr364xx.c 	return videobuf_reqbufs(&cam->vb_vidq, p);
p                 919 drivers/media/usb/zr364xx/zr364xx.c 				struct v4l2_buffer *p)
p                 923 drivers/media/usb/zr364xx/zr364xx.c 	rc = videobuf_querybuf(&cam->vb_vidq, p);
p                 929 drivers/media/usb/zr364xx/zr364xx.c 				struct v4l2_buffer *p)
p                 936 drivers/media/usb/zr364xx/zr364xx.c 	rc = videobuf_qbuf(&cam->vb_vidq, p);
p                 942 drivers/media/usb/zr364xx/zr364xx.c 				struct v4l2_buffer *p)
p                 949 drivers/media/usb/zr364xx/zr364xx.c 	rc = videobuf_dqbuf(&cam->vb_vidq, p, file->f_flags & O_NONBLOCK);
p                1093 drivers/media/v4l2-core/tuner-core.c 	const char *p;
p                1097 drivers/media/v4l2-core/tuner-core.c 		p = "radio";
p                1100 drivers/media/v4l2-core/tuner-core.c 		p = "digital TV";
p                1104 drivers/media/v4l2-core/tuner-core.c 		p = "analog TV";
p                1114 drivers/media/v4l2-core/tuner-core.c 	pr_info("Tuner mode:      %s%s\n", p,
p                 159 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 	compat_caddr_t p;
p                 175 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 	if (get_user(p, &p32->clips))
p                 177 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 	uclips = compat_ptr(p);
p                 200 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 	compat_caddr_t p;
p                 215 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 	if (get_user(p, &p32->clips))
p                 217 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 	uclips = compat_ptr(p);
p                 498 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 	compat_ulong_t p;
p                 513 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 		if (get_user(p, &p32->m.userptr) ||
p                 514 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 		    put_user((unsigned long)compat_ptr(p), &p64->m.userptr))
p                 530 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 	unsigned long p;
p                 545 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 		if (get_user(p, &p64->m.userptr) ||
p                 546 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 		    put_user((compat_ulong_t)ptr_to_compat((void __user *)p),
p                 594 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 	compat_caddr_t p;
p                 632 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 		if (get_user(p, &p32->m.planes))
p                 635 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 		uplane32 = compat_ptr(p);
p                 692 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 	compat_caddr_t p;
p                 731 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 		if (get_user(p, &p32->m.planes))
p                 733 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 		uplane32 = compat_ptr(p);
p                 915 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 	compat_caddr_t p;
p                 930 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 	if (get_user(p, &p32->controls))
p                 932 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 	ucontrols = compat_ptr(p);
p                 953 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 			if (get_user(p, &ucontrols->string))
p                 955 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 			s = compat_ptr(p);
p                 973 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 	compat_caddr_t p;
p                 995 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 	if (get_user(p, &p32->controls))
p                 997 drivers/media/v4l2-core/v4l2-compat-ioctl32.c 	ucontrols = compat_ptr(p);
p                1523 drivers/media/v4l2-core/v4l2-ctrls.c 		return !memcmp(ptr1.p + idx, ptr2.p + idx, ctrl->elem_size);
p                1531 drivers/media/v4l2-core/v4l2-ctrls.c 	void *p = ptr.p + idx * ctrl->elem_size;
p                1533 drivers/media/v4l2-core/v4l2-ctrls.c 	memset(p, 0, ctrl->elem_size);
p                1542 drivers/media/v4l2-core/v4l2-ctrls.c 		p_mpeg2_slice_params = p;
p                1675 drivers/media/v4l2-core/v4l2-ctrls.c 	void *p = ptr.p + idx * ctrl->elem_size;
p                1679 drivers/media/v4l2-core/v4l2-ctrls.c 		p_mpeg2_slice_params = p;
p                1734 drivers/media/v4l2-core/v4l2-ctrls.c 		p_vp8_frame_header = p;
p                1843 drivers/media/v4l2-core/v4l2-ctrls.c 		return copy_to_user(c->ptr, ptr.p, c->size) ?
p                1909 drivers/media/v4l2-core/v4l2-ctrls.c 		ret = copy_from_user(ptr.p, c->ptr, c->size) ? -EFAULT : 0;
p                1958 drivers/media/v4l2-core/v4l2-ctrls.c 	memcpy(to.p, from.p, ctrl->elems * ctrl->elem_size);
p                2300 drivers/media/v4l2-core/v4l2-ctrls.c 		new_ref->p_req.p = &new_ref[1];
p                2500 drivers/media/v4l2-core/v4l2-ctrls.c 		ctrl->p_new.p = data;
p                2501 drivers/media/v4l2-core/v4l2-ctrls.c 		ctrl->p_cur.p = data + tot_ctrl_size;
p                2503 drivers/media/v4l2-core/v4l2-ctrls.c 		ctrl->p_new.p = &ctrl->val;
p                2504 drivers/media/v4l2-core/v4l2-ctrls.c 		ctrl->p_cur.p = &ctrl->cur.val;
p                1039 drivers/media/v4l2-core/v4l2-fwnode.c 				      const struct v4l2_fwnode_int_props *p)
p                1044 drivers/media/v4l2-core/v4l2-fwnode.c 	const char *prop = p->name;
p                1045 drivers/media/v4l2-core/v4l2-fwnode.c 	const char * const *props = p->props;
p                1046 drivers/media/v4l2-core/v4l2-fwnode.c 	unsigned int nprops = p->nprops;
p                  34 drivers/media/v4l2-core/v4l2-ioctl.c #define CLEAR_AFTER_FIELD(p, field) \
p                  35 drivers/media/v4l2-core/v4l2-ioctl.c 	memset((u8 *)(p) + offsetof(typeof(*(p)), field) + sizeof((p)->field), \
p                  36 drivers/media/v4l2-core/v4l2-ioctl.c 	0, sizeof(*(p)) - offsetof(typeof(*(p)), field) - sizeof((p)->field))
p                 211 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_capability *p = arg;
p                 214 drivers/media/v4l2-core/v4l2-ioctl.c 		(int)sizeof(p->driver), p->driver,
p                 215 drivers/media/v4l2-core/v4l2-ioctl.c 		(int)sizeof(p->card), p->card,
p                 216 drivers/media/v4l2-core/v4l2-ioctl.c 		(int)sizeof(p->bus_info), p->bus_info,
p                 217 drivers/media/v4l2-core/v4l2-ioctl.c 		p->version, p->capabilities, p->device_caps);
p                 222 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_input *p = arg;
p                 225 drivers/media/v4l2-core/v4l2-ioctl.c 		p->index, (int)sizeof(p->name), p->name, p->type, p->audioset,
p                 226 drivers/media/v4l2-core/v4l2-ioctl.c 		p->tuner, (unsigned long long)p->std, p->status,
p                 227 drivers/media/v4l2-core/v4l2-ioctl.c 		p->capabilities);
p                 232 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_output *p = arg;
p                 235 drivers/media/v4l2-core/v4l2-ioctl.c 		p->index, (int)sizeof(p->name), p->name, p->type, p->audioset,
p                 236 drivers/media/v4l2-core/v4l2-ioctl.c 		p->modulator, (unsigned long long)p->std, p->capabilities);
p                 241 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_audio *p = arg;
p                 244 drivers/media/v4l2-core/v4l2-ioctl.c 		pr_cont("index=%u, mode=0x%x\n", p->index, p->mode);
p                 247 drivers/media/v4l2-core/v4l2-ioctl.c 			p->index, (int)sizeof(p->name), p->name,
p                 248 drivers/media/v4l2-core/v4l2-ioctl.c 			p->capability, p->mode);
p                 253 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_audioout *p = arg;
p                 256 drivers/media/v4l2-core/v4l2-ioctl.c 		pr_cont("index=%u\n", p->index);
p                 259 drivers/media/v4l2-core/v4l2-ioctl.c 			p->index, (int)sizeof(p->name), p->name,
p                 260 drivers/media/v4l2-core/v4l2-ioctl.c 			p->capability, p->mode);
p                 265 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_fmtdesc *p = arg;
p                 268 drivers/media/v4l2-core/v4l2-ioctl.c 		p->index, prt_names(p->type, v4l2_type_names),
p                 269 drivers/media/v4l2-core/v4l2-ioctl.c 		p->flags, (p->pixelformat & 0xff),
p                 270 drivers/media/v4l2-core/v4l2-ioctl.c 		(p->pixelformat >>  8) & 0xff,
p                 271 drivers/media/v4l2-core/v4l2-ioctl.c 		(p->pixelformat >> 16) & 0xff,
p                 272 drivers/media/v4l2-core/v4l2-ioctl.c 		(p->pixelformat >> 24) & 0xff,
p                 273 drivers/media/v4l2-core/v4l2-ioctl.c 		(int)sizeof(p->description), p->description);
p                 278 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_format *p = arg;
p                 289 drivers/media/v4l2-core/v4l2-ioctl.c 	pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
p                 290 drivers/media/v4l2-core/v4l2-ioctl.c 	switch (p->type) {
p                 293 drivers/media/v4l2-core/v4l2-ioctl.c 		pix = &p->fmt.pix;
p                 307 drivers/media/v4l2-core/v4l2-ioctl.c 		mp = &p->fmt.pix_mp;
p                 325 drivers/media/v4l2-core/v4l2-ioctl.c 		win = &p->fmt.win;
p                 337 drivers/media/v4l2-core/v4l2-ioctl.c 		vbi = &p->fmt.vbi;
p                 350 drivers/media/v4l2-core/v4l2-ioctl.c 		sliced = &p->fmt.sliced;
p                 360 drivers/media/v4l2-core/v4l2-ioctl.c 		sdr = &p->fmt.sdr;
p                 369 drivers/media/v4l2-core/v4l2-ioctl.c 		meta = &p->fmt.meta;
p                 382 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_framebuffer *p = arg;
p                 385 drivers/media/v4l2-core/v4l2-ioctl.c 			p->capability, p->flags, p->base,
p                 386 drivers/media/v4l2-core/v4l2-ioctl.c 			p->fmt.width, p->fmt.height,
p                 387 drivers/media/v4l2-core/v4l2-ioctl.c 			(p->fmt.pixelformat & 0xff),
p                 388 drivers/media/v4l2-core/v4l2-ioctl.c 			(p->fmt.pixelformat >>  8) & 0xff,
p                 389 drivers/media/v4l2-core/v4l2-ioctl.c 			(p->fmt.pixelformat >> 16) & 0xff,
p                 390 drivers/media/v4l2-core/v4l2-ioctl.c 			(p->fmt.pixelformat >> 24) & 0xff,
p                 391 drivers/media/v4l2-core/v4l2-ioctl.c 			p->fmt.bytesperline, p->fmt.sizeimage,
p                 392 drivers/media/v4l2-core/v4l2-ioctl.c 			p->fmt.colorspace);
p                 402 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_modulator *p = arg;
p                 405 drivers/media/v4l2-core/v4l2-ioctl.c 		pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans);
p                 408 drivers/media/v4l2-core/v4l2-ioctl.c 			p->index, (int)sizeof(p->name), p->name, p->capability,
p                 409 drivers/media/v4l2-core/v4l2-ioctl.c 			p->rangelow, p->rangehigh, p->txsubchans);
p                 414 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_tuner *p = arg;
p                 417 drivers/media/v4l2-core/v4l2-ioctl.c 		pr_cont("index=%u, audmode=%u\n", p->index, p->audmode);
p                 420 drivers/media/v4l2-core/v4l2-ioctl.c 			p->index, (int)sizeof(p->name), p->name, p->type,
p                 421 drivers/media/v4l2-core/v4l2-ioctl.c 			p->capability, p->rangelow,
p                 422 drivers/media/v4l2-core/v4l2-ioctl.c 			p->rangehigh, p->signal, p->afc,
p                 423 drivers/media/v4l2-core/v4l2-ioctl.c 			p->rxsubchans, p->audmode);
p                 428 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_frequency *p = arg;
p                 431 drivers/media/v4l2-core/v4l2-ioctl.c 				p->tuner, p->type, p->frequency);
p                 436 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_standard *p = arg;
p                 439 drivers/media/v4l2-core/v4l2-ioctl.c 		p->index,
p                 440 drivers/media/v4l2-core/v4l2-ioctl.c 		(unsigned long long)p->id, (int)sizeof(p->name), p->name,
p                 441 drivers/media/v4l2-core/v4l2-ioctl.c 		p->frameperiod.numerator,
p                 442 drivers/media/v4l2-core/v4l2-ioctl.c 		p->frameperiod.denominator,
p                 443 drivers/media/v4l2-core/v4l2-ioctl.c 		p->framelines);
p                 453 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_hw_freq_seek *p = arg;
p                 456 drivers/media/v4l2-core/v4l2-ioctl.c 		p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing,
p                 457 drivers/media/v4l2-core/v4l2-ioctl.c 		p->rangelow, p->rangehigh);
p                 462 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_requestbuffers *p = arg;
p                 465 drivers/media/v4l2-core/v4l2-ioctl.c 		p->count,
p                 466 drivers/media/v4l2-core/v4l2-ioctl.c 		prt_names(p->type, v4l2_type_names),
p                 467 drivers/media/v4l2-core/v4l2-ioctl.c 		prt_names(p->memory, v4l2_memory_names));
p                 472 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_buffer *p = arg;
p                 473 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_timecode *tc = &p->timecode;
p                 478 drivers/media/v4l2-core/v4l2-ioctl.c 			p->timestamp.tv_sec / 3600,
p                 479 drivers/media/v4l2-core/v4l2-ioctl.c 			(int)(p->timestamp.tv_sec / 60) % 60,
p                 480 drivers/media/v4l2-core/v4l2-ioctl.c 			(int)(p->timestamp.tv_sec % 60),
p                 481 drivers/media/v4l2-core/v4l2-ioctl.c 			(long)p->timestamp.tv_usec,
p                 482 drivers/media/v4l2-core/v4l2-ioctl.c 			p->index,
p                 483 drivers/media/v4l2-core/v4l2-ioctl.c 			prt_names(p->type, v4l2_type_names), p->request_fd,
p                 484 drivers/media/v4l2-core/v4l2-ioctl.c 			p->flags, prt_names(p->field, v4l2_field_names),
p                 485 drivers/media/v4l2-core/v4l2-ioctl.c 			p->sequence, prt_names(p->memory, v4l2_memory_names));
p                 487 drivers/media/v4l2-core/v4l2-ioctl.c 	if (V4L2_TYPE_IS_MULTIPLANAR(p->type) && p->m.planes) {
p                 489 drivers/media/v4l2-core/v4l2-ioctl.c 		for (i = 0; i < p->length; ++i) {
p                 490 drivers/media/v4l2-core/v4l2-ioctl.c 			plane = &p->m.planes[i];
p                 498 drivers/media/v4l2-core/v4l2-ioctl.c 			p->bytesused, p->m.userptr, p->length);
p                 508 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_exportbuffer *p = arg;
p                 511 drivers/media/v4l2-core/v4l2-ioctl.c 		p->fd, prt_names(p->type, v4l2_type_names),
p                 512 drivers/media/v4l2-core/v4l2-ioctl.c 		p->index, p->plane, p->flags);
p                 517 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_create_buffers *p = arg;
p                 520 drivers/media/v4l2-core/v4l2-ioctl.c 			p->index, p->count,
p                 521 drivers/media/v4l2-core/v4l2-ioctl.c 			prt_names(p->memory, v4l2_memory_names));
p                 522 drivers/media/v4l2-core/v4l2-ioctl.c 	v4l_print_format(&p->format, write_only);
p                 527 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_streamparm *p = arg;
p                 529 drivers/media/v4l2-core/v4l2-ioctl.c 	pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
p                 531 drivers/media/v4l2-core/v4l2-ioctl.c 	if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
p                 532 drivers/media/v4l2-core/v4l2-ioctl.c 	    p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
p                 533 drivers/media/v4l2-core/v4l2-ioctl.c 		const struct v4l2_captureparm *c = &p->parm.capture;
p                 539 drivers/media/v4l2-core/v4l2-ioctl.c 	} else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
p                 540 drivers/media/v4l2-core/v4l2-ioctl.c 		   p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
p                 541 drivers/media/v4l2-core/v4l2-ioctl.c 		const struct v4l2_outputparm *c = &p->parm.output;
p                 554 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_queryctrl *p = arg;
p                 557 drivers/media/v4l2-core/v4l2-ioctl.c 			p->id, p->type, (int)sizeof(p->name), p->name,
p                 558 drivers/media/v4l2-core/v4l2-ioctl.c 			p->minimum, p->maximum,
p                 559 drivers/media/v4l2-core/v4l2-ioctl.c 			p->step, p->default_value, p->flags);
p                 564 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_query_ext_ctrl *p = arg;
p                 567 drivers/media/v4l2-core/v4l2-ioctl.c 			p->id, p->type, (int)sizeof(p->name), p->name,
p                 568 drivers/media/v4l2-core/v4l2-ioctl.c 			p->minimum, p->maximum,
p                 569 drivers/media/v4l2-core/v4l2-ioctl.c 			p->step, p->default_value, p->flags,
p                 570 drivers/media/v4l2-core/v4l2-ioctl.c 			p->elem_size, p->elems, p->nr_of_dims,
p                 571 drivers/media/v4l2-core/v4l2-ioctl.c 			p->dims[0], p->dims[1], p->dims[2], p->dims[3]);
p                 576 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_querymenu *p = arg;
p                 578 drivers/media/v4l2-core/v4l2-ioctl.c 	pr_cont("id=0x%x, index=%d\n", p->id, p->index);
p                 583 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_control *p = arg;
p                 585 drivers/media/v4l2-core/v4l2-ioctl.c 	pr_cont("id=0x%x, value=%d\n", p->id, p->value);
p                 590 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_ext_controls *p = arg;
p                 594 drivers/media/v4l2-core/v4l2-ioctl.c 			p->which, p->count, p->error_idx, p->request_fd);
p                 595 drivers/media/v4l2-core/v4l2-ioctl.c 	for (i = 0; i < p->count; i++) {
p                 596 drivers/media/v4l2-core/v4l2-ioctl.c 		if (!p->controls[i].size)
p                 598 drivers/media/v4l2-core/v4l2-ioctl.c 				p->controls[i].id, p->controls[i].value);
p                 601 drivers/media/v4l2-core/v4l2-ioctl.c 				p->controls[i].id, p->controls[i].size);
p                 608 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_cropcap *p = arg;
p                 611 drivers/media/v4l2-core/v4l2-ioctl.c 		prt_names(p->type, v4l2_type_names),
p                 612 drivers/media/v4l2-core/v4l2-ioctl.c 		p->bounds.width, p->bounds.height,
p                 613 drivers/media/v4l2-core/v4l2-ioctl.c 		p->bounds.left, p->bounds.top,
p                 614 drivers/media/v4l2-core/v4l2-ioctl.c 		p->defrect.width, p->defrect.height,
p                 615 drivers/media/v4l2-core/v4l2-ioctl.c 		p->defrect.left, p->defrect.top,
p                 616 drivers/media/v4l2-core/v4l2-ioctl.c 		p->pixelaspect.numerator, p->pixelaspect.denominator);
p                 621 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_crop *p = arg;
p                 624 drivers/media/v4l2-core/v4l2-ioctl.c 		prt_names(p->type, v4l2_type_names),
p                 625 drivers/media/v4l2-core/v4l2-ioctl.c 		p->c.width, p->c.height,
p                 626 drivers/media/v4l2-core/v4l2-ioctl.c 		p->c.left, p->c.top);
p                 631 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_selection *p = arg;
p                 634 drivers/media/v4l2-core/v4l2-ioctl.c 		prt_names(p->type, v4l2_type_names),
p                 635 drivers/media/v4l2-core/v4l2-ioctl.c 		p->target, p->flags,
p                 636 drivers/media/v4l2-core/v4l2-ioctl.c 		p->r.width, p->r.height, p->r.left, p->r.top);
p                 641 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_jpegcompression *p = arg;
p                 644 drivers/media/v4l2-core/v4l2-ioctl.c 		p->quality, p->APPn, p->APP_len,
p                 645 drivers/media/v4l2-core/v4l2-ioctl.c 		p->COM_len, p->jpeg_markers);
p                 650 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_enc_idx *p = arg;
p                 653 drivers/media/v4l2-core/v4l2-ioctl.c 			p->entries, p->entries_cap);
p                 658 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_encoder_cmd *p = arg;
p                 661 drivers/media/v4l2-core/v4l2-ioctl.c 			p->cmd, p->flags);
p                 666 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_decoder_cmd *p = arg;
p                 668 drivers/media/v4l2-core/v4l2-ioctl.c 	pr_cont("cmd=%d, flags=0x%x\n", p->cmd, p->flags);
p                 670 drivers/media/v4l2-core/v4l2-ioctl.c 	if (p->cmd == V4L2_DEC_CMD_START)
p                 672 drivers/media/v4l2-core/v4l2-ioctl.c 				p->start.speed, p->start.format);
p                 673 drivers/media/v4l2-core/v4l2-ioctl.c 	else if (p->cmd == V4L2_DEC_CMD_STOP)
p                 674 drivers/media/v4l2-core/v4l2-ioctl.c 		pr_info("pts=%llu\n", p->stop.pts);
p                 679 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_dbg_chip_info *p = arg;
p                 681 drivers/media/v4l2-core/v4l2-ioctl.c 	pr_cont("type=%u, ", p->match.type);
p                 682 drivers/media/v4l2-core/v4l2-ioctl.c 	if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER)
p                 684 drivers/media/v4l2-core/v4l2-ioctl.c 				(int)sizeof(p->match.name), p->match.name);
p                 686 drivers/media/v4l2-core/v4l2-ioctl.c 		pr_cont("addr=%u, ", p->match.addr);
p                 687 drivers/media/v4l2-core/v4l2-ioctl.c 	pr_cont("name=%.*s\n", (int)sizeof(p->name), p->name);
p                 692 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_dbg_register *p = arg;
p                 694 drivers/media/v4l2-core/v4l2-ioctl.c 	pr_cont("type=%u, ", p->match.type);
p                 695 drivers/media/v4l2-core/v4l2-ioctl.c 	if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER)
p                 697 drivers/media/v4l2-core/v4l2-ioctl.c 				(int)sizeof(p->match.name), p->match.name);
p                 699 drivers/media/v4l2-core/v4l2-ioctl.c 		pr_cont("addr=%u, ", p->match.addr);
p                 701 drivers/media/v4l2-core/v4l2-ioctl.c 			p->reg, p->val);
p                 706 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_dv_timings *p = arg;
p                 708 drivers/media/v4l2-core/v4l2-ioctl.c 	switch (p->type) {
p                 711 drivers/media/v4l2-core/v4l2-ioctl.c 				p->bt.interlaced, p->bt.pixelclock,
p                 712 drivers/media/v4l2-core/v4l2-ioctl.c 				p->bt.width, p->bt.height,
p                 713 drivers/media/v4l2-core/v4l2-ioctl.c 				p->bt.polarities, p->bt.hfrontporch,
p                 714 drivers/media/v4l2-core/v4l2-ioctl.c 				p->bt.hsync, p->bt.hbackporch,
p                 715 drivers/media/v4l2-core/v4l2-ioctl.c 				p->bt.vfrontporch, p->bt.vsync,
p                 716 drivers/media/v4l2-core/v4l2-ioctl.c 				p->bt.vbackporch, p->bt.il_vfrontporch,
p                 717 drivers/media/v4l2-core/v4l2-ioctl.c 				p->bt.il_vsync, p->bt.il_vbackporch,
p                 718 drivers/media/v4l2-core/v4l2-ioctl.c 				p->bt.standards, p->bt.flags);
p                 721 drivers/media/v4l2-core/v4l2-ioctl.c 		pr_cont("type=%d\n", p->type);
p                 728 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_enum_dv_timings *p = arg;
p                 730 drivers/media/v4l2-core/v4l2-ioctl.c 	pr_cont("index=%u, ", p->index);
p                 731 drivers/media/v4l2-core/v4l2-ioctl.c 	v4l_print_dv_timings(&p->timings, write_only);
p                 736 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_dv_timings_cap *p = arg;
p                 738 drivers/media/v4l2-core/v4l2-ioctl.c 	switch (p->type) {
p                 741 drivers/media/v4l2-core/v4l2-ioctl.c 			p->bt.min_width, p->bt.max_width,
p                 742 drivers/media/v4l2-core/v4l2-ioctl.c 			p->bt.min_height, p->bt.max_height,
p                 743 drivers/media/v4l2-core/v4l2-ioctl.c 			p->bt.min_pixelclock, p->bt.max_pixelclock,
p                 744 drivers/media/v4l2-core/v4l2-ioctl.c 			p->bt.standards, p->bt.capabilities);
p                 747 drivers/media/v4l2-core/v4l2-ioctl.c 		pr_cont("type=%u\n", p->type);
p                 754 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_frmsizeenum *p = arg;
p                 757 drivers/media/v4l2-core/v4l2-ioctl.c 			p->index,
p                 758 drivers/media/v4l2-core/v4l2-ioctl.c 			(p->pixel_format & 0xff),
p                 759 drivers/media/v4l2-core/v4l2-ioctl.c 			(p->pixel_format >>  8) & 0xff,
p                 760 drivers/media/v4l2-core/v4l2-ioctl.c 			(p->pixel_format >> 16) & 0xff,
p                 761 drivers/media/v4l2-core/v4l2-ioctl.c 			(p->pixel_format >> 24) & 0xff,
p                 762 drivers/media/v4l2-core/v4l2-ioctl.c 			p->type);
p                 763 drivers/media/v4l2-core/v4l2-ioctl.c 	switch (p->type) {
p                 766 drivers/media/v4l2-core/v4l2-ioctl.c 			p->discrete.width, p->discrete.height);
p                 770 drivers/media/v4l2-core/v4l2-ioctl.c 				p->stepwise.min_width,
p                 771 drivers/media/v4l2-core/v4l2-ioctl.c 				p->stepwise.min_height,
p                 772 drivers/media/v4l2-core/v4l2-ioctl.c 				p->stepwise.max_width,
p                 773 drivers/media/v4l2-core/v4l2-ioctl.c 				p->stepwise.max_height,
p                 774 drivers/media/v4l2-core/v4l2-ioctl.c 				p->stepwise.step_width,
p                 775 drivers/media/v4l2-core/v4l2-ioctl.c 				p->stepwise.step_height);
p                 787 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_frmivalenum *p = arg;
p                 790 drivers/media/v4l2-core/v4l2-ioctl.c 			p->index,
p                 791 drivers/media/v4l2-core/v4l2-ioctl.c 			(p->pixel_format & 0xff),
p                 792 drivers/media/v4l2-core/v4l2-ioctl.c 			(p->pixel_format >>  8) & 0xff,
p                 793 drivers/media/v4l2-core/v4l2-ioctl.c 			(p->pixel_format >> 16) & 0xff,
p                 794 drivers/media/v4l2-core/v4l2-ioctl.c 			(p->pixel_format >> 24) & 0xff,
p                 795 drivers/media/v4l2-core/v4l2-ioctl.c 			p->width, p->height, p->type);
p                 796 drivers/media/v4l2-core/v4l2-ioctl.c 	switch (p->type) {
p                 799 drivers/media/v4l2-core/v4l2-ioctl.c 				p->discrete.numerator,
p                 800 drivers/media/v4l2-core/v4l2-ioctl.c 				p->discrete.denominator);
p                 804 drivers/media/v4l2-core/v4l2-ioctl.c 				p->stepwise.min.numerator,
p                 805 drivers/media/v4l2-core/v4l2-ioctl.c 				p->stepwise.min.denominator,
p                 806 drivers/media/v4l2-core/v4l2-ioctl.c 				p->stepwise.max.numerator,
p                 807 drivers/media/v4l2-core/v4l2-ioctl.c 				p->stepwise.max.denominator,
p                 808 drivers/media/v4l2-core/v4l2-ioctl.c 				p->stepwise.step.numerator,
p                 809 drivers/media/v4l2-core/v4l2-ioctl.c 				p->stepwise.step.denominator);
p                 821 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_event *p = arg;
p                 825 drivers/media/v4l2-core/v4l2-ioctl.c 			p->type, p->pending, p->sequence, p->id,
p                 826 drivers/media/v4l2-core/v4l2-ioctl.c 			p->timestamp.tv_sec, p->timestamp.tv_nsec);
p                 827 drivers/media/v4l2-core/v4l2-ioctl.c 	switch (p->type) {
p                 830 drivers/media/v4l2-core/v4l2-ioctl.c 			prt_names(p->u.vsync.field, v4l2_field_names));
p                 833 drivers/media/v4l2-core/v4l2-ioctl.c 		c = &p->u.ctrl;
p                 846 drivers/media/v4l2-core/v4l2-ioctl.c 			p->u.frame_sync.frame_sequence);
p                 853 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_event_subscription *p = arg;
p                 856 drivers/media/v4l2-core/v4l2-ioctl.c 			p->type, p->id, p->flags);
p                 861 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_sliced_vbi_cap *p = arg;
p                 865 drivers/media/v4l2-core/v4l2-ioctl.c 			prt_names(p->type, v4l2_type_names), p->service_set);
p                 868 drivers/media/v4l2-core/v4l2-ioctl.c 				p->service_lines[0][i],
p                 869 drivers/media/v4l2-core/v4l2-ioctl.c 				p->service_lines[1][i]);
p                 874 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_frequency_band *p = arg;
p                 877 drivers/media/v4l2-core/v4l2-ioctl.c 			p->tuner, p->type, p->index,
p                 878 drivers/media/v4l2-core/v4l2-ioctl.c 			p->capability, p->rangelow,
p                 879 drivers/media/v4l2-core/v4l2-ioctl.c 			p->rangehigh, p->modulation);
p                 884 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_edid *p = arg;
p                 887 drivers/media/v4l2-core/v4l2-ioctl.c 		p->pad, p->start_block, p->blocks);
p                1100 drivers/media/v4l2-core/v4l2-ioctl.c 	u32 *p = arg;
p                1103 drivers/media/v4l2-core/v4l2-ioctl.c 	*p = v4l2_prio_max(vfd->prio);
p                1112 drivers/media/v4l2-core/v4l2-ioctl.c 	u32 *p = arg;
p                1118 drivers/media/v4l2-core/v4l2-ioctl.c 	return v4l2_prio_change(vfd->prio, &vfh->prio, *p);
p                1125 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_input *p = arg;
p                1134 drivers/media/v4l2-core/v4l2-ioctl.c 		p->capabilities |= V4L2_IN_CAP_STD;
p                1136 drivers/media/v4l2-core/v4l2-ioctl.c 	return ops->vidioc_enum_input(file, fh, p);
p                1143 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_output *p = arg;
p                1152 drivers/media/v4l2-core/v4l2-ioctl.c 		p->capabilities |= V4L2_OUT_CAP_STD;
p                1154 drivers/media/v4l2-core/v4l2-ioctl.c 	return ops->vidioc_enum_output(file, fh, p);
p                1405 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_fmtdesc *p = arg;
p                1406 drivers/media/v4l2-core/v4l2-ioctl.c 	int ret = check_fmt(file, p->type);
p                1413 drivers/media/v4l2-core/v4l2-ioctl.c 	switch (p->type) {
p                1419 drivers/media/v4l2-core/v4l2-ioctl.c 		    (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE))
p                1436 drivers/media/v4l2-core/v4l2-ioctl.c 		    (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE))
p                1465 drivers/media/v4l2-core/v4l2-ioctl.c 		v4l_fill_fmtdesc(p);
p                1469 drivers/media/v4l2-core/v4l2-ioctl.c static void v4l_pix_format_touch(struct v4l2_pix_format *p)
p                1476 drivers/media/v4l2-core/v4l2-ioctl.c 	p->field = V4L2_FIELD_NONE;
p                1477 drivers/media/v4l2-core/v4l2-ioctl.c 	p->colorspace = V4L2_COLORSPACE_RAW;
p                1478 drivers/media/v4l2-core/v4l2-ioctl.c 	p->flags = 0;
p                1479 drivers/media/v4l2-core/v4l2-ioctl.c 	p->ycbcr_enc = 0;
p                1480 drivers/media/v4l2-core/v4l2-ioctl.c 	p->quantization = 0;
p                1481 drivers/media/v4l2-core/v4l2-ioctl.c 	p->xfer_func = 0;
p                1487 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_format *p = arg;
p                1489 drivers/media/v4l2-core/v4l2-ioctl.c 	int ret = check_fmt(file, p->type);
p                1500 drivers/media/v4l2-core/v4l2-ioctl.c 	switch (p->type) {
p                1503 drivers/media/v4l2-core/v4l2-ioctl.c 		struct v4l2_clip __user *clips = p->fmt.win.clips;
p                1504 drivers/media/v4l2-core/v4l2-ioctl.c 		u32 clipcount = p->fmt.win.clipcount;
p                1505 drivers/media/v4l2-core/v4l2-ioctl.c 		void __user *bitmap = p->fmt.win.bitmap;
p                1507 drivers/media/v4l2-core/v4l2-ioctl.c 		memset(&p->fmt, 0, sizeof(p->fmt));
p                1508 drivers/media/v4l2-core/v4l2-ioctl.c 		p->fmt.win.clips = clips;
p                1509 drivers/media/v4l2-core/v4l2-ioctl.c 		p->fmt.win.clipcount = clipcount;
p                1510 drivers/media/v4l2-core/v4l2-ioctl.c 		p->fmt.win.bitmap = bitmap;
p                1514 drivers/media/v4l2-core/v4l2-ioctl.c 		memset(&p->fmt, 0, sizeof(p->fmt));
p                1518 drivers/media/v4l2-core/v4l2-ioctl.c 	switch (p->type) {
p                1522 drivers/media/v4l2-core/v4l2-ioctl.c 		p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
p                1525 drivers/media/v4l2-core/v4l2-ioctl.c 		p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
p                1527 drivers/media/v4l2-core/v4l2-ioctl.c 			v4l_pix_format_touch(&p->fmt.pix);
p                1540 drivers/media/v4l2-core/v4l2-ioctl.c 		p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
p                1543 drivers/media/v4l2-core/v4l2-ioctl.c 		p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
p                1568 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_format *p = arg;
p                1570 drivers/media/v4l2-core/v4l2-ioctl.c 	int ret = check_fmt(file, p->type);
p                1579 drivers/media/v4l2-core/v4l2-ioctl.c 	v4l_sanitize_format(p);
p                1581 drivers/media/v4l2-core/v4l2-ioctl.c 	switch (p->type) {
p                1585 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.pix);
p                1588 drivers/media/v4l2-core/v4l2-ioctl.c 		p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
p                1590 drivers/media/v4l2-core/v4l2-ioctl.c 			v4l_pix_format_touch(&p->fmt.pix);
p                1595 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
p                1596 drivers/media/v4l2-core/v4l2-ioctl.c 		for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
p                1597 drivers/media/v4l2-core/v4l2-ioctl.c 			CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
p                1603 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.win);
p                1608 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
p                1613 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
p                1618 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.pix);
p                1621 drivers/media/v4l2-core/v4l2-ioctl.c 		p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
p                1626 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
p                1627 drivers/media/v4l2-core/v4l2-ioctl.c 		for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
p                1628 drivers/media/v4l2-core/v4l2-ioctl.c 			CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
p                1634 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.win);
p                1639 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
p                1644 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
p                1649 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
p                1654 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
p                1659 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.meta);
p                1664 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.meta);
p                1673 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_format *p = arg;
p                1675 drivers/media/v4l2-core/v4l2-ioctl.c 	int ret = check_fmt(file, p->type);
p                1681 drivers/media/v4l2-core/v4l2-ioctl.c 	v4l_sanitize_format(p);
p                1683 drivers/media/v4l2-core/v4l2-ioctl.c 	switch (p->type) {
p                1687 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.pix);
p                1690 drivers/media/v4l2-core/v4l2-ioctl.c 		p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
p                1692 drivers/media/v4l2-core/v4l2-ioctl.c 			v4l_pix_format_touch(&p->fmt.pix);
p                1697 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
p                1698 drivers/media/v4l2-core/v4l2-ioctl.c 		for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
p                1699 drivers/media/v4l2-core/v4l2-ioctl.c 			CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
p                1705 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.win);
p                1710 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
p                1715 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
p                1720 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.pix);
p                1723 drivers/media/v4l2-core/v4l2-ioctl.c 		p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
p                1728 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
p                1729 drivers/media/v4l2-core/v4l2-ioctl.c 		for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
p                1730 drivers/media/v4l2-core/v4l2-ioctl.c 			CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
p                1736 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.win);
p                1741 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
p                1746 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
p                1751 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
p                1756 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
p                1761 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.meta);
p                1766 drivers/media/v4l2-core/v4l2-ioctl.c 		CLEAR_AFTER_FIELD(p, fmt.meta);
p                1788 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_tuner *p = arg;
p                1791 drivers/media/v4l2-core/v4l2-ioctl.c 	p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
p                1793 drivers/media/v4l2-core/v4l2-ioctl.c 	err = ops->vidioc_g_tuner(file, fh, p);
p                1795 drivers/media/v4l2-core/v4l2-ioctl.c 		p->capability |= V4L2_TUNER_CAP_FREQ_BANDS;
p                1803 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_tuner *p = arg;
p                1809 drivers/media/v4l2-core/v4l2-ioctl.c 	p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
p                1811 drivers/media/v4l2-core/v4l2-ioctl.c 	return ops->vidioc_s_tuner(file, fh, p);
p                1818 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_modulator *p = arg;
p                1822 drivers/media/v4l2-core/v4l2-ioctl.c 		p->type = V4L2_TUNER_RADIO;
p                1824 drivers/media/v4l2-core/v4l2-ioctl.c 	err = ops->vidioc_g_modulator(file, fh, p);
p                1826 drivers/media/v4l2-core/v4l2-ioctl.c 		p->capability |= V4L2_TUNER_CAP_FREQ_BANDS;
p                1834 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_modulator *p = arg;
p                1837 drivers/media/v4l2-core/v4l2-ioctl.c 		p->type = V4L2_TUNER_RADIO;
p                1839 drivers/media/v4l2-core/v4l2-ioctl.c 	return ops->vidioc_s_modulator(file, fh, p);
p                1846 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_frequency *p = arg;
p                1849 drivers/media/v4l2-core/v4l2-ioctl.c 		p->type = V4L2_TUNER_SDR;
p                1851 drivers/media/v4l2-core/v4l2-ioctl.c 		p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
p                1853 drivers/media/v4l2-core/v4l2-ioctl.c 	return ops->vidioc_g_frequency(file, fh, p);
p                1860 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_frequency *p = arg;
p                1868 drivers/media/v4l2-core/v4l2-ioctl.c 		if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF)
p                1873 drivers/media/v4l2-core/v4l2-ioctl.c 		if (type != p->type)
p                1876 drivers/media/v4l2-core/v4l2-ioctl.c 	return ops->vidioc_s_frequency(file, fh, p);
p                1883 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_standard *p = arg;
p                1885 drivers/media/v4l2-core/v4l2-ioctl.c 	return v4l_video_std_enumstd(p, vfd->tvnorms);
p                1910 drivers/media/v4l2-core/v4l2-ioctl.c 	v4l2_std_id *p = arg;
p                1924 drivers/media/v4l2-core/v4l2-ioctl.c 	*p = vfd->tvnorms;
p                1932 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_hw_freq_seek *p = arg;
p                1945 drivers/media/v4l2-core/v4l2-ioctl.c 	if (p->type != type)
p                1947 drivers/media/v4l2-core/v4l2-ioctl.c 	return ops->vidioc_s_hw_freq_seek(file, fh, p);
p                1959 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_requestbuffers *p = arg;
p                1960 drivers/media/v4l2-core/v4l2-ioctl.c 	int ret = check_fmt(file, p->type);
p                1965 drivers/media/v4l2-core/v4l2-ioctl.c 	CLEAR_AFTER_FIELD(p, capabilities);
p                1967 drivers/media/v4l2-core/v4l2-ioctl.c 	return ops->vidioc_reqbufs(file, fh, p);
p                1973 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_buffer *p = arg;
p                1974 drivers/media/v4l2-core/v4l2-ioctl.c 	int ret = check_fmt(file, p->type);
p                1976 drivers/media/v4l2-core/v4l2-ioctl.c 	return ret ? ret : ops->vidioc_querybuf(file, fh, p);
p                1982 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_buffer *p = arg;
p                1983 drivers/media/v4l2-core/v4l2-ioctl.c 	int ret = check_fmt(file, p->type);
p                1985 drivers/media/v4l2-core/v4l2-ioctl.c 	return ret ? ret : ops->vidioc_qbuf(file, fh, p);
p                1991 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_buffer *p = arg;
p                1992 drivers/media/v4l2-core/v4l2-ioctl.c 	int ret = check_fmt(file, p->type);
p                1994 drivers/media/v4l2-core/v4l2-ioctl.c 	return ret ? ret : ops->vidioc_dqbuf(file, fh, p);
p                2031 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_streamparm *p = arg;
p                2033 drivers/media/v4l2-core/v4l2-ioctl.c 	int ret = check_fmt(file, p->type);
p                2038 drivers/media/v4l2-core/v4l2-ioctl.c 		return ops->vidioc_g_parm(file, fh, p);
p                2039 drivers/media/v4l2-core/v4l2-ioctl.c 	if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
p                2040 drivers/media/v4l2-core/v4l2-ioctl.c 	    p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
p                2042 drivers/media/v4l2-core/v4l2-ioctl.c 	p->parm.capture.readbuffers = 2;
p                2045 drivers/media/v4l2-core/v4l2-ioctl.c 		v4l2_video_std_frame_period(std, &p->parm.capture.timeperframe);
p                2052 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_streamparm *p = arg;
p                2053 drivers/media/v4l2-core/v4l2-ioctl.c 	int ret = check_fmt(file, p->type);
p                2059 drivers/media/v4l2-core/v4l2-ioctl.c 	if (V4L2_TYPE_IS_OUTPUT(p->type)) {
p                2060 drivers/media/v4l2-core/v4l2-ioctl.c 		memset(p->parm.output.reserved, 0,
p                2061 drivers/media/v4l2-core/v4l2-ioctl.c 		       sizeof(p->parm.output.reserved));
p                2062 drivers/media/v4l2-core/v4l2-ioctl.c 		p->parm.output.extendedmode = 0;
p                2063 drivers/media/v4l2-core/v4l2-ioctl.c 		p->parm.output.outputmode &= V4L2_MODE_HIGHQUALITY;
p                2065 drivers/media/v4l2-core/v4l2-ioctl.c 		memset(p->parm.capture.reserved, 0,
p                2066 drivers/media/v4l2-core/v4l2-ioctl.c 		       sizeof(p->parm.capture.reserved));
p                2067 drivers/media/v4l2-core/v4l2-ioctl.c 		p->parm.capture.extendedmode = 0;
p                2068 drivers/media/v4l2-core/v4l2-ioctl.c 		p->parm.capture.capturemode &= V4L2_MODE_HIGHQUALITY;
p                2070 drivers/media/v4l2-core/v4l2-ioctl.c 	return ops->vidioc_s_parm(file, fh, p);
p                2077 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_queryctrl *p = arg;
p                2082 drivers/media/v4l2-core/v4l2-ioctl.c 		return v4l2_queryctrl(vfh->ctrl_handler, p);
p                2084 drivers/media/v4l2-core/v4l2-ioctl.c 		return v4l2_queryctrl(vfd->ctrl_handler, p);
p                2086 drivers/media/v4l2-core/v4l2-ioctl.c 		return ops->vidioc_queryctrl(file, fh, p);
p                2094 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_query_ext_ctrl *p = arg;
p                2099 drivers/media/v4l2-core/v4l2-ioctl.c 		return v4l2_query_ext_ctrl(vfh->ctrl_handler, p);
p                2101 drivers/media/v4l2-core/v4l2-ioctl.c 		return v4l2_query_ext_ctrl(vfd->ctrl_handler, p);
p                2103 drivers/media/v4l2-core/v4l2-ioctl.c 		return ops->vidioc_query_ext_ctrl(file, fh, p);
p                2111 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_querymenu *p = arg;
p                2116 drivers/media/v4l2-core/v4l2-ioctl.c 		return v4l2_querymenu(vfh->ctrl_handler, p);
p                2118 drivers/media/v4l2-core/v4l2-ioctl.c 		return v4l2_querymenu(vfd->ctrl_handler, p);
p                2120 drivers/media/v4l2-core/v4l2-ioctl.c 		return ops->vidioc_querymenu(file, fh, p);
p                2128 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_control *p = arg;
p                2135 drivers/media/v4l2-core/v4l2-ioctl.c 		return v4l2_g_ctrl(vfh->ctrl_handler, p);
p                2137 drivers/media/v4l2-core/v4l2-ioctl.c 		return v4l2_g_ctrl(vfd->ctrl_handler, p);
p                2139 drivers/media/v4l2-core/v4l2-ioctl.c 		return ops->vidioc_g_ctrl(file, fh, p);
p                2143 drivers/media/v4l2-core/v4l2-ioctl.c 	ctrls.which = V4L2_CTRL_ID2WHICH(p->id);
p                2146 drivers/media/v4l2-core/v4l2-ioctl.c 	ctrl.id = p->id;
p                2147 drivers/media/v4l2-core/v4l2-ioctl.c 	ctrl.value = p->value;
p                2152 drivers/media/v4l2-core/v4l2-ioctl.c 			p->value = ctrl.value;
p                2162 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_control *p = arg;
p                2169 drivers/media/v4l2-core/v4l2-ioctl.c 		return v4l2_s_ctrl(vfh, vfh->ctrl_handler, p);
p                2171 drivers/media/v4l2-core/v4l2-ioctl.c 		return v4l2_s_ctrl(NULL, vfd->ctrl_handler, p);
p                2173 drivers/media/v4l2-core/v4l2-ioctl.c 		return ops->vidioc_s_ctrl(file, fh, p);
p                2177 drivers/media/v4l2-core/v4l2-ioctl.c 	ctrls.which = V4L2_CTRL_ID2WHICH(p->id);
p                2180 drivers/media/v4l2-core/v4l2-ioctl.c 	ctrl.id = p->id;
p                2181 drivers/media/v4l2-core/v4l2-ioctl.c 	ctrl.value = p->value;
p                2191 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_ext_controls *p = arg;
p                2195 drivers/media/v4l2-core/v4l2-ioctl.c 	p->error_idx = p->count;
p                2198 drivers/media/v4l2-core/v4l2-ioctl.c 					vfd, vfd->v4l2_dev->mdev, p);
p                2201 drivers/media/v4l2-core/v4l2-ioctl.c 					vfd, vfd->v4l2_dev->mdev, p);
p                2204 drivers/media/v4l2-core/v4l2-ioctl.c 	return check_ext_ctrls(p, 0) ? ops->vidioc_g_ext_ctrls(file, fh, p) :
p                2212 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_ext_controls *p = arg;
p                2216 drivers/media/v4l2-core/v4l2-ioctl.c 	p->error_idx = p->count;
p                2219 drivers/media/v4l2-core/v4l2-ioctl.c 					vfd, vfd->v4l2_dev->mdev, p);
p                2222 drivers/media/v4l2-core/v4l2-ioctl.c 					vfd, vfd->v4l2_dev->mdev, p);
p                2225 drivers/media/v4l2-core/v4l2-ioctl.c 	return check_ext_ctrls(p, 0) ? ops->vidioc_s_ext_ctrls(file, fh, p) :
p                2233 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_ext_controls *p = arg;
p                2237 drivers/media/v4l2-core/v4l2-ioctl.c 	p->error_idx = p->count;
p                2240 drivers/media/v4l2-core/v4l2-ioctl.c 					  vfd, vfd->v4l2_dev->mdev, p);
p                2243 drivers/media/v4l2-core/v4l2-ioctl.c 					  vfd, vfd->v4l2_dev->mdev, p);
p                2246 drivers/media/v4l2-core/v4l2-ioctl.c 	return check_ext_ctrls(p, 0) ? ops->vidioc_try_ext_ctrls(file, fh, p) :
p                2262 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_selection *p = arg;
p                2263 drivers/media/v4l2-core/v4l2-ioctl.c 	u32 old_type = p->type;
p                2266 drivers/media/v4l2-core/v4l2-ioctl.c 	if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
p                2267 drivers/media/v4l2-core/v4l2-ioctl.c 		p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
p                2268 drivers/media/v4l2-core/v4l2-ioctl.c 	else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
p                2269 drivers/media/v4l2-core/v4l2-ioctl.c 		p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
p                2270 drivers/media/v4l2-core/v4l2-ioctl.c 	ret = ops->vidioc_g_selection(file, fh, p);
p                2271 drivers/media/v4l2-core/v4l2-ioctl.c 	p->type = old_type;
p                2278 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_selection *p = arg;
p                2279 drivers/media/v4l2-core/v4l2-ioctl.c 	u32 old_type = p->type;
p                2282 drivers/media/v4l2-core/v4l2-ioctl.c 	if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
p                2283 drivers/media/v4l2-core/v4l2-ioctl.c 		p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
p                2284 drivers/media/v4l2-core/v4l2-ioctl.c 	else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
p                2285 drivers/media/v4l2-core/v4l2-ioctl.c 		p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
p                2286 drivers/media/v4l2-core/v4l2-ioctl.c 	ret = ops->vidioc_s_selection(file, fh, p);
p                2287 drivers/media/v4l2-core/v4l2-ioctl.c 	p->type = old_type;
p                2295 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_crop *p = arg;
p                2297 drivers/media/v4l2-core/v4l2-ioctl.c 		.type = p->type,
p                2304 drivers/media/v4l2-core/v4l2-ioctl.c 	if (V4L2_TYPE_IS_OUTPUT(p->type))
p                2317 drivers/media/v4l2-core/v4l2-ioctl.c 		p->c = s.r;
p                2325 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_crop *p = arg;
p                2327 drivers/media/v4l2-core/v4l2-ioctl.c 		.type = p->type,
p                2328 drivers/media/v4l2-core/v4l2-ioctl.c 		.r = p->c,
p                2334 drivers/media/v4l2-core/v4l2-ioctl.c 	if (V4L2_TYPE_IS_OUTPUT(p->type))
p                2350 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_cropcap *p = arg;
p                2351 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_selection s = { .type = p->type };
p                2355 drivers/media/v4l2-core/v4l2-ioctl.c 	p->pixelaspect.numerator = 1;
p                2356 drivers/media/v4l2-core/v4l2-ioctl.c 	p->pixelaspect.denominator = 1;
p                2372 drivers/media/v4l2-core/v4l2-ioctl.c 						&p->pixelaspect);
p                2384 drivers/media/v4l2-core/v4l2-ioctl.c 	if (V4L2_TYPE_IS_OUTPUT(p->type))
p                2396 drivers/media/v4l2-core/v4l2-ioctl.c 	p->bounds = s.r;
p                2407 drivers/media/v4l2-core/v4l2-ioctl.c 	p->defrect = s.r;
p                2432 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_dbg_register *p = arg;
p                2439 drivers/media/v4l2-core/v4l2-ioctl.c 	if (p->match.type == V4L2_CHIP_MATCH_SUBDEV) {
p                2443 drivers/media/v4l2-core/v4l2-ioctl.c 			if (p->match.addr == idx++)
p                2444 drivers/media/v4l2-core/v4l2-ioctl.c 				return v4l2_subdev_call(sd, core, g_register, p);
p                2447 drivers/media/v4l2-core/v4l2-ioctl.c 	if (ops->vidioc_g_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE &&
p                2448 drivers/media/v4l2-core/v4l2-ioctl.c 	    (ops->vidioc_g_chip_info || p->match.addr == 0))
p                2449 drivers/media/v4l2-core/v4l2-ioctl.c 		return ops->vidioc_g_register(file, fh, p);
p                2460 drivers/media/v4l2-core/v4l2-ioctl.c 	const struct v4l2_dbg_register *p = arg;
p                2467 drivers/media/v4l2-core/v4l2-ioctl.c 	if (p->match.type == V4L2_CHIP_MATCH_SUBDEV) {
p                2471 drivers/media/v4l2-core/v4l2-ioctl.c 			if (p->match.addr == idx++)
p                2472 drivers/media/v4l2-core/v4l2-ioctl.c 				return v4l2_subdev_call(sd, core, s_register, p);
p                2475 drivers/media/v4l2-core/v4l2-ioctl.c 	if (ops->vidioc_s_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE &&
p                2476 drivers/media/v4l2-core/v4l2-ioctl.c 	    (ops->vidioc_g_chip_info || p->match.addr == 0))
p                2477 drivers/media/v4l2-core/v4l2-ioctl.c 		return ops->vidioc_s_register(file, fh, p);
p                2489 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_dbg_chip_info *p = arg;
p                2493 drivers/media/v4l2-core/v4l2-ioctl.c 	switch (p->match.type) {
p                2496 drivers/media/v4l2-core/v4l2-ioctl.c 			p->flags |= V4L2_CHIP_FL_WRITABLE;
p                2498 drivers/media/v4l2-core/v4l2-ioctl.c 			p->flags |= V4L2_CHIP_FL_READABLE;
p                2499 drivers/media/v4l2-core/v4l2-ioctl.c 		strscpy(p->name, vfd->v4l2_dev->name, sizeof(p->name));
p                2502 drivers/media/v4l2-core/v4l2-ioctl.c 		if (p->match.addr)
p                2510 drivers/media/v4l2-core/v4l2-ioctl.c 			if (p->match.addr != idx++)
p                2513 drivers/media/v4l2-core/v4l2-ioctl.c 				p->flags |= V4L2_CHIP_FL_WRITABLE;
p                2515 drivers/media/v4l2-core/v4l2-ioctl.c 				p->flags |= V4L2_CHIP_FL_READABLE;
p                2516 drivers/media/v4l2-core/v4l2-ioctl.c 			strscpy(p->name, sd->name, sizeof(p->name));
p                2548 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_sliced_vbi_cap *p = arg;
p                2549 drivers/media/v4l2-core/v4l2-ioctl.c 	int ret = check_fmt(file, p->type);
p                2555 drivers/media/v4l2-core/v4l2-ioctl.c 	memset(p, 0, offsetof(struct v4l2_sliced_vbi_cap, type));
p                2557 drivers/media/v4l2-core/v4l2-ioctl.c 	return ops->vidioc_g_sliced_vbi_cap(file, fh, p);
p                2564 drivers/media/v4l2-core/v4l2-ioctl.c 	struct v4l2_frequency_band *p = arg;
p                2569 drivers/media/v4l2-core/v4l2-ioctl.c 		if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF)
p                2571 drivers/media/v4l2-core/v4l2-ioctl.c 		type = p->type;
p                2575 drivers/media/v4l2-core/v4l2-ioctl.c 		if (type != p->type)
p                2579 drivers/media/v4l2-core/v4l2-ioctl.c 		err = ops->vidioc_enum_freq_bands(file, fh, p);
p                2585 drivers/media/v4l2-core/v4l2-ioctl.c 			.index = p->tuner,
p                2589 drivers/media/v4l2-core/v4l2-ioctl.c 		if (p->index)
p                2594 drivers/media/v4l2-core/v4l2-ioctl.c 		p->capability = t.capability | V4L2_TUNER_CAP_FREQ_BANDS;
p                2595 drivers/media/v4l2-core/v4l2-ioctl.c 		p->rangelow = t.rangelow;
p                2596 drivers/media/v4l2-core/v4l2-ioctl.c 		p->rangehigh = t.rangehigh;
p                2597 drivers/media/v4l2-core/v4l2-ioctl.c 		p->modulation = (type == V4L2_TUNER_RADIO) ?
p                2603 drivers/media/v4l2-core/v4l2-ioctl.c 			.index = p->tuner,
p                2608 drivers/media/v4l2-core/v4l2-ioctl.c 		if (p->index)
p                2613 drivers/media/v4l2-core/v4l2-ioctl.c 		p->capability = m.capability | V4L2_TUNER_CAP_FREQ_BANDS;
p                2614 drivers/media/v4l2-core/v4l2-ioctl.c 		p->rangelow = m.rangelow;
p                2615 drivers/media/v4l2-core/v4l2-ioctl.c 		p->rangehigh = m.rangehigh;
p                2616 drivers/media/v4l2-core/v4l2-ioctl.c 		p->modulation = (type == V4L2_TUNER_RADIO) ?
p                2628 drivers/media/v4l2-core/v4l2-ioctl.c 		    void *fh, void *p);
p                2649 drivers/media/v4l2-core/v4l2-ioctl.c 			struct file *file, void *fh, void *p)	\
p                2651 drivers/media/v4l2-core/v4l2-ioctl.c 		return ops->vidioc_ ## _vidioc(file, fh, p);	\
p                 404 drivers/media/v4l2-core/v4l2-subdev.c 		struct v4l2_dbg_register *p = arg;
p                 408 drivers/media/v4l2-core/v4l2-subdev.c 		return v4l2_subdev_call(sd, core, g_register, p);
p                 412 drivers/media/v4l2-core/v4l2-subdev.c 		struct v4l2_dbg_register *p = arg;
p                 416 drivers/media/v4l2-core/v4l2-subdev.c 		return v4l2_subdev_call(sd, core, s_register, p);
p                 420 drivers/media/v4l2-core/v4l2-subdev.c 		struct v4l2_dbg_chip_info *p = arg;
p                 422 drivers/media/v4l2-core/v4l2-subdev.c 		if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr)
p                 425 drivers/media/v4l2-core/v4l2-subdev.c 			p->flags |= V4L2_CHIP_FL_WRITABLE;
p                 427 drivers/media/v4l2-core/v4l2-subdev.c 			p->flags |= V4L2_CHIP_FL_READABLE;
p                 428 drivers/media/v4l2-core/v4l2-subdev.c 		strscpy(p->name, sd->name, sizeof(p->name));
p                 595 drivers/media/v4l2-core/v4l2-subdev.c 		struct v4l2_standard *p = arg;
p                 601 drivers/media/v4l2-core/v4l2-subdev.c 		return v4l_video_std_enumstd(p, id);
p                 386 drivers/memory/omap-gpmc.c static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
p                 390 drivers/memory/omap-gpmc.c 			   p->time_para_granularity);
p                 392 drivers/memory/omap-gpmc.c 			   GPMC_CONFIG2_CSEXTRADELAY, p->cs_extra_delay);
p                 394 drivers/memory/omap-gpmc.c 			   GPMC_CONFIG3_ADVEXTRADELAY, p->adv_extra_delay);
p                 396 drivers/memory/omap-gpmc.c 			   GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
p                 398 drivers/memory/omap-gpmc.c 			   GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
p                 401 drivers/memory/omap-gpmc.c 			   p->cycle2cyclesamecsen);
p                 404 drivers/memory/omap-gpmc.c 			   p->cycle2cyclediffcsen);
p                1278 drivers/memory/omap-gpmc.c static void gpmc_irq_disable(struct irq_data *p)
p                1280 drivers/memory/omap-gpmc.c 	gpmc_irq_endis(p->hwirq, false);
p                1283 drivers/memory/omap-gpmc.c static void gpmc_irq_enable(struct irq_data *p)
p                1285 drivers/memory/omap-gpmc.c 	gpmc_irq_endis(p->hwirq, true);
p                1827 drivers/memory/omap-gpmc.c int gpmc_cs_program_settings(int cs, struct gpmc_settings *p)
p                1831 drivers/memory/omap-gpmc.c 	if ((!p->device_width) || (p->device_width > GPMC_DEVWIDTH_16BIT)) {
p                1832 drivers/memory/omap-gpmc.c 		pr_err("%s: invalid width %d!", __func__, p->device_width);
p                1837 drivers/memory/omap-gpmc.c 	if (p->device_nand && p->mux_add_data) {
p                1842 drivers/memory/omap-gpmc.c 	if ((p->mux_add_data > GPMC_MUX_AD) ||
p                1843 drivers/memory/omap-gpmc.c 	    ((p->mux_add_data == GPMC_MUX_AAD) &&
p                1850 drivers/memory/omap-gpmc.c 	if (p->burst_read || p->burst_write) {
p                1851 drivers/memory/omap-gpmc.c 		switch (p->burst_len) {
p                1858 drivers/memory/omap-gpmc.c 			       __func__, p->burst_len);
p                1863 drivers/memory/omap-gpmc.c 	if (p->wait_pin > gpmc_nr_waitpins) {
p                1864 drivers/memory/omap-gpmc.c 		pr_err("%s: invalid wait-pin (%d)\n", __func__, p->wait_pin);
p                1868 drivers/memory/omap-gpmc.c 	config1 = GPMC_CONFIG1_DEVICESIZE((p->device_width - 1));
p                1870 drivers/memory/omap-gpmc.c 	if (p->sync_read)
p                1872 drivers/memory/omap-gpmc.c 	if (p->sync_write)
p                1874 drivers/memory/omap-gpmc.c 	if (p->wait_on_read)
p                1876 drivers/memory/omap-gpmc.c 	if (p->wait_on_write)
p                1878 drivers/memory/omap-gpmc.c 	if (p->wait_on_read || p->wait_on_write)
p                1879 drivers/memory/omap-gpmc.c 		config1 |= GPMC_CONFIG1_WAIT_PIN_SEL(p->wait_pin);
p                1880 drivers/memory/omap-gpmc.c 	if (p->device_nand)
p                1882 drivers/memory/omap-gpmc.c 	if (p->mux_add_data)
p                1883 drivers/memory/omap-gpmc.c 		config1	|= GPMC_CONFIG1_MUXTYPE(p->mux_add_data);
p                1884 drivers/memory/omap-gpmc.c 	if (p->burst_read)
p                1886 drivers/memory/omap-gpmc.c 	if (p->burst_write)
p                1888 drivers/memory/omap-gpmc.c 	if (p->burst_read || p->burst_write) {
p                1889 drivers/memory/omap-gpmc.c 		config1 |= GPMC_CONFIG1_PAGE_LEN(p->burst_len >> 3);
p                1890 drivers/memory/omap-gpmc.c 		config1 |= p->burst_wrap ? GPMC_CONFIG1_WRAPBURST_SUPP : 0;
p                1918 drivers/memory/omap-gpmc.c void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p)
p                1920 drivers/memory/omap-gpmc.c 	memset(p, 0, sizeof(struct gpmc_settings));
p                1922 drivers/memory/omap-gpmc.c 	p->sync_read = of_property_read_bool(np, "gpmc,sync-read");
p                1923 drivers/memory/omap-gpmc.c 	p->sync_write = of_property_read_bool(np, "gpmc,sync-write");
p                1924 drivers/memory/omap-gpmc.c 	of_property_read_u32(np, "gpmc,device-width", &p->device_width);
p                1925 drivers/memory/omap-gpmc.c 	of_property_read_u32(np, "gpmc,mux-add-data", &p->mux_add_data);
p                1927 drivers/memory/omap-gpmc.c 	if (!of_property_read_u32(np, "gpmc,burst-length", &p->burst_len)) {
p                1928 drivers/memory/omap-gpmc.c 		p->burst_wrap = of_property_read_bool(np, "gpmc,burst-wrap");
p                1929 drivers/memory/omap-gpmc.c 		p->burst_read = of_property_read_bool(np, "gpmc,burst-read");
p                1930 drivers/memory/omap-gpmc.c 		p->burst_write = of_property_read_bool(np, "gpmc,burst-write");
p                1931 drivers/memory/omap-gpmc.c 		if (!p->burst_read && !p->burst_write)
p                1936 drivers/memory/omap-gpmc.c 	if (!of_property_read_u32(np, "gpmc,wait-pin", &p->wait_pin)) {
p                1937 drivers/memory/omap-gpmc.c 		p->wait_on_read = of_property_read_bool(np,
p                1939 drivers/memory/omap-gpmc.c 		p->wait_on_write = of_property_read_bool(np,
p                1941 drivers/memory/omap-gpmc.c 		if (!p->wait_on_read && !p->wait_on_write)
p                1950 drivers/memory/omap-gpmc.c 	struct gpmc_bool_timings *p;
p                2009 drivers/memory/omap-gpmc.c 	p = &gpmc_t->bool_timings;
p                2011 drivers/memory/omap-gpmc.c 	p->cycle2cyclediffcsen =
p                2013 drivers/memory/omap-gpmc.c 	p->cycle2cyclesamecsen =
p                2015 drivers/memory/omap-gpmc.c 	p->we_extra_delay = of_property_read_bool(np, "gpmc,we-extra-delay");
p                2016 drivers/memory/omap-gpmc.c 	p->oe_extra_delay = of_property_read_bool(np, "gpmc,oe-extra-delay");
p                2017 drivers/memory/omap-gpmc.c 	p->adv_extra_delay = of_property_read_bool(np, "gpmc,adv-extra-delay");
p                2018 drivers/memory/omap-gpmc.c 	p->cs_extra_delay = of_property_read_bool(np, "gpmc,cs-extra-delay");
p                2019 drivers/memory/omap-gpmc.c 	p->time_para_granularity =
p                1160 drivers/memstick/core/ms_block.c static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
p                1162 drivers/memstick/core/ms_block.c 	p->header.block_id = be16_to_cpu(p->header.block_id);
p                1163 drivers/memstick/core/ms_block.c 	p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
p                1164 drivers/memstick/core/ms_block.c 	p->entry.disabled_block.start_addr
p                1165 drivers/memstick/core/ms_block.c 		= be32_to_cpu(p->entry.disabled_block.start_addr);
p                1166 drivers/memstick/core/ms_block.c 	p->entry.disabled_block.data_size
p                1167 drivers/memstick/core/ms_block.c 		= be32_to_cpu(p->entry.disabled_block.data_size);
p                1168 drivers/memstick/core/ms_block.c 	p->entry.cis_idi.start_addr
p                1169 drivers/memstick/core/ms_block.c 		= be32_to_cpu(p->entry.cis_idi.start_addr);
p                1170 drivers/memstick/core/ms_block.c 	p->entry.cis_idi.data_size
p                1171 drivers/memstick/core/ms_block.c 		= be32_to_cpu(p->entry.cis_idi.data_size);
p                1172 drivers/memstick/core/ms_block.c 	p->attr.block_size = be16_to_cpu(p->attr.block_size);
p                1173 drivers/memstick/core/ms_block.c 	p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
p                1174 drivers/memstick/core/ms_block.c 	p->attr.number_of_effective_blocks
p                1175 drivers/memstick/core/ms_block.c 		= be16_to_cpu(p->attr.number_of_effective_blocks);
p                1176 drivers/memstick/core/ms_block.c 	p->attr.page_size = be16_to_cpu(p->attr.page_size);
p                1177 drivers/memstick/core/ms_block.c 	p->attr.memory_manufacturer_code
p                1178 drivers/memstick/core/ms_block.c 		= be16_to_cpu(p->attr.memory_manufacturer_code);
p                1179 drivers/memstick/core/ms_block.c 	p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
p                1180 drivers/memstick/core/ms_block.c 	p->attr.implemented_capacity
p                1181 drivers/memstick/core/ms_block.c 		= be16_to_cpu(p->attr.implemented_capacity);
p                1182 drivers/memstick/core/ms_block.c 	p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
p                1183 drivers/memstick/core/ms_block.c 	p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
p                 610 drivers/message/fusion/lsi/mpi_targ.h #define SET_PORT(t, p)  ((t) = ((t) & ~TARGET_MODE_REPLY_PORT_MASK) |          \
p                 611 drivers/message/fusion/lsi/mpi_targ.h                                     (((p) << TARGET_MODE_REPLY_PORT_SHIFT) &   \
p                 365 drivers/message/fusion/mptbase.c 	struct task_struct *p;
p                 387 drivers/message/fusion/mptbase.c 		p = kthread_run(mpt_remove_dead_ioc_func, ioc,
p                 389 drivers/message/fusion/mptbase.c 		if (IS_ERR(p))	{
p                6702 drivers/message/fusion/mptbase.c 	int		 p;
p                6757 drivers/message/fusion/mptbase.c 	for (p=0; p < ioc->facts.NumberOfPorts; p++) {
p                6759 drivers/message/fusion/mptbase.c 				p+1,
p                6762 drivers/message/fusion/mptbase.c 			if (ioc->pfacts[p].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
p                6767 drivers/message/fusion/mptbase.c 					ioc->fc_port_page0[p].WWNN.High,
p                6768 drivers/message/fusion/mptbase.c 					ioc->fc_port_page0[p].WWNN.Low,
p                6769 drivers/message/fusion/mptbase.c 					ioc->fc_port_page0[p].WWPN.High,
p                6770 drivers/message/fusion/mptbase.c 					ioc->fc_port_page0[p].WWPN.Low);
p                1497 drivers/message/fusion/mptfc.c 	struct mptfc_rport_info	*p, *n;
p                1512 drivers/message/fusion/mptfc.c 	list_for_each_entry_safe(p, n, &ioc->fc_rports, list) {
p                1513 drivers/message/fusion/mptfc.c 		list_del(&p->list);
p                1514 drivers/message/fusion/mptfc.c 		kfree(p);
p                1496 drivers/message/fusion/mptlan.c 		u32 *p = (u32 *) fch;
p                1498 drivers/message/fusion/mptlan.c 		swab32s(p + 0);
p                1499 drivers/message/fusion/mptlan.c 		swab32s(p + 1);
p                1500 drivers/message/fusion/mptlan.c 		swab32s(p + 2);
p                1501 drivers/message/fusion/mptlan.c 		swab32s(p + 3);
p                1722 drivers/message/fusion/mptsas.c 	struct mptsas_portinfo	*p;
p                1757 drivers/message/fusion/mptsas.c 	list_for_each_entry(p, &ioc->sas_topology, list) {
p                1758 drivers/message/fusion/mptsas.c 		for (i = 0; i < p->num_phys; i++) {
p                1759 drivers/message/fusion/mptsas.c 			if (p->phy_info[i].attached.sas_address !=
p                1762 drivers/message/fusion/mptsas.c 			id = p->phy_info[i].attached.id;
p                1763 drivers/message/fusion/mptsas.c 			channel = p->phy_info[i].attached.channel;
p                1764 drivers/message/fusion/mptsas.c 			mptsas_set_starget(&p->phy_info[i], starget);
p                1774 drivers/message/fusion/mptsas.c 				p->phy_info[i].attached.phys_disk_num = id;
p                1798 drivers/message/fusion/mptsas.c 	struct mptsas_portinfo	*p;
p                1816 drivers/message/fusion/mptsas.c 	list_for_each_entry(p, &ioc->sas_topology, list) {
p                1817 drivers/message/fusion/mptsas.c 		for (i = 0; i < p->num_phys; i++) {
p                1818 drivers/message/fusion/mptsas.c 			if (p->phy_info[i].attached.sas_address !=
p                1825 drivers/message/fusion/mptsas.c 			p->phy_info[i].attached.channel,
p                1826 drivers/message/fusion/mptsas.c 			p->phy_info[i].attached.id,
p                1827 drivers/message/fusion/mptsas.c 			p->phy_info[i].attached.phy_id, (unsigned long long)
p                1828 drivers/message/fusion/mptsas.c 			p->phy_info[i].attached.sas_address);
p                1830 drivers/message/fusion/mptsas.c 			mptsas_set_starget(&p->phy_info[i], NULL);
p                1847 drivers/message/fusion/mptsas.c 	struct mptsas_portinfo	*p;
p                1867 drivers/message/fusion/mptsas.c 	list_for_each_entry(p, &ioc->sas_topology, list) {
p                1868 drivers/message/fusion/mptsas.c 		for (i = 0; i < p->num_phys; i++) {
p                1869 drivers/message/fusion/mptsas.c 			if (p->phy_info[i].attached.sas_address !=
p                1877 drivers/message/fusion/mptsas.c 			    p->phy_info[i].attached.channel,
p                1878 drivers/message/fusion/mptsas.c 			    p->phy_info[i].attached.id))
p                2160 drivers/message/fusion/mptsas.c 	struct mptsas_portinfo *p;
p                2165 drivers/message/fusion/mptsas.c 	list_for_each_entry(p, &ioc->sas_topology, list) {
p                2166 drivers/message/fusion/mptsas.c 		for (i = 0; i < p->num_phys; i++) {
p                2167 drivers/message/fusion/mptsas.c 			if (p->phy_info[i].attached.sas_address ==
p                2169 drivers/message/fusion/mptsas.c 				enclosure_handle = p->phy_info[i].
p                2193 drivers/message/fusion/mptsas.c 	struct mptsas_portinfo *p;
p                2197 drivers/message/fusion/mptsas.c 	list_for_each_entry(p, &ioc->sas_topology, list) {
p                2198 drivers/message/fusion/mptsas.c 		for (i = 0; i < p->num_phys; i++) {
p                2199 drivers/message/fusion/mptsas.c 			if (p->phy_info[i].attached.sas_address ==
p                2201 drivers/message/fusion/mptsas.c 				rc = p->phy_info[i].attached.slot;
p                5325 drivers/message/fusion/mptsas.c 	struct mptsas_portinfo *p, *n;
p                5342 drivers/message/fusion/mptsas.c 	list_for_each_entry_safe(p, n, &ioc->sas_topology, list) {
p                5343 drivers/message/fusion/mptsas.c 		list_del(&p->list);
p                5344 drivers/message/fusion/mptsas.c 		for (i = 0 ; i < p->num_phys ; i++)
p                5345 drivers/message/fusion/mptsas.c 			mptsas_port_delete(ioc, p->phy_info[i].port_details);
p                5347 drivers/message/fusion/mptsas.c 		kfree(p->phy_info);
p                5348 drivers/message/fusion/mptsas.c 		kfree(p);
p                 454 drivers/mfd/ab3100-core.c static int ab3100_registers_print(struct seq_file *s, void *p)
p                1320 drivers/mfd/ab8500-debugfs.c static int ab8500_bank_registers_show(struct seq_file *s, void *p)
p                1334 drivers/mfd/ab8500-debugfs.c static int ab8500_print_all_banks(struct seq_file *s, void *p)
p                1392 drivers/mfd/ab8500-debugfs.c static int ab8500_bank_print(struct seq_file *s, void *p)
p                1425 drivers/mfd/ab8500-debugfs.c static int ab8500_address_print(struct seq_file *s, void *p)
p                1457 drivers/mfd/ab8500-debugfs.c static int ab8500_val_print(struct seq_file *s, void *p)
p                1519 drivers/mfd/ab8500-debugfs.c static int ab8500_interrupts_show(struct seq_file *s, void *p)
p                1591 drivers/mfd/ab8500-debugfs.c static int ab8500_modem_show(struct seq_file *s, void *p)
p                1649 drivers/mfd/ab8500-debugfs.c static int ab8500_gpadc_bat_ctrl_show(struct seq_file *s, void *p)
p                1668 drivers/mfd/ab8500-debugfs.c static int ab8500_gpadc_btemp_ball_show(struct seq_file *s, void *p)
p                1687 drivers/mfd/ab8500-debugfs.c static int ab8500_gpadc_main_charger_v_show(struct seq_file *s, void *p)
p                1706 drivers/mfd/ab8500-debugfs.c static int ab8500_gpadc_acc_detect1_show(struct seq_file *s, void *p)
p                1725 drivers/mfd/ab8500-debugfs.c static int ab8500_gpadc_acc_detect2_show(struct seq_file *s, void *p)
p                1744 drivers/mfd/ab8500-debugfs.c static int ab8500_gpadc_aux1_show(struct seq_file *s, void *p)
p                1763 drivers/mfd/ab8500-debugfs.c static int ab8500_gpadc_aux2_show(struct seq_file *s, void *p)
p                1782 drivers/mfd/ab8500-debugfs.c static int ab8500_gpadc_main_bat_v_show(struct seq_file *s, void *p)
p                1801 drivers/mfd/ab8500-debugfs.c static int ab8500_gpadc_vbus_v_show(struct seq_file *s, void *p)
p                1820 drivers/mfd/ab8500-debugfs.c static int ab8500_gpadc_main_charger_c_show(struct seq_file *s, void *p)
p                1839 drivers/mfd/ab8500-debugfs.c static int ab8500_gpadc_usb_charger_c_show(struct seq_file *s, void *p)
p                1858 drivers/mfd/ab8500-debugfs.c static int ab8500_gpadc_bk_bat_v_show(struct seq_file *s, void *p)
p                1877 drivers/mfd/ab8500-debugfs.c static int ab8500_gpadc_die_temp_show(struct seq_file *s, void *p)
p                1896 drivers/mfd/ab8500-debugfs.c static int ab8500_gpadc_usb_id_show(struct seq_file *s, void *p)
p                1915 drivers/mfd/ab8500-debugfs.c static int ab8540_gpadc_xtal_temp_show(struct seq_file *s, void *p)
p                1934 drivers/mfd/ab8500-debugfs.c static int ab8540_gpadc_vbat_true_meas_show(struct seq_file *s, void *p)
p                1954 drivers/mfd/ab8500-debugfs.c static int ab8540_gpadc_bat_ctrl_and_ibat_show(struct seq_file *s, void *p)
p                1982 drivers/mfd/ab8500-debugfs.c static int ab8540_gpadc_vbat_meas_and_ibat_show(struct seq_file *s, void *p)
p                2009 drivers/mfd/ab8500-debugfs.c static int ab8540_gpadc_vbat_true_meas_and_ibat_show(struct seq_file *s, void *p)
p                2037 drivers/mfd/ab8500-debugfs.c static int ab8540_gpadc_bat_temp_and_ibat_show(struct seq_file *s, void *p)
p                2064 drivers/mfd/ab8500-debugfs.c static int ab8540_gpadc_otp_calib_show(struct seq_file *s, void *p)
p                2090 drivers/mfd/ab8500-debugfs.c static int ab8500_gpadc_avg_sample_print(struct seq_file *s, void *p)
p                2137 drivers/mfd/ab8500-debugfs.c static int ab8500_gpadc_trig_edge_print(struct seq_file *s, void *p)
p                2184 drivers/mfd/ab8500-debugfs.c static int ab8500_gpadc_trig_timer_print(struct seq_file *s, void *p)
p                2229 drivers/mfd/ab8500-debugfs.c static int ab8500_gpadc_conv_type_print(struct seq_file *s, void *p)
p                2451 drivers/mfd/ab8500-debugfs.c static int ab8500_subscribe_unsubscribe_print(struct seq_file *s, void *p)
p                  66 drivers/mfd/mc13xxx-spi.c 	unsigned char *p = val;
p                  85 drivers/mfd/mc13xxx-spi.c 	memcpy(p, &r[1], 3);
p                  26 drivers/mfd/stm32-timers.c static void stm32_timers_dma_done(void *p)
p                  28 drivers/mfd/stm32-timers.c 	struct stm32_timers_dma *dma = p;
p                 132 drivers/mfd/tps65010.c 		({int p; switch ((chgconfig >> 3) & 3) {
p                 133 drivers/mfd/tps65010.c 		case 3:		p = 100; break;
p                 134 drivers/mfd/tps65010.c 		case 2:		p = 75; break;
p                 135 drivers/mfd/tps65010.c 		case 1:		p = 50; break;
p                 136 drivers/mfd/tps65010.c 		default:	p = 25; break;
p                 137 drivers/mfd/tps65010.c 		}; p; }),
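The tps65010.c lines above decode a two-bit field of chgconfig into a charge-current percentage inside a GNU statement expression embedded in a printk argument list. A hedged standalone sketch of the same decode as an ordinary function, assuming only what the excerpt shows (bits 3..4 selecting 25/50/75/100 percent):

    #include <stdint.h>

    /* Map the 2-bit charge-current select field to a percentage. */
    static int chg_current_percent(uint8_t chgconfig)
    {
            switch ((chgconfig >> 3) & 3) {
            case 3:  return 100;
            case 2:  return 75;
            case 1:  return 50;
            default: return 25;
            }
    }

A named helper like this is the portable alternative when the statement-expression extension is not wanted.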
p                  57 drivers/mfd/twl4030-power.c #define PHY_TO_OFF_PM_MASTER(p)		(p - 0x36)
p                  58 drivers/mfd/twl4030-power.c #define PHY_TO_OFF_PM_RECEIVER(p)	(p - 0x5b)
p                 214 drivers/mfd/ucb1x00-ts.c 		unsigned int x, y, p;
p                 224 drivers/mfd/ucb1x00-ts.c 		p = ucb1x00_ts_read_pressure(ts);
p                 267 drivers/mfd/ucb1x00-ts.c 				ucb1x00_ts_evt_add(ts, p, x, y);
p                 205 drivers/misc/altera-stapl/altera.c 				u8 *p,
p                 267 drivers/misc/altera-stapl/altera.c 		first_word    = get_unaligned_be32(&p[0]);
p                 272 drivers/misc/altera-stapl/altera.c 		action_table  = get_unaligned_be32(&p[4]);
p                 273 drivers/misc/altera-stapl/altera.c 		proc_table    = get_unaligned_be32(&p[8]);
p                 274 drivers/misc/altera-stapl/altera.c 		str_table  = get_unaligned_be32(&p[4 + delta]);
p                 275 drivers/misc/altera-stapl/altera.c 		sym_table  = get_unaligned_be32(&p[16 + delta]);
p                 276 drivers/misc/altera-stapl/altera.c 		data_sect  = get_unaligned_be32(&p[20 + delta]);
p                 277 drivers/misc/altera-stapl/altera.c 		code_sect  = get_unaligned_be32(&p[24 + delta]);
p                 278 drivers/misc/altera-stapl/altera.c 		debug_sect = get_unaligned_be32(&p[28 + delta]);
p                 279 drivers/misc/altera-stapl/altera.c 		action_count  = get_unaligned_be32(&p[40 + delta]);
p                 280 drivers/misc/altera-stapl/altera.c 		proc_count    = get_unaligned_be32(&p[44 + delta]);
p                 281 drivers/misc/altera-stapl/altera.c 		sym_count  = get_unaligned_be32(&p[48 + (2 * delta)]);
p                 327 drivers/misc/altera-stapl/altera.c 		value = get_unaligned_be32(&p[offset + 3 + delta]);
p                 329 drivers/misc/altera-stapl/altera.c 		attrs[i] = p[offset];
p                 338 drivers/misc/altera-stapl/altera.c 		var_size[i] = get_unaligned_be32(&p[offset + 7 + delta]);
p                 356 drivers/misc/altera-stapl/altera.c 			uncomp_size = get_unaligned_le32(&p[data_sect + value]);
p                 367 drivers/misc/altera-stapl/altera.c 				if (altera_shrink(&p[data_sect + value],
p                 380 drivers/misc/altera-stapl/altera.c 			vars[i] = value + data_sect + (long)p;
p                 438 drivers/misc/altera-stapl/altera.c 				name_id = get_unaligned_be32(&p[action_table +
p                 441 drivers/misc/altera-stapl/altera.c 				name = &p[str_table + name_id];
p                 446 drivers/misc/altera-stapl/altera.c 						get_unaligned_be32(&p[action_table +
p                 464 drivers/misc/altera-stapl/altera.c 						(p[proc_table +
p                 475 drivers/misc/altera-stapl/altera.c 				i = get_unaligned_be32(&p[proc_table +
p                 487 drivers/misc/altera-stapl/altera.c 				i = get_unaligned_be32(&p[proc_table +
p                 496 drivers/misc/altera-stapl/altera.c 					get_unaligned_be32(&p[proc_table +
p                 510 drivers/misc/altera-stapl/altera.c 		opcode = (p[pc] & 0xff);
p                 519 drivers/misc/altera-stapl/altera.c 			args[i] = get_unaligned_be32(&p[pc]);
p                 636 drivers/misc/altera-stapl/altera.c 				i = get_unaligned_be32(&p[proc_table +
p                 641 drivers/misc/altera-stapl/altera.c 					i = get_unaligned_be32(&p[proc_table +
p                 651 drivers/misc/altera-stapl/altera.c 								&p[proc_table +
p                1039 drivers/misc/altera-stapl/altera.c 				&p[str_table + args[0]],
p                1137 drivers/misc/altera-stapl/altera.c 						get_unaligned_be32(&p[long_tmp]);
p                1478 drivers/misc/altera-stapl/altera.c 				name = &p[str_table + args[0]];
p                1504 drivers/misc/altera-stapl/altera.c 					get_unaligned_be32(&p[long_tmp]);
p                1630 drivers/misc/altera-stapl/altera.c 			name = &p[str_table + args[0]];
p                2115 drivers/misc/altera-stapl/altera.c static int altera_get_note(u8 *p, s32 program_size, s32 *offset,
p                2140 drivers/misc/altera-stapl/altera.c 		first_word    = get_unaligned_be32(&p[0]);
p                2144 drivers/misc/altera-stapl/altera.c 		note_strings  = get_unaligned_be32(&p[8 + delta]);
p                2145 drivers/misc/altera-stapl/altera.c 		note_table    = get_unaligned_be32(&p[12 + delta]);
p                2146 drivers/misc/altera-stapl/altera.c 		note_count    = get_unaligned_be32(&p[44 + (2 * delta)]);
p                2162 drivers/misc/altera-stapl/altera.c 			key_ptr = &p[note_strings +
p                2164 drivers/misc/altera-stapl/altera.c 					&p[note_table + (8 * i)])];
p                2168 drivers/misc/altera-stapl/altera.c 				value_ptr = &p[note_strings +
p                2170 drivers/misc/altera-stapl/altera.c 						&p[note_table + (8 * i) + 4])];
p                2189 drivers/misc/altera-stapl/altera.c 				strlcpy(key, &p[note_strings +
p                2191 drivers/misc/altera-stapl/altera.c 						&p[note_table + (8 * i)])],
p                2195 drivers/misc/altera-stapl/altera.c 				strlcpy(value, &p[note_strings +
p                2197 drivers/misc/altera-stapl/altera.c 						&p[note_table + (8 * i) + 4])],
p                2207 drivers/misc/altera-stapl/altera.c static int altera_check_crc(u8 *p, s32 program_size)
p                2222 drivers/misc/altera-stapl/altera.c 		first_word  = get_unaligned_be32(&p[0]);
p                2226 drivers/misc/altera-stapl/altera.c 		crc_section = get_unaligned_be32(&p[32 + delta]);
p                2236 drivers/misc/altera-stapl/altera.c 		local_expected = (u16)get_unaligned_be16(&p[crc_section]);
p                2239 drivers/misc/altera-stapl/altera.c 			databyte = p[i];
p                2287 drivers/misc/altera-stapl/altera.c static int altera_get_file_info(u8 *p,
p                2300 drivers/misc/altera-stapl/altera.c 	first_word = get_unaligned_be32(&p[0]);
p                2309 drivers/misc/altera-stapl/altera.c 			*action_count = get_unaligned_be32(&p[48]);
p                2310 drivers/misc/altera-stapl/altera.c 			*procedure_count = get_unaligned_be32(&p[52]);
p                2317 drivers/misc/altera-stapl/altera.c static int altera_get_act_info(u8 *p,
p                2343 drivers/misc/altera-stapl/altera.c 	first_word = get_unaligned_be32(&p[0]);
p                2348 drivers/misc/altera-stapl/altera.c 	action_table = get_unaligned_be32(&p[4]);
p                2349 drivers/misc/altera-stapl/altera.c 	proc_table   = get_unaligned_be32(&p[8]);
p                2350 drivers/misc/altera-stapl/altera.c 	str_table = get_unaligned_be32(&p[12]);
p                2351 drivers/misc/altera-stapl/altera.c 	note_strings = get_unaligned_be32(&p[16]);
p                2352 drivers/misc/altera-stapl/altera.c 	action_count = get_unaligned_be32(&p[48]);
p                2353 drivers/misc/altera-stapl/altera.c 	proc_count   = get_unaligned_be32(&p[52]);
p                2358 drivers/misc/altera-stapl/altera.c 	act_name_id = get_unaligned_be32(&p[action_table + (12 * index)]);
p                2359 drivers/misc/altera-stapl/altera.c 	act_desc_id = get_unaligned_be32(&p[action_table + (12 * index) + 4]);
p                2360 drivers/misc/altera-stapl/altera.c 	act_proc_id = get_unaligned_be32(&p[action_table + (12 * index) + 8]);
p                2362 drivers/misc/altera-stapl/altera.c 	*name = &p[str_table + act_name_id];
p                2365 drivers/misc/altera-stapl/altera.c 		*description = &p[str_table + act_desc_id];
p                2369 drivers/misc/altera-stapl/altera.c 					&p[proc_table + (13 * act_proc_id)]);
p                2371 drivers/misc/altera-stapl/altera.c 			(p[proc_table + (13 * act_proc_id) + 8] & 0x03);
p                2380 drivers/misc/altera-stapl/altera.c 			procptr->name = &p[str_table + act_proc_name];
p                2396 drivers/misc/altera-stapl/altera.c 				&p[proc_table + (13 * act_proc_id) + 4]);
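The altera-stapl entries read 32-bit big-endian fields out of a firmware image at arbitrary byte offsets with get_unaligned_be32(), so alignment is never assumed. A portable userspace equivalent built from plain byte loads (the helper name and the image/offset usage below are illustrative, mirroring the excerpt rather than quoting it):

    #include <stddef.h>
    #include <stdint.h>

    /* Read a big-endian 32-bit value from an arbitrarily aligned buffer. */
    static uint32_t read_be32(const uint8_t *buf, size_t offset)
    {
            const uint8_t *p = buf + offset;

            return ((uint32_t)p[0] << 24) |
                   ((uint32_t)p[1] << 16) |
                   ((uint32_t)p[2] <<  8) |
                    (uint32_t)p[3];
    }

For example, read_be32(image, 4) would fetch the action-table offset in the layout the excerpt walks, without ever dereferencing a misaligned uint32_t pointer.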
p                  60 drivers/misc/cb710/debug.c 	char msg[100], *p;						\
p                  66 drivers/misc/cb710/debug.c 		p = msg;						\
p                  68 drivers/misc/cb710/debug.c 			*p++ = ' ';					\
p                  70 drivers/misc/cb710/debug.c 				*p++ = ' ';				\
p                  72 drivers/misc/cb710/debug.c 				p += sprintf(p, format, reg[j]);	\
p                  74 drivers/misc/cb710/debug.c 				p += sprintf(p, "%s", xp);		\
p                 143 drivers/misc/cxl/of.c 	char *p;
p                 156 drivers/misc/cxl/of.c 			p = (char *) prop + i;
p                 157 drivers/misc/cxl/of.c 			pr_info("compatible: %s\n", p);
p                 158 drivers/misc/cxl/of.c 			i += strlen(p) + 1;
p                 348 drivers/misc/cxl/of.c 	char *p;
p                 365 drivers/misc/cxl/of.c 			p = (char *) prop + i;
p                 366 drivers/misc/cxl/of.c 			pr_info("compatible: %s\n", p);
p                 367 drivers/misc/cxl/of.c 			i += strlen(p) + 1;
p                 320 drivers/misc/cxl/sysfs.c 	char *p = buf, *end = buf + PAGE_SIZE;
p                 323 drivers/misc/cxl/sysfs.c 		p += scnprintf(p, end - p, "dedicated_process\n");
p                 325 drivers/misc/cxl/sysfs.c 		p += scnprintf(p, end - p, "afu_directed\n");
p                 326 drivers/misc/cxl/sysfs.c 	return (p - buf);
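The cxl/sysfs.c lines accumulate sysfs output with p += scnprintf(p, end - p, ...), which never writes past end and returns the number of characters actually stored. A userspace analogue using snprintf() with explicit clamping, since scnprintf() itself is kernel-only; the helper name is an assumption for illustration:

    #include <stdio.h>
    #include <string.h>

    /* Append one line to [p, end); return the number of bytes appended. */
    static size_t append_str(char *p, char *end, const char *s)
    {
            size_t size = (size_t)(end - p);
            int n;

            if (size == 0)
                    return 0;
            n = snprintf(p, size, "%s\n", s);
            if (n < 0)
                    return 0;
            /* snprintf reports the would-be length; clamp like scnprintf does. */
            return (size_t)n < size ? (size_t)n : size - 1;
    }

Usage would mirror the excerpt: with char buf[4096], *p = buf, *end = buf + sizeof(buf), each call is p += append_str(p, end, "afu_directed"), and p - buf is the final length.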
p                 373 drivers/misc/echo/echo.c 		int p, logp, shift;
p                 408 drivers/misc/echo/echo.c 		p = MIN_TX_POWER_FOR_ADAPTION + ec->pstates;
p                 409 drivers/misc/echo/echo.c 		logp = top_bit(p) + ec->log2taps;
p                  32 drivers/misc/eeprom/digsy_mtc_eeprom.c static void digsy_mtc_op_prepare(void *p)
p                  38 drivers/misc/eeprom/digsy_mtc_eeprom.c static void digsy_mtc_op_finish(void *p)
p                1096 drivers/misc/eeprom/idt_89hpesx.c 	const char *compatible, *p;
p                1104 drivers/misc/eeprom/idt_89hpesx.c 	p = strchr(compatible, ',');
p                1105 drivers/misc/eeprom/idt_89hpesx.c 	strlcpy(devname, p ? p + 1 : compatible, sizeof(devname));
p                  31 drivers/misc/fastrpc.c #define FASTRPC_PHYS(p)	((p) & 0xffffffff)
p                 374 drivers/misc/genwqe/card_utils.c 	int i = 0, j = 0, p;
p                 384 drivers/misc/genwqe/card_utils.c 	p = 0;			/* page */
p                 385 drivers/misc/genwqe/card_utils.c 	while (p < sgl->nr_pages) {
p                 400 drivers/misc/genwqe/card_utils.c 			if ((p == 0) && (sgl->fpage != NULL)) {
p                 403 drivers/misc/genwqe/card_utils.c 			} else if ((p == sgl->nr_pages - 1) &&
p                 407 drivers/misc/genwqe/card_utils.c 				daddr = dma_list[p] + map_offs;
p                 423 drivers/misc/genwqe/card_utils.c 				p++; /* process next page */
p                 424 drivers/misc/genwqe/card_utils.c 				if (p == sgl->nr_pages)
p                 439 drivers/misc/genwqe/card_utils.c 			p++;	/* process next page */
p                 440 drivers/misc/genwqe/card_utils.c 			if (p == sgl->nr_pages)
p                  50 drivers/misc/habanalabs/command_buffer.c 	void *p;
p                  69 drivers/misc/habanalabs/command_buffer.c 		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
p                  72 drivers/misc/habanalabs/command_buffer.c 		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
p                  75 drivers/misc/habanalabs/command_buffer.c 	if (!p) {
p                  83 drivers/misc/habanalabs/command_buffer.c 	cb->kernel_address = (u64) (uintptr_t) p;
p                1442 drivers/misc/habanalabs/habanalabs.h #define hl_queue_inc_ptr(p)		hl_hw_queue_add_ptr(p, 1)
p                 420 drivers/misc/habanalabs/hw_queue.c 	void *p;
p                 424 drivers/misc/habanalabs/hw_queue.c 		p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
p                 428 drivers/misc/habanalabs/hw_queue.c 		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
p                 432 drivers/misc/habanalabs/hw_queue.c 	if (!p)
p                 435 drivers/misc/habanalabs/hw_queue.c 	q->kernel_address = (u64) (uintptr_t) p;
p                 470 drivers/misc/habanalabs/hw_queue.c 	void *p;
p                 472 drivers/misc/habanalabs/hw_queue.c 	p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id,
p                 474 drivers/misc/habanalabs/hw_queue.c 	if (!p) {
p                 481 drivers/misc/habanalabs/hw_queue.c 	q->kernel_address = (u64) (uintptr_t) p;
p                 220 drivers/misc/habanalabs/irq.c 	void *p;
p                 224 drivers/misc/habanalabs/irq.c 	p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
p                 226 drivers/misc/habanalabs/irq.c 	if (!p)
p                 230 drivers/misc/habanalabs/irq.c 	q->kernel_address = (u64) (uintptr_t) p;
p                 282 drivers/misc/habanalabs/irq.c 	void *p;
p                 286 drivers/misc/habanalabs/irq.c 	p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
p                 289 drivers/misc/habanalabs/irq.c 	if (!p)
p                 293 drivers/misc/habanalabs/irq.c 	q->kernel_address = (u64) (uintptr_t) p;
p                1049 drivers/misc/ibmvmc.c 	const char *p = buffer;
p                1114 drivers/misc/ibmvmc.c 		bytes -= copy_from_user(buf, p, bytes);
p                1120 drivers/misc/ibmvmc.c 		p += bytes;
p                1122 drivers/misc/ibmvmc.c 	if (p == buffer)
p                1132 drivers/misc/ibmvmc.c 	ret = p - buffer;
p                 111 drivers/misc/lis3lv02d/lis3lv02d.c #define param_check_axis(name, p) param_check_int(name, p)
p                 882 drivers/misc/lis3lv02d/lis3lv02d.c 				struct lis3lv02d_platform_data *p)
p                 885 drivers/misc/lis3lv02d/lis3lv02d.c 	int ctrl2 = p->hipass_ctrl;
p                 887 drivers/misc/lis3lv02d/lis3lv02d.c 	if (p->click_flags) {
p                 888 drivers/misc/lis3lv02d/lis3lv02d.c 		lis3->write(lis3, CLICK_CFG, p->click_flags);
p                 889 drivers/misc/lis3lv02d/lis3lv02d.c 		lis3->write(lis3, CLICK_TIMELIMIT, p->click_time_limit);
p                 890 drivers/misc/lis3lv02d/lis3lv02d.c 		lis3->write(lis3, CLICK_LATENCY, p->click_latency);
p                 891 drivers/misc/lis3lv02d/lis3lv02d.c 		lis3->write(lis3, CLICK_WINDOW, p->click_window);
p                 892 drivers/misc/lis3lv02d/lis3lv02d.c 		lis3->write(lis3, CLICK_THSZ, p->click_thresh_z & 0xf);
p                 894 drivers/misc/lis3lv02d/lis3lv02d.c 			(p->click_thresh_x & 0xf) |
p                 895 drivers/misc/lis3lv02d/lis3lv02d.c 			(p->click_thresh_y << 4));
p                 905 drivers/misc/lis3lv02d/lis3lv02d.c 	if (p->wakeup_flags) {
p                 906 drivers/misc/lis3lv02d/lis3lv02d.c 		lis3->write(lis3, FF_WU_CFG_1, p->wakeup_flags);
p                 907 drivers/misc/lis3lv02d/lis3lv02d.c 		lis3->write(lis3, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
p                 909 drivers/misc/lis3lv02d/lis3lv02d.c 		lis3->write(lis3, FF_WU_DURATION_1, p->duration1 + 1);
p                 913 drivers/misc/lis3lv02d/lis3lv02d.c 	if (p->wakeup_flags2) {
p                 914 drivers/misc/lis3lv02d/lis3lv02d.c 		lis3->write(lis3, FF_WU_CFG_2, p->wakeup_flags2);
p                 915 drivers/misc/lis3lv02d/lis3lv02d.c 		lis3->write(lis3, FF_WU_THS_2, p->wakeup_thresh2 & 0x7f);
p                 917 drivers/misc/lis3lv02d/lis3lv02d.c 		lis3->write(lis3, FF_WU_DURATION_2, p->duration2 + 1);
p                 923 drivers/misc/lis3lv02d/lis3lv02d.c 	if (p->irq2) {
p                 924 drivers/misc/lis3lv02d/lis3lv02d.c 		err = request_threaded_irq(p->irq2,
p                 928 drivers/misc/lis3lv02d/lis3lv02d.c 					(p->irq_flags2 & IRQF_TRIGGER_MASK),
p                1189 drivers/misc/lis3lv02d/lis3lv02d.c 		struct lis3lv02d_platform_data *p = lis3->pdata;
p                1192 drivers/misc/lis3lv02d/lis3lv02d.c 			lis3lv02d_8b_configure(lis3, p);
p                1194 drivers/misc/lis3lv02d/lis3lv02d.c 		irq_flags = p->irq_flags1 & IRQF_TRIGGER_MASK;
p                1196 drivers/misc/lis3lv02d/lis3lv02d.c 		lis3->irq_cfg = p->irq_cfg;
p                1197 drivers/misc/lis3lv02d/lis3lv02d.c 		if (p->irq_cfg)
p                1198 drivers/misc/lis3lv02d/lis3lv02d.c 			lis3->write(lis3, CTRL_REG3, p->irq_cfg);
p                1200 drivers/misc/lis3lv02d/lis3lv02d.c 		if (p->default_rate)
p                1201 drivers/misc/lis3lv02d/lis3lv02d.c 			lis3lv02d_set_odr(lis3, p->default_rate);
p                 137 drivers/misc/lkdtm/bugs.c 	u32 *p;
p                 140 drivers/misc/lkdtm/bugs.c 	p = (u32 *)(data + 1);
p                 141 drivers/misc/lkdtm/bugs.c 	if (*p == 0)
p                 143 drivers/misc/lkdtm/bugs.c 	*p = val;
p                 100 drivers/misc/lkdtm/heap.c 	unsigned long p = __get_free_page(GFP_KERNEL);
p                 101 drivers/misc/lkdtm/heap.c 	if (!p) {
p                 107 drivers/misc/lkdtm/heap.c 	memset((void *)p, 0x3, PAGE_SIZE);
p                 108 drivers/misc/lkdtm/heap.c 	free_page(p);
p                 111 drivers/misc/lkdtm/heap.c 	memset((void *)p, 0x78, PAGE_SIZE);
p                 113 drivers/misc/lkdtm/heap.c 	p = __get_free_page(GFP_KERNEL);
p                 114 drivers/misc/lkdtm/heap.c 	free_page(p);
p                 120 drivers/misc/lkdtm/heap.c 	unsigned long p = __get_free_page(GFP_KERNEL);
p                 124 drivers/misc/lkdtm/heap.c 	if (!p) {
p                 132 drivers/misc/lkdtm/heap.c 		free_page(p);
p                 136 drivers/misc/lkdtm/heap.c 	base = (int *)p;
p                 141 drivers/misc/lkdtm/heap.c 	free_page(p);
p                 189 drivers/misc/lkdtm/heap.c 	unsigned long p = __get_free_page(GFP_KERNEL);
p                 192 drivers/misc/lkdtm/heap.c 	kmem_cache_free(NULL, (void *)p);
p                 193 drivers/misc/lkdtm/heap.c 	free_page(p);
p                1290 drivers/misc/mic/scif/scif_api.c 				   poll_table *p, struct scif_endpt *ep)
p                1300 drivers/misc/mic/scif/scif_api.c 	poll_wait(f, wq, p);
p                 157 drivers/misc/pch_phub.c 	void __iomem *p = chip->pch_phub_base_address;
p                 159 drivers/misc/pch_phub.c 	chip->phub_id_reg = ioread32(p + PCH_PHUB_ID_REG);
p                 160 drivers/misc/pch_phub.c 	chip->q_pri_val_reg = ioread32(p + PCH_PHUB_QUEUE_PRI_VAL_REG);
p                 161 drivers/misc/pch_phub.c 	chip->rc_q_maxsize_reg = ioread32(p + PCH_PHUB_RC_QUEUE_MAXSIZE_REG);
p                 162 drivers/misc/pch_phub.c 	chip->bri_q_maxsize_reg = ioread32(p + PCH_PHUB_BRI_QUEUE_MAXSIZE_REG);
p                 164 drivers/misc/pch_phub.c 				ioread32(p + PCH_PHUB_COMP_RESP_TIMEOUT_REG);
p                 166 drivers/misc/pch_phub.c 				ioread32(p + PCH_PHUB_BUS_SLAVE_CONTROL_REG);
p                 168 drivers/misc/pch_phub.c 				ioread32(p + PCH_PHUB_DEADLOCK_AVOID_TYPE_REG);
p                 170 drivers/misc/pch_phub.c 				ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG0);
p                 172 drivers/misc/pch_phub.c 				ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG1);
p                 174 drivers/misc/pch_phub.c 				ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG2);
p                 176 drivers/misc/pch_phub.c 				ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG3);
p                 202 drivers/misc/pch_phub.c 		    ioread32(p + PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE + 4 * i);
p                 207 drivers/misc/pch_phub.c 	chip->clkcfg_reg = ioread32(p + CLKCFG_REG_OFFSET);
p                 209 drivers/misc/pch_phub.c 		chip->funcsel_reg = ioread32(p + FUNCSEL_REG_OFFSET);
p                 217 drivers/misc/pch_phub.c 	void __iomem *p;
p                 218 drivers/misc/pch_phub.c 	p = chip->pch_phub_base_address;
p                 220 drivers/misc/pch_phub.c 	iowrite32(chip->phub_id_reg, p + PCH_PHUB_ID_REG);
p                 221 drivers/misc/pch_phub.c 	iowrite32(chip->q_pri_val_reg, p + PCH_PHUB_QUEUE_PRI_VAL_REG);
p                 222 drivers/misc/pch_phub.c 	iowrite32(chip->rc_q_maxsize_reg, p + PCH_PHUB_RC_QUEUE_MAXSIZE_REG);
p                 223 drivers/misc/pch_phub.c 	iowrite32(chip->bri_q_maxsize_reg, p + PCH_PHUB_BRI_QUEUE_MAXSIZE_REG);
p                 225 drivers/misc/pch_phub.c 					p + PCH_PHUB_COMP_RESP_TIMEOUT_REG);
p                 227 drivers/misc/pch_phub.c 					p + PCH_PHUB_BUS_SLAVE_CONTROL_REG);
p                 229 drivers/misc/pch_phub.c 					p + PCH_PHUB_DEADLOCK_AVOID_TYPE_REG);
p                 231 drivers/misc/pch_phub.c 					p + PCH_PHUB_INTPIN_REG_WPERMIT_REG0);
p                 233 drivers/misc/pch_phub.c 					p + PCH_PHUB_INTPIN_REG_WPERMIT_REG1);
p                 235 drivers/misc/pch_phub.c 					p + PCH_PHUB_INTPIN_REG_WPERMIT_REG2);
p                 237 drivers/misc/pch_phub.c 					p + PCH_PHUB_INTPIN_REG_WPERMIT_REG3);
p                 263 drivers/misc/pch_phub.c 			p + PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE + 4 * i);
p                 269 drivers/misc/pch_phub.c 	iowrite32(chip->clkcfg_reg, p + CLKCFG_REG_OFFSET);
p                 271 drivers/misc/pch_phub.c 		iowrite32(chip->funcsel_reg, p + FUNCSEL_REG_OFFSET);
p                 108 drivers/misc/pti.c 	u8 *p = buf;
p                 125 drivers/misc/pti.c 		ptiword = be32_to_cpu(*(u32 *)p);
p                 126 drivers/misc/pti.c 		p += 4;
p                 134 drivers/misc/pti.c 		ptiword |= *p++ << (24-(8*i));
p                  35 drivers/misc/sgi-gru/gru_instructions.h #define __flush_cache(p)		ia64_fc((unsigned long)p)
p                  37 drivers/misc/sgi-gru/gru_instructions.h #define gru_ordered_store_ulong(p, v)					\
p                  40 drivers/misc/sgi-gru/gru_instructions.h 			*((volatile unsigned long *)(p)) = v; /* force st.rel */	\
p                  44 drivers/misc/sgi-gru/gru_instructions.h #define __flush_cache(p)		clflush(p)
p                  45 drivers/misc/sgi-gru/gru_instructions.h #define gru_ordered_store_ulong(p, v)					\
p                  48 drivers/misc/sgi-gru/gru_instructions.h 			*(unsigned long *)p = v;			\
p                 331 drivers/misc/sgi-gru/gru_instructions.h static inline void gru_flush_cache(void *p)
p                 333 drivers/misc/sgi-gru/gru_instructions.h 	__flush_cache(p);
p                 702 drivers/misc/sgi-gru/gru_instructions.h static inline void *gru_get_gseg_pointer (void *p)
p                 704 drivers/misc/sgi-gru/gru_instructions.h 	return (void *)((unsigned long)p & ~(GRU_GSEG_PAGESIZE - 1));
p                 528 drivers/misc/sgi-gru/grukservices.c static inline int get_present2(void *p)
p                 530 drivers/misc/sgi-gru/grukservices.c 	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
p                 534 drivers/misc/sgi-gru/grukservices.c static inline void restore_present2(void *p, int val)
p                 536 drivers/misc/sgi-gru/grukservices.c 	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
p                 545 drivers/misc/sgi-gru/grukservices.c 		void *p, unsigned int bytes, int nasid, int vector, int apicid)
p                 547 drivers/misc/sgi-gru/grukservices.c 	struct message_queue *mq = p;
p                 954 drivers/misc/sgi-gru/grukservices.c 	unsigned long *p;
p                 959 drivers/misc/sgi-gru/grukservices.c 	p = dsr;
p                 969 drivers/misc/sgi-gru/grukservices.c 	if (*p != MAGIC) {
p                 970 drivers/misc/sgi-gru/grukservices.c 		printk(KERN_DEBUG "GRU:%d quicktest0 bad magic 0x%lx\n", smp_processor_id(), *p);
p                 992 drivers/misc/sgi-gru/grukservices.c #define ALIGNUP(p, q)	((void *)(((unsigned long)(p) + (q) - 1) & ~(q - 1)))
p                 997 drivers/misc/sgi-gru/grukservices.c 	void *p, *mq;
p                1002 drivers/misc/sgi-gru/grukservices.c 	p = kmalloc(4096, 0);
p                1003 drivers/misc/sgi-gru/grukservices.c 	if (p == NULL)
p                1005 drivers/misc/sgi-gru/grukservices.c 	mq = ALIGNUP(p, 1024);
p                1037 drivers/misc/sgi-gru/grukservices.c 	kfree(p);
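quicktest1() above kmalloc()s 4096 bytes and rounds the returned pointer up to a 1024-byte boundary with the ALIGNUP(p, q) macro. The same round-up written as a standalone helper; the mask trick only holds when the alignment is a power of two:

    #include <stdint.h>

    /* Round addr up to the next multiple of align (align must be 2^k). */
    static void *align_up(void *addr, uintptr_t align)
    {
            uintptr_t a = (uintptr_t)addr;

            return (void *)((a + align - 1) & ~(align - 1));
    }

Over-allocating by at least align - 1 bytes, as the excerpt does, guarantees the aligned region still fits inside the original allocation.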
p                  62 drivers/misc/sgi-gru/grukservices.h 		void *p, unsigned int bytes, int nasid, int vector, int apicid);
p                  63 drivers/misc/sgi-gru/grulib.h #define THREAD_POINTER(p, th)		(p + GRU_GSEG_PAGESIZE * (th))
p                 154 drivers/misc/sgi-gru/grumain.c static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
p                 161 drivers/misc/sgi-gru/grumain.c 		i = find_first_bit(p, mmax);
p                 164 drivers/misc/sgi-gru/grumain.c 		__clear_bit(i, p);
p                 449 drivers/misc/sgi-gru/grumain.c static void prefetch_data(void *p, int num, int stride)
p                 452 drivers/misc/sgi-gru/grumain.c 		prefetchw(p);
p                 453 drivers/misc/sgi-gru/grumain.c 		p += stride;
p                  29 drivers/misc/sgi-gru/gruprocfs.c static int statistics_show(struct seq_file *s, void *p)
p                 114 drivers/misc/sgi-gru/gruprocfs.c static int mcs_statistics_show(struct seq_file *s, void *p)
p                 140 drivers/misc/sgi-gru/gruprocfs.c static int options_show(struct seq_file *s, void *p)
p                 603 drivers/misc/sgi-gru/grutables.h #define uv_cpu_socket_number(p)		((cpu_physical_id(p) >> 5) & 1)
p                 604 drivers/misc/sgi-gru/grutables.h #define uv_cpu_ht_number(p)		(cpu_physical_id(p) & 1)
p                 605 drivers/misc/sgi-gru/grutables.h #define uv_cpu_core_number(p)		(((cpu_physical_id(p) >> 2) & 4) |	\
p                 606 drivers/misc/sgi-gru/grutables.h 					((cpu_physical_id(p) >> 1) & 3))
p                  84 drivers/misc/sram-exec.c 	struct sram_partition *part = NULL, *p;
p                  90 drivers/misc/sram-exec.c 	list_for_each_entry(p, &exec_pool_list, list) {
p                  91 drivers/misc/sram-exec.c 		if (p->pool == pool)
p                  92 drivers/misc/sram-exec.c 			part = p;
p                 751 drivers/misc/vmw_balloon.c 					   struct page **p)
p                 755 drivers/misc/vmw_balloon.c 		*p = pfn_to_page(b->batch_page[idx].pfn);
p                 760 drivers/misc/vmw_balloon.c 	*p = b->page;
p                 829 drivers/misc/vmw_balloon.c 			       struct page *p)
p                 835 drivers/misc/vmw_balloon.c 					{ .pfn = page_to_pfn(p) };
p                 837 drivers/misc/vmw_balloon.c 		b->page = p;
p                  24 drivers/mmc/core/pwrseq.c 	struct mmc_pwrseq *p;
p                  31 drivers/mmc/core/pwrseq.c 	list_for_each_entry(p, &pwrseq_list, pwrseq_node) {
p                  32 drivers/mmc/core/pwrseq.c 		if (p->dev->of_node == np) {
p                  33 drivers/mmc/core/pwrseq.c 			if (!try_module_get(p->owner))
p                  37 drivers/mmc/core/pwrseq.c 				host->pwrseq = p;
p                  30 drivers/mmc/core/pwrseq_emmc.c #define to_pwrseq_emmc(p) container_of(p, struct mmc_pwrseq_emmc, pwrseq)
p                  32 drivers/mmc/core/pwrseq_sd8787.c #define to_pwrseq_sd8787(p) container_of(p, struct mmc_pwrseq_sd8787, pwrseq)
p                  34 drivers/mmc/core/pwrseq_simple.c #define to_pwrseq_simple(p) container_of(p, struct mmc_pwrseq_simple, pwrseq)
p                 228 drivers/mmc/host/davinci_mmc.c 	u8 *p;
p                 236 drivers/mmc/host/davinci_mmc.c 	p = host->buffer;
p                 248 drivers/mmc/host/davinci_mmc.c 			writel(*((u32 *)p), host->base + DAVINCI_MMCDXR);
p                 249 drivers/mmc/host/davinci_mmc.c 			p = p + 4;
p                 252 drivers/mmc/host/davinci_mmc.c 			iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3));
p                 253 drivers/mmc/host/davinci_mmc.c 			p = p + (n & 3);
p                 257 drivers/mmc/host/davinci_mmc.c 			*((u32 *)p) = readl(host->base + DAVINCI_MMCDRR);
p                 258 drivers/mmc/host/davinci_mmc.c 			p  = p + 4;
p                 261 drivers/mmc/host/davinci_mmc.c 			ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3));
p                 262 drivers/mmc/host/davinci_mmc.c 			p = p + (n & 3);
p                 265 drivers/mmc/host/davinci_mmc.c 	host->buffer = p;
p                 501 drivers/mmc/host/dw_mmc.c 		struct idmac_desc_64addr *p;
p                 507 drivers/mmc/host/dw_mmc.c 		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
p                 508 drivers/mmc/host/dw_mmc.c 								i++, p++) {
p                 509 drivers/mmc/host/dw_mmc.c 			p->des6 = (host->sg_dma +
p                 513 drivers/mmc/host/dw_mmc.c 			p->des7 = (u64)(host->sg_dma +
p                 517 drivers/mmc/host/dw_mmc.c 			p->des0 = 0;
p                 518 drivers/mmc/host/dw_mmc.c 			p->des1 = 0;
p                 519 drivers/mmc/host/dw_mmc.c 			p->des2 = 0;
p                 520 drivers/mmc/host/dw_mmc.c 			p->des3 = 0;
p                 524 drivers/mmc/host/dw_mmc.c 		p->des6 = host->sg_dma & 0xffffffff;
p                 525 drivers/mmc/host/dw_mmc.c 		p->des7 = (u64)host->sg_dma >> 32;
p                 526 drivers/mmc/host/dw_mmc.c 		p->des0 = IDMAC_DES0_ER;
p                 529 drivers/mmc/host/dw_mmc.c 		struct idmac_desc *p;
p                 535 drivers/mmc/host/dw_mmc.c 		for (i = 0, p = host->sg_cpu;
p                 537 drivers/mmc/host/dw_mmc.c 		     i++, p++) {
p                 538 drivers/mmc/host/dw_mmc.c 			p->des3 = cpu_to_le32(host->sg_dma +
p                 540 drivers/mmc/host/dw_mmc.c 			p->des0 = 0;
p                 541 drivers/mmc/host/dw_mmc.c 			p->des1 = 0;
p                 545 drivers/mmc/host/dw_mmc.c 		p->des3 = cpu_to_le32(host->sg_dma);
p                 546 drivers/mmc/host/dw_mmc.c 		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
p                 376 drivers/mmc/host/mvsdio.c 		u16 *p = host->pio_ptr;
p                 379 drivers/mmc/host/mvsdio.c 			readsw(iobase + MVSD_FIFO, p, 16);
p                 380 drivers/mmc/host/mvsdio.c 			p += 16;
p                 391 drivers/mmc/host/mvsdio.c 				put_unaligned(mvsd_read(MVSD_FIFO), p++);
p                 392 drivers/mmc/host/mvsdio.c 				put_unaligned(mvsd_read(MVSD_FIFO), p++);
p                 400 drivers/mmc/host/mvsdio.c 				memcpy(p, ((void *)&val) + 4 - s, s);
p                 416 drivers/mmc/host/mvsdio.c 		host->pio_ptr = p;
p                 422 drivers/mmc/host/mvsdio.c 		u16 *p = host->pio_ptr;
p                 431 drivers/mmc/host/mvsdio.c 			mvsd_write(MVSD_FIFO, get_unaligned(p++));
p                 432 drivers/mmc/host/mvsdio.c 			mvsd_write(MVSD_FIFO, get_unaligned(p++));
p                 439 drivers/mmc/host/mvsdio.c 				memcpy(((void *)&val) + 4 - s, p, s);
p                 453 drivers/mmc/host/mvsdio.c 		host->pio_ptr = p;
p                1606 drivers/mmc/host/omap_hsmmc.c 		struct pinctrl *p = devm_pinctrl_get(host->dev);
p                1607 drivers/mmc/host/omap_hsmmc.c 		if (IS_ERR(p)) {
p                1608 drivers/mmc/host/omap_hsmmc.c 			ret = PTR_ERR(p);
p                1611 drivers/mmc/host/omap_hsmmc.c 		if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
p                1613 drivers/mmc/host/omap_hsmmc.c 			devm_pinctrl_put(p);
p                1618 drivers/mmc/host/omap_hsmmc.c 		if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) {
p                1620 drivers/mmc/host/omap_hsmmc.c 			devm_pinctrl_put(p);
p                1624 drivers/mmc/host/omap_hsmmc.c 		devm_pinctrl_put(p);
p                 478 drivers/mmc/host/s3cmci.c 			u8 *p = (u8 *)host->pio_ptr;
p                 481 drivers/mmc/host/s3cmci.c 				*p++ = data;
p                 132 drivers/mmc/host/sdhci-cadence.c 	struct sdhci_cdns_phy_param *p = priv->phy_params;
p                 142 drivers/mmc/host/sdhci-cadence.c 		p->addr = sdhci_cdns_phy_cfgs[i].addr;
p                 143 drivers/mmc/host/sdhci-cadence.c 		p->data = val;
p                 144 drivers/mmc/host/sdhci-cadence.c 		p++;
p                 306 drivers/mmc/host/sdhci-sprd.c 	u32 *p = sprd_host->phy_delay;
p                 347 drivers/mmc/host/sdhci-sprd.c 		sdhci_writel(host, p[timing], SDHCI_SPRD_REG_32_DLL_DLY);
p                 471 drivers/mmc/host/sdhci-sprd.c 	u32 *p = sprd_host->phy_delay;
p                 488 drivers/mmc/host/sdhci-sprd.c 	sdhci_writel(host, p[MMC_TIMING_MMC_HS400 + 1],
p                 495 drivers/mmc/host/sdhci-sprd.c 	u32 *p = sprd_host->phy_delay;
p                 506 drivers/mmc/host/sdhci-sprd.c 		p[index] = val[0] | (val[1] << 8) | (val[2] << 16) | (val[3] << 24);
p                 477 drivers/mmc/host/sh_mmcif.c 	struct sh_mmcif_plat_data *p = dev->platform_data;
p                 478 drivers/mmc/host/sh_mmcif.c 	bool sup_pclk = p ? p->sup_pclk : false;
p                 598 drivers/mmc/host/sh_mmcif.c static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
p                 612 drivers/mmc/host/sh_mmcif.c 		host->pio_ptr = p;
p                 634 drivers/mmc/host/sh_mmcif.c 	u32 *p = sg_virt(data->sg);
p                 644 drivers/mmc/host/sh_mmcif.c 		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
p                 676 drivers/mmc/host/sh_mmcif.c 	u32 *p = host->pio_ptr;
p                 688 drivers/mmc/host/sh_mmcif.c 		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
p                 690 drivers/mmc/host/sh_mmcif.c 	if (!sh_mmcif_next_block(host, p))
p                 714 drivers/mmc/host/sh_mmcif.c 	u32 *p = sg_virt(data->sg);
p                 724 drivers/mmc/host/sh_mmcif.c 		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
p                 756 drivers/mmc/host/sh_mmcif.c 	u32 *p = host->pio_ptr;
p                 768 drivers/mmc/host/sh_mmcif.c 		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
p                 770 drivers/mmc/host/sh_mmcif.c 	if (!sh_mmcif_next_block(host, p))
p                1251 drivers/mmc/host/usdhi6rol0.c 	u32 *p;
p                1260 drivers/mmc/host/usdhi6rol0.c 		p = host->blk_page + host->offset;
p                1262 drivers/mmc/host/usdhi6rol0.c 		p = usdhi6_sg_map(host);
p                1263 drivers/mmc/host/usdhi6rol0.c 		if (!p) {
p                1269 drivers/mmc/host/usdhi6rol0.c 	for (i = 0; i < data->blksz / 4; i++, p++)
p                1270 drivers/mmc/host/usdhi6rol0.c 		*p = usdhi6_read(host, USDHI6_SD_BUF0);
p                1275 drivers/mmc/host/usdhi6rol0.c 		((u8 *)p)[2 * i] = ((u8 *)&d)[0];
p                1277 drivers/mmc/host/usdhi6rol0.c 			((u8 *)p)[2 * i + 1] = ((u8 *)&d)[1];
p                1291 drivers/mmc/host/usdhi6rol0.c 	u32 *p;
p                1300 drivers/mmc/host/usdhi6rol0.c 		p = host->blk_page + host->offset;
p                1302 drivers/mmc/host/usdhi6rol0.c 		p = usdhi6_sg_map(host);
p                1303 drivers/mmc/host/usdhi6rol0.c 		if (!p) {
p                1309 drivers/mmc/host/usdhi6rol0.c 	for (i = 0; i < data->blksz / 4; i++, p++)
p                1310 drivers/mmc/host/usdhi6rol0.c 		usdhi6_write(host, USDHI6_SD_BUF0, *p);
p                1315 drivers/mmc/host/usdhi6rol0.c 		((u8 *)&d)[0] = ((u8 *)p)[2 * i];
p                1317 drivers/mmc/host/usdhi6rol0.c 			((u8 *)&d)[1] = ((u8 *)p)[2 * i + 1];
p                  59 drivers/mtd/devices/block2mtd.c 	u_long *p;
p                  68 drivers/mtd/devices/block2mtd.c 		for (p=page_address(page); p<max; p++)
p                  69 drivers/mtd/devices/block2mtd.c 			if (*p != -1UL) {
p                1592 drivers/mtd/devices/docg3.c static int flashcontrol_show(struct seq_file *s, void *p)
p                1614 drivers/mtd/devices/docg3.c static int asic_mode_show(struct seq_file *s, void *p)
p                1651 drivers/mtd/devices/docg3.c static int device_id_show(struct seq_file *s, void *p)
p                1665 drivers/mtd/devices/docg3.c static int protection_show(struct seq_file *s, void *p)
p                1525 drivers/mtd/devices/st_spi_fsm.c 	uint8_t *p;
p                1538 drivers/mtd/devices/st_spi_fsm.c 	p = ((uintptr_t)buf & 0x3) ? (uint8_t *)page_buf : buf;
p                1552 drivers/mtd/devices/st_spi_fsm.c 		stfsm_read_fifo(fsm, (uint32_t *)p, size_lb);
p                1556 drivers/mtd/devices/st_spi_fsm.c 		memcpy(p + size_lb, &tmp, size_mop);
p                1588 drivers/mtd/devices/st_spi_fsm.c 	const uint8_t *p;
p                1604 drivers/mtd/devices/st_spi_fsm.c 		p = (uint8_t *)page_buf;
p                1606 drivers/mtd/devices/st_spi_fsm.c 		p = buf;
p                1635 drivers/mtd/devices/st_spi_fsm.c 		stfsm_write_fifo(fsm, (uint32_t *)p, size_lb);
p                1636 drivers/mtd/devices/st_spi_fsm.c 		p += size_lb;
p                1643 drivers/mtd/devices/st_spi_fsm.c 			t[i] = *p++;
p                 812 drivers/mtd/inftlcore.c 	char *p, *pend;
p                 819 drivers/mtd/inftlcore.c 	for (p = buffer; p < pend && !*p; p++)
p                 822 drivers/mtd/inftlcore.c 	if (p < pend) {
p                  65 drivers/mtd/maps/intel_vr_nor.c static void vr_nor_destroy_partitions(struct vr_nor_mtd *p)
p                  67 drivers/mtd/maps/intel_vr_nor.c 	mtd_device_unregister(p->info);
p                  70 drivers/mtd/maps/intel_vr_nor.c static int vr_nor_init_partitions(struct vr_nor_mtd *p)
p                  74 drivers/mtd/maps/intel_vr_nor.c 	return mtd_device_register(p->info, NULL, 0);
p                  77 drivers/mtd/maps/intel_vr_nor.c static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
p                  79 drivers/mtd/maps/intel_vr_nor.c 	map_destroy(p->info);
p                  82 drivers/mtd/maps/intel_vr_nor.c static int vr_nor_mtd_setup(struct vr_nor_mtd *p)
p                  88 drivers/mtd/maps/intel_vr_nor.c 	for (type = probe_types; !p->info && *type; type++)
p                  89 drivers/mtd/maps/intel_vr_nor.c 		p->info = do_map_probe(*type, &p->map);
p                  90 drivers/mtd/maps/intel_vr_nor.c 	if (!p->info)
p                  93 drivers/mtd/maps/intel_vr_nor.c 	p->info->dev.parent = &p->dev->dev;
p                  98 drivers/mtd/maps/intel_vr_nor.c static void vr_nor_destroy_maps(struct vr_nor_mtd *p)
p                 103 drivers/mtd/maps/intel_vr_nor.c 	exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
p                 105 drivers/mtd/maps/intel_vr_nor.c 	writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
p                 108 drivers/mtd/maps/intel_vr_nor.c 	iounmap(p->map.virt);
p                 111 drivers/mtd/maps/intel_vr_nor.c 	iounmap(p->csr_base);
p                 118 drivers/mtd/maps/intel_vr_nor.c static int vr_nor_init_maps(struct vr_nor_mtd *p)
p                 125 drivers/mtd/maps/intel_vr_nor.c 	csr_phys = pci_resource_start(p->dev, EXP_CSR_MBAR);
p                 126 drivers/mtd/maps/intel_vr_nor.c 	csr_len = pci_resource_len(p->dev, EXP_CSR_MBAR);
p                 127 drivers/mtd/maps/intel_vr_nor.c 	win_phys = pci_resource_start(p->dev, EXP_WIN_MBAR);
p                 128 drivers/mtd/maps/intel_vr_nor.c 	win_len = pci_resource_len(p->dev, EXP_WIN_MBAR);
p                 136 drivers/mtd/maps/intel_vr_nor.c 	p->csr_base = ioremap_nocache(csr_phys, csr_len);
p                 137 drivers/mtd/maps/intel_vr_nor.c 	if (!p->csr_base)
p                 140 drivers/mtd/maps/intel_vr_nor.c 	exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
p                 142 drivers/mtd/maps/intel_vr_nor.c 		dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
p                 148 drivers/mtd/maps/intel_vr_nor.c 		dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
p                 151 drivers/mtd/maps/intel_vr_nor.c 	p->map.name = DRV_NAME;
p                 152 drivers/mtd/maps/intel_vr_nor.c 	p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2;
p                 153 drivers/mtd/maps/intel_vr_nor.c 	p->map.phys = win_phys + CS0_START;
p                 154 drivers/mtd/maps/intel_vr_nor.c 	p->map.size = CS0_SIZE;
p                 155 drivers/mtd/maps/intel_vr_nor.c 	p->map.virt = ioremap_nocache(p->map.phys, p->map.size);
p                 156 drivers/mtd/maps/intel_vr_nor.c 	if (!p->map.virt) {
p                 160 drivers/mtd/maps/intel_vr_nor.c 	simple_map_init(&p->map);
p                 164 drivers/mtd/maps/intel_vr_nor.c 	writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
p                 169 drivers/mtd/maps/intel_vr_nor.c 	iounmap(p->csr_base);
p                 180 drivers/mtd/maps/intel_vr_nor.c 	struct vr_nor_mtd *p = pci_get_drvdata(dev);
p                 182 drivers/mtd/maps/intel_vr_nor.c 	vr_nor_destroy_partitions(p);
p                 183 drivers/mtd/maps/intel_vr_nor.c 	vr_nor_destroy_mtd_setup(p);
p                 184 drivers/mtd/maps/intel_vr_nor.c 	vr_nor_destroy_maps(p);
p                 185 drivers/mtd/maps/intel_vr_nor.c 	kfree(p);
p                 192 drivers/mtd/maps/intel_vr_nor.c 	struct vr_nor_mtd *p = NULL;
p                 204 drivers/mtd/maps/intel_vr_nor.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 206 drivers/mtd/maps/intel_vr_nor.c 	if (!p)
p                 209 drivers/mtd/maps/intel_vr_nor.c 	p->dev = dev;
p                 211 drivers/mtd/maps/intel_vr_nor.c 	err = vr_nor_init_maps(p);
p                 215 drivers/mtd/maps/intel_vr_nor.c 	err = vr_nor_mtd_setup(p);
p                 219 drivers/mtd/maps/intel_vr_nor.c 	err = vr_nor_init_partitions(p);
p                 223 drivers/mtd/maps/intel_vr_nor.c 	pci_set_drvdata(dev, p);
p                 228 drivers/mtd/maps/intel_vr_nor.c 	map_destroy(p->info);
p                 232 drivers/mtd/maps/intel_vr_nor.c 	exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
p                 234 drivers/mtd/maps/intel_vr_nor.c 	writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
p                 237 drivers/mtd/maps/intel_vr_nor.c 	iounmap(p->map.virt);
p                 240 drivers/mtd/maps/intel_vr_nor.c 	iounmap(p->csr_base);
p                 243 drivers/mtd/maps/intel_vr_nor.c 	kfree(p);
p                  56 drivers/mtd/maps/physmap-gemini.c 	struct pinctrl *p;
p                  70 drivers/mtd/maps/physmap-gemini.c 	ret = pinctrl_select_state(gf->p, gf->enabled_state);
p                  81 drivers/mtd/maps/physmap-gemini.c 	ret = pinctrl_select_state(gf->p, gf->disabled_state);
p                 177 drivers/mtd/maps/physmap-gemini.c 	gf->p = devm_pinctrl_get(dev);
p                 178 drivers/mtd/maps/physmap-gemini.c 	if (IS_ERR(gf->p)) {
p                 180 drivers/mtd/maps/physmap-gemini.c 		ret = PTR_ERR(gf->p);
p                 184 drivers/mtd/maps/physmap-gemini.c 	gf->enabled_state = pinctrl_lookup_state(gf->p, "enabled");
p                 188 drivers/mtd/maps/physmap-gemini.c 	gf->disabled_state = pinctrl_lookup_state(gf->p, "disabled");
p                 192 drivers/mtd/maps/physmap-gemini.c 		ret = pinctrl_select_state(gf->p, gf->disabled_state);
p                 554 drivers/mtd/mtdchar.c 	struct blkpg_partition p;
p                 559 drivers/mtd/mtdchar.c 	if (copy_from_user(&p, arg->data, sizeof(p)))
p                 570 drivers/mtd/mtdchar.c 		p.devname[BLKPG_DEVNAMELTH - 1] = '\0';
p                 572 drivers/mtd/mtdchar.c 		return mtd_add_partition(mtd, p.devname, p.start, p.length);
p                 576 drivers/mtd/mtdchar.c 		if (p.pno < 0)
p                 579 drivers/mtd/mtdchar.c 		return mtd_del_partition(mtd, p.pno);
p                 338 drivers/mtd/mtdcore.c static int mtd_partid_show(struct seq_file *s, void *p)
p                 359 drivers/mtd/mtdcore.c static int mtd_partname_show(struct seq_file *s, void *p)
p                 314 drivers/mtd/mtdpart.c static inline void free_partition(struct mtd_part *p)
p                 316 drivers/mtd/mtdpart.c 	kfree(p->mtd.name);
p                 317 drivers/mtd/mtdpart.c 	kfree(p);
p                 761 drivers/mtd/mtdpart.c 	struct mtd_part_parser *p, *ret = NULL;
p                 765 drivers/mtd/mtdpart.c 	list_for_each_entry(p, &part_parsers, list)
p                 766 drivers/mtd/mtdpart.c 		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
p                 767 drivers/mtd/mtdpart.c 			ret = p;
p                 776 drivers/mtd/mtdpart.c static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
p                 778 drivers/mtd/mtdpart.c 	module_put(p->owner);
p                 791 drivers/mtd/mtdpart.c int __register_mtd_parser(struct mtd_part_parser *p, struct module *owner)
p                 793 drivers/mtd/mtdpart.c 	p->owner = owner;
p                 795 drivers/mtd/mtdpart.c 	if (!p->cleanup)
p                 796 drivers/mtd/mtdpart.c 		p->cleanup = &mtd_part_parser_cleanup_default;
p                 799 drivers/mtd/mtdpart.c 	list_add(&p->list, &part_parsers);
p                 806 drivers/mtd/mtdpart.c void deregister_mtd_parser(struct mtd_part_parser *p)
p                 809 drivers/mtd/mtdpart.c 	list_del(&p->list);
p                 862 drivers/mtd/mtdpart.c 	struct mtd_part_parser *p, *ret = NULL;
p                 866 drivers/mtd/mtdpart.c 	list_for_each_entry(p, &part_parsers, list) {
p                 869 drivers/mtd/mtdpart.c 		matches = p->of_match_table;
p                 875 drivers/mtd/mtdpart.c 			    try_module_get(p->owner)) {
p                 876 drivers/mtd/mtdpart.c 				ret = p;
p                 205 drivers/mtd/mtdswap.c 	struct rb_node **p, *parent = NULL;
p                 208 drivers/mtd/mtdswap.c 	p = &root->rb_node;
p                 209 drivers/mtd/mtdswap.c 	while (*p) {
p                 210 drivers/mtd/mtdswap.c 		parent = *p;
p                 213 drivers/mtd/mtdswap.c 			p = &(*p)->rb_right;
p                 215 drivers/mtd/mtdswap.c 			p = &(*p)->rb_left;
p                 218 drivers/mtd/mtdswap.c 	rb_link_node(&eb->rb, parent, p);
p                 238 drivers/mtd/mtdswap.c 	struct rb_node *p;
p                 241 drivers/mtd/mtdswap.c 	p = rb_first(root);
p                 243 drivers/mtd/mtdswap.c 	while (i < idx && p) {
p                 244 drivers/mtd/mtdswap.c 		p = rb_next(p);
p                 248 drivers/mtd/mtdswap.c 	return p;
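The mtdswap.c entries walk the rb_node child pointers by address (p = &(*p)->rb_right / &(*p)->rb_left) until an empty slot is found, then link the new node there. A plain, unbalanced binary-search-tree analogue of that pointer-to-pointer linking pattern; it deliberately omits the recolouring/rebalancing that rb_link_node() and the kernel rbtree helpers provide, so it only illustrates the traversal idiom:

    #include <stddef.h>

    struct node {
            long key;
            struct node *left, *right;
    };

    /* Walk child-pointer slots by address until an empty one is found. */
    static void tree_insert(struct node **root, struct node *new)
    {
            struct node **p = root;

            while (*p)
                    p = (new->key < (*p)->key) ? &(*p)->left : &(*p)->right;

            new->left = new->right = NULL;
            *p = new;
    }

Keeping a pointer to the slot rather than to the node avoids a separate "parent plus which-child" bookkeeping step when the insertion point is reached.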
p                  35 drivers/mtd/nand/onenand/onenand_bbt.c 	uint8_t *p = buf;
p                  39 drivers/mtd/nand/onenand/onenand_bbt.c 		if (p[i] != td->pattern[i])
p                 478 drivers/mtd/nand/onenand/samsung.c 	unsigned char *p;
p                 481 drivers/mtd/nand/onenand/samsung.c 		p = onenand->page_buf;
p                 483 drivers/mtd/nand/onenand/samsung.c 			p += this->writesize;
p                 485 drivers/mtd/nand/onenand/samsung.c 		p = onenand->oob_buf;
p                 487 drivers/mtd/nand/onenand/samsung.c 			p += mtd->oobsize;
p                 490 drivers/mtd/nand/onenand/samsung.c 	return p;
p                 497 drivers/mtd/nand/onenand/samsung.c 	unsigned char *p;
p                 499 drivers/mtd/nand/onenand/samsung.c 	p = s3c_get_bufferram(mtd, area);
p                 500 drivers/mtd/nand/onenand/samsung.c 	memcpy(buffer, p + offset, count);
p                 508 drivers/mtd/nand/onenand/samsung.c 	unsigned char *p;
p                 510 drivers/mtd/nand/onenand/samsung.c 	p = s3c_get_bufferram(mtd, area);
p                 511 drivers/mtd/nand/onenand/samsung.c 	memcpy(p + offset, buffer, count);
p                 619 drivers/mtd/nand/onenand/samsung.c 	void __iomem *p;
p                 625 drivers/mtd/nand/onenand/samsung.c 	p = this->base + area;
p                 628 drivers/mtd/nand/onenand/samsung.c 			p += this->writesize;
p                 630 drivers/mtd/nand/onenand/samsung.c 			p += mtd->oobsize;
p                 653 drivers/mtd/nand/onenand/samsung.c 		dma_src = onenand->phys_base + (p - this->base);
p                 657 drivers/mtd/nand/onenand/samsung.c 		dma_src = onenand->phys_base + (p - this->base);
p                 678 drivers/mtd/nand/onenand/samsung.c 		memcpy(this->page_buf, p, mtd->writesize);
p                 679 drivers/mtd/nand/onenand/samsung.c 		p = this->page_buf + offset;
p                 682 drivers/mtd/nand/onenand/samsung.c 	memcpy(buffer, p, count);
p                 125 drivers/mtd/nand/raw/au1550nd.c 	u16 *p = (u16 *) buf;
p                 129 drivers/mtd/nand/raw/au1550nd.c 		writew(p[i], this->legacy.IO_ADDR_W);
p                 146 drivers/mtd/nand/raw/au1550nd.c 	u16 *p = (u16 *) buf;
p                 150 drivers/mtd/nand/raw/au1550nd.c 		p[i] = readw(this->legacy.IO_ADDR_R);
p                1117 drivers/mtd/nand/raw/brcmnand/brcmnand.c 	struct brcmnand_cfg *p = &host->hwcfg;
p                1120 drivers/mtd/nand/raw/brcmnand/brcmnand.c 	unsigned int ecc_level = p->ecc_level;
p                1121 drivers/mtd/nand/raw/brcmnand/brcmnand.c 	int sas = p->spare_area_size << p->sector_size_1k;
p                1122 drivers/mtd/nand/raw/brcmnand/brcmnand.c 	int sectors = p->page_size / (512 << p->sector_size_1k);
p                1124 drivers/mtd/nand/raw/brcmnand/brcmnand.c 	if (p->sector_size_1k)
p                1127 drivers/mtd/nand/raw/brcmnand/brcmnand.c 	if (is_hamming_ecc(host->ctrl, p)) {
p                1140 drivers/mtd/nand/raw/brcmnand/brcmnand.c 	if (p->page_size == 512)
p                 401 drivers/mtd/nand/raw/cafe_nand.c 			int p = pos[i];
p                 405 drivers/mtd/nand/raw/cafe_nand.c 			if (p > 1374) {
p                 408 drivers/mtd/nand/raw/cafe_nand.c 			} else if (p == 0) {
p                 414 drivers/mtd/nand/raw/cafe_nand.c 			} else if (p == 1365) {
p                 417 drivers/mtd/nand/raw/cafe_nand.c 			} else if (p > 1365) {
p                 418 drivers/mtd/nand/raw/cafe_nand.c 				if ((p & 1) == 1) {
p                 419 drivers/mtd/nand/raw/cafe_nand.c 					oob[3*p/2 - 2048] ^= pat[i] >> 4;
p                 420 drivers/mtd/nand/raw/cafe_nand.c 					oob[3*p/2 - 2047] ^= pat[i] << 4;
p                 422 drivers/mtd/nand/raw/cafe_nand.c 					oob[3*p/2 - 2049] ^= pat[i] >> 8;
p                 423 drivers/mtd/nand/raw/cafe_nand.c 					oob[3*p/2 - 2048] ^= pat[i];
p                 425 drivers/mtd/nand/raw/cafe_nand.c 			} else if ((p & 1) == 1) {
p                 426 drivers/mtd/nand/raw/cafe_nand.c 				buf[3*p/2] ^= pat[i] >> 4;
p                 427 drivers/mtd/nand/raw/cafe_nand.c 				buf[3*p/2 + 1] ^= pat[i] << 4;
p                 429 drivers/mtd/nand/raw/cafe_nand.c 				buf[3*p/2 - 1] ^= pat[i] >> 8;
p                 430 drivers/mtd/nand/raw/cafe_nand.c 				buf[3*p/2] ^= pat[i];
p                 258 drivers/mtd/nand/raw/davinci_nand.c 	u32 raw_ecc[4], *p;
p                 277 drivers/mtd/nand/raw/davinci_nand.c 	for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
p                 278 drivers/mtd/nand/raw/davinci_nand.c 		*ecc_code++ =   p[0]        & 0xff;
p                 279 drivers/mtd/nand/raw/davinci_nand.c 		*ecc_code++ = ((p[0] >>  8) & 0x03) | ((p[0] >> 14) & 0xfc);
p                 280 drivers/mtd/nand/raw/davinci_nand.c 		*ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] <<  4) & 0xf0);
p                 281 drivers/mtd/nand/raw/davinci_nand.c 		*ecc_code++ = ((p[1] >>  4) & 0x3f) | ((p[1] >> 10) & 0xc0);
p                 282 drivers/mtd/nand/raw/davinci_nand.c 		*ecc_code++ =  (p[1] >> 18) & 0xff;
p                 538 drivers/mtd/nand/raw/fsmc_nand.c 		u32 *p = (u32 *)buf;
p                 542 drivers/mtd/nand/raw/fsmc_nand.c 			writel_relaxed(p[i], host->data_va);
p                 561 drivers/mtd/nand/raw/fsmc_nand.c 		u32 *p = (u32 *)buf;
p                 565 drivers/mtd/nand/raw/fsmc_nand.c 			p[i] = readl_relaxed(host->data_va);
p                 678 drivers/mtd/nand/raw/fsmc_nand.c 	u8 *p = buf;
p                 691 drivers/mtd/nand/raw/fsmc_nand.c 	for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
p                 694 drivers/mtd/nand/raw/fsmc_nand.c 		ret = nand_read_data_op(chip, p, eccsize, false);
p                 721 drivers/mtd/nand/raw/fsmc_nand.c 		chip->ecc.calculate(chip, p, &ecc_calc[i]);
p                 723 drivers/mtd/nand/raw/fsmc_nand.c 		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
p                1100 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	void __iomem *p;
p                1103 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	p = devm_ioremap_resource(&pdev->dev, r);
p                1104 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	if (IS_ERR(p))
p                1105 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 		return PTR_ERR(p);
p                1108 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 		res->gpmi_regs = p;
p                1110 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 		res->bch_regs = p;
p                1296 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	unsigned char *p;
p                1311 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	p   = payload + nfc_geo->block_mark_byte_offset;
p                1320 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	from_data = (p[0] >> bit) | (p[1] << (8 - bit));
p                1329 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	p[0] = (p[0] & mask) | (from_oob << bit);
p                1332 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c 	p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
p                  51 drivers/mtd/nand/raw/internals.h 				      struct nand_onfi_params *p);
p                 143 drivers/mtd/nand/raw/internals.h u16 onfi_crc16(u16 crc, u8 const *p, size_t len);
p                 425 drivers/mtd/nand/raw/mtk_ecc.c void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p)
p                 431 drivers/mtd/nand/raw/mtk_ecc.c 		if (*p <= ecc_strength[i]) {
p                 433 drivers/mtd/nand/raw/mtk_ecc.c 				*p = ecc_strength[i];
p                 434 drivers/mtd/nand/raw/mtk_ecc.c 			else if (*p != ecc_strength[i])
p                 435 drivers/mtd/nand/raw/mtk_ecc.c 				*p = ecc_strength[i - 1];
p                 440 drivers/mtd/nand/raw/mtk_ecc.c 	*p = ecc_strength[ecc->caps->num_ecc_strength - 1];
p                  41 drivers/mtd/nand/raw/mtk_ecc.h void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p);
p                 184 drivers/mtd/nand/raw/mtk_nand.c static inline u8 *data_ptr(struct nand_chip *chip, const u8 *p, int i)
p                 186 drivers/mtd/nand/raw/mtk_nand.c 	return (u8 *)p + i * chip->ecc.size;
p                1008 drivers/mtd/nand/raw/mtk_nand.c 				      u32 len, u8 *p, int pg)
p                1010 drivers/mtd/nand/raw/mtk_nand.c 	return mtk_nfc_read_subpage(nand_to_mtd(chip), chip, off, len, p, pg,
p                1014 drivers/mtd/nand/raw/mtk_nand.c static int mtk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *p, int oob_on,
p                1019 drivers/mtd/nand/raw/mtk_nand.c 	return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
p                1150 drivers/mtd/nand/raw/nand_base.c 	u8 *p = buf;
p                1176 drivers/mtd/nand/raw/nand_base.c 		p[i] = chip->legacy.read_byte(chip);
p                1891 drivers/mtd/nand/raw/nand_base.c 		u8 *p = buf;
p                1895 drivers/mtd/nand/raw/nand_base.c 			p[i] = chip->legacy.read_byte(chip);
p                1935 drivers/mtd/nand/raw/nand_base.c 		const u8 *p = buf;
p                1939 drivers/mtd/nand/raw/nand_base.c 			chip->legacy.write_byte(chip, p[i]);
p                2705 drivers/mtd/nand/raw/nand_base.c 	uint8_t *p = buf;
p                2712 drivers/mtd/nand/raw/nand_base.c 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
p                2713 drivers/mtd/nand/raw/nand_base.c 		chip->ecc.calculate(chip, p, &ecc_calc[i]);
p                2721 drivers/mtd/nand/raw/nand_base.c 	p = buf;
p                2723 drivers/mtd/nand/raw/nand_base.c 	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
p                2726 drivers/mtd/nand/raw/nand_base.c 		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
p                2750 drivers/mtd/nand/raw/nand_base.c 	uint8_t *p;
p                2770 drivers/mtd/nand/raw/nand_base.c 	p = bufpoi + data_col_addr;
p                2771 drivers/mtd/nand/raw/nand_base.c 	ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
p                2776 drivers/mtd/nand/raw/nand_base.c 	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
p                2777 drivers/mtd/nand/raw/nand_base.c 		chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);
p                2822 drivers/mtd/nand/raw/nand_base.c 	p = bufpoi + data_col_addr;
p                2823 drivers/mtd/nand/raw/nand_base.c 	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
p                2826 drivers/mtd/nand/raw/nand_base.c 		stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
p                2831 drivers/mtd/nand/raw/nand_base.c 			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
p                2864 drivers/mtd/nand/raw/nand_base.c 	uint8_t *p = buf;
p                2873 drivers/mtd/nand/raw/nand_base.c 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
p                2876 drivers/mtd/nand/raw/nand_base.c 		ret = nand_read_data_op(chip, p, eccsize, false);
p                2880 drivers/mtd/nand/raw/nand_base.c 		chip->ecc.calculate(chip, p, &ecc_calc[i]);
p                2893 drivers/mtd/nand/raw/nand_base.c 	p = buf;
p                2895 drivers/mtd/nand/raw/nand_base.c 	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
p                2898 drivers/mtd/nand/raw/nand_base.c 		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
p                2902 drivers/mtd/nand/raw/nand_base.c 			stat = nand_check_erased_ecc_chunk(p, eccsize,
p                2938 drivers/mtd/nand/raw/nand_base.c 	uint8_t *p = buf;
p                2957 drivers/mtd/nand/raw/nand_base.c 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
p                2962 drivers/mtd/nand/raw/nand_base.c 		ret = nand_read_data_op(chip, p, eccsize, false);
p                2966 drivers/mtd/nand/raw/nand_base.c 		chip->ecc.calculate(chip, p, &ecc_calc[i]);
p                2968 drivers/mtd/nand/raw/nand_base.c 		stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
p                2972 drivers/mtd/nand/raw/nand_base.c 			stat = nand_check_erased_ecc_chunk(p, eccsize,
p                3006 drivers/mtd/nand/raw/nand_base.c 	uint8_t *p = buf;
p                3014 drivers/mtd/nand/raw/nand_base.c 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
p                3019 drivers/mtd/nand/raw/nand_base.c 		ret = nand_read_data_op(chip, p, eccsize, false);
p                3038 drivers/mtd/nand/raw/nand_base.c 		stat = chip->ecc.correct(chip, p, oob, NULL);
p                3054 drivers/mtd/nand/raw/nand_base.c 			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
p                3710 drivers/mtd/nand/raw/nand_base.c 	const uint8_t *p = buf;
p                3713 drivers/mtd/nand/raw/nand_base.c 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
p                3714 drivers/mtd/nand/raw/nand_base.c 		chip->ecc.calculate(chip, p, &ecc_calc[i]);
p                3739 drivers/mtd/nand/raw/nand_base.c 	const uint8_t *p = buf;
p                3745 drivers/mtd/nand/raw/nand_base.c 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
p                3748 drivers/mtd/nand/raw/nand_base.c 		ret = nand_write_data_op(chip, p, eccsize, false);
p                3752 drivers/mtd/nand/raw/nand_base.c 		chip->ecc.calculate(chip, p, &ecc_calc[i]);
p                3855 drivers/mtd/nand/raw/nand_base.c 	const uint8_t *p = buf;
p                3863 drivers/mtd/nand/raw/nand_base.c 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
p                3866 drivers/mtd/nand/raw/nand_base.c 		ret = nand_write_data_op(chip, p, eccsize, false);
p                3879 drivers/mtd/nand/raw/nand_base.c 		chip->ecc.calculate(chip, p, oob);
p                  26 drivers/mtd/nand/raw/nand_jedec.c 	struct nand_jedec_params *p;
p                  40 drivers/mtd/nand/raw/nand_jedec.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                  41 drivers/mtd/nand/raw/nand_jedec.c 	if (!p)
p                  51 drivers/mtd/nand/raw/nand_jedec.c 		ret = nand_read_data_op(chip, p, sizeof(*p), true);
p                  57 drivers/mtd/nand/raw/nand_jedec.c 		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
p                  58 drivers/mtd/nand/raw/nand_jedec.c 				le16_to_cpu(p->crc))
p                  68 drivers/mtd/nand/raw/nand_jedec.c 	val = le16_to_cpu(p->revision);
p                  79 drivers/mtd/nand/raw/nand_jedec.c 	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
p                  80 drivers/mtd/nand/raw/nand_jedec.c 	sanitize_string(p->model, sizeof(p->model));
p                  81 drivers/mtd/nand/raw/nand_jedec.c 	chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
p                  87 drivers/mtd/nand/raw/nand_jedec.c 	memorg->pagesize = le32_to_cpu(p->byte_per_page);
p                  92 drivers/mtd/nand/raw/nand_jedec.c 			1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
p                  95 drivers/mtd/nand/raw/nand_jedec.c 	memorg->oobsize = le16_to_cpu(p->spare_bytes_per_page);
p                  98 drivers/mtd/nand/raw/nand_jedec.c 	memorg->luns_per_target = p->lun_count;
p                  99 drivers/mtd/nand/raw/nand_jedec.c 	memorg->planes_per_lun = 1 << p->multi_plane_addr;
p                 103 drivers/mtd/nand/raw/nand_jedec.c 		1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
p                 104 drivers/mtd/nand/raw/nand_jedec.c 	memorg->bits_per_cell = p->bits_per_cell;
p                 106 drivers/mtd/nand/raw/nand_jedec.c 	if (le16_to_cpu(p->features) & JEDEC_FEATURE_16_BIT_BUS)
p                 110 drivers/mtd/nand/raw/nand_jedec.c 	ecc = &p->ecc_info[0];
p                 122 drivers/mtd/nand/raw/nand_jedec.c 	kfree(p);
p                 146 drivers/mtd/nand/raw/nand_legacy.c 	u16 *p = (u16 *) buf;
p                 148 drivers/mtd/nand/raw/nand_legacy.c 	iowrite16_rep(chip->legacy.IO_ADDR_W, p, len >> 1);
p                 161 drivers/mtd/nand/raw/nand_legacy.c 	u16 *p = (u16 *) buf;
p                 163 drivers/mtd/nand/raw/nand_legacy.c 	ioread16_rep(chip->legacy.IO_ADDR_R, p, len >> 1);
p                  34 drivers/mtd/nand/raw/nand_macronix.c 	struct nand_parameters *p = &chip->parameters;
p                  37 drivers/mtd/nand/raw/nand_macronix.c 	if (!p->onfi)
p                  40 drivers/mtd/nand/raw/nand_macronix.c 	mxic = (struct nand_onfi_vendor_macronix *)p->onfi->vendor;
p                  47 drivers/mtd/nand/raw/nand_macronix.c 	if (p->supports_set_get_features) {
p                  48 drivers/mtd/nand/raw/nand_macronix.c 		bitmap_set(p->set_feature_list,
p                  50 drivers/mtd/nand/raw/nand_macronix.c 		bitmap_set(p->get_feature_list,
p                  81 drivers/mtd/nand/raw/nand_micron.c 	struct nand_parameters *p = &chip->parameters;
p                  83 drivers/mtd/nand/raw/nand_micron.c 	if (p->onfi) {
p                  84 drivers/mtd/nand/raw/nand_micron.c 		struct nand_onfi_vendor_micron *micron = (void *)p->onfi->vendor;
p                  90 drivers/mtd/nand/raw/nand_micron.c 	if (p->supports_set_get_features) {
p                  91 drivers/mtd/nand/raw/nand_micron.c 		set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->set_feature_list);
p                  92 drivers/mtd/nand/raw/nand_micron.c 		set_bit(ONFI_FEATURE_ON_DIE_ECC, p->set_feature_list);
p                  93 drivers/mtd/nand/raw/nand_micron.c 		set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->get_feature_list);
p                  94 drivers/mtd/nand/raw/nand_micron.c 		set_bit(ONFI_FEATURE_ON_DIE_ECC, p->get_feature_list);
p                 534 drivers/mtd/nand/raw/nand_micron.c 					 struct nand_onfi_params *p)
p                 541 drivers/mtd/nand/raw/nand_micron.c 	if (le16_to_cpu(p->revision) == 0)
p                 542 drivers/mtd/nand/raw/nand_micron.c 		p->revision = cpu_to_le16(ONFI_VERSION_1_0);
p                  19 drivers/mtd/nand/raw/nand_onfi.c u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
p                  23 drivers/mtd/nand/raw/nand_onfi.c 		crc ^= *p++ << 8;
p                  33 drivers/mtd/nand/raw/nand_onfi.c 					    struct nand_onfi_params *p)
p                  43 drivers/mtd/nand/raw/nand_onfi.c 	len = le16_to_cpu(p->ext_param_page_length) * 16;
p                  55 drivers/mtd/nand/raw/nand_onfi.c 					 sizeof(*p) * p->num_of_param_pages,
p                 144 drivers/mtd/nand/raw/nand_onfi.c 	struct nand_onfi_params *p;
p                 158 drivers/mtd/nand/raw/nand_onfi.c 	p = kzalloc((sizeof(*p) * 3), GFP_KERNEL);
p                 159 drivers/mtd/nand/raw/nand_onfi.c 	if (!p)
p                 169 drivers/mtd/nand/raw/nand_onfi.c 		ret = nand_read_data_op(chip, &p[i], sizeof(*p), true);
p                 175 drivers/mtd/nand/raw/nand_onfi.c 		if (onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254) ==
p                 176 drivers/mtd/nand/raw/nand_onfi.c 				le16_to_cpu(p->crc)) {
p                 178 drivers/mtd/nand/raw/nand_onfi.c 				memcpy(p, &p[i], sizeof(*p));
p                 184 drivers/mtd/nand/raw/nand_onfi.c 		const void *srcbufs[3] = {p, p + 1, p + 2};
p                 187 drivers/mtd/nand/raw/nand_onfi.c 		nand_bit_wise_majority(srcbufs, ARRAY_SIZE(srcbufs), p,
p                 188 drivers/mtd/nand/raw/nand_onfi.c 				       sizeof(*p));
p                 190 drivers/mtd/nand/raw/nand_onfi.c 		if (onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 254) !=
p                 191 drivers/mtd/nand/raw/nand_onfi.c 				le16_to_cpu(p->crc)) {
p                 199 drivers/mtd/nand/raw/nand_onfi.c 		chip->manufacturer.desc->ops->fixup_onfi_param_page(chip, p);
p                 202 drivers/mtd/nand/raw/nand_onfi.c 	val = le16_to_cpu(p->revision);
p                 219 drivers/mtd/nand/raw/nand_onfi.c 	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
p                 220 drivers/mtd/nand/raw/nand_onfi.c 	sanitize_string(p->model, sizeof(p->model));
p                 221 drivers/mtd/nand/raw/nand_onfi.c 	chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
p                 227 drivers/mtd/nand/raw/nand_onfi.c 	memorg->pagesize = le32_to_cpu(p->byte_per_page);
p                 236 drivers/mtd/nand/raw/nand_onfi.c 			1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
p                 239 drivers/mtd/nand/raw/nand_onfi.c 	memorg->oobsize = le16_to_cpu(p->spare_bytes_per_page);
p                 242 drivers/mtd/nand/raw/nand_onfi.c 	memorg->luns_per_target = p->lun_count;
p                 243 drivers/mtd/nand/raw/nand_onfi.c 	memorg->planes_per_lun = 1 << p->interleaved_bits;
p                 247 drivers/mtd/nand/raw/nand_onfi.c 		1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
p                 248 drivers/mtd/nand/raw/nand_onfi.c 	memorg->max_bad_eraseblocks_per_lun = le32_to_cpu(p->blocks_per_lun);
p                 249 drivers/mtd/nand/raw/nand_onfi.c 	memorg->bits_per_cell = p->bits_per_cell;
p                 251 drivers/mtd/nand/raw/nand_onfi.c 	if (le16_to_cpu(p->features) & ONFI_FEATURE_16_BIT_BUS)
p                 254 drivers/mtd/nand/raw/nand_onfi.c 	if (p->ecc_bits != 0xff) {
p                 255 drivers/mtd/nand/raw/nand_onfi.c 		chip->base.eccreq.strength = p->ecc_bits;
p                 258 drivers/mtd/nand/raw/nand_onfi.c 		(le16_to_cpu(p->features) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
p                 270 drivers/mtd/nand/raw/nand_onfi.c 		if (nand_flash_detect_ext_param_page(chip, p))
p                 277 drivers/mtd/nand/raw/nand_onfi.c 	if (le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_SET_GET_FEATURES) {
p                 292 drivers/mtd/nand/raw/nand_onfi.c 	onfi->tPROG = le16_to_cpu(p->t_prog);
p                 293 drivers/mtd/nand/raw/nand_onfi.c 	onfi->tBERS = le16_to_cpu(p->t_bers);
p                 294 drivers/mtd/nand/raw/nand_onfi.c 	onfi->tR = le16_to_cpu(p->t_r);
p                 295 drivers/mtd/nand/raw/nand_onfi.c 	onfi->tCCS = le16_to_cpu(p->t_ccs);
p                 296 drivers/mtd/nand/raw/nand_onfi.c 	onfi->async_timing_mode = le16_to_cpu(p->async_timing_mode);
p                 297 drivers/mtd/nand/raw/nand_onfi.c 	onfi->vendor_revision = le16_to_cpu(p->vendor_revision);
p                 298 drivers/mtd/nand/raw/nand_onfi.c 	memcpy(onfi->vendor, p->vendor, sizeof(p->vendor));
p                 302 drivers/mtd/nand/raw/nand_onfi.c 	kfree(p);
p                 309 drivers/mtd/nand/raw/nand_onfi.c 	kfree(p);
p                  92 drivers/mtd/nand/raw/ndfc.c 	uint8_t *p = (uint8_t *)&ecc;
p                  97 drivers/mtd/nand/raw/ndfc.c 	ecc_code[0] = p[1];
p                  98 drivers/mtd/nand/raw/ndfc.c 	ecc_code[1] = p[2];
p                  99 drivers/mtd/nand/raw/ndfc.c 	ecc_code[2] = p[3];
p                 114 drivers/mtd/nand/raw/ndfc.c 	uint32_t *p = (uint32_t *) buf;
p                 117 drivers/mtd/nand/raw/ndfc.c 		*p++ = in_be32(ndfc->ndfcbase + NDFC_DATA);
p                 123 drivers/mtd/nand/raw/ndfc.c 	uint32_t *p = (uint32_t *) buf;
p                 126 drivers/mtd/nand/raw/ndfc.c 		out_be32(ndfc->ndfcbase + NDFC_DATA, *p++);
p                 287 drivers/mtd/nand/raw/omap2.c 	u_char *p = (u_char *)buf;
p                 291 drivers/mtd/nand/raw/omap2.c 		iowrite8(*p++, info->nand.legacy.IO_ADDR_W);
p                 321 drivers/mtd/nand/raw/omap2.c 	u16 *p = (u16 *) buf;
p                 327 drivers/mtd/nand/raw/omap2.c 		iowrite16(*p++, info->nand.legacy.IO_ADDR_W);
p                 347 drivers/mtd/nand/raw/omap2.c 	u32 *p = (u32 *)buf;
p                 355 drivers/mtd/nand/raw/omap2.c 		p = (u32 *) (buf + len % 4);
p                 365 drivers/mtd/nand/raw/omap2.c 			omap_read_buf16(mtd, (u_char *)p, len);
p                 367 drivers/mtd/nand/raw/omap2.c 			omap_read_buf8(mtd, (u_char *)p, len);
p                 373 drivers/mtd/nand/raw/omap2.c 			ioread32_rep(info->nand.legacy.IO_ADDR_R, p, r_count);
p                 374 drivers/mtd/nand/raw/omap2.c 			p += r_count;
p                 395 drivers/mtd/nand/raw/omap2.c 	u16 *p = (u16 *)buf;
p                 402 drivers/mtd/nand/raw/omap2.c 		p = (u16 *)(buf + 1);
p                 412 drivers/mtd/nand/raw/omap2.c 			omap_write_buf16(mtd, (u_char *)p, len);
p                 414 drivers/mtd/nand/raw/omap2.c 			omap_write_buf8(mtd, (u_char *)p, len);
p                 421 drivers/mtd/nand/raw/omap2.c 				iowrite16(*p++, info->nand.legacy.IO_ADDR_W);
p                 737 drivers/mtd/nand/raw/stm32_fmc2_nand.c 	u8 *p = buf;
p                 747 drivers/mtd/nand/raw/stm32_fmc2_nand.c 	     s++, i += eccbytes, p += eccsize) {
p                 751 drivers/mtd/nand/raw/stm32_fmc2_nand.c 		ret = nand_change_read_column_op(chip, s * eccsize, p,
p                 763 drivers/mtd/nand/raw/stm32_fmc2_nand.c 		stat = chip->ecc.correct(chip, p, ecc_code, ecc_calc);
p                 766 drivers/mtd/nand/raw/stm32_fmc2_nand.c 			stat = nand_check_erased_ecc_chunk(p, eccsize,
p                 905 drivers/mtd/nand/raw/stm32_fmc2_nand.c 	const u8 *p = buf;
p                 916 drivers/mtd/nand/raw/stm32_fmc2_nand.c 		sg_set_buf(sg, p, eccsize);
p                 917 drivers/mtd/nand/raw/stm32_fmc2_nand.c 		p += eccsize;
p                 945 drivers/mtd/nand/raw/stm32_fmc2_nand.c 		p = fmc2->ecc_buf;
p                 947 drivers/mtd/nand/raw/stm32_fmc2_nand.c 			sg_set_buf(sg, p, fmc2->dma_ecc_len);
p                 948 drivers/mtd/nand/raw/stm32_fmc2_nand.c 			p += fmc2->dma_ecc_len;
p                1376 drivers/mtd/nand/raw/sunxi_nand.c #define sunxi_nand_lookup_timing(l, p, c) \
p                1377 drivers/mtd/nand/raw/sunxi_nand.c 			_sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
p                  53 drivers/mtd/parsers/afs.c 	u32 *p = words;
p                  57 drivers/mtd/parsers/afs.c 		sum += *p++;
p                  62 drivers/mtd/parsers/afs.c static u32 word_sum_v2(u32 *p, u32 num)
p                  70 drivers/mtd/parsers/afs.c 		val = p[i];
p                 125 drivers/mtd/parsers/cmdlinepart.c 		char *p;
p                 128 drivers/mtd/parsers/cmdlinepart.c 		p = strchr(name, delim);
p                 129 drivers/mtd/parsers/cmdlinepart.c 		if (!p) {
p                 133 drivers/mtd/parsers/cmdlinepart.c 		name_len = p - name;
p                 134 drivers/mtd/parsers/cmdlinepart.c 		s = p + 1;
p                 221 drivers/mtd/parsers/cmdlinepart.c 		char *p, *mtd_id;
p                 226 drivers/mtd/parsers/cmdlinepart.c 		p = strchr(s, ':');
p                 227 drivers/mtd/parsers/cmdlinepart.c 		if (!p) {
p                 231 drivers/mtd/parsers/cmdlinepart.c 		mtd_id_len = p - mtd_id;
p                 233 drivers/mtd/parsers/cmdlinepart.c 		dbg(("parsing <%s>\n", p+1));
p                 239 drivers/mtd/parsers/cmdlinepart.c 		parts = newpart(p + 1,		/* cmdline */
p                  52 drivers/mtd/spi-nor/spi-nor.c #define SFDP_PARAM_HEADER_ID(p)	(((p)->id_msb << 8) | (p)->id_lsb)
p                  53 drivers/mtd/spi-nor/spi-nor.c #define SFDP_PARAM_HEADER_PTP(p) \
p                  54 drivers/mtd/spi-nor/spi-nor.c 	(((p)->parameter_table_pointer[2] << 16) | \
p                  55 drivers/mtd/spi-nor/spi-nor.c 	 ((p)->parameter_table_pointer[1] <<  8) | \
p                  56 drivers/mtd/spi-nor/spi-nor.c 	 ((p)->parameter_table_pointer[0] <<  0))
p                  88 drivers/mtd/tests/readtest.c 		char *p = line;
p                  90 drivers/mtd/tests/readtest.c 		p += sprintf(p, "%05x: ", i);
p                  92 drivers/mtd/tests/readtest.c 			p += sprintf(p, "%02x", (unsigned int)iobuf[i]);
p                 102 drivers/mtd/tests/readtest.c 			char *p = line;
p                 104 drivers/mtd/tests/readtest.c 			p += sprintf(p, "%05x: ", i);
p                 106 drivers/mtd/tests/readtest.c 				p += sprintf(p, "%02x",
p                 106 drivers/mtd/tests/subpagetest.c static void print_subpage(unsigned char *p)
p                 112 drivers/mtd/tests/subpagetest.c 			printk("%02x", *p++);
p                 108 drivers/mtd/ubi/attach.c 	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
p                 111 drivers/mtd/ubi/attach.c 	while (*p) {
p                 112 drivers/mtd/ubi/attach.c 		parent = *p;
p                 125 drivers/mtd/ubi/attach.c 			p = &(*p)->rb_left;
p                 127 drivers/mtd/ubi/attach.c 			p = &(*p)->rb_right;
p                 143 drivers/mtd/ubi/attach.c 	rb_link_node(&av->rb, parent, p);
p                 570 drivers/mtd/ubi/attach.c 	struct rb_node **p, *parent = NULL;
p                 590 drivers/mtd/ubi/attach.c 	p = &av->root.rb_node;
p                 591 drivers/mtd/ubi/attach.c 	while (*p) {
p                 594 drivers/mtd/ubi/attach.c 		parent = *p;
p                 598 drivers/mtd/ubi/attach.c 				p = &(*p)->rb_left;
p                 600 drivers/mtd/ubi/attach.c 				p = &(*p)->rb_right;
p                 704 drivers/mtd/ubi/attach.c 	rb_link_node(&aeb->u.rb, parent, p);
p                 621 drivers/mtd/ubi/block.c 	struct ubiblock_param *p;
p                 632 drivers/mtd/ubi/block.c 		p = &ubiblock_param[i];
p                 634 drivers/mtd/ubi/block.c 		desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
p                 638 drivers/mtd/ubi/block.c 			       p->ubi_num, p->vol_id, PTR_ERR(desc));
p                 649 drivers/mtd/ubi/block.c 			       vi.name, p->ubi_num, p->vol_id, ret);
p                1211 drivers/mtd/ubi/build.c 		struct mtd_dev_param *p = &mtd_dev_param[i];
p                1216 drivers/mtd/ubi/build.c 		mtd = open_mtd_device(p->name);
p                1220 drivers/mtd/ubi/build.c 			       p->name, err);
p                1228 drivers/mtd/ubi/build.c 		err = ubi_attach_mtd_dev(mtd, p->ubi_num,
p                1229 drivers/mtd/ubi/build.c 					 p->vid_hdr_offs, p->max_beb_per1024);
p                1353 drivers/mtd/ubi/build.c 	struct mtd_dev_param *p;
p                1393 drivers/mtd/ubi/build.c 	p = &mtd_dev_param[mtd_devs];
p                1394 drivers/mtd/ubi/build.c 	strcpy(&p->name[0], tokens[0]);
p                1398 drivers/mtd/ubi/build.c 		p->vid_hdr_offs = bytes_str_to_int(token);
p                1400 drivers/mtd/ubi/build.c 		if (p->vid_hdr_offs < 0)
p                1401 drivers/mtd/ubi/build.c 			return p->vid_hdr_offs;
p                1406 drivers/mtd/ubi/build.c 		int err = kstrtoint(token, 10, &p->max_beb_per1024);
p                1417 drivers/mtd/ubi/build.c 		int err = kstrtoint(token, 10, &p->ubi_num);
p                1425 drivers/mtd/ubi/build.c 		p->ubi_num = UBI_DEV_NUM_AUTO;
p                 214 drivers/mtd/ubi/eba.c 	struct rb_node *p;
p                 216 drivers/mtd/ubi/eba.c 	p = ubi->ltree.rb_node;
p                 217 drivers/mtd/ubi/eba.c 	while (p) {
p                 220 drivers/mtd/ubi/eba.c 		le = rb_entry(p, struct ubi_ltree_entry, rb);
p                 223 drivers/mtd/ubi/eba.c 			p = p->rb_left;
p                 225 drivers/mtd/ubi/eba.c 			p = p->rb_right;
p                 228 drivers/mtd/ubi/eba.c 				p = p->rb_left;
p                 230 drivers/mtd/ubi/eba.c 				p = p->rb_right;
p                 275 drivers/mtd/ubi/eba.c 		struct rb_node **p, *parent = NULL;
p                 283 drivers/mtd/ubi/eba.c 		p = &ubi->ltree.rb_node;
p                 284 drivers/mtd/ubi/eba.c 		while (*p) {
p                 285 drivers/mtd/ubi/eba.c 			parent = *p;
p                 289 drivers/mtd/ubi/eba.c 				p = &(*p)->rb_left;
p                 291 drivers/mtd/ubi/eba.c 				p = &(*p)->rb_right;
p                 295 drivers/mtd/ubi/eba.c 					p = &(*p)->rb_left;
p                 297 drivers/mtd/ubi/eba.c 					p = &(*p)->rb_right;
p                 301 drivers/mtd/ubi/eba.c 		rb_link_node(&le->rb, parent, p);
p                  28 drivers/mtd/ubi/fastmap-wl.c 	struct rb_node *p;
p                  32 drivers/mtd/ubi/fastmap-wl.c 	ubi_rb_for_each_entry(p, e, root, u.rb) {
p                  62 drivers/mtd/ubi/fastmap-wl.c 	struct rb_node *p;
p                  65 drivers/mtd/ubi/fastmap-wl.c 	ubi_rb_for_each_entry(p, e, root, u.rb)
p                 208 drivers/mtd/ubi/fastmap.c 	struct rb_node **p = &av->root.rb_node, *parent = NULL;
p                 210 drivers/mtd/ubi/fastmap.c 	while (*p) {
p                 211 drivers/mtd/ubi/fastmap.c 		parent = *p;
p                 216 drivers/mtd/ubi/fastmap.c 				p = &(*p)->rb_left;
p                 218 drivers/mtd/ubi/fastmap.c 				p = &(*p)->rb_right;
p                 228 drivers/mtd/ubi/fastmap.c 	rb_link_node(&aeb->u.rb, parent, p);
p                 246 drivers/mtd/ubi/fastmap.c 	struct rb_node **p = &av->root.rb_node, *parent = NULL;
p                 250 drivers/mtd/ubi/fastmap.c 	while (*p) {
p                 251 drivers/mtd/ubi/fastmap.c 		parent = *p;
p                 256 drivers/mtd/ubi/fastmap.c 				p = &(*p)->rb_left;
p                 258 drivers/mtd/ubi/fastmap.c 				p = &(*p)->rb_right;
p                 321 drivers/mtd/ubi/fastmap.c 	rb_link_node(&new_aeb->u.rb, parent, p);
p                 970 drivers/mtd/ubi/io.c 	void *p = vidb->buffer;
p                 975 drivers/mtd/ubi/io.c 	read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
p                1054 drivers/mtd/ubi/io.c 	void *p = vidb->buffer;
p                1075 drivers/mtd/ubi/io.c 	err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
p                1242 drivers/mtd/ubi/io.c 	void *p;
p                1252 drivers/mtd/ubi/io.c 	p = vidb->buffer;
p                1253 drivers/mtd/ubi/io.c 	err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
p                 141 drivers/mtd/ubi/wl.c 	struct rb_node **p, *parent = NULL;
p                 143 drivers/mtd/ubi/wl.c 	p = &root->rb_node;
p                 144 drivers/mtd/ubi/wl.c 	while (*p) {
p                 147 drivers/mtd/ubi/wl.c 		parent = *p;
p                 151 drivers/mtd/ubi/wl.c 			p = &(*p)->rb_left;
p                 153 drivers/mtd/ubi/wl.c 			p = &(*p)->rb_right;
p                 157 drivers/mtd/ubi/wl.c 				p = &(*p)->rb_left;
p                 159 drivers/mtd/ubi/wl.c 				p = &(*p)->rb_right;
p                 163 drivers/mtd/ubi/wl.c 	rb_link_node(&e->u.rb, parent, p);
p                 238 drivers/mtd/ubi/wl.c 	struct rb_node *p;
p                 240 drivers/mtd/ubi/wl.c 	p = root->rb_node;
p                 241 drivers/mtd/ubi/wl.c 	while (p) {
p                 244 drivers/mtd/ubi/wl.c 		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
p                 252 drivers/mtd/ubi/wl.c 			p = p->rb_left;
p                 254 drivers/mtd/ubi/wl.c 			p = p->rb_right;
p                 258 drivers/mtd/ubi/wl.c 				p = p->rb_left;
p                 260 drivers/mtd/ubi/wl.c 				p = p->rb_right;
p                 277 drivers/mtd/ubi/wl.c 	struct ubi_wl_entry *p;
p                 281 drivers/mtd/ubi/wl.c 		list_for_each_entry(p, &ubi->pq[i], u.list)
p                 282 drivers/mtd/ubi/wl.c 			if (p == e)
p                 321 drivers/mtd/ubi/wl.c 	struct rb_node *p;
p                 328 drivers/mtd/ubi/wl.c 	p = root->rb_node;
p                 329 drivers/mtd/ubi/wl.c 	while (p) {
p                 332 drivers/mtd/ubi/wl.c 		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
p                 334 drivers/mtd/ubi/wl.c 			p = p->rb_left;
p                 336 drivers/mtd/ubi/wl.c 			p = p->rb_right;
p                  42 drivers/net/Space.c static int __init probe_list2(int unit, struct devprobe2 *p, int autoprobe)
p                  46 drivers/net/Space.c 	for (; p->probe; p++) {
p                  47 drivers/net/Space.c 		if (autoprobe && p->status)
p                  49 drivers/net/Space.c 		dev = p->probe(unit);
p                  53 drivers/net/Space.c 			p->status = PTR_ERR(dev);
p                 327 drivers/net/appletalk/ipddp.c         struct ipddp_route *p;
p                 333 drivers/net/appletalk/ipddp.c                 p = ipddp_route_list->next;
p                 335 drivers/net/appletalk/ipddp.c                 ipddp_route_list = p;
p                 101 drivers/net/arcnet/arc-rimi.c 	void __iomem *p;
p                 107 drivers/net/arcnet/arc-rimi.c 	p = ioremap(addr, size);
p                 108 drivers/net/arcnet/arc-rimi.c 	if (p) {
p                 109 drivers/net/arcnet/arc-rimi.c 		if (arcnet_readb(p, COM9026_REG_R_STATUS) == TESTvalue)
p                 113 drivers/net/arcnet/arc-rimi.c 		iounmap(p);
p                 127 drivers/net/arcnet/arc-rimi.c 	void __iomem *p;
p                 131 drivers/net/arcnet/arc-rimi.c 	p = ioremap(dev->mem_start, MIRROR_SIZE);
p                 132 drivers/net/arcnet/arc-rimi.c 	if (!p) {
p                 140 drivers/net/arcnet/arc-rimi.c 		iounmap(p);
p                 147 drivers/net/arcnet/arc-rimi.c 	arcnet_writeb(TESTvalue, p, COM9026_REG_W_INTMASK);
p                 148 drivers/net/arcnet/arc-rimi.c 	arcnet_writeb(TESTvalue, p, COM9026_REG_W_COMMAND);
p                 158 drivers/net/arcnet/arc-rimi.c 	if (arcnet_readb(p, COM9026_REG_R_STATUS) == TESTvalue &&
p                 193 drivers/net/arcnet/arc-rimi.c 	iounmap(p);
p                 104 drivers/net/arcnet/com90xx.c 	u_long *p;
p                 207 drivers/net/arcnet/com90xx.c 	for (index = 0, p = &shmems[0]; index < numshmems; p++, index++) {
p                 216 drivers/net/arcnet/com90xx.c 		arc_cont(D_INIT, "%lXh ", *p);
p                 218 drivers/net/arcnet/com90xx.c 		if (!request_mem_region(*p, MIRROR_SIZE, "arcnet (90xx)")) {
p                 225 drivers/net/arcnet/com90xx.c 		base = ioremap(*p, MIRROR_SIZE);
p                 262 drivers/net/arcnet/com90xx.c 		release_mem_region(*p, MIRROR_SIZE);
p                 264 drivers/net/arcnet/com90xx.c 		*p-- = shmems[--numshmems];
p                 281 drivers/net/arcnet/com90xx.c 	for (p = &shmems[0]; p < shmems + numshmems; p++) {
p                 288 drivers/net/arcnet/com90xx.c 		arc_cont(D_INIT, "%lXh ", *p);
p                 392 drivers/net/arcnet/com90xx.c 				arc_cont(D_INIT, "%lXh)\n", *p);
p                 438 drivers/net/arcnet/com90xx.c 	void __iomem *p;
p                 444 drivers/net/arcnet/com90xx.c 	p = ioremap(addr, size);
p                 445 drivers/net/arcnet/com90xx.c 	if (p) {
p                 446 drivers/net/arcnet/com90xx.c 		if (arcnet_readb(p, COM9026_REG_R_STATUS) == TESTvalue)
p                 450 drivers/net/arcnet/com90xx.c 		iounmap(p);
p                 461 drivers/net/arcnet/com90xx.c 				void __iomem *p)
p                 472 drivers/net/arcnet/com90xx.c 		iounmap(p);
p                 484 drivers/net/arcnet/com90xx.c 	if (arcnet_readb(p, COM9026_REG_R_STATUS) == TESTvalue &&
p                 502 drivers/net/arcnet/com90xx.c 	iounmap(p);
p                 511 drivers/net/bonding/bond_options.c 	char *p, valstr[BOND_OPT_MAX_NAMELEN + 1] = { 0, };
p                 530 drivers/net/bonding/bond_options.c 		p = strchr(val->string, '\n');
p                 531 drivers/net/bonding/bond_options.c 		if (p)
p                 532 drivers/net/bonding/bond_options.c 			*p = '\0';
p                 533 drivers/net/bonding/bond_options.c 		for (p = val->string; *p; p++)
p                 534 drivers/net/bonding/bond_options.c 			if (!(isdigit(*p) || isspace(*p)))
p                 539 drivers/net/bonding/bond_options.c 		if (*p) {
p                 607 drivers/net/bonding/bond_options.c 	char *p;
p                 614 drivers/net/bonding/bond_options.c 				p = strchr(val->string, '\n');
p                 615 drivers/net/bonding/bond_options.c 				if (p)
p                 616 drivers/net/bonding/bond_options.c 					*p = '\0';
p                1129 drivers/net/bonding/bond_options.c 	char *p, *primary = newval->string;
p                1135 drivers/net/bonding/bond_options.c 	p = strchr(primary, '\n');
p                1136 drivers/net/bonding/bond_options.c 	if (p)
p                1137 drivers/net/bonding/bond_options.c 		*p = '\0';
p                 648 drivers/net/can/c_can/c_can.c 	struct pinctrl *p;
p                 662 drivers/net/can/c_can/c_can.c 	p = pinctrl_get_select(priv->device, "active");
p                 663 drivers/net/can/c_can/c_can.c 	if (!IS_ERR(p))
p                 664 drivers/net/can/c_can/c_can.c 		pinctrl_put(p);
p                  26 drivers/net/can/janz-ican3.c #define DPM_PAGE_ADDR(p)	((p) * DPM_PAGE_SIZE)
p                 723 drivers/net/can/kvaser_pciefd.c static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
p                 731 drivers/net/can/kvaser_pciefd.c 	memset(p, 0, sizeof(*p));
p                 734 drivers/net/can/kvaser_pciefd.c 		p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
p                 737 drivers/net/can/kvaser_pciefd.c 		p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;
p                 740 drivers/net/can/kvaser_pciefd.c 		p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;
p                 742 drivers/net/can/kvaser_pciefd.c 	p->header[0] |= cf->can_id & CAN_EFF_MASK;
p                 743 drivers/net/can/kvaser_pciefd.c 	p->header[1] |= can_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT;
p                 744 drivers/net/can/kvaser_pciefd.c 	p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;
p                 747 drivers/net/can/kvaser_pciefd.c 		p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
p                 749 drivers/net/can/kvaser_pciefd.c 			p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
p                 751 drivers/net/can/kvaser_pciefd.c 			p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
p                 754 drivers/net/can/kvaser_pciefd.c 	p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK;
p                 757 drivers/net/can/kvaser_pciefd.c 	memcpy(p->data, cf->data, packet_size);
p                1137 drivers/net/can/kvaser_pciefd.c 					    struct kvaser_pciefd_rx_packet *p,
p                1145 drivers/net/can/kvaser_pciefd.c 	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
p                1153 drivers/net/can/kvaser_pciefd.c 	if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
p                1160 drivers/net/can/kvaser_pciefd.c 		if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
p                1163 drivers/net/can/kvaser_pciefd.c 		if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
p                1173 drivers/net/can/kvaser_pciefd.c 	cf->can_id = p->header[0] & CAN_EFF_MASK;
p                1174 drivers/net/can/kvaser_pciefd.c 	if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
p                1177 drivers/net/can/kvaser_pciefd.c 	cf->len = can_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);
p                1179 drivers/net/can/kvaser_pciefd.c 	if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR)
p                1187 drivers/net/can/kvaser_pciefd.c 		ns_to_ktime(div_u64(p->timestamp * 1000,
p                1220 drivers/net/can/kvaser_pciefd.c static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
p                1226 drivers/net/can/kvaser_pciefd.c 	if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
p                1227 drivers/net/can/kvaser_pciefd.c 	    p->header[0] & KVASER_PCIEFD_SPACK_IRM)
p                1231 drivers/net/can/kvaser_pciefd.c 	else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
p                1235 drivers/net/can/kvaser_pciefd.c 	else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
p                1247 drivers/net/can/kvaser_pciefd.c 					struct kvaser_pciefd_rx_packet *p)
p                1259 drivers/net/can/kvaser_pciefd.c 	bec.txerr = p->header[0] & 0xff;
p                1260 drivers/net/can/kvaser_pciefd.c 	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;
p                1262 drivers/net/can/kvaser_pciefd.c 	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
p                1294 drivers/net/can/kvaser_pciefd.c 		ns_to_ktime(div_u64(p->timestamp * 1000,
p                1309 drivers/net/can/kvaser_pciefd.c 					     struct kvaser_pciefd_rx_packet *p)
p                1312 drivers/net/can/kvaser_pciefd.c 	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
p                1319 drivers/net/can/kvaser_pciefd.c 	kvaser_pciefd_rx_error_frame(can, p);
p                1329 drivers/net/can/kvaser_pciefd.c 					    struct kvaser_pciefd_rx_packet *p)
p                1336 drivers/net/can/kvaser_pciefd.c 	bec.txerr = p->header[0] & 0xff;
p                1337 drivers/net/can/kvaser_pciefd.c 	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;
p                1339 drivers/net/can/kvaser_pciefd.c 	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
p                1368 drivers/net/can/kvaser_pciefd.c 			ns_to_ktime(div_u64(p->timestamp * 1000,
p                1386 drivers/net/can/kvaser_pciefd.c 					      struct kvaser_pciefd_rx_packet *p)
p                1391 drivers/net/can/kvaser_pciefd.c 	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
p                1402 drivers/net/can/kvaser_pciefd.c 	if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
p                1403 drivers/net/can/kvaser_pciefd.c 	    p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
p                1404 drivers/net/can/kvaser_pciefd.c 	    p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
p                1405 drivers/net/can/kvaser_pciefd.c 	    cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
p                1417 drivers/net/can/kvaser_pciefd.c 	} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
p                1418 drivers/net/can/kvaser_pciefd.c 		   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
p                1419 drivers/net/can/kvaser_pciefd.c 		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
p                1428 drivers/net/can/kvaser_pciefd.c 	} else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
p                1429 drivers/net/can/kvaser_pciefd.c 		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) {
p                1431 drivers/net/can/kvaser_pciefd.c 		kvaser_pciefd_handle_status_resp(can, p);
p                1437 drivers/net/can/kvaser_pciefd.c 	} else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
p                1448 drivers/net/can/kvaser_pciefd.c 					    struct kvaser_pciefd_rx_packet *p)
p                1451 drivers/net/can/kvaser_pciefd.c 	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
p                1459 drivers/net/can/kvaser_pciefd.c 	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
p                1467 drivers/net/can/kvaser_pciefd.c 		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
p                1482 drivers/net/can/kvaser_pciefd.c 					     struct kvaser_pciefd_rx_packet *p)
p                1491 drivers/net/can/kvaser_pciefd.c 	if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
p                1511 drivers/net/can/kvaser_pciefd.c 					   struct kvaser_pciefd_rx_packet *p)
p                1515 drivers/net/can/kvaser_pciefd.c 	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
p                1522 drivers/net/can/kvaser_pciefd.c 	if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
p                1525 drivers/net/can/kvaser_pciefd.c 	if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
p                1526 drivers/net/can/kvaser_pciefd.c 		kvaser_pciefd_handle_nack_packet(can, p);
p                1530 drivers/net/can/kvaser_pciefd.c 	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
p                1533 drivers/net/can/kvaser_pciefd.c 		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
p                1554 drivers/net/can/kvaser_pciefd.c 					      struct kvaser_pciefd_rx_packet *p)
p                1557 drivers/net/can/kvaser_pciefd.c 	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
p                1576 drivers/net/can/kvaser_pciefd.c 	struct kvaser_pciefd_rx_packet *p = &packet;
p                1588 drivers/net/can/kvaser_pciefd.c 	p->header[0] = le32_to_cpu(buffer[pos++]);
p                1589 drivers/net/can/kvaser_pciefd.c 	p->header[1] = le32_to_cpu(buffer[pos++]);
p                1594 drivers/net/can/kvaser_pciefd.c 	p->timestamp = le64_to_cpu(timestamp);
p                1596 drivers/net/can/kvaser_pciefd.c 	type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf;
p                1599 drivers/net/can/kvaser_pciefd.c 		ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
p                1600 drivers/net/can/kvaser_pciefd.c 		if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
p                1603 drivers/net/can/kvaser_pciefd.c 			data_len = can_dlc2len(p->header[1] >>
p                1610 drivers/net/can/kvaser_pciefd.c 		ret = kvaser_pciefd_handle_ack_packet(pcie, p);
p                1614 drivers/net/can/kvaser_pciefd.c 		ret = kvaser_pciefd_handle_status_packet(pcie, p);
p                1618 drivers/net/can/kvaser_pciefd.c 		ret = kvaser_pciefd_handle_error_packet(pcie, p);
p                1622 drivers/net/can/kvaser_pciefd.c 		ret = kvaser_pciefd_handle_eack_packet(pcie, p);
p                1626 drivers/net/can/kvaser_pciefd.c 		ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
p                 321 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_TMC(p)			(0x0250 + (0x01 * (p)))
p                 323 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_TMSTS(p)			(0x02d0 + (0x01 * (p)))
p                 402 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_C_TMID(p)		(0x1000 + (0x10 * (p)))
p                 403 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_C_TMPTR(p)		(0x1004 + (0x10 * (p)))
p                 404 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_C_TMDF0(p)		(0x1008 + (0x10 * (p)))
p                 405 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_C_TMDF1(p)		(0x100c + (0x10 * (p)))
p                 453 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_F_TMID(p)		(0x4000 + (0x20 * (p)))
p                 454 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_F_TMPTR(p)		(0x4004 + (0x20 * (p)))
p                 455 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_F_TMFDCTR(p)		(0x4008 + (0x20 * (p)))
p                 456 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_F_TMDF(p, b)		(0x400c + (0x20 * (p)) + (0x04 * (b)))
p                 111 drivers/net/can/usb/peak_usb/pcan_usb.c static int pcan_usb_send_cmd(struct peak_usb_device *dev, u8 f, u8 n, u8 *p)
p                 123 drivers/net/can/usb/peak_usb/pcan_usb.c 	if (p)
p                 125 drivers/net/can/usb/peak_usb/pcan_usb.c 			p, PCAN_USB_CMD_ARGS_LEN);
p                 141 drivers/net/can/usb/peak_usb/pcan_usb.c static int pcan_usb_wait_rsp(struct peak_usb_device *dev, u8 f, u8 n, u8 *p)
p                 162 drivers/net/can/usb/peak_usb/pcan_usb.c 	else if (p)
p                 163 drivers/net/can/usb/peak_usb/pcan_usb.c 		memcpy(p, dev->cmd_buf + PCAN_USB_CMD_ARGS,
p                  55 drivers/net/can/usb/peak_usb/pcan_usb_core.c void pcan_dump_mem(char *prompt, void *p, int l)
p                  60 drivers/net/can/usb/peak_usb/pcan_usb_core.c 		       DUMP_WIDTH, 1, p, l, false);
p                 139 drivers/net/can/usb/peak_usb/pcan_usb_core.h void pcan_dump_mem(char *prompt, void *p, int l);
p                 316 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 	unsigned int p;
p                 326 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 		p = usb_sndctrlpipe(dev->udev, 0);
p                 330 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 		p = usb_rcvctrlpipe(dev->udev, 0);
p                 336 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 	err = usb_control_msg(dev->udev, p, req_id, req_type, req_value, 0,
p                1067 drivers/net/dsa/b53/b53_common.c 	struct ethtool_eee *p = &dev->ports[port].eee;
p                1152 drivers/net/dsa/b53/b53_common.c 	p->eee_enabled = b53_eee_init(ds, port, phydev);
p                1976 drivers/net/dsa/b53/b53_common.c 	struct ethtool_eee *p = &dev->ports[port].eee;
p                1983 drivers/net/dsa/b53/b53_common.c 	e->eee_enabled = p->eee_enabled;
p                1993 drivers/net/dsa/b53/b53_common.c 	struct ethtool_eee *p = &dev->ports[port].eee;
p                1998 drivers/net/dsa/b53/b53_common.c 	p->eee_enabled = e->eee_enabled;
p                 398 drivers/net/dsa/b53/b53_srab.c 	struct b53_srab_port_priv *p = &priv->port_intrs[port];
p                 400 drivers/net/dsa/b53/b53_srab.c 	if (p->mode != PHY_INTERFACE_MODE_SGMII)
p                 417 drivers/net/dsa/b53/b53_srab.c 	struct b53_srab_port_priv *p = &priv->port_intrs[port];
p                 423 drivers/net/dsa/b53/b53_srab.c 	if (p->irq == -ENXIO)
p                 426 drivers/net/dsa/b53/b53_srab.c 	ret = request_threaded_irq(p->irq, b53_srab_port_isr,
p                 428 drivers/net/dsa/b53/b53_srab.c 				   dev_name(dev->dev), p);
p                 430 drivers/net/dsa/b53/b53_srab.c 		p->irq_enabled = true;
p                 438 drivers/net/dsa/b53/b53_srab.c 	struct b53_srab_port_priv *p = &priv->port_intrs[port];
p                 440 drivers/net/dsa/b53/b53_srab.c 	if (p->irq_enabled) {
p                 441 drivers/net/dsa/b53/b53_srab.c 		free_irq(p->irq, p);
p                 442 drivers/net/dsa/b53/b53_srab.c 		p->irq_enabled = false;
p                 538 drivers/net/dsa/b53/b53_srab.c 	struct b53_srab_port_priv *p;
p                 554 drivers/net/dsa/b53/b53_srab.c 		p = &priv->port_intrs[port];
p                 559 drivers/net/dsa/b53/b53_srab.c 			p->mode = PHY_INTERFACE_MODE_SGMII;
p                 565 drivers/net/dsa/b53/b53_srab.c 			p->mode = PHY_INTERFACE_MODE_MII;
p                 568 drivers/net/dsa/b53/b53_srab.c 			p->mode = PHY_INTERFACE_MODE_GMII;
p                 571 drivers/net/dsa/b53/b53_srab.c 			p->mode = PHY_INTERFACE_MODE_RGMII;
p                 574 drivers/net/dsa/b53/b53_srab.c 			p->mode = PHY_INTERFACE_MODE_INTERNAL;
p                 577 drivers/net/dsa/b53/b53_srab.c 			p->mode = PHY_INTERFACE_MODE_NA;
p                 581 drivers/net/dsa/b53/b53_srab.c 		if (p->mode != PHY_INTERFACE_MODE_NA)
p                 583 drivers/net/dsa/b53/b53_srab.c 				 port, phy_modes(p->mode));
p                 641 drivers/net/dsa/bcm_sf2.c 	struct ethtool_eee *p = &priv->dev->ports[port].eee;
p                 646 drivers/net/dsa/bcm_sf2.c 		p->eee_enabled = b53_eee_init(ds, port, phydev);
p                 738 drivers/net/dsa/bcm_sf2.c 	struct net_device *p = ds->ports[port].cpu_dp->master;
p                 743 drivers/net/dsa/bcm_sf2.c 	if (p->ethtool_ops->get_wol)
p                 744 drivers/net/dsa/bcm_sf2.c 		p->ethtool_ops->get_wol(p, &pwol);
p                 762 drivers/net/dsa/bcm_sf2.c 	struct net_device *p = ds->ports[port].cpu_dp->master;
p                 767 drivers/net/dsa/bcm_sf2.c 	if (p->ethtool_ops->get_wol)
p                 768 drivers/net/dsa/bcm_sf2.c 		p->ethtool_ops->get_wol(p, &pwol);
p                 786 drivers/net/dsa/bcm_sf2.c 	return p->ethtool_ops->set_wol(p, wol);
p                1049 drivers/net/dsa/bcm_sf2_cfp.c 	struct net_device *p = ds->ports[port].cpu_dp->master;
p                1080 drivers/net/dsa/bcm_sf2_cfp.c 	if (p->ethtool_ops->get_rxnfc) {
p                1081 drivers/net/dsa/bcm_sf2_cfp.c 		ret = p->ethtool_ops->get_rxnfc(p, nfc, rule_locs);
p                1092 drivers/net/dsa/bcm_sf2_cfp.c 	struct net_device *p = ds->ports[port].cpu_dp->master;
p                1119 drivers/net/dsa/bcm_sf2_cfp.c 	if (p->ethtool_ops->set_rxnfc) {
p                1120 drivers/net/dsa/bcm_sf2_cfp.c 		ret = p->ethtool_ops->set_rxnfc(p, nfc);
p                 148 drivers/net/dsa/lan9303-core.c # define LAN9303_SWE_GLB_INGR_IGMP_PORT(p) BIT(10 + p)
p                 826 drivers/net/dsa/lan9303-core.c 	int p;
p                 828 drivers/net/dsa/lan9303-core.c 	for (p = 1; p < LAN9303_NUM_PORTS; p++) {
p                 829 drivers/net/dsa/lan9303-core.c 		int ret = lan9303_disable_processing_port(chip, p);
p                  63 drivers/net/dsa/lantiq_gswip.c #define GSWIP_MDIO_PHYp(p)		(0x15 - (p))
p                 140 drivers/net/dsa/lantiq_gswip.c #define GSWIP_BM_PCFGp(p)		(0x080 + ((p) * 2))
p                 144 drivers/net/dsa/lantiq_gswip.c #define GSWIP_BM_RMON_CTRLp(p)		(0x81 + ((p) * 2))
p                 175 drivers/net/dsa/lantiq_gswip.c #define GSWIP_PCE_PCTRL_0p(p)		(0x480 + ((p) * 0xA))
p                 185 drivers/net/dsa/lantiq_gswip.c #define GSWIP_PCE_VCTRL(p)		(0x485 + ((p) * 0xA))
p                 191 drivers/net/dsa/lantiq_gswip.c #define GSWIP_PCE_DEFPVID(p)		(0x486 + ((p) * 0xA))
p                 194 drivers/net/dsa/lantiq_gswip.c #define GSWIP_MAC_CTRL_2p(p)		(0x905 + ((p) * 0xC))
p                 198 drivers/net/dsa/lantiq_gswip.c #define GSWIP_FDMA_PCTRLp(p)		(0xA80 + ((p) * 0x6))
p                 209 drivers/net/dsa/lantiq_gswip.c #define GSWIP_SDMA_PCTRLp(p)		(0xBC0 + ((p) * 0x6))
p                 463 drivers/net/dsa/microchip/ksz8795.c 	u8 p = phy;
p                 467 drivers/net/dsa/microchip/ksz8795.c 		ksz_pread8(dev, p, P_NEG_RESTART_CTRL, &restart);
p                 468 drivers/net/dsa/microchip/ksz8795.c 		ksz_pread8(dev, p, P_SPEED_STATUS, &speed);
p                 469 drivers/net/dsa/microchip/ksz8795.c 		ksz_pread8(dev, p, P_FORCE_CTRL, &ctrl);
p                 494 drivers/net/dsa/microchip/ksz8795.c 		ksz_pread8(dev, p, P_LINK_STATUS, &link);
p                 512 drivers/net/dsa/microchip/ksz8795.c 		ksz_pread8(dev, p, P_LOCAL_CTRL, &ctrl);
p                 526 drivers/net/dsa/microchip/ksz8795.c 		ksz_pread8(dev, p, P_REMOTE_STATUS, &link);
p                 551 drivers/net/dsa/microchip/ksz8795.c 	u8 p = phy;
p                 560 drivers/net/dsa/microchip/ksz8795.c 		ksz_pread8(dev, p, P_SPEED_STATUS, &speed);
p                 567 drivers/net/dsa/microchip/ksz8795.c 			ksz_pwrite8(dev, p, P_SPEED_STATUS, data);
p                 568 drivers/net/dsa/microchip/ksz8795.c 		ksz_pread8(dev, p, P_FORCE_CTRL, &ctrl);
p                 576 drivers/net/dsa/microchip/ksz8795.c 		if (dev->ports[p].fiber)
p                 587 drivers/net/dsa/microchip/ksz8795.c 			ksz_pwrite8(dev, p, P_FORCE_CTRL, data);
p                 588 drivers/net/dsa/microchip/ksz8795.c 		ksz_pread8(dev, p, P_NEG_RESTART_CTRL, &restart);
p                 619 drivers/net/dsa/microchip/ksz8795.c 			ksz_pwrite8(dev, p, P_NEG_RESTART_CTRL, data);
p                 622 drivers/net/dsa/microchip/ksz8795.c 		ksz_pread8(dev, p, P_LOCAL_CTRL, &ctrl);
p                 640 drivers/net/dsa/microchip/ksz8795.c 			ksz_pwrite8(dev, p, P_LOCAL_CTRL, data);
p                 681 drivers/net/dsa/microchip/ksz8795.c 	struct ksz_port *p;
p                 685 drivers/net/dsa/microchip/ksz8795.c 	p = &dev->ports[port];
p                 699 drivers/net/dsa/microchip/ksz8795.c 		    p->stp_state == BR_STATE_DISABLED)
p                 700 drivers/net/dsa/microchip/ksz8795.c 			member = dev->host_mask | p->vid_member;
p                 717 drivers/net/dsa/microchip/ksz8795.c 			member = dev->host_mask | p->vid_member;
p                 723 drivers/net/dsa/microchip/ksz8795.c 		    p->stp_state == BR_STATE_DISABLED)
p                 724 drivers/net/dsa/microchip/ksz8795.c 			member = dev->host_mask | p->vid_member;
p                 732 drivers/net/dsa/microchip/ksz8795.c 	p->stp_state = state;
p                 743 drivers/net/dsa/microchip/ksz8795.c 	if (member >= 0 && member != p->member)
p                 763 drivers/net/dsa/microchip/ksz8795.c 	struct ksz_port *p;
p                 774 drivers/net/dsa/microchip/ksz8795.c 		p = &dev->ports[index];
p                 775 drivers/net/dsa/microchip/ksz8795.c 		if (!p->on)
p                 784 drivers/net/dsa/microchip/ksz8795.c 		p = &dev->ports[index];
p                 785 drivers/net/dsa/microchip/ksz8795.c 		if (!p->on)
p                 925 drivers/net/dsa/microchip/ksz8795.c 	struct ksz_port *p = &dev->ports[port];
p                 949 drivers/net/dsa/microchip/ksz8795.c 			p->phydev.speed = SPEED_100;
p                 953 drivers/net/dsa/microchip/ksz8795.c 			p->phydev.speed = SPEED_100;
p                 958 drivers/net/dsa/microchip/ksz8795.c 			p->phydev.speed = SPEED_1000;
p                 971 drivers/net/dsa/microchip/ksz8795.c 			p->phydev.speed = SPEED_1000;
p                 975 drivers/net/dsa/microchip/ksz8795.c 		p->phydev.duplex = 1;
p                 981 drivers/net/dsa/microchip/ksz8795.c 		member = dev->host_mask | p->vid_member;
p                 985 drivers/net/dsa/microchip/ksz8795.c 		if (p->phydev.link)
p                 994 drivers/net/dsa/microchip/ksz8795.c 	struct ksz_port *p;
p                1004 drivers/net/dsa/microchip/ksz8795.c 	p = &dev->ports[dev->cpu_port];
p                1005 drivers/net/dsa/microchip/ksz8795.c 	p->vid_member = dev->port_mask;
p                1006 drivers/net/dsa/microchip/ksz8795.c 	p->on = 1;
p                1012 drivers/net/dsa/microchip/ksz8795.c 		p = &dev->ports[i];
p                1017 drivers/net/dsa/microchip/ksz8795.c 		p->vid_member = BIT(i);
p                1018 drivers/net/dsa/microchip/ksz8795.c 		p->member = dev->port_mask;
p                1024 drivers/net/dsa/microchip/ksz8795.c 		p->on = 1;
p                1025 drivers/net/dsa/microchip/ksz8795.c 		p->phy = 1;
p                1028 drivers/net/dsa/microchip/ksz8795.c 		p = &dev->ports[i];
p                1029 drivers/net/dsa/microchip/ksz8795.c 		if (!p->on)
p                1033 drivers/net/dsa/microchip/ksz8795.c 			p->fiber = 1;
p                1034 drivers/net/dsa/microchip/ksz8795.c 		if (p->fiber)
p                 235 drivers/net/dsa/microchip/ksz9477.c 	struct ksz_port *p = &dev->ports[port];
p                 241 drivers/net/dsa/microchip/ksz9477.c 	data = p->freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
p                 270 drivers/net/dsa/microchip/ksz9477.c 	struct ksz_port *p = &dev->ports[port];
p                 273 drivers/net/dsa/microchip/ksz9477.c 	mutex_lock(&p->mib.cnt_mutex);
p                 277 drivers/net/dsa/microchip/ksz9477.c 	p->freeze = freeze;
p                 278 drivers/net/dsa/microchip/ksz9477.c 	mutex_unlock(&p->mib.cnt_mutex);
p                 320 drivers/net/dsa/microchip/ksz9477.c 		struct ksz_port *p = &dev->ports[addr];
p                 345 drivers/net/dsa/microchip/ksz9477.c 			if (p->phydev.speed == SPEED_1000)
p                 400 drivers/net/dsa/microchip/ksz9477.c 	struct ksz_port *p = &dev->ports[port];
p                 417 drivers/net/dsa/microchip/ksz9477.c 		    p->stp_state == BR_STATE_DISABLED)
p                 418 drivers/net/dsa/microchip/ksz9477.c 			member = dev->host_mask | p->vid_member;
p                 430 drivers/net/dsa/microchip/ksz9477.c 		member = dev->host_mask | p->vid_member;
p                 443 drivers/net/dsa/microchip/ksz9477.c 		    p->stp_state == BR_STATE_DISABLED)
p                 444 drivers/net/dsa/microchip/ksz9477.c 			member = dev->host_mask | p->vid_member;
p                 452 drivers/net/dsa/microchip/ksz9477.c 	p->stp_state = state;
p                 464 drivers/net/dsa/microchip/ksz9477.c 	if (member >= 0 && member != p->member)
p                1193 drivers/net/dsa/microchip/ksz9477.c 	struct ksz_port *p = &dev->ports[port];
p                1240 drivers/net/dsa/microchip/ksz9477.c 			p->phydev.speed = SPEED_100;
p                1245 drivers/net/dsa/microchip/ksz9477.c 			p->phydev.speed = SPEED_100;
p                1250 drivers/net/dsa/microchip/ksz9477.c 			p->phydev.speed = SPEED_1000;
p                1263 drivers/net/dsa/microchip/ksz9477.c 			p->phydev.speed = SPEED_1000;
p                1267 drivers/net/dsa/microchip/ksz9477.c 		p->phydev.duplex = 1;
p                1275 drivers/net/dsa/microchip/ksz9477.c 		member = dev->host_mask | p->vid_member;
p                1279 drivers/net/dsa/microchip/ksz9477.c 		if (p->phydev.link)
p                1293 drivers/net/dsa/microchip/ksz9477.c 	struct ksz_port *p;
p                1321 drivers/net/dsa/microchip/ksz9477.c 			p = &dev->ports[dev->cpu_port];
p                1322 drivers/net/dsa/microchip/ksz9477.c 			p->vid_member = dev->port_mask;
p                1323 drivers/net/dsa/microchip/ksz9477.c 			p->on = 1;
p                1332 drivers/net/dsa/microchip/ksz9477.c 		p = &dev->ports[i];
p                1337 drivers/net/dsa/microchip/ksz9477.c 		p->vid_member = (1 << i);
p                1338 drivers/net/dsa/microchip/ksz9477.c 		p->member = dev->port_mask;
p                1340 drivers/net/dsa/microchip/ksz9477.c 		p->on = 1;
p                1342 drivers/net/dsa/microchip/ksz9477.c 			p->phy = 1;
p                1344 drivers/net/dsa/microchip/ksz9477.c 			p->sgmii = 1;
p                1347 drivers/net/dsa/microchip/ksz9477.c 			p->phy = 0;
p                  25 drivers/net/dsa/microchip/ksz_common.c 	struct ksz_port *p;
p                  31 drivers/net/dsa/microchip/ksz_common.c 		p = &dev->ports[i];
p                  36 drivers/net/dsa/microchip/ksz_common.c 		if (p->stp_state == BR_STATE_FORWARDING &&
p                  37 drivers/net/dsa/microchip/ksz_common.c 		    p->member != dev->member)
p                  72 drivers/net/dsa/microchip/ksz_common.c 	struct ksz_port *p;
p                  79 drivers/net/dsa/microchip/ksz_common.c 		p = &dev->ports[i];
p                  80 drivers/net/dsa/microchip/ksz_common.c 		mib = &p->mib;
p                  86 drivers/net/dsa/microchip/ksz_common.c 		if (!p->read) {
p                  93 drivers/net/dsa/microchip/ksz_common.c 		p->read = false;
p                 150 drivers/net/dsa/microchip/ksz_common.c 	struct ksz_port *p = &dev->ports[port];
p                 154 drivers/net/dsa/microchip/ksz_common.c 		p->read = true;
p                 237 drivers/net/dsa/mt7530.c _mt7530_read(struct mt7530_dummy_poll *p)
p                 239 drivers/net/dsa/mt7530.c 	struct mii_bus		*bus = p->priv->bus;
p                 244 drivers/net/dsa/mt7530.c 	val = mt7530_mii_read(p->priv, p->reg);
p                 254 drivers/net/dsa/mt7530.c 	struct mt7530_dummy_poll p;
p                 256 drivers/net/dsa/mt7530.c 	INIT_MT7530_DUMMY_POLL(&p, priv, reg);
p                 257 drivers/net/dsa/mt7530.c 	return _mt7530_read(&p);
p                 294 drivers/net/dsa/mt7530.c 	struct mt7530_dummy_poll p;
p                 300 drivers/net/dsa/mt7530.c 	INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_ATC);
p                 301 drivers/net/dsa/mt7530.c 	ret = readx_poll_timeout(_mt7530_read, &p, val,
p                 947 drivers/net/dsa/mt7530.c 	struct mt7530_dummy_poll p;
p                 954 drivers/net/dsa/mt7530.c 	INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_VTCR);
p                 955 drivers/net/dsa/mt7530.c 	ret = readx_poll_timeout(_mt7530_read, &p, val,
p                1176 drivers/net/dsa/mt7530.c 	struct mt7530_dummy_poll p;
p                1220 drivers/net/dsa/mt7530.c 	INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
p                1221 drivers/net/dsa/mt7530.c 	ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
p                 121 drivers/net/dsa/mt7530.h #define  ETAG_CTRL_P(p, x)		(((x) & 0x3) << ((p) << 1))
p                 122 drivers/net/dsa/mt7530.h #define  ETAG_CTRL_P_MASK(p)		ETAG_CTRL_P(p, 3)
p                 500 drivers/net/dsa/mt7530.h static inline void INIT_MT7530_DUMMY_POLL(struct mt7530_dummy_poll *p,
p                 503 drivers/net/dsa/mt7530.h 	p->priv = priv;
p                 504 drivers/net/dsa/mt7530.h 	p->reg = reg;
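The mt7530 entries above show a small register-polling helper: a struct mt7530_dummy_poll bundles the driver private data and a register offset so that _mt7530_read() can be handed to readx_poll_timeout(). A hedged sketch of that pattern follows; the busy_bit parameter and the 20/100000 microsecond values are illustrative only, and the mt7530 types come from drivers/net/dsa/mt7530.{c,h}, so this is an in-driver sketch rather than the driver's actual code.

/* Hedged sketch: poll a register until a caller-supplied busy bit clears.
 * struct mt7530_priv, struct mt7530_dummy_poll, INIT_MT7530_DUMMY_POLL()
 * and _mt7530_read() are the driver's own helpers; busy_bit and the
 * sleep/timeout values here are invented for illustration.
 */
#include <linux/iopoll.h>

static int example_wait_not_busy(struct mt7530_priv *priv, u32 reg, u32 busy_bit)
{
	struct mt7530_dummy_poll p;
	u32 val;

	INIT_MT7530_DUMMY_POLL(&p, priv, reg);
	/* readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us)
	 * repeatedly evaluates val = _mt7530_read(&p) until the condition
	 * holds or the timeout expires. */
	return readx_poll_timeout(_mt7530_read, &p, val,
				  !(val & busy_bit), 20, 100000);
}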
p                 115 drivers/net/dsa/mv88e6060.c static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p)
p                 117 drivers/net/dsa/mv88e6060.c 	int addr = REG_PORT(p);
p                 126 drivers/net/dsa/mv88e6060.c 			dsa_is_cpu_port(priv->ds, p) ?
p                 140 drivers/net/dsa/mv88e6060.c 			((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
p                 141 drivers/net/dsa/mv88e6060.c 			(dsa_is_cpu_port(priv->ds, p) ?
p                 143 drivers/net/dsa/mv88e6060.c 			 BIT(dsa_to_port(priv->ds, p)->cpu_dp->index)));
p                 152 drivers/net/dsa/mv88e6060.c 	return reg_write(priv, addr, PORT_ASSOC_VECTOR, BIT(p));
p                  15 drivers/net/dsa/mv88e6060.h #define REG_PORT(p)		(0x8 + (p))
p                1027 drivers/net/dsa/mv88e6xxx/chip.c 	u16 *p = _p;
p                1032 drivers/net/dsa/mv88e6xxx/chip.c 	memset(p, 0xff, 32 * sizeof(u16));
p                1040 drivers/net/dsa/mv88e6xxx/chip.c 			p[i] = reg;
p                 150 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
p                 154 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &cmd->valid,    31, 31, size, op);
p                 155 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &cmd->rdwrset,  30, 30, size, op);
p                 156 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &cmd->errors,   29, 29, size, op);
p                 157 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &cmd->valident, 27, 27, size, op);
p                 192 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &hostcmd, 25, 23, size, op);
p                 268 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	u8 *p = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
p                 271 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &cmd->valid,    31, 31, size, op);
p                 272 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &cmd->rdwrset,  30, 30, size, op);
p                 273 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &cmd->errors,   29, 29, size, op);
p                 274 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &cmd->valident, 27, 27, size, op);
p                 296 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	u8 *p = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
p                 301 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 		sja1105_pack(p, &mgmtroute, 26, 26, SJA1105_SIZE_DYN_CMD);
p                 328 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
p                 333 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 		sja1105_pack(p, &mgmtroute, 26, 26, SJA1105_SIZE_DYN_CMD);
p                 362 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	u8 *p = buf + SJA1105_SIZE_VLAN_LOOKUP_ENTRY + 4;
p                 365 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &cmd->valid,    31, 31, size, op);
p                 366 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &cmd->rdwrset,  30, 30, size, op);
p                 367 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &cmd->valident, 27, 27, size, op);
p                 379 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	u8 *p = buf + SJA1105_SIZE_L2_FORWARDING_ENTRY;
p                 382 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &cmd->valid,   31, 31, size, op);
p                 383 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &cmd->errors,  30, 30, size, op);
p                 384 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
p                 385 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &cmd->index,    4,  0, size, op);
p                 434 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	u8 *p = buf + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
p                 436 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &cmd->valid,   31, 31, size, op);
p                 437 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &cmd->errors,  30, 30, size, op);
p                 438 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
p                 439 drivers/net/dsa/sja1105/sja1105_dynamic_config.c 	sja1105_packing(p, &cmd->index,    2,  0, size, op);
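The sja1105_dynamic_config.c entries above illustrate a single pack/unpack helper driven by bit positions: p points at the command word, and each sja1105_packing(p, &field, msb, lsb, size, op) call moves one command field between the host structure and the wire buffer, with op selecting the direction. A hedged sketch of the same idea using the generic packing() helper from <linux/packing.h>; the example_dyn_cmd structure and function name are invented for illustration, while the bit positions mirror the valid/rdwrset/errors/valident fields listed above.

/* Hedged sketch of the command-word packing pattern shown above. */
#include <linux/types.h>
#include <linux/packing.h>

struct example_dyn_cmd {
	u64 valid;
	u64 rdwrset;
	u64 errors;
	u64 valident;
};

static void example_cmd_packing(void *buf, struct example_dyn_cmd *cmd,
				enum packing_op op)
{
	const size_t size = 4;	/* one 32-bit command word */

	/* Each call transfers one field between cmd and buf; op selects
	 * PACK (struct -> buffer) or UNPACK (buffer -> struct). */
	packing(buf, &cmd->valid,    31, 31, size, op, 0);
	packing(buf, &cmd->rdwrset,  30, 30, size, op, 0);
	packing(buf, &cmd->errors,   29, 29, size, op, 0);
	packing(buf, &cmd->valident, 27, 27, size, op, 0);
}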
p                  77 drivers/net/dsa/sja1105/sja1105_ethtool.c 	u32 *p = buf;
p                  79 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x0, &status->n_runt,       31, 24, 4);
p                  80 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x0, &status->n_soferr,     23, 16, 4);
p                  81 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x0, &status->n_alignerr,   15,  8, 4);
p                  82 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x0, &status->n_miierr,      7,  0, 4);
p                  83 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->typeerr,      27, 27, 4);
p                  84 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->sizeerr,      26, 26, 4);
p                  85 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->tctimeout,    25, 25, 4);
p                  86 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->priorerr,     24, 24, 4);
p                  87 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->nomaster,     23, 23, 4);
p                  88 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->memov,        22, 22, 4);
p                  89 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->memerr,       21, 21, 4);
p                  90 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->invtyp,       19, 19, 4);
p                  91 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->intcyov,      18, 18, 4);
p                  92 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->domerr,       17, 17, 4);
p                  93 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->pcfbagdrop,   16, 16, 4);
p                  94 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->spcprior,     15, 12, 4);
p                  95 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->ageprior,     11,  8, 4);
p                  96 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->portdrop,      6,  6, 4);
p                  97 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->lendrop,       5,  5, 4);
p                  98 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->bagdrop,       4,  4, 4);
p                  99 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->policeerr,     3,  3, 4);
p                 100 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->drpnona664err, 2,  2, 4);
p                 101 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->spcerr,        1,  1, 4);
p                 102 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->agedrp,        0,  0, 4);
p                 110 drivers/net/dsa/sja1105/sja1105_ethtool.c 	u32 *p = buf;
p                 112 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0xF, &status->n_n664err,    31,  0, 4);
p                 113 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0xE, &status->n_vlanerr,    31,  0, 4);
p                 114 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0xD, &status->n_unreleased, 31,  0, 4);
p                 115 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0xC, &status->n_sizeerr,    31,  0, 4);
p                 116 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0xB, &status->n_crcerr,     31,  0, 4);
p                 117 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0xA, &status->n_vlnotfound, 31,  0, 4);
p                 118 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x9, &status->n_ctpolerr,   31,  0, 4);
p                 119 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x8, &status->n_polerr,     31,  0, 4);
p                 120 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x7, &status->n_rxfrmsh,    31,  0, 4);
p                 121 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x6, &status->n_rxfrm,      31,  0, 4);
p                 122 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x5, &status->n_rxbytesh,   31,  0, 4);
p                 123 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x4, &status->n_rxbyte,     31,  0, 4);
p                 124 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x3, &status->n_txfrmsh,    31,  0, 4);
p                 125 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x2, &status->n_txfrm,      31,  0, 4);
p                 126 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->n_txbytesh,   31,  0, 4);
p                 127 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x0, &status->n_txbyte,     31,  0, 4);
p                 139 drivers/net/dsa/sja1105/sja1105_ethtool.c 	u32 *p = buf;
p                 141 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x3, &status->n_qfull,        31,  0, 4);
p                 142 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x2, &status->n_part_drop,    31,  0, 4);
p                 143 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x1, &status->n_egr_disabled, 31,  0, 4);
p                 144 drivers/net/dsa/sja1105/sja1105_ethtool.c 	sja1105_unpack(p + 0x0, &status->n_not_reach,    31,  0, 4);
p                 152 drivers/net/dsa/sja1105/sja1105_ethtool.c 	u32 *p = buf;
p                 156 drivers/net/dsa/sja1105/sja1105_ethtool.c 		sja1105_unpack(p + i, &status->qlevel_hwm[i], 24, 16, 4);
p                 157 drivers/net/dsa/sja1105/sja1105_ethtool.c 		sja1105_unpack(p + i, &status->qlevel[i],      8,  0, 4);
p                 385 drivers/net/dsa/sja1105/sja1105_ethtool.c 	u8 *p = data;
p                 391 drivers/net/dsa/sja1105/sja1105_ethtool.c 			strlcpy(p, sja1105_port_stats[i], ETH_GSTRING_LEN);
p                 392 drivers/net/dsa/sja1105/sja1105_ethtool.c 			p += ETH_GSTRING_LEN;
p                 398 drivers/net/dsa/sja1105/sja1105_ethtool.c 			strlcpy(p, sja1105pqrs_extra_port_stats[i],
p                 400 drivers/net/dsa/sja1105/sja1105_ethtool.c 			p += ETH_GSTRING_LEN;
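The sja1105_ethtool.c entries above (and the xgene ethtool entries further down) show the usual .get_strings idiom: p walks the flat data buffer in steps of ETH_GSTRING_LEN, copying one counter name per fixed-size slot, either with strlcpy (sja1105) or memcpy (xgene). A minimal hedged sketch of that walk; example_stat_names[] and example_get_strings() are invented for illustration.

/* Hedged sketch of the ethtool get_strings pattern shown above. */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ethtool.h>

static const char example_stat_names[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
};

static void example_get_strings(u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(example_stat_names); i++) {
		/* Each string occupies one fixed ETH_GSTRING_LEN slot. */
		memcpy(p, example_stat_names[i], ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
	}
}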
p                 321 drivers/net/dsa/sja1105/sja1105_spi.c 	u32 *p = buf;
p                 328 drivers/net/dsa/sja1105/sja1105_spi.c 	p--;
p                 329 drivers/net/dsa/sja1105/sja1105_spi.c 	sja1105_unpack(p + 0x1, &status->configs,   31, 31, 4);
p                 330 drivers/net/dsa/sja1105/sja1105_spi.c 	sja1105_unpack(p + 0x1, &status->crcchkl,   30, 30, 4);
p                 331 drivers/net/dsa/sja1105/sja1105_spi.c 	sja1105_unpack(p + 0x1, &status->ids,       29, 29, 4);
p                 332 drivers/net/dsa/sja1105/sja1105_spi.c 	sja1105_unpack(p + 0x1, &status->crcchkg,   28, 28, 4);
p                 627 drivers/net/dsa/sja1105/sja1105_static_config.c 	char *p = buf;
p                 630 drivers/net/dsa/sja1105/sja1105_static_config.c 	sja1105_pack(p, &config->device_id, 31, 0, 4);
p                 631 drivers/net/dsa/sja1105/sja1105_static_config.c 	p += SJA1105_SIZE_DEVICE_ID;
p                 644 drivers/net/dsa/sja1105/sja1105_static_config.c 		sja1105_table_header_pack_with_crc(p, &header);
p                 645 drivers/net/dsa/sja1105/sja1105_static_config.c 		p += SJA1105_SIZE_TABLE_HEADER;
p                 646 drivers/net/dsa/sja1105/sja1105_static_config.c 		table_start = p;
p                 651 drivers/net/dsa/sja1105/sja1105_static_config.c 			memset(p, 0, table->ops->packed_entry_size);
p                 652 drivers/net/dsa/sja1105/sja1105_static_config.c 			table->ops->packing(p, entry_ptr, PACK);
p                 653 drivers/net/dsa/sja1105/sja1105_static_config.c 			p += table->ops->packed_entry_size;
p                 655 drivers/net/dsa/sja1105/sja1105_static_config.c 		sja1105_table_write_crc(table_start, p);
p                 656 drivers/net/dsa/sja1105/sja1105_static_config.c 		p += 4;
p                 666 drivers/net/dsa/sja1105/sja1105_static_config.c 	memset(p, 0, SJA1105_SIZE_TABLE_HEADER);
p                 667 drivers/net/dsa/sja1105/sja1105_static_config.c 	sja1105_table_header_packing(p, &header, PACK);
p                 124 drivers/net/ethernet/8390/axnet_cs.c 	void *p = (char *)netdev_priv(dev) + sizeof(struct ei_device);
p                 125 drivers/net/ethernet/8390/axnet_cs.c 	return p;
p                 584 drivers/net/ethernet/8390/axnet_cs.c 	u_short p = mdio_read(mii_addr, info->phy_id, 5);
p                 587 drivers/net/ethernet/8390/axnet_cs.c 	    info->duplex_flag = (p & 0x0140) ? 0x80 : 0x00;
p                 588 drivers/net/ethernet/8390/axnet_cs.c 	    if (p)
p                 590 drivers/net/ethernet/8390/axnet_cs.c 			    (p & 0x0180) ? 100 : 10, (p & 0x0140) ? 'F' : 'H');
p                 280 drivers/net/ethernet/8390/mac8390.c 			volatile unsigned short *p = (unsigned short *)(membase + (j * 0x1000));
p                 281 drivers/net/ethernet/8390/mac8390.c 			if (*p != (0xA5A0 | j))
p                 216 drivers/net/ethernet/8390/pcnet_cs.c 	char *p = netdev_priv(dev);
p                 217 drivers/net/ethernet/8390/pcnet_cs.c 	return (struct pcnet_dev *)(p + sizeof(struct ei_device));
p                1052 drivers/net/ethernet/8390/pcnet_cs.c 	u_short p = mdio_read(mii_addr, info->phy_id, 5);
p                1056 drivers/net/ethernet/8390/pcnet_cs.c 	    outb((p & 0x0140) ? 4 : 0, nic_base + DLINK_DIAG);
p                1059 drivers/net/ethernet/8390/pcnet_cs.c 	    write_asic(dev->base_addr, 4, (p & 0x140) ? DL19FDUPLX : 0);
p                1063 drivers/net/ethernet/8390/pcnet_cs.c 		if (p)
p                1066 drivers/net/ethernet/8390/pcnet_cs.c 				((p & 0x0180) ? "100" : "10"),
p                1067 drivers/net/ethernet/8390/pcnet_cs.c 				((p & 0x0140) ? 'F' : 'H'));
p                  78 drivers/net/ethernet/aeroflex/greth.c static int greth_set_mac_add(struct net_device *dev, void *p);
p                1016 drivers/net/ethernet/aeroflex/greth.c static int greth_set_mac_add(struct net_device *dev, void *p)
p                1018 drivers/net/ethernet/aeroflex/greth.c 	struct sockaddr *addr = p;
p                1122 drivers/net/ethernet/aeroflex/greth.c static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
p                1127 drivers/net/ethernet/aeroflex/greth.c 	u32 *buff = p;
p                 366 drivers/net/ethernet/allwinner/sun4i-emac.c static int emac_set_mac_address(struct net_device *dev, void *p)
p                 368 drivers/net/ethernet/allwinner/sun4i-emac.c 	struct sockaddr *addr = p;
p                2715 drivers/net/ethernet/alteon/acenic.c static int ace_set_mac_addr(struct net_device *dev, void *p)
p                2719 drivers/net/ethernet/alteon/acenic.c 	struct sockaddr *addr=p;
p                 783 drivers/net/ethernet/alteon/acenic.h static int ace_set_mac_addr(struct net_device *dev, void *p);
p                 235 drivers/net/ethernet/amd/am79c961a.c 	unsigned char *p;
p                 255 drivers/net/ethernet/amd/am79c961a.c 	for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2)
p                 256 drivers/net/ethernet/amd/am79c961a.c 		write_rreg (dev->base_addr, i, p[0] | (p[1] << 8));
p                1499 drivers/net/ethernet/amd/amd8111e.c static int amd8111e_set_mac_address(struct net_device *dev, void *p)
p                1503 drivers/net/ethernet/amd/amd8111e.c 	struct sockaddr *addr = p;
p                 112 drivers/net/ethernet/amd/ni65.c #define PORT p->cmdr_addr
p                 254 drivers/net/ethernet/amd/ni65.c static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
p                 260 drivers/net/ethernet/amd/ni65.c static void ni65_free_buffer(struct priv *p);
p                 271 drivers/net/ethernet/amd/ni65.c static void ni65_set_performance(struct priv *p)
p                 275 drivers/net/ethernet/amd/ni65.c 	if( !(cards[p->cardno].config & 0x02) )
p                 296 drivers/net/ethernet/amd/ni65.c 	struct priv *p = dev->ml_priv;
p                 298 drivers/net/ethernet/amd/ni65.c                         cards[p->cardno].cardname,dev);
p                 322 drivers/net/ethernet/amd/ni65.c 	struct priv *p = dev->ml_priv;
p                 333 drivers/net/ethernet/amd/ni65.c 			if(p->tmd_skb[i]) {
p                 334 drivers/net/ethernet/amd/ni65.c 				dev_kfree_skb(p->tmd_skb[i]);
p                 335 drivers/net/ethernet/amd/ni65.c 				p->tmd_skb[i] = NULL;
p                 346 drivers/net/ethernet/amd/ni65.c 	struct priv *p = dev->ml_priv;
p                 349 drivers/net/ethernet/amd/ni65.c 	release_region(dev->base_addr, cards[p->cardno].total_size);
p                 350 drivers/net/ethernet/amd/ni65.c 	ni65_free_buffer(p);
p                 420 drivers/net/ethernet/amd/ni65.c 	struct priv *p;
p                 453 drivers/net/ethernet/amd/ni65.c 	p = dev->ml_priv;
p                 454 drivers/net/ethernet/amd/ni65.c 	p->cmdr_addr = ioaddr + cards[i].cmd_offset;
p                 455 drivers/net/ethernet/amd/ni65.c 	p->cardno = i;
p                 456 drivers/net/ethernet/amd/ni65.c 	spin_lock_init(&p->ring_lock);
p                 458 drivers/net/ethernet/amd/ni65.c 	printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr);
p                 464 drivers/net/ethernet/amd/ni65.c 		 ni65_free_buffer(p);
p                 465 drivers/net/ethernet/amd/ni65.c 		 release_region(ioaddr, cards[p->cardno].total_size);
p                 477 drivers/net/ethernet/amd/ni65.c 		p->features = INIT_RING_BEFORE_START;
p                 481 drivers/net/ethernet/amd/ni65.c 		p->features = 0x0;
p                 506 drivers/net/ethernet/amd/ni65.c 				ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */
p                 519 drivers/net/ethernet/amd/ni65.c 				ni65_free_buffer(p);
p                 520 drivers/net/ethernet/amd/ni65.c 				release_region(ioaddr, cards[p->cardno].total_size);
p                 533 drivers/net/ethernet/amd/ni65.c 			ni65_init_lance(p,dev->dev_addr,0,0);
p                 541 drivers/net/ethernet/amd/ni65.c 				ni65_free_buffer(p);
p                 542 drivers/net/ethernet/amd/ni65.c 				release_region(ioaddr, cards[p->cardno].total_size);
p                 551 drivers/net/ethernet/amd/ni65.c 	if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0)
p                 554 drivers/net/ethernet/amd/ni65.c 		ni65_free_buffer(p);
p                 555 drivers/net/ethernet/amd/ni65.c 		release_region(ioaddr, cards[p->cardno].total_size);
p                 569 drivers/net/ethernet/amd/ni65.c static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
p                 577 drivers/net/ethernet/amd/ni65.c 		p->ib.eaddr[i] = daddr[i];
p                 580 drivers/net/ethernet/amd/ni65.c 		p->ib.filter[i] = filter;
p                 581 drivers/net/ethernet/amd/ni65.c 	p->ib.mode = mode;
p                 583 drivers/net/ethernet/amd/ni65.c 	p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK;
p                 584 drivers/net/ethernet/amd/ni65.c 	p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK;
p                 586 drivers/net/ethernet/amd/ni65.c 	pib = (u32) isa_virt_to_bus(&p->ib);
p                 641 drivers/net/ethernet/amd/ni65.c 	struct priv *p;
p                 651 drivers/net/ethernet/amd/ni65.c 	p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
p                 652 drivers/net/ethernet/amd/ni65.c 	memset((char *)p, 0, sizeof(struct priv));
p                 653 drivers/net/ethernet/amd/ni65.c 	p->self = ptr;
p                 658 drivers/net/ethernet/amd/ni65.c 		p->tmd_skb[i] = NULL;
p                 660 drivers/net/ethernet/amd/ni65.c 		p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0);
p                 661 drivers/net/ethernet/amd/ni65.c 		if(!p->tmdbounce[i]) {
p                 662 drivers/net/ethernet/amd/ni65.c 			ni65_free_buffer(p);
p                 670 drivers/net/ethernet/amd/ni65.c 		p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
p                 671 drivers/net/ethernet/amd/ni65.c 		if(!p->recv_skb[i]) {
p                 672 drivers/net/ethernet/amd/ni65.c 			ni65_free_buffer(p);
p                 676 drivers/net/ethernet/amd/ni65.c 		p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0);
p                 677 drivers/net/ethernet/amd/ni65.c 		if(!p->recvbounce[i]) {
p                 678 drivers/net/ethernet/amd/ni65.c 			ni65_free_buffer(p);
p                 690 drivers/net/ethernet/amd/ni65.c static void ni65_free_buffer(struct priv *p)
p                 694 drivers/net/ethernet/amd/ni65.c 	if(!p)
p                 698 drivers/net/ethernet/amd/ni65.c 		kfree(p->tmdbounce[i]);
p                 700 drivers/net/ethernet/amd/ni65.c 		dev_kfree_skb(p->tmd_skb[i]);
p                 707 drivers/net/ethernet/amd/ni65.c 		dev_kfree_skb(p->recv_skb[i]);
p                 709 drivers/net/ethernet/amd/ni65.c 		kfree(p->recvbounce[i]);
p                 712 drivers/net/ethernet/amd/ni65.c 	kfree(p->self);
p                 719 drivers/net/ethernet/amd/ni65.c static void ni65_stop_start(struct net_device *dev,struct priv *p)
p                 728 drivers/net/ethernet/amd/ni65.c 	if(p->features & INIT_RING_BEFORE_START) {
p                 736 drivers/net/ethernet/amd/ni65.c 		if(p->xmit_queued) {
p                 738 drivers/net/ethernet/amd/ni65.c 				if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN))
p                 740 drivers/net/ethernet/amd/ni65.c 				p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
p                 741 drivers/net/ethernet/amd/ni65.c 				if(p->tmdlast == p->tmdnum)
p                 747 drivers/net/ethernet/amd/ni65.c 			struct tmd *tmdp = p->tmdhead + i;
p                 749 drivers/net/ethernet/amd/ni65.c 			skb_save[i] = p->tmd_skb[i];
p                 757 drivers/net/ethernet/amd/ni65.c 			struct rmd *rmdp = p->rmdhead + i;
p                 760 drivers/net/ethernet/amd/ni65.c 		p->tmdnum = p->xmit_queued = 0;
p                 764 drivers/net/ethernet/amd/ni65.c 			int num = (i + p->tmdlast) & (TMDNUM-1);
p                 765 drivers/net/ethernet/amd/ni65.c 			p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */
p                 766 drivers/net/ethernet/amd/ni65.c 			p->tmdhead[i].blen = blen[num];
p                 767 drivers/net/ethernet/amd/ni65.c 			if(p->tmdhead[i].u.s.status & XMIT_OWN) {
p                 768 drivers/net/ethernet/amd/ni65.c 				 p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
p                 769 drivers/net/ethernet/amd/ni65.c 				 p->xmit_queued = 1;
p                 773 drivers/net/ethernet/amd/ni65.c 			p->tmd_skb[i] = skb_save[num];
p                 776 drivers/net/ethernet/amd/ni65.c 		p->rmdnum = p->tmdlast = 0;
p                 777 drivers/net/ethernet/amd/ni65.c 		if(!p->lock)
p                 778 drivers/net/ethernet/amd/ni65.c 			if (p->tmdnum || !p->xmit_queued)
p                 792 drivers/net/ethernet/amd/ni65.c 	 struct priv *p = dev->ml_priv;
p                 795 drivers/net/ethernet/amd/ni65.c 	 p->lock = 0;
p                 796 drivers/net/ethernet/amd/ni65.c 	 p->xmit_queued = 0;
p                 808 drivers/net/ethernet/amd/ni65.c 							cards[p->cardno].cardname,(int) i);
p                 815 drivers/net/ethernet/amd/ni65.c 	 p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0;
p                 818 drivers/net/ethernet/amd/ni65.c 		 struct tmd *tmdp = p->tmdhead + i;
p                 820 drivers/net/ethernet/amd/ni65.c 		 if(p->tmd_skb[i]) {
p                 821 drivers/net/ethernet/amd/ni65.c 			 dev_kfree_skb(p->tmd_skb[i]);
p                 822 drivers/net/ethernet/amd/ni65.c 			 p->tmd_skb[i] = NULL;
p                 832 drivers/net/ethernet/amd/ni65.c 		 struct rmd *rmdp = p->rmdhead + i;
p                 834 drivers/net/ethernet/amd/ni65.c 		 rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data);
p                 836 drivers/net/ethernet/amd/ni65.c 		 rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]);
p                 844 drivers/net/ethernet/amd/ni65.c 		 ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
p                 846 drivers/net/ethernet/amd/ni65.c 		 ni65_init_lance(p,dev->dev_addr,0xff,0x0);
p                 848 drivers/net/ethernet/amd/ni65.c 		 ni65_init_lance(p,dev->dev_addr,0x00,0x00);
p                 856 drivers/net/ethernet/amd/ni65.c 		 ni65_set_performance(p);
p                 875 drivers/net/ethernet/amd/ni65.c 	struct priv *p;
p                 878 drivers/net/ethernet/amd/ni65.c 	p = dev->ml_priv;
p                 880 drivers/net/ethernet/amd/ni65.c 	spin_lock(&p->ring_lock);
p                 908 drivers/net/ethernet/amd/ni65.c 					printk("%02x ",p->rmdhead[i].u.s.status);
p                 915 drivers/net/ethernet/amd/ni65.c 				ni65_stop_start(dev,p);
p                 927 drivers/net/ethernet/amd/ni65.c 		 num2 = (p->rmdnum + i) & (RMDNUM-1);
p                 928 drivers/net/ethernet/amd/ni65.c 		 if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
p                 935 drivers/net/ethernet/amd/ni65.c 			num1 = (p->rmdnum + k) & (RMDNUM-1);
p                 936 drivers/net/ethernet/amd/ni65.c 			if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
p                 947 drivers/net/ethernet/amd/ni65.c 				sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */
p                 951 drivers/net/ethernet/amd/ni65.c 			printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
p                 954 drivers/net/ethernet/amd/ni65.c 		p->rmdnum = num1;
p                 956 drivers/net/ethernet/amd/ni65.c 		if((p->rmdhead[num2].u.s.status & RCV_OWN))
p                 967 drivers/net/ethernet/amd/ni65.c 		ni65_stop_start(dev,p);
p                 972 drivers/net/ethernet/amd/ni65.c 	spin_unlock(&p->ring_lock);
p                 982 drivers/net/ethernet/amd/ni65.c 	struct priv *p = dev->ml_priv;
p                 984 drivers/net/ethernet/amd/ni65.c 	while(p->xmit_queued)
p                 986 drivers/net/ethernet/amd/ni65.c 		struct tmd *tmdp = p->tmdhead + p->tmdlast;
p                1008 drivers/net/ethernet/amd/ni65.c 				if(p->features & INIT_RING_BEFORE_START) {
p                1010 drivers/net/ethernet/amd/ni65.c 					ni65_stop_start(dev,p);
p                1014 drivers/net/ethernet/amd/ni65.c 				 ni65_stop_start(dev,p);
p                1028 drivers/net/ethernet/amd/ni65.c 		if(p->tmd_skb[p->tmdlast]) {
p                1029 drivers/net/ethernet/amd/ni65.c 			 dev_consume_skb_irq(p->tmd_skb[p->tmdlast]);
p                1030 drivers/net/ethernet/amd/ni65.c 			 p->tmd_skb[p->tmdlast] = NULL;
p                1034 drivers/net/ethernet/amd/ni65.c 		p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
p                1035 drivers/net/ethernet/amd/ni65.c 		if(p->tmdlast == p->tmdnum)
p                1036 drivers/net/ethernet/amd/ni65.c 			p->xmit_queued = 0;
p                1049 drivers/net/ethernet/amd/ni65.c 	struct priv *p = dev->ml_priv;
p                1051 drivers/net/ethernet/amd/ni65.c 	rmdp = p->rmdhead + p->rmdnum;
p                1095 drivers/net/ethernet/amd/ni65.c 					skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
p                1098 drivers/net/ethernet/amd/ni65.c 					struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
p                1100 drivers/net/ethernet/amd/ni65.c 					p->recv_skb[p->rmdnum] = skb;
p                1107 drivers/net/ethernet/amd/ni65.c 				skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
p                1127 drivers/net/ethernet/amd/ni65.c 		p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
p                1128 drivers/net/ethernet/amd/ni65.c 		rmdp = p->rmdhead + p->rmdnum;
p                1139 drivers/net/ethernet/amd/ni65.c 	struct priv *p = dev->ml_priv;
p                1143 drivers/net/ethernet/amd/ni65.c 		printk("%02x ",p->tmdhead[i].u.s.status);
p                1157 drivers/net/ethernet/amd/ni65.c 	struct priv *p = dev->ml_priv;
p                1161 drivers/net/ethernet/amd/ni65.c 	if (test_and_set_bit(0, (void*)&p->lock)) {
p                1175 drivers/net/ethernet/amd/ni65.c 			skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum],
p                1179 drivers/net/ethernet/amd/ni65.c 				memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
p                1182 drivers/net/ethernet/amd/ni65.c 			spin_lock_irqsave(&p->ring_lock, flags);
p                1183 drivers/net/ethernet/amd/ni65.c 			tmdp = p->tmdhead + p->tmdnum;
p                1184 drivers/net/ethernet/amd/ni65.c 			tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]);
p                1185 drivers/net/ethernet/amd/ni65.c 			p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1);
p                1190 drivers/net/ethernet/amd/ni65.c 			spin_lock_irqsave(&p->ring_lock, flags);
p                1192 drivers/net/ethernet/amd/ni65.c 			tmdp = p->tmdhead + p->tmdnum;
p                1194 drivers/net/ethernet/amd/ni65.c 			p->tmd_skb[p->tmdnum] = skb;
p                1202 drivers/net/ethernet/amd/ni65.c 		p->xmit_queued = 1;
p                1203 drivers/net/ethernet/amd/ni65.c 		p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
p                1205 drivers/net/ethernet/amd/ni65.c 		if(p->tmdnum != p->tmdlast)
p                1208 drivers/net/ethernet/amd/ni65.c 		p->lock = 0;
p                1210 drivers/net/ethernet/amd/ni65.c 		spin_unlock_irqrestore(&p->ring_lock, flags);
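The ni65.c entries above rely on a classic power-of-two descriptor ring: indices such as tmdnum, tmdlast and rmdnum advance with (idx + 1) & (TMDNUM - 1), which wraps around without a modulo as long as the ring size is a power of two. A hedged sketch of that indexing trick; EX_RING_SIZE, struct example_ring and the helpers are invented for illustration.

/* Hedged sketch: power-of-two ring index advance, as used by the ni65
 * tmdnum/tmdlast/rmdnum counters above. The mask only works because the
 * ring size is a power of two. */
#define EX_RING_SIZE 16		/* must be a power of two */

struct example_ring {
	unsigned int head;	/* next slot to fill    */
	unsigned int tail;	/* next slot to reclaim */
};

static unsigned int example_ring_next(unsigned int idx)
{
	return (idx + 1) & (EX_RING_SIZE - 1);
}

static int example_ring_full(const struct example_ring *r)
{
	/* Full when advancing head would collide with tail. */
	return example_ring_next(r->head) == r->tail;
}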
p                  99 drivers/net/ethernet/apm/xgene-v2/ethtool.c 	u8 *p = data;
p                 106 drivers/net/ethernet/apm/xgene-v2/ethtool.c 		memcpy(p, gstrings_stats[i].name, ETH_GSTRING_LEN);
p                 107 drivers/net/ethernet/apm/xgene-v2/ethtool.c 		p += ETH_GSTRING_LEN;
p                 111 drivers/net/ethernet/apm/xgene-v2/ethtool.c 		memcpy(p, gstrings_extd_stats[i].name, ETH_GSTRING_LEN);
p                 112 drivers/net/ethernet/apm/xgene-v2/ethtool.c 		p += ETH_GSTRING_LEN;
p                 194 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c 	u8 *p = data;
p                 200 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c 		memcpy(p, gstrings_stats[i].name, ETH_GSTRING_LEN);
p                 201 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c 		p += ETH_GSTRING_LEN;
p                 205 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c 		memcpy(p, gstrings_extd_stats[i].name, ETH_GSTRING_LEN);
p                 206 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c 		p += ETH_GSTRING_LEN;
p                 683 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
p                 685 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
p                 688 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	if (ioread32(p->ring_csr_addr + SRST_ADDR))
p                 426 drivers/net/ethernet/apm/xgene/xgene_enet_hw.h bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
p                1148 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	void *p;
p                1177 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				p = page_pool->frag_page;
p                1178 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				if (p)
p                1179 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 					devm_kfree(dev, p);
p                1181 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				p = page_pool->frag_dma_addr;
p                1182 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				if (p)
p                1183 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 					devm_kfree(dev, p);
p                1269 drivers/net/ethernet/apm/xgene/xgene_enet_main.c static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
p                1273 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	if (p->enet_id == XGENE_ENET1) {
p                1274 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		switch (p->phy_mode) {
p                1279 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			owner = (!p->port_id) ? RING_OWNER_ETH0 :
p                1284 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
p                  14 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
p                  16 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	iowrite32(val, p->eth_csr_addr + offset);
p                  19 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_enet_wr_clkrst_csr(struct xgene_enet_pdata *p, u32 offset,
p                  22 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	iowrite32(val, p->base_addr + offset);
p                  25 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p,
p                  28 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	iowrite32(val, p->eth_ring_if_addr + offset);
p                  31 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p,
p                  34 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	iowrite32(val, p->eth_diag_csr_addr + offset);
p                  45 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static u32 xgene_enet_rd_csr(struct xgene_enet_pdata *p, u32 offset)
p                  47 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	return ioread32(p->eth_csr_addr + offset);
p                  50 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset)
p                  52 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	return ioread32(p->eth_diag_csr_addr + offset);
p                  55 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static u32 xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *p, u32 offset)
p                  57 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	return ioread32(p->mcx_mac_csr_addr + offset);
p                  60 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
p                  62 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	struct net_device *ndev = p->ndev;
p                  66 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	shutdown = xgene_enet_rd_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR);
p                  67 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
p                  74 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
p                  77 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
p                 104 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
p                 108 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	val = (p->enet_id == XGENE_ENET1) ? 0xffffffff : 0;
p                 109 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val);
p                 110 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val);
p                 113 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_mii_phy_write(struct xgene_enet_pdata *p, u8 phy_id,
p                 120 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);
p                 123 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_mac(p, MII_MGMT_CONTROL_ADDR, wr_data);
p                 126 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
p                 132 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	netdev_err(p->ndev, "MII_MGMT write failed\n");
p                 135 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static u32 xgene_mii_phy_read(struct xgene_enet_pdata *p, u8 phy_id, u32 reg)
p                 141 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);
p                 142 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
p                 145 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
p                 147 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 			data = xgene_enet_rd_mac(p, MII_MGMT_STATUS_ADDR);
p                 148 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 			xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, 0);
p                 155 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	netdev_err(p->ndev, "MII_MGMT read failed\n");
p                 160 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_sgmac_reset(struct xgene_enet_pdata *p)
p                 162 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, SOFT_RESET1);
p                 163 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, 0);
p                 166 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p)
p                 169 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	u8 *dev_addr = p->ndev->dev_addr;
p                 173 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_mac(p, STATION_ADDR0_ADDR, addr0);
p                 175 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	addr1 = xgene_enet_rd_mac(p, STATION_ADDR1_ADDR);
p                 177 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_mac(p, STATION_ADDR1_ADDR, addr1);
p                 180 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static u32 xgene_enet_link_status(struct xgene_enet_pdata *p)
p                 184 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	data = xgene_mii_phy_read(p, INT_PHY_ADDR,
p                 188 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		p->phy_speed = SPEED_1000;
p                 190 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		p->phy_speed = SPEED_100;
p                 192 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		p->phy_speed = SPEED_10;
p                 197 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_sgmii_configure(struct xgene_enet_pdata *p)
p                 199 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2,
p                 201 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x9000);
p                 202 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);
p                 205 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_sgmii_tbi_control_reset(struct xgene_enet_pdata *p)
p                 207 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2,
p                 209 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);
p                 212 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_sgmii_reset(struct xgene_enet_pdata *p)
p                 216 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	if (p->phy_speed == SPEED_UNKNOWN)
p                 219 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	value = xgene_mii_phy_read(p, INT_PHY_ADDR,
p                 222 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		xgene_sgmii_tbi_control_reset(p);
p                 225 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_sgmac_set_speed(struct xgene_enet_pdata *p)
p                 231 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_sgmii_reset(p);
p                 233 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	if (p->enet_id == XGENE_ENET1) {
p                 234 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		icm0_addr = ICM_CONFIG0_REG_0_ADDR + p->port_id * OFFSET_8;
p                 235 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		icm2_addr = ICM_CONFIG2_REG_0_ADDR + p->port_id * OFFSET_4;
p                 243 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	icm0 = xgene_enet_rd_mcx_csr(p, icm0_addr);
p                 244 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	icm2 = xgene_enet_rd_mcx_csr(p, icm2_addr);
p                 245 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	mc2 = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR);
p                 246 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	intf_ctl = xgene_enet_rd_mac(p, INTERFACE_CONTROL_ADDR);
p                 248 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	switch (p->phy_speed) {
p                 268 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		value = xgene_enet_rd_csr(p, debug_addr);
p                 270 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		xgene_enet_wr_csr(p, debug_addr, value);
p                 275 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, mc2);
p                 276 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, intf_ctl);
p                 277 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_mcx_csr(p, icm0_addr, icm0);
p                 278 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_mcx_csr(p, icm2_addr, icm2);
p                 286 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_sgmii_enable_autoneg(struct xgene_enet_pdata *p)
p                 290 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_sgmii_configure(p);
p                 293 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		data = xgene_mii_phy_read(p, INT_PHY_ADDR,
p                 300 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		netdev_err(p->ndev, "Auto-negotiation failed\n");
p                 303 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
p                 307 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR);
p                 314 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data);
p                 317 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_sgmac_flowctl_tx(struct xgene_enet_pdata *p, bool enable)
p                 319 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_sgmac_rxtx(p, TX_FLOW_EN, enable);
p                 321 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	p->mac_ops->enable_tx_pause(p, enable);
p                 329 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_sgmac_init(struct xgene_enet_pdata *p)
p                 337 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	if (!(p->enet_id == XGENE_ENET2 && p->mdio_driver))
p                 338 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		xgene_sgmac_reset(p);
p                 340 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_sgmii_enable_autoneg(p);
p                 341 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_sgmac_set_speed(p);
p                 342 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_sgmac_set_mac_addr(p);
p                 344 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	if (p->enet_id == XGENE_ENET1) {
p                 348 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		offset = p->port_id * OFFSET_4;
p                 357 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	data = xgene_enet_rd_csr(p, enet_spare_cfg_reg);
p                 359 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_csr(p, enet_spare_cfg_reg, data);
p                 362 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
p                 364 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data);
p                 367 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	data = xgene_enet_rd_csr(p, rsif_config_reg);
p                 369 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_csr(p, rsif_config_reg, data);
p                 372 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	multi_dpf_reg = (p->enet_id == XGENE_ENET1) ? CSR_MULTI_DPF0_ADDR :
p                 374 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	data = xgene_enet_rd_mcx_csr(p, multi_dpf_reg);
p                 376 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_mcx_csr(p, multi_dpf_reg, data);
p                 378 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	if (p->enet_id != XGENE_ENET1) {
p                 379 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		data = xgene_enet_rd_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR);
p                 381 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		xgene_enet_wr_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR, data);
p                 384 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	pause_thres_reg = (p->enet_id == XGENE_ENET1) ? RXBUF_PAUSE_THRESH :
p                 386 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	pause_off_thres_reg = (p->enet_id == XGENE_ENET1) ?
p                 389 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	if (p->enet_id == XGENE_ENET1) {
p                 390 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		data1 = xgene_enet_rd_csr(p, pause_thres_reg);
p                 391 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		data2 = xgene_enet_rd_csr(p, pause_off_thres_reg);
p                 393 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		if (!(p->port_id % 2)) {
p                 401 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		xgene_enet_wr_csr(p, pause_thres_reg, data1);
p                 402 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		xgene_enet_wr_csr(p, pause_off_thres_reg, data2);
p                 405 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		xgene_enet_wr_csr(p, pause_thres_reg, data);
p                 408 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_sgmac_flowctl_tx(p, p->tx_pause);
p                 409 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_sgmac_flowctl_rx(p, p->rx_pause);
p                 412 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
p                 413 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
p                 414 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg, RESUME_RX0);
p                 417 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_sgmac_rx_enable(struct xgene_enet_pdata *p)
p                 419 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_sgmac_rxtx(p, RX_EN, true);
p                 422 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_sgmac_tx_enable(struct xgene_enet_pdata *p)
p                 424 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_sgmac_rxtx(p, TX_EN, true);
p                 427 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_sgmac_rx_disable(struct xgene_enet_pdata *p)
p                 429 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_sgmac_rxtx(p, RX_EN, false);
p                 432 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
p                 434 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_sgmac_rxtx(p, TX_EN, false);
p                 437 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static int xgene_enet_reset(struct xgene_enet_pdata *p)
p                 439 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	struct device *dev = &p->pdev->dev;
p                 441 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	if (!xgene_ring_mgr_init(p))
p                 444 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	if (p->mdio_driver && p->enet_id == XGENE_ENET2) {
p                 445 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		xgene_enet_config_ring_if_assoc(p);
p                 449 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	if (p->enet_id == XGENE_ENET2)
p                 450 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		xgene_enet_wr_clkrst_csr(p, XGENET_CONFIG_REG_ADDR, SGMII_EN);
p                 453 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		if (!IS_ERR(p->clk)) {
p                 454 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 			clk_prepare_enable(p->clk);
p                 456 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 			clk_disable_unprepare(p->clk);
p                 458 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 			clk_prepare_enable(p->clk);
p                 465 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		status = acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
p                 468 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 			acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
p                 474 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	if (!p->port_id) {
p                 475 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		xgene_enet_ecc_init(p);
p                 476 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		xgene_enet_config_ring_if_assoc(p);
p                 482 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
p                 487 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	u32 offset = p->port_id * MAC_OFFSET;
p                 490 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	if (p->enet_id == XGENE_ENET1) {
p                 499 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_csr(p, cle_bypass_reg0 + offset, data);
p                 505 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
p                 524 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
p                 526 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	struct device *dev = &p->pdev->dev;
p                 529 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		if (!IS_ERR(p->clk))
p                 530 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 			clk_disable_unprepare(p->clk);
p                 536 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	struct xgene_enet_pdata *p = container_of(to_delayed_work(work),
p                 538 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	struct net_device *ndev = p->ndev;
p                 541 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	link = xgene_enet_link_status(p);
p                 545 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 			xgene_sgmac_set_speed(p);
p                 546 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 			xgene_sgmac_rx_enable(p);
p                 547 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 			xgene_sgmac_tx_enable(p);
p                 549 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 				    p->phy_speed);
p                 554 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 			xgene_sgmac_rx_disable(p);
p                 555 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 			xgene_sgmac_tx_disable(p);
p                 562 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	schedule_delayed_work(&p->link_work, poll_interval);
p                 565 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c static void xgene_sgmac_enable_tx_pause(struct xgene_enet_pdata *p, bool enable)
p                 569 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	if (p->enet_id == XGENE_ENET1) {
p                 570 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		ecm_cfg_addr = (!(p->port_id % 2)) ? CSR_ECM_CFG_0_ADDR :
p                 576 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	data = xgene_enet_rd_mcx_csr(p, ecm_cfg_addr);
p                 581 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	xgene_enet_wr_mcx_csr(p, ecm_cfg_addr, data);
p                 524 drivers/net/ethernet/apple/bmac.c 	unsigned char *p = addr;
p                 533 drivers/net/ethernet/apple/bmac.c 		dev->dev_addr[i] = p[i];
p                 376 drivers/net/ethernet/apple/mace.c     unsigned char *p = addr;
p                 388 drivers/net/ethernet/apple/mace.c 	out_8(&mb->padr, dev->dev_addr[i] = p[i]);
p                 322 drivers/net/ethernet/apple/macmace.c 	unsigned char *p = addr;
p                 334 drivers/net/ethernet/apple/macmace.c 		mb->padr = dev->dev_addr[i] = p[i];
p                  15 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c 				struct ethtool_regs *regs, void *p)
p                  20 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c 	memset(p, 0, regs_count * sizeof(u32));
p                  21 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c 	aq_nic_get_regs(aq_nic, regs, p);
p                 135 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c 	u8 *p = data;
p                 138 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c 		memcpy(p, aq_ethtool_stat_names,
p                 140 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c 		p = p + sizeof(aq_ethtool_stat_names);
p                 145 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c 				snprintf(p, ETH_GSTRING_LEN,
p                 147 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c 				p += ETH_GSTRING_LEN;
p                 688 drivers/net/ethernet/aquantia/atlantic/aq_nic.c int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
p                 690 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	u32 *regs_buff = p;
p                 130 drivers/net/ethernet/aquantia/atlantic/aq_nic.h int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
p                 285 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c 				  u32 *p, u32 cnt)
p                 320 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c 		*(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL);
p                 330 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
p                 346 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c 			aq_hw_write_reg(self, 0x328, p[offset]);
p                 363 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c 			aq_hw_write_reg(self, 0x20C, p[offset]);
p                 736 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
p                 760 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c 	*p = chip_features;
p                 441 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
p                 476 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h 				  u32 *p, u32 cnt);
p                 767 drivers/net/ethernet/arc/emac_main.c static int arc_emac_set_address(struct net_device *ndev, void *p)
p                 769 drivers/net/ethernet/arc/emac_main.c 	struct sockaddr *addr = p;
p                 127 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 			   struct ethtool_regs *regs, void *p)
p                 131 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	u32 *regs_buff = p;
p                 134 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	memset(p, 0, AT_REGS_LEN);
p                 137 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_PM_CTRL, 		  p++);
p                 138 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL,  p++);
p                 139 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_TWSI_CTRL, 		  p++);
p                 140 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL,   p++);
p                 141 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_MASTER_CTRL, 	  p++);
p                 142 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_MANUAL_TIMER_INIT,    p++);
p                 143 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_IRQ_MODRT_TIMER_INIT, p++);
p                 144 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_GPHY_CTRL, 		  p++);
p                 145 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_LINK_CTRL, 		  p++);
p                 146 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_IDLE_STATUS, 	  p++);
p                 147 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_MDIO_CTRL, 		  p++);
p                 148 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_SERDES,		  p++);
p                 149 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_MAC_CTRL, 		  p++);
p                 150 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_MAC_IPG_IFG, 	  p++);
p                 151 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_MAC_STA_ADDR, 	  p++);
p                 152 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_MAC_STA_ADDR+4, 	  p++);
p                 153 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_RX_HASH_TABLE, 	  p++);
p                 154 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_RX_HASH_TABLE+4, 	  p++);
p                 155 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_RXQ_CTRL, 		  p++);
p                 156 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_TXQ_CTRL, 		  p++);
p                 157 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_MTU, 		  p++);
p                 158 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c 	AT_READ_REG(hw, REG_WOL_CTRL, 		  p++);
p                 448 drivers/net/ethernet/atheros/atl1c/atl1c_main.c static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
p                 451 drivers/net/ethernet/atheros/atl1c/atl1c_main.c 	struct sockaddr *addr = p;
p                 146 drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c 			   struct ethtool_regs *regs, void *p)
p                 150 drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c 	u32 *regs_buff = p;
p                 153 drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c 	memset(p, 0, AT_REGS_LEN * sizeof(u32));
p                 371 drivers/net/ethernet/atheros/atl1e/atl1e_main.c static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
p                 374 drivers/net/ethernet/atheros/atl1e/atl1e_main.c 	struct sockaddr *addr = p;
p                 101 drivers/net/ethernet/atheros/atl1e/atl1e_param.c 			struct atl1e_opt_list { int i; char *str; } *p;
p                 139 drivers/net/ethernet/atheros/atl1e/atl1e_param.c 				ent = &opt->arg.l.p[i];
p                 124 drivers/net/ethernet/atheros/atlx/atl1.c 			} *p;
p                 160 drivers/net/ethernet/atheros/atlx/atl1.c 				ent = &opt->arg.l.p[i];
p                3173 drivers/net/ethernet/atheros/atlx/atl1.c 	char *p;
p                3176 drivers/net/ethernet/atheros/atlx/atl1.c 		p = (char *)adapter+atl1_gstrings_stats[i].stat_offset;
p                3178 drivers/net/ethernet/atheros/atlx/atl1.c 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
p                3399 drivers/net/ethernet/atheros/atlx/atl1.c 	void *p)
p                3404 drivers/net/ethernet/atheros/atlx/atl1.c 	u32 *regbuf = p;
p                3578 drivers/net/ethernet/atheros/atlx/atl1.c 	u8 *p = data;
p                3584 drivers/net/ethernet/atheros/atlx/atl1.c 			memcpy(p, atl1_gstrings_stats[i].stat_string,
p                3586 drivers/net/ethernet/atheros/atlx/atl1.c 			p += ETH_GSTRING_LEN;
p                 929 drivers/net/ethernet/atheros/atlx/atl2.c static int atl2_set_mac(struct net_device *netdev, void *p)
p                 932 drivers/net/ethernet/atheros/atlx/atl2.c 	struct sockaddr *addr = p;
p                1839 drivers/net/ethernet/atheros/atlx/atl2.c 	struct ethtool_regs *regs, void *p)
p                1843 drivers/net/ethernet/atheros/atlx/atl2.c 	u32 *regs_buff = p;
p                1846 drivers/net/ethernet/atheros/atlx/atl2.c 	memset(p, 0, sizeof(u32) * ATL2_REGS_LEN);
p                2894 drivers/net/ethernet/atheros/atlx/atl2.c 			struct atl2_opt_list { int i; char *str; } *p;
p                2928 drivers/net/ethernet/atheros/atlx/atl2.c 			ent = &opt->arg.l.p[i];
p                  61 drivers/net/ethernet/atheros/atlx/atlx.c static int atlx_set_mac(struct net_device *netdev, void *p)
p                  64 drivers/net/ethernet/atheros/atlx/atlx.c 	struct sockaddr *addr = p;
p                1379 drivers/net/ethernet/broadcom/b44.c static int b44_set_mac_addr(struct net_device *dev, void *p)
p                1382 drivers/net/ethernet/broadcom/b44.c 	struct sockaddr *addr = p;
p                 233 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		dma_addr_t p;
p                 245 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			p = dma_map_single(&priv->pdev->dev, skb->data,
p                 248 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			desc->address = p;
p                 646 drivers/net/ethernet/broadcom/bcm63xx_enet.c static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
p                 649 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	struct sockaddr *addr = p;
p                 861 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	void *p;
p                 926 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
p                 927 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	if (!p) {
p                 933 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	priv->rx_desc_cpu = p;
p                 937 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
p                 938 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	if (!p) {
p                 944 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	priv->tx_desc_cpu = p;
p                1347 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		char *p;
p                1354 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		p = (char *)priv + s->stat_offset;
p                1357 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			*(u64 *)p += val;
p                1359 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			*(u32 *)p += val;
p                1396 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		char *p;
p                1400 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			p = (char *)&netdev->stats;
p                1402 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			p = (char *)priv;
p                1403 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		p += s->stat_offset;
p                1405 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			*(u64 *)p : *(u32 *)p;
p                2085 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	void *p;
p                2109 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
p                2110 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	if (!p) {
p                2117 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	priv->rx_desc_cpu = p;
p                2121 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
p                2122 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	if (!p) {
p                2129 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	priv->tx_desc_cpu = p;
p                2550 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		char *p;
p                2560 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		p = (char *)priv + s->stat_offset;
p                2564 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			*(u64 *)p = ((u64)hi << 32 | lo);
p                2566 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			*(u32 *)p = lo;
p                2572 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		char *p;
p                2577 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			p = (char *)&netdev->stats + s->stat_offset;
p                2579 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			p = (char *)priv + s->stat_offset;
p                2582 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			*(u64 *)p : *(u32 *)p;
p                2764 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	void __iomem *p[3];
p                2770 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		p[i] = devm_platform_ioremap_resource(pdev, i);
p                2771 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		if (IS_ERR(p[i]))
p                2772 drivers/net/ethernet/broadcom/bcm63xx_enet.c 			return PTR_ERR(p[i]);
p                2775 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));
p                 390 drivers/net/ethernet/broadcom/bcmsysport.c 		char *p;
p                 421 drivers/net/ethernet/broadcom/bcmsysport.c 		p = (char *)priv + s->stat_offset;
p                 422 drivers/net/ethernet/broadcom/bcmsysport.c 		*(u32 *)p = val;
p                 469 drivers/net/ethernet/broadcom/bcmsysport.c 		char *p;
p                 473 drivers/net/ethernet/broadcom/bcmsysport.c 			p = (char *)&dev->stats;
p                 475 drivers/net/ethernet/broadcom/bcmsysport.c 			p = (char *)stats64;
p                 477 drivers/net/ethernet/broadcom/bcmsysport.c 			p = (char *)priv;
p                 481 drivers/net/ethernet/broadcom/bcmsysport.c 		p += s->stat_offset;
p                 487 drivers/net/ethernet/broadcom/bcmsysport.c 				data[i] = *(u64 *)p;
p                 490 drivers/net/ethernet/broadcom/bcmsysport.c 			data[i] = *(u32 *)p;
p                1805 drivers/net/ethernet/broadcom/bcmsysport.c static int bcm_sysport_change_mac(struct net_device *dev, void *p)
p                1808 drivers/net/ethernet/broadcom/bcmsysport.c 	struct sockaddr *addr = p;
p                7067 drivers/net/ethernet/broadcom/bnx2.c 	u32 *p = _p, i, offset;
p                7097 drivers/net/ethernet/broadcom/bnx2.c 	memset(p, 0, BNX2_REGDUMP_LEN);
p                7104 drivers/net/ethernet/broadcom/bnx2.c 	p += offset;
p                7106 drivers/net/ethernet/broadcom/bnx2.c 		*p++ = BNX2_RD(bp, offset);
p                7110 drivers/net/ethernet/broadcom/bnx2.c 			p = (u32 *) (orig_p + offset);
p                7902 drivers/net/ethernet/broadcom/bnx2.c bnx2_change_mac_addr(struct net_device *dev, void *p)
p                7904 drivers/net/ethernet/broadcom/bnx2.c 	struct sockaddr *addr = p;
p                2021 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
p                4313 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c int bnx2x_change_mac_addr(struct net_device *dev, void *p)
p                4315 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	struct sockaddr *addr = p;
p                 539 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h int bnx2x_change_mac_addr(struct net_device *dev, void *p);
p                1793 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		struct bnx2x_dcbx_cos_params *p =
p                1796 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		p->strict = cos_data.data[i].strict;
p                1797 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		p->bw_tbl = cos_data.data[i].cos_bw;
p                1798 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		p->pri_bitmask = cos_data.data[i].pri_join_mask;
p                1799 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		p->pauseable = cos_data.data[i].pausable;
p                1802 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		if (p->bw_tbl != DCBX_INVALID_COS_BW ||
p                1803 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		    p->strict != BNX2X_DCBX_STRICT_INVALID) {
p                1804 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 			if (p->pri_bitmask == 0)
p                1809 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 				if (p->pauseable &&
p                1811 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 						p->pri_bitmask) != 0)
p                1815 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 				if (!p->pauseable &&
p                1817 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 						p->pri_bitmask) != 0)
p                1823 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		if (p->pauseable)
p                 850 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p, u32 preset)
p                 878 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 						*p++ = REG_RD(bp, addr);
p                 886 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset)
p                 907 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 				*p++ = REG_RD(bp, idle_reg_addrs[i].addr + j*4);
p                 916 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 				*p++ = REG_RD(bp, reg_addrs[i].addr + j*4);
p                 924 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 			*p++ = REG_RD(bp, wreg_addr_p->addr + i*4);
p                 931 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 				*p++ = REG_RD(bp, addr + j*4);
p                 939 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 		bnx2x_read_pages_regs(bp, p, preset);
p                 945 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
p                 957 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 		__bnx2x_get_preset_regs(bp, p, preset_idx);
p                 958 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 		p += __bnx2x_get_preset_regs_len(bp, preset_idx);
p                 965 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	u32 *p = _p;
p                 970 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	memset(p, 0, regs->len);
p                1002 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	memcpy(p, &dump_hdr, sizeof(struct dump_header));
p                1003 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	p += dump_hdr.header_size + 1;
p                1011 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	__bnx2x_get_regs(bp, p);
p                1060 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	u32 *p = buffer;
p                1093 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	memcpy(p, &dump_hdr, sizeof(struct dump_header));
p                1094 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	p += dump_hdr.header_size + 1;
p                1097 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	__bnx2x_get_preset_regs(bp, p, dump_hdr.preset);
p                3055 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
p                3060 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 		storm_memset_func_cfg(bp, &tcfg, p->func_id);
p                3064 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
p                3065 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 	storm_memset_func_en(bp, p->func_id, 1);
p                3068 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 	if (p->spq_active) {
p                3069 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
p                3071 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
p                10559 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
p                12653 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 				      struct bnx2x_mcast_ramrod_params *p,
p                12662 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 	INIT_LIST_HEAD(&p->mcast_list);
p                12678 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 		list_add_tail(&mc_mac->link, &p->mcast_list);
p                12683 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 	p->mcast_list_len = mc_count;
p                1225 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			   struct bnx2x_vlan_mac_ramrod_params *p,
p                1229 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
p                1252 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
p                1255 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
p                1258 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
p                1261 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);
p                1263 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	return bnx2x_config_vlan_mac(bp, p);
p                1902 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_vlan_mac_ramrod_params *p)
p                1905 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
p                1906 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
p                1914 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	switch (p->user_req.cmd) {
p                1923 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
p                1937 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			   struct bnx2x_vlan_mac_ramrod_params *p)
p                1940 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
p                1941 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	unsigned long *ramrod_flags = &p->ramrod_flags;
p                1949 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
p                1968 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
p                1969 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 						   &p->ramrod_flags);
p                1977 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
p                1993 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 							   p->vlan_mac_obj,
p                1994 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 							   &p->ramrod_flags);
p                2024 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_vlan_mac_ramrod_params p;
p                2053 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	memset(&p, 0, sizeof(p));
p                2054 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	p.vlan_mac_obj = o;
p                2055 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	p.ramrod_flags = *ramrod_flags;
p                2056 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
p                2061 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
p                2062 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
p                2063 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	__clear_bit(RAMROD_CONT, &p.ramrod_flags);
p                2074 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
p                2075 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
p                2076 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			rc = bnx2x_config_vlan_mac(bp, &p);
p                2088 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	p.ramrod_flags = *ramrod_flags;
p                2089 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	__set_bit(RAMROD_CONT, &p.ramrod_flags);
p                2091 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	return bnx2x_config_vlan_mac(bp, &p);
p                2306 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 				 struct bnx2x_rx_mode_ramrod_params *p)
p                2309 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	u32 mask = (1 << p->cl_id);
p                2312 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		(struct tstorm_eth_mac_filter_config *)p->rdata;
p                2321 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
p                2325 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
p                2329 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
p                2334 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
p                2339 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
p                2342 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
p                2377 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	__storm_memset_mac_filters(bp, mac_filters, p->func_id);
p                2380 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	clear_bit(p->state, p->pstate);
p                2445 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 				struct bnx2x_rx_mode_ramrod_params *p)
p                2447 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct eth_filter_rules_ramrod_data *data = p->rdata;
p                2457 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
p                2458 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		data->rules[rule_idx].client_id = p->cl_id;
p                2459 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		data->rules[rule_idx].func_id = p->func_id;
p                2464 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
p                2470 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
p                2471 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		data->rules[rule_idx].client_id = p->cl_id;
p                2472 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		data->rules[rule_idx].func_id = p->func_id;
p                2477 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
p                2488 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
p                2490 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
p                2492 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			data->rules[rule_idx].func_id = p->func_id;
p                2497 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
p                2504 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
p                2506 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			data->rules[rule_idx].func_id = p->func_id;
p                2511 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
p                2521 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
p                2524 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			 data->header.rule_cnt, p->rx_accept_flags,
p                2525 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			 p->tx_accept_flags);
p                2535 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
p                2536 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			   U64_HI(p->rdata_mapping),
p                2537 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			   U64_LO(p->rdata_mapping),
p                2547 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 				      struct bnx2x_rx_mode_ramrod_params *p)
p                2549 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	return bnx2x_state_wait(bp, p->state, p->pstate);
p                2553 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 				    struct bnx2x_rx_mode_ramrod_params *p)
p                2560 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			 struct bnx2x_rx_mode_ramrod_params *p)
p                2565 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	rc = p->rx_mode_obj->config_rx_mode(bp, p);
p                2570 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
p                2571 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		rc = p->rx_mode_obj->wait_comp(bp, p);
p                2671 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 				   struct bnx2x_mcast_ramrod_params *p,
p                2682 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		macs_list_len = p->mcast_list_len;
p                2685 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (!p->mcast_list_len)
p                2728 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		list_for_each_entry(pos, &p->mcast_list, link) {
p                2746 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		new_cmd->data.macs_num = p->mcast_list_len;
p                3117 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 				struct bnx2x_mcast_ramrod_params *p)
p                3121 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_mcast_obj *o = p->mcast_obj;
p                3166 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
p                3173 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
p                3187 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
p                3192 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	for (i = 0; i < p->mcast_list_len; i++) {
p                3198 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 				 p->mcast_list_len - i - 1);
p                3217 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			struct bnx2x_mcast_ramrod_params *p,
p                3221 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_mcast_obj *o = p->mcast_obj;
p                3224 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
p                3228 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		bnx2x_mcast_hdl_add(bp, o, p, &cnt);
p                3232 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		bnx2x_mcast_hdl_del(bp, o, p, &cnt);
p                3245 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	p->mcast_list_len = 0;
p                3251 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 				   struct bnx2x_mcast_ramrod_params *p,
p                3254 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_mcast_obj *o = p->mcast_obj;
p                3272 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		p->mcast_list_len = reg_sz;
p                3281 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		o->set_registry_size(o, reg_sz + p->mcast_list_len);
p                3293 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		o->set_registry_size(o, reg_sz + p->mcast_list_len);
p                3303 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	o->total_pending_num += p->mcast_list_len;
p                3309 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 				      struct bnx2x_mcast_ramrod_params *p,
p                3313 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_mcast_obj *o = p->mcast_obj;
p                3316 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	o->total_pending_num -= p->mcast_list_len;
p                3330 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 					struct bnx2x_mcast_ramrod_params *p,
p                3333 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
p                3372 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 				struct bnx2x_mcast_ramrod_params *p,
p                3375 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
p                3376 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_mcast_obj *o = p->mcast_obj;
p                3384 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
p                3396 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (p->mcast_list_len > 0)
p                3397 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
p                3408 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
p                3433 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags) || !cnt) {
p                3458 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 				    struct bnx2x_mcast_ramrod_params *p,
p                3468 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		p->mcast_list_len = 1;
p                3474 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 				       struct bnx2x_mcast_ramrod_params *p,
p                3488 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 					   struct bnx2x_mcast_ramrod_params *p,
p                3494 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
p                3508 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
p                3526 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 				 struct bnx2x_mcast_ramrod_params *p,
p                3530 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_mcast_obj *o = p->mcast_obj;
p                3536 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
p                3544 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
p                3557 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
p                3580 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 				   struct bnx2x_mcast_ramrod_params *p,
p                3583 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_mcast_obj *o = p->mcast_obj;
p                3599 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		p->mcast_list_len = reg_sz;
p                3601 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		   cmd, p->mcast_list_len);
p                3610 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		if (p->mcast_list_len > o->max_cmd_len) {
p                3619 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
p                3620 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		if (p->mcast_list_len > 0)
p                3621 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			o->set_registry_size(o, p->mcast_list_len);
p                3633 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (p->mcast_list_len)
p                3640 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 				      struct bnx2x_mcast_ramrod_params *p,
p                3644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_mcast_obj *o = p->mcast_obj;
p                3652 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (p->mcast_list_len)
p                3691 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 					struct bnx2x_mcast_ramrod_params *p,
p                3694 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
p                3748 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
p                3752 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_mcast_obj *o = p->mcast_obj;
p                3876 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 				struct bnx2x_mcast_ramrod_params *p,
p                3879 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_mcast_obj *o = p->mcast_obj;
p                3895 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
p                3903 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
p                3915 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
p                3930 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
p                3977 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		       struct bnx2x_mcast_ramrod_params *p,
p                3980 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_mcast_obj *o = p->mcast_obj;
p                3990 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	rc = o->validate(bp, p, cmd);
p                3995 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if ((!p->mcast_list_len) && (!o->check_sched(o)))
p                3999 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	   o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
p                4006 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
p                4013 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		p->mcast_list_len = 0;
p                4022 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		rc = o->config_mcast(bp, p, cmd);
p                4027 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
p                4037 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	o->revert(bp, p, old_reg_size, cmd);
p                4322 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
p                4326 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	memset(p, 0, sizeof(*p));
p                4329 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
p                4332 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	atomic_set(&p->credit, credit);
p                4335 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	p->pool_sz = credit;
p                4337 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	p->base_pool_offset = base;
p                4342 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	p->check = bnx2x_credit_pool_check;
p                4346 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		p->put      = bnx2x_credit_pool_put;
p                4347 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		p->get      = bnx2x_credit_pool_get;
p                4348 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		p->put_entry = bnx2x_credit_pool_put_entry;
p                4349 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		p->get_entry = bnx2x_credit_pool_get_entry;
p                4351 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		p->put      = bnx2x_credit_pool_always_true;
p                4352 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		p->get      = bnx2x_credit_pool_always_true;
p                4353 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
p                4354 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
p                4359 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
p                4360 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
p                4365 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 				struct bnx2x_credit_pool_obj *p, u8 func_id,
p                4380 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
p                4391 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
p                4394 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			bnx2x_init_credit_pool(p, 0, 0);
p                4411 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			bnx2x_init_credit_pool(p, -1, cam_sz);
p                4414 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			bnx2x_init_credit_pool(p, 0, 0);
p                4420 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 				 struct bnx2x_credit_pool_obj *p,
p                4428 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		bnx2x_init_credit_pool(p, 0, -1);
p                4436 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			bnx2x_init_credit_pool(p, -1/*unused for E2*/, credit);
p                4439 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			bnx2x_init_credit_pool(p, 0, 0);
p                4453 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 					struct bnx2x_config_rss_params *p)
p                4460 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
p                4482 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 			   struct bnx2x_config_rss_params *p)
p                4484 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_rss_config_obj *o = p->rss_obj;
p                4501 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
p                4503 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
p                4511 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
p                4514 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
p                4517 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
p                4520 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
p                4523 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
p                4526 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
p                4529 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(BNX2X_RSS_IPV4_VXLAN, &p->rss_flags))
p                4532 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(BNX2X_RSS_IPV6_VXLAN, &p->rss_flags))
p                4535 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(BNX2X_RSS_TUNN_INNER_HDRS, &p->rss_flags))
p                4539 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
p                4541 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		const u8 *src = (const u8 *)p->rss_key;
p                4556 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	data->rss_result_mask = p->rss_result_mask;
p                4564 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	memcpy(data->indirection_table, p->ind_table,
p                4568 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
p                4572 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		bnx2x_debug_print_ind_table(bp, p);
p                4600 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		     struct bnx2x_config_rss_params *p)
p                4603 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	struct bnx2x_rss_config_obj *o = p->rss_obj;
p                4607 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
p                4609 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 		   p->ramrod_flags);
p                4615 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	rc = o->config_rss(bp, p);
p                4621 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
p                 417 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h 		       struct bnx2x_vlan_mac_ramrod_params *p,
p                 500 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h 			      struct bnx2x_rx_mode_ramrod_params *p);
p                 503 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h 			 struct bnx2x_rx_mode_ramrod_params *p);
p                 594 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h 			    struct bnx2x_mcast_ramrod_params *p,
p                 612 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h 			   struct bnx2x_mcast_ramrod_params *p,
p                 641 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h 			struct bnx2x_mcast_ramrod_params *p,
p                 648 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h 		       struct bnx2x_mcast_ramrod_params *p,
p                 770 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h 			  struct bnx2x_config_rss_params *p);
p                1443 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h 			   struct bnx2x_vlan_mac_ramrod_params *p);
p                1446 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h 			struct bnx2x_vlan_mac_ramrod_params *p,
p                1464 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h 			 struct bnx2x_rx_mode_ramrod_params *p);
p                1496 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h 		       struct bnx2x_mcast_ramrod_params *p,
p                1501 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h 				struct bnx2x_credit_pool_obj *p, u8 func_id,
p                1504 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h 				 struct bnx2x_credit_pool_obj *p, u8 func_id,
p                1506 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
p                1523 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h 		     struct bnx2x_config_rss_params *p);
p                 169 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c 			   struct bnx2x_vf_queue_construct_params *p,
p                 172 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c 	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
p                 173 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c 	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;
p                2163 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c 	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
p                2165 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c 	p->vf->state = p->state;
p                 448 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h 			   struct bnx2x_vf_queue_construct_params *p,
p                10874 drivers/net/ethernet/broadcom/bnxt/bnxt.c static int bnxt_change_mac_addr(struct net_device *dev, void *p)
p                10876 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct sockaddr *addr = p;
p                2252 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	char	*p;
p                2260 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 	for (p = data; *p != 0; p++) {
p                2263 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		while (*p != 0 && *p != '\n') {
p                2264 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 			value = p;
p                2265 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 			while (*p != 0 && *p != '\t' && *p != '\n')
p                2266 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 				p++;
p                2269 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 			if (*p != '\t')
p                2271 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 			*p = 0;
p                2273 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 			p++;
p                2275 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		if (*p == 0)
p                2277 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c 		*p = 0;
p                 337 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	const u8 *p = mask;
p                 341 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		if (p[i] != 0)
p                 349 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	const u8 *p = mask;
p                 353 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		if (p[i] != 0xff)
p                 376 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 	const u8 *p = key;
p                 380 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c 		if (p[i] != 0)
p                  63 drivers/net/ethernet/broadcom/genet/bcmgenet.c #define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
p                 950 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		char *p;
p                 983 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		p = (char *)priv + s->stat_offset;
p                 984 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		*(u32 *)p = val;
p                1002 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		char *p;
p                1006 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			p = (char *)&dev->stats;
p                1008 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			p = (char *)priv;
p                1009 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		p += s->stat_offset;
p                1012 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			data[i] = *(unsigned long *)p;
p                1014 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			data[i] = *(u32 *)p;
p                1064 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct ethtool_eee *p = &priv->eee;
p                1072 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	e->eee_enabled = p->eee_enabled;
p                1073 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	e->eee_active = p->eee_active;
p                1082 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct ethtool_eee *p = &priv->eee;
p                1091 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	p->eee_enabled = e->eee_enabled;
p                1093 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	if (!p->eee_enabled) {
p                3156 drivers/net/ethernet/broadcom/genet/bcmgenet.c static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
p                3158 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct sockaddr *addr = p;
p                 514 drivers/net/ethernet/broadcom/genet/bcmgenet.h #define GENET_IS_V1(p)	((p)->version == GENET_V1)
p                 515 drivers/net/ethernet/broadcom/genet/bcmgenet.h #define GENET_IS_V2(p)	((p)->version == GENET_V2)
p                 516 drivers/net/ethernet/broadcom/genet/bcmgenet.h #define GENET_IS_V3(p)	((p)->version == GENET_V3)
p                 517 drivers/net/ethernet/broadcom/genet/bcmgenet.h #define GENET_IS_V4(p)	((p)->version == GENET_V4)
p                 518 drivers/net/ethernet/broadcom/genet/bcmgenet.h #define GENET_IS_V5(p)	((p)->version == GENET_V5)
p                9376 drivers/net/ethernet/broadcom/tg3.c static int tg3_set_mac_addr(struct net_device *dev, void *p)
p                9379 drivers/net/ethernet/broadcom/tg3.c 	struct sockaddr *addr = p;
p                15547 drivers/net/ethernet/broadcom/tg3.c 			struct subsys_tbl_ent *p;
p                15552 drivers/net/ethernet/broadcom/tg3.c 			p = tg3_lookup_by_subsys(tp);
p                15553 drivers/net/ethernet/broadcom/tg3.c 			if (p) {
p                15554 drivers/net/ethernet/broadcom/tg3.c 				tp->phy_id = p->phy_id;
p                17393 drivers/net/ethernet/broadcom/tg3.c 		u32 *p = buf, i;
p                17396 drivers/net/ethernet/broadcom/tg3.c 			p[i] = i;
p                17417 drivers/net/ethernet/broadcom/tg3.c 			if (p[i] == i)
p                17429 drivers/net/ethernet/broadcom/tg3.c 					"(%d != %d)\n", __func__, p[i], i);
p                 644 drivers/net/ethernet/cadence/macb_main.c 	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
p                 648 drivers/net/ethernet/cadence/macb_main.c 	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
p                 650 drivers/net/ethernet/cadence/macb_main.c 	for (; p < end; p++, offset += 4)
p                 651 drivers/net/ethernet/cadence/macb_main.c 		*p += bp->macb_reg_readl(bp, offset);
p                2510 drivers/net/ethernet/cadence/macb_main.c 	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
p                2512 drivers/net/ethernet/cadence/macb_main.c 	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
p                2517 drivers/net/ethernet/cadence/macb_main.c 		*p += val;
p                2523 drivers/net/ethernet/cadence/macb_main.c 			*(++p) += val;
p                2594 drivers/net/ethernet/cadence/macb_main.c static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
p                2604 drivers/net/ethernet/cadence/macb_main.c 		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
p                2605 drivers/net/ethernet/cadence/macb_main.c 			memcpy(p, gem_statistics[i].stat_string,
p                2609 drivers/net/ethernet/cadence/macb_main.c 			for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
p                2612 drivers/net/ethernet/cadence/macb_main.c 				memcpy(p, stat_string, ETH_GSTRING_LEN);
p                2672 drivers/net/ethernet/cadence/macb_main.c 			  void *p)
p                2676 drivers/net/ethernet/cadence/macb_main.c 	u32 *regs_buff = p;
p                 400 drivers/net/ethernet/calxeda/xgmac.c #define tx_dma_ring_space(p) \
p                 401 drivers/net/ethernet/calxeda/xgmac.c 	dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)
p                 404 drivers/net/ethernet/calxeda/xgmac.c static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
p                 407 drivers/net/ethernet/calxeda/xgmac.c 		p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
p                 410 drivers/net/ethernet/calxeda/xgmac.c 		p->buf_size = cpu_to_le32(buf_sz);
p                 413 drivers/net/ethernet/calxeda/xgmac.c static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
p                 415 drivers/net/ethernet/calxeda/xgmac.c 	u32 len = le32_to_cpu(p->buf_size);
p                 420 drivers/net/ethernet/calxeda/xgmac.c static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
p                 423 drivers/net/ethernet/calxeda/xgmac.c 	struct xgmac_dma_desc *end = p + ring_size - 1;
p                 425 drivers/net/ethernet/calxeda/xgmac.c 	memset(p, 0, sizeof(*p) * ring_size);
p                 427 drivers/net/ethernet/calxeda/xgmac.c 	for (; p <= end; p++)
p                 428 drivers/net/ethernet/calxeda/xgmac.c 		desc_set_buf_len(p, buf_sz);
p                 433 drivers/net/ethernet/calxeda/xgmac.c static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
p                 435 drivers/net/ethernet/calxeda/xgmac.c 	memset(p, 0, sizeof(*p) * ring_size);
p                 436 drivers/net/ethernet/calxeda/xgmac.c 	p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
p                 439 drivers/net/ethernet/calxeda/xgmac.c static inline int desc_get_owner(struct xgmac_dma_desc *p)
p                 441 drivers/net/ethernet/calxeda/xgmac.c 	return le32_to_cpu(p->flags) & DESC_OWN;
p                 444 drivers/net/ethernet/calxeda/xgmac.c static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
p                 447 drivers/net/ethernet/calxeda/xgmac.c 	p->flags = cpu_to_le32(DESC_OWN);
p                 450 drivers/net/ethernet/calxeda/xgmac.c static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
p                 452 drivers/net/ethernet/calxeda/xgmac.c 	u32 tmpflags = le32_to_cpu(p->flags);
p                 455 drivers/net/ethernet/calxeda/xgmac.c 	p->flags = cpu_to_le32(tmpflags);
p                 458 drivers/net/ethernet/calxeda/xgmac.c static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
p                 460 drivers/net/ethernet/calxeda/xgmac.c 	u32 tmpflags = le32_to_cpu(p->flags);
p                 462 drivers/net/ethernet/calxeda/xgmac.c 	p->flags = cpu_to_le32(tmpflags);
p                 465 drivers/net/ethernet/calxeda/xgmac.c static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
p                 467 drivers/net/ethernet/calxeda/xgmac.c 	return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
p                 470 drivers/net/ethernet/calxeda/xgmac.c static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
p                 472 drivers/net/ethernet/calxeda/xgmac.c 	return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
p                 475 drivers/net/ethernet/calxeda/xgmac.c static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
p                 477 drivers/net/ethernet/calxeda/xgmac.c 	return le32_to_cpu(p->buf1_addr);
p                 480 drivers/net/ethernet/calxeda/xgmac.c static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
p                 483 drivers/net/ethernet/calxeda/xgmac.c 	p->buf1_addr = cpu_to_le32(paddr);
p                 485 drivers/net/ethernet/calxeda/xgmac.c 		p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
p                 488 drivers/net/ethernet/calxeda/xgmac.c static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
p                 491 drivers/net/ethernet/calxeda/xgmac.c 	desc_set_buf_len(p, len);
p                 492 drivers/net/ethernet/calxeda/xgmac.c 	desc_set_buf_addr(p, paddr, len);
p                 495 drivers/net/ethernet/calxeda/xgmac.c static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
p                 497 drivers/net/ethernet/calxeda/xgmac.c 	u32 data = le32_to_cpu(p->flags);
p                 515 drivers/net/ethernet/calxeda/xgmac.c static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
p                 518 drivers/net/ethernet/calxeda/xgmac.c 	u32 status = le32_to_cpu(p->flags);
p                 542 drivers/net/ethernet/calxeda/xgmac.c static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
p                 546 drivers/net/ethernet/calxeda/xgmac.c 	u32 status = le32_to_cpu(p->flags);
p                 547 drivers/net/ethernet/calxeda/xgmac.c 	u32 ext_status = le32_to_cpu(p->ext_status);
p                 679 drivers/net/ethernet/calxeda/xgmac.c 	struct xgmac_dma_desc *p;
p                 687 drivers/net/ethernet/calxeda/xgmac.c 		p = priv->dma_rx + entry;
p                 702 drivers/net/ethernet/calxeda/xgmac.c 			desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
p                 709 drivers/net/ethernet/calxeda/xgmac.c 		desc_set_rx_owner(p);
p                 792 drivers/net/ethernet/calxeda/xgmac.c 	struct xgmac_dma_desc *p;
p                 802 drivers/net/ethernet/calxeda/xgmac.c 		p = priv->dma_rx + i;
p                 803 drivers/net/ethernet/calxeda/xgmac.c 		dma_unmap_single(priv->device, desc_get_buf_addr(p),
p                 813 drivers/net/ethernet/calxeda/xgmac.c 	struct xgmac_dma_desc *p;
p                 822 drivers/net/ethernet/calxeda/xgmac.c 		p = priv->dma_tx + i;
p                 823 drivers/net/ethernet/calxeda/xgmac.c 		if (desc_get_tx_fs(p))
p                 824 drivers/net/ethernet/calxeda/xgmac.c 			dma_unmap_single(priv->device, desc_get_buf_addr(p),
p                 825 drivers/net/ethernet/calxeda/xgmac.c 					 desc_get_buf_len(p), DMA_TO_DEVICE);
p                 827 drivers/net/ethernet/calxeda/xgmac.c 			dma_unmap_page(priv->device, desc_get_buf_addr(p),
p                 828 drivers/net/ethernet/calxeda/xgmac.c 				       desc_get_buf_len(p), DMA_TO_DEVICE);
p                 830 drivers/net/ethernet/calxeda/xgmac.c 		if (desc_get_tx_ls(p))
p                 871 drivers/net/ethernet/calxeda/xgmac.c 		struct xgmac_dma_desc *p = priv->dma_tx + entry;
p                 874 drivers/net/ethernet/calxeda/xgmac.c 		if (desc_get_owner(p))
p                 880 drivers/net/ethernet/calxeda/xgmac.c 		if (desc_get_tx_fs(p))
p                 881 drivers/net/ethernet/calxeda/xgmac.c 			dma_unmap_single(priv->device, desc_get_buf_addr(p),
p                 882 drivers/net/ethernet/calxeda/xgmac.c 					 desc_get_buf_len(p), DMA_TO_DEVICE);
p                 884 drivers/net/ethernet/calxeda/xgmac.c 			dma_unmap_page(priv->device, desc_get_buf_addr(p),
p                 885 drivers/net/ethernet/calxeda/xgmac.c 				       desc_get_buf_len(p), DMA_TO_DEVICE);
p                 888 drivers/net/ethernet/calxeda/xgmac.c 		if (desc_get_tx_ls(p)) {
p                 889 drivers/net/ethernet/calxeda/xgmac.c 			desc_get_tx_status(priv, p);
p                1170 drivers/net/ethernet/calxeda/xgmac.c 	struct xgmac_dma_desc *p;
p                1181 drivers/net/ethernet/calxeda/xgmac.c 		p = priv->dma_rx + entry;
p                1182 drivers/net/ethernet/calxeda/xgmac.c 		if (desc_get_owner(p))
p                1189 drivers/net/ethernet/calxeda/xgmac.c 		ip_checksum = desc_get_rx_status(priv, p);
p                1200 drivers/net/ethernet/calxeda/xgmac.c 		frame_len = desc_get_rx_frame_len(p);
p                1205 drivers/net/ethernet/calxeda/xgmac.c 		dma_unmap_single(priv->device, desc_get_buf_addr(p),
p                1471 drivers/net/ethernet/calxeda/xgmac.c static int xgmac_set_mac_address(struct net_device *dev, void *p)
p                1475 drivers/net/ethernet/calxeda/xgmac.c 	struct sockaddr *addr = p;
p                1592 drivers/net/ethernet/calxeda/xgmac.c 	void *p = priv;
p                1600 drivers/net/ethernet/calxeda/xgmac.c 			*data++ = *(u32 *)(p +
p                1619 drivers/net/ethernet/calxeda/xgmac.c 	u8 *p = data;
p                1624 drivers/net/ethernet/calxeda/xgmac.c 			memcpy(p, xgmac_gstrings_stats[i].stat_string,
p                1626 drivers/net/ethernet/calxeda/xgmac.c 			p += ETH_GSTRING_LEN;
p                  71 drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h #define    CN23XX_SLI_CTL_PORT(p)                  \
p                  72 drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h 		(CN23XX_SLI_CTL_PORT_START + ((p) * CN23XX_PORT_OFFSET))
p                2018 drivers/net/ethernet/cavium/liquidio/lio_main.c static int liquidio_set_mac(struct net_device *netdev, void *p)
p                2023 drivers/net/ethernet/cavium/liquidio/lio_main.c 	struct sockaddr *addr = (struct sockaddr *)p;
p                1118 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c static int liquidio_set_mac(struct net_device *netdev, void *p)
p                1120 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c 	struct sockaddr *addr = (struct sockaddr *)p;
p                 584 drivers/net/ethernet/cavium/liquidio/octeon_console.c 	char *p;
p                 641 drivers/net/ethernet/cavium/liquidio/octeon_console.c 		p = strstr(uboot_ver, "mips");
p                 642 drivers/net/ethernet/cavium/liquidio/octeon_console.c 		if (p) {
p                 643 drivers/net/ethernet/cavium/liquidio/octeon_console.c 			p--;
p                 644 drivers/net/ethernet/cavium/liquidio/octeon_console.c 			*p = '\0';
p                 158 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
p                 163 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	spin_lock_irqsave(&p->lock, flags);
p                 164 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
p                 166 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
p                 167 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	spin_unlock_irqrestore(&p->lock, flags);
p                 170 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
p                 175 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	spin_lock_irqsave(&p->lock, flags);
p                 176 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
p                 178 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
p                 179 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	spin_unlock_irqrestore(&p->lock, flags);
p                 182 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
p                 184 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	octeon_mgmt_set_rx_irq(p, 1);
p                 187 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
p                 189 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	octeon_mgmt_set_rx_irq(p, 0);
p                 192 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
p                 194 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	octeon_mgmt_set_tx_irq(p, 1);
p                 197 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
p                 199 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	octeon_mgmt_set_tx_irq(p, 0);
p                 214 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct octeon_mgmt *p = netdev_priv(netdev);
p                 216 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
p                 228 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		__skb_queue_tail(&p->rx_list, skb);
p                 232 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		re.s.addr = dma_map_single(p->dev, skb->data,
p                 237 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		p->rx_ring[p->rx_next_fill] = re.d64;
p                 238 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
p                 241 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		p->rx_next_fill =
p                 242 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
p                 243 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		p->rx_current_fill++;
p                 245 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		cvmx_write_csr(p->mix + MIX_IRING2, 1);
p                 249 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
p                 257 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
p                 259 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		spin_lock_irqsave(&p->tx_list.lock, flags);
p                 261 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
p                 264 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 			spin_unlock_irqrestore(&p->tx_list.lock, flags);
p                 268 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
p                 272 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		re.d64 = p->tx_ring[p->tx_next_clean];
p                 273 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		p->tx_next_clean =
p                 274 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
p                 275 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		skb = __skb_dequeue(&p->tx_list);
p                 281 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
p                 282 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		p->tx_current_fill--;
p                 284 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		spin_unlock_irqrestore(&p->tx_list.lock, flags);
p                 286 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		dma_unmap_single(p->dev, re.s.addr, re.s.len,
p                 296 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 			ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
p                 298 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 			cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
p                 307 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
p                 310 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	if (cleaned && netif_queue_stopped(p->netdev))
p                 311 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		netif_wake_queue(p->netdev);
p                 316 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
p                 317 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	octeon_mgmt_clean_tx_buffers(p);
p                 318 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	octeon_mgmt_enable_tx_irq(p);
p                 323 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct octeon_mgmt *p = netdev_priv(netdev);
p                 328 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
p                 329 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);
p                 333 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		spin_lock_irqsave(&p->lock, flags);
p                 336 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		spin_unlock_irqrestore(&p->lock, flags);
p                 342 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct octeon_mgmt *p = netdev_priv(netdev);
p                 349 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
p                 350 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);
p                 354 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		spin_lock_irqsave(&p->lock, flags);
p                 357 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		spin_unlock_irqrestore(&p->lock, flags);
p                 365 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
p                 370 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
p                 374 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	re.d64 = p->rx_ring[p->rx_next];
p                 375 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
p                 376 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->rx_current_fill--;
p                 377 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	*pskb = __skb_dequeue(&p->rx_list);
p                 379 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	dma_unmap_single(p->dev, re.s.addr,
p                 387 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
p                 389 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct net_device *netdev = p->netdev;
p                 399 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
p                 405 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		if (p->has_rx_tstamp) {
p                 428 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
p                 459 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
p                 468 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
p                 472 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
p                 478 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
p                 481 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		rc = octeon_mgmt_receive_one(p);
p                 486 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
p                 489 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	octeon_mgmt_rx_fill_ring(p->netdev);
p                 496 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
p                 497 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct net_device *netdev = p->netdev;
p                 500 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	work_done = octeon_mgmt_receive_packets(p, budget);
p                 505 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		octeon_mgmt_enable_rx_irq(p);
p                 513 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
p                 520 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
p                 522 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
p                 525 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
p                 526 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_read_csr(p->mix + MIX_CTL);
p                 529 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
p                 531 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
p                 536 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
p                 559 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct octeon_mgmt *p = netdev_priv(netdev);
p                 601 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	spin_lock_irqsave(&p->lock, flags);
p                 604 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
p                 607 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
p                 614 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);
p                 616 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
p                 617 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
p                 618 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
p                 619 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
p                 620 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
p                 621 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
p                 622 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);
p                 626 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
p                 628 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	spin_unlock_irqrestore(&p->lock, flags);
p                 645 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct octeon_mgmt *p = netdev_priv(netdev);
p                 653 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet);
p                 659 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
p                 668 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct octeon_mgmt *p = netdev_priv(netdev);
p                 671 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);
p                 674 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
p                 675 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_read_csr(p->mix + MIX_ISR);
p                 678 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		octeon_mgmt_disable_rx_irq(p);
p                 679 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		napi_schedule(&p->napi);
p                 682 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		octeon_mgmt_disable_tx_irq(p);
p                 683 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		tasklet_schedule(&p->tx_clean_tasklet);
p                 692 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct octeon_mgmt *p = netdev_priv(netdev);
p                 748 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		p->has_rx_tstamp = false;
p                 749 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
p                 751 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
p                 768 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		p->has_rx_tstamp = have_hw_timestamps;
p                 770 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		if (p->has_rx_tstamp) {
p                 771 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 			rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
p                 773 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 			cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
p                 799 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
p                 804 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
p                 808 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
p                 813 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
p                 822 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
p                 827 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
p                 831 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
p                 834 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c static void octeon_mgmt_update_link(struct octeon_mgmt *p)
p                 836 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct net_device *ndev = p->netdev;
p                 840 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
p                 882 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
p                 885 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
p                 891 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
p                 892 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
p                 901 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
p                 907 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct octeon_mgmt *p = netdev_priv(netdev);
p                 915 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	spin_lock_irqsave(&p->lock, flags);
p                 918 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	if (!phydev->link && p->last_link)
p                 922 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	    (p->last_duplex != phydev->duplex ||
p                 923 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	     p->last_link != phydev->link ||
p                 924 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	     p->last_speed != phydev->speed)) {
p                 925 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		octeon_mgmt_disable_link(p);
p                 927 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		octeon_mgmt_update_link(p);
p                 928 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		octeon_mgmt_enable_link(p);
p                 931 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->last_link = phydev->link;
p                 932 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->last_speed = phydev->speed;
p                 933 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->last_duplex = phydev->duplex;
p                 935 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	spin_unlock_irqrestore(&p->lock, flags);
p                 948 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct octeon_mgmt *p = netdev_priv(netdev);
p                 951 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	if (octeon_is_simulation() || p->phy_np == NULL) {
p                 957 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	phydev = of_phy_connect(netdev, p->phy_np,
p                 969 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct octeon_mgmt *p = netdev_priv(netdev);
p                 981 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
p                 983 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	if (!p->tx_ring)
p                 985 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->tx_ring_handle =
p                 986 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		dma_map_single(p->dev, p->tx_ring,
p                 989 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->tx_next = 0;
p                 990 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->tx_next_clean = 0;
p                 991 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->tx_current_fill = 0;
p                 994 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
p                 996 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	if (!p->rx_ring)
p                 998 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->rx_ring_handle =
p                 999 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		dma_map_single(p->dev, p->rx_ring,
p                1003 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->rx_next = 0;
p                1004 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->rx_next_fill = 0;
p                1005 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->rx_current_fill = 0;
p                1007 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	octeon_mgmt_reset_hw(p);
p                1009 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
p                1014 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
p                1016 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
p                1033 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		if (p->port) {
p                1046 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	oring1.s.obase = p->tx_ring_handle >> 3;
p                1048 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);
p                1051 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	iring1.s.ibase = p->rx_ring_handle >> 3;
p                1053 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);
p                1072 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
p                1076 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
p                1089 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
p                1091 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);
p                1100 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
p                1106 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);
p                1107 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */
p                1115 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
p                1117 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
p                1120 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
p                1125 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);
p                1127 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		cvmx_read_csr(p->agl_prt_ctl);
p                1145 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
p                1146 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
p                1147 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);
p                1149 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
p                1150 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
p                1151 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);
p                1154 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));
p                1156 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
p                1158 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
p                1165 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);
p                1170 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);
p                1176 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
p                1181 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
p                1205 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
p                1208 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	octeon_mgmt_disable_link(p);
p                1210 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		octeon_mgmt_update_link(p);
p                1211 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	octeon_mgmt_enable_link(p);
p                1213 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->last_link = 0;
p                1214 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->last_speed = 0;
p                1224 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	napi_enable(&p->napi);
p                1228 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	octeon_mgmt_reset_hw(p);
p                1229 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	dma_unmap_single(p->dev, p->rx_ring_handle,
p                1232 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	kfree(p->rx_ring);
p                1234 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	dma_unmap_single(p->dev, p->tx_ring_handle,
p                1237 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	kfree(p->tx_ring);
p                1243 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct octeon_mgmt *p = netdev_priv(netdev);
p                1245 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	napi_disable(&p->napi);
p                1253 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	octeon_mgmt_reset_hw(p);
p                1255 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	free_irq(p->irq, netdev);
p                1258 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	skb_queue_purge(&p->tx_list);
p                1259 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	skb_queue_purge(&p->rx_list);
p                1261 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	dma_unmap_single(p->dev, p->rx_ring_handle,
p                1264 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	kfree(p->rx_ring);
p                1266 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	dma_unmap_single(p->dev, p->tx_ring_handle,
p                1269 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	kfree(p->tx_ring);
p                1277 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct octeon_mgmt *p = netdev_priv(netdev);
p                1285 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	re.s.addr = dma_map_single(p->dev, skb->data,
p                1289 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	spin_lock_irqsave(&p->tx_list.lock, flags);
p                1291 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
p                1292 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		spin_unlock_irqrestore(&p->tx_list.lock, flags);
p                1294 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		spin_lock_irqsave(&p->tx_list.lock, flags);
p                1297 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	if (unlikely(p->tx_current_fill >=
p                1299 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		spin_unlock_irqrestore(&p->tx_list.lock, flags);
p                1300 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		dma_unmap_single(p->dev, re.s.addr, re.s.len,
p                1305 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	__skb_queue_tail(&p->tx_list, skb);
p                1308 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->tx_ring[p->tx_next] = re.d64;
p                1309 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
p                1310 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->tx_current_fill++;
p                1312 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	spin_unlock_irqrestore(&p->tx_list.lock, flags);
p                1314 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
p                1322 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	cvmx_write_csr(p->mix + MIX_ORING2, 1);
p                1334 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct octeon_mgmt *p = netdev_priv(netdev);
p                1336 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	octeon_mgmt_receive_packets(p, 16);
p                1385 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct octeon_mgmt *p;
p                1401 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p = netdev_priv(netdev);
p                1402 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
p                1405 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->netdev = netdev;
p                1406 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->dev = &pdev->dev;
p                1407 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->has_rx_tstamp = false;
p                1411 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		p->port = be32_to_cpup(data);
p                1418 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
p                1424 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->irq = result;
p                1447 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->mix_phys = res_mix->start;
p                1448 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->mix_size = resource_size(res_mix);
p                1449 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->agl_phys = res_agl->start;
p                1450 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->agl_size = resource_size(res_agl);
p                1451 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
p                1452 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);
p                1455 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
p                1463 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
p                1471 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
p                1472 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 				     p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
p                1479 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
p                1480 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
p                1481 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
p                1482 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 					   p->agl_prt_ctl_size);
p                1483 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	if (!p->mix || !p->agl || !p->agl_prt_ctl) {
p                1489 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	spin_lock_init(&p->lock);
p                1491 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	skb_queue_head_init(&p->tx_list);
p                1492 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	skb_queue_head_init(&p->rx_list);
p                1493 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	tasklet_init(&p->tx_clean_tasklet,
p                1494 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);
p                1511 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
p                1526 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	of_node_put(p->phy_np);
p                1534 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	struct octeon_mgmt *p = netdev_priv(netdev);
p                1537 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	of_node_put(p->phy_np);
p                 368 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	u64 *p = (u64 *)reg;
p                 374 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	memset(p, 0, NIC_VF_REG_COUNT);
p                 376 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG);
p                 379 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_reg_read(nic,
p                 382 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	p[i++] = nicvf_reg_read(nic, NIC_VF_INT);
p                 383 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S);
p                 384 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C);
p                 385 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
p                 386 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
p                 389 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3));
p                 393 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_reg_read(nic,
p                 397 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_reg_read(nic,
p                 400 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG);
p                 404 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
p                 405 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
p                 406 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
p                 407 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
p                 408 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
p                 409 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
p                 410 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
p                 411 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
p                 412 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
p                 413 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
p                 418 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
p                 419 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic,
p                 422 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
p                 426 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
p                 427 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
p                 428 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
p                 429 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
p                 430 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
p                 431 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
p                 432 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
p                 433 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
p                 437 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = 0;
p                 438 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
p                 440 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
p                 444 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
p                 445 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
p                 446 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
p                 447 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
p                 448 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
p                 449 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
p                 450 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic,
p                 452 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic,
p                 455 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
p                1611 drivers/net/ethernet/cavium/thunder/nicvf_main.c static int nicvf_set_mac_address(struct net_device *netdev, void *p)
p                1613 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	struct sockaddr *addr = p;
p                 347 drivers/net/ethernet/chelsio/cxgb/common.h 		     struct adapter_params *p);
p                 125 drivers/net/ethernet/chelsio/cxgb/cxgb2.c static void link_report(struct port_info *p)
p                 127 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	if (!netif_carrier_ok(p->dev))
p                 128 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		netdev_info(p->dev, "link down\n");
p                 132 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		switch (p->link_config.speed) {
p                 138 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		netdev_info(p->dev, "link up, %s, %s-duplex\n",
p                 139 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 			    s, p->link_config.duplex == DUPLEX_FULL
p                 147 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	struct port_info *p = &adapter->port[port_id];
p                 149 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	if (link_stat != netif_carrier_ok(p->dev)) {
p                 151 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 			netif_carrier_on(p->dev);
p                 153 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 			netif_carrier_off(p->dev);
p                 154 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		link_report(p);
p                 175 drivers/net/ethernet/chelsio/cxgb/cxgb2.c static void link_start(struct port_info *p)
p                 177 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	struct cmac *mac = p->mac;
p                 181 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		mac->ops->macaddress_set(mac, p->dev->dev_addr);
p                 182 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	t1_set_rxmode(p->dev);
p                 183 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	t1_link_start(p->phy, mac, &p->link_config);
p                 268 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	struct port_info *p = &adapter->port[dev->if_port];
p                 269 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	struct cmac *mac = p->mac;
p                 294 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	struct port_info *p = &adapter->port[dev->if_port];
p                 299 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	pstats = p->mac->ops->statistics_update(p->mac,
p                 538 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	u32 *p = buf + start;
p                 541 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		*p++ = readl(ap->regs + start);
p                 571 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	struct port_info *p = &adapter->port[dev->if_port];
p                 574 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	supported = p->link_config.supported;
p                 575 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	advertising = p->link_config.advertising;
p                 578 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		cmd->base.speed = p->link_config.speed;
p                 579 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		cmd->base.duplex = p->link_config.duplex;
p                 586 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	cmd->base.phy_address = p->phy->mdio.prtad;
p                 587 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	cmd->base.autoneg = p->link_config.autoneg;
p                 636 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	struct port_info *p = &adapter->port[dev->if_port];
p                 637 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	struct link_config *lc = &p->link_config;
p                 668 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		t1_link_start(p->phy, p->mac, lc);
p                 676 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	struct port_info *p = &adapter->port[dev->if_port];
p                 678 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
p                 679 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
p                 680 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
p                 687 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	struct port_info *p = &adapter->port[dev->if_port];
p                 688 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	struct link_config *lc = &p->link_config;
p                 703 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 			t1_link_start(p->phy, p->mac, lc);
p                 707 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
p                 840 drivers/net/ethernet/chelsio/cxgb/cxgb2.c static int t1_set_mac_addr(struct net_device *dev, void *p)
p                 844 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	struct sockaddr *addr = p;
p                 902 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		struct port_info *p = &adapter->port[i];
p                 904 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		if (netif_running(p->dev))
p                 905 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 			p->mac->ops->statistics_update(p->mac,
p                 240 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sched_port p[MAX_NPORTS];
p                 290 drivers/net/ethernet/chelsio/cxgb/sge.c 		__skb_queue_purge(&s->p[s->port].skbq);
p                 301 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sched_port *p = &s->p[port];
p                 306 drivers/net/ethernet/chelsio/cxgb/sge.c 		p->speed = speed;
p                 308 drivers/net/ethernet/chelsio/cxgb/sge.c 		p->mtu = mtu;
p                 311 drivers/net/ethernet/chelsio/cxgb/sge.c 		unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
p                 312 drivers/net/ethernet/chelsio/cxgb/sge.c 		do_div(drain, (p->mtu + 50) * 1000);
p                 313 drivers/net/ethernet/chelsio/cxgb/sge.c 		p->drain_bits_per_1024ns = (unsigned int) drain;
p                 315 drivers/net/ethernet/chelsio/cxgb/sge.c 		if (p->speed < 1000)
p                 316 drivers/net/ethernet/chelsio/cxgb/sge.c 			p->drain_bits_per_1024ns =
p                 317 drivers/net/ethernet/chelsio/cxgb/sge.c 				90 * p->drain_bits_per_1024ns / 100;
p                 321 drivers/net/ethernet/chelsio/cxgb/sge.c 		p->drain_bits_per_1024ns -= 16;
p                 322 drivers/net/ethernet/chelsio/cxgb/sge.c 		s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
p                 323 drivers/net/ethernet/chelsio/cxgb/sge.c 		max_avail_segs = max(1U, 4096 / (p->mtu - 40));
p                 326 drivers/net/ethernet/chelsio/cxgb/sge.c 		max_avail_segs = max(1U, 9000 / (p->mtu - 40));
p                 330 drivers/net/ethernet/chelsio/cxgb/sge.c 		 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
p                 331 drivers/net/ethernet/chelsio/cxgb/sge.c 		 p->speed, s->max_avail, max_avail_segs,
p                 332 drivers/net/ethernet/chelsio/cxgb/sge.c 		 p->drain_bits_per_1024ns);
p                 334 drivers/net/ethernet/chelsio/cxgb/sge.c 	return max_avail_segs * (p->mtu - 40);
p                 361 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct sched_port *p = &s->p[port];
p                 362 drivers/net/ethernet/chelsio/cxgb/sge.c 	p->drain_bits_per_1024ns = val * 1024 / 1000;
p                 385 drivers/net/ethernet/chelsio/cxgb/sge.c 		skb_queue_head_init(&s->p[i].skbq);
p                 411 drivers/net/ethernet/chelsio/cxgb/sge.c 		struct sched_port *p = &s->p[i];
p                 414 drivers/net/ethernet/chelsio/cxgb/sge.c 		delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
p                 415 drivers/net/ethernet/chelsio/cxgb/sge.c 		p->avail = min(p->avail + delta_avail, s->max_avail);
p                 443 drivers/net/ethernet/chelsio/cxgb/sge.c 		skbq = &s->p[skb->dev->if_port].skbq;
p                 455 drivers/net/ethernet/chelsio/cxgb/sge.c 		skbq = &s->p[s->port].skbq;
p                 463 drivers/net/ethernet/chelsio/cxgb/sge.c 		if (len <= s->p[s->port].avail) {
p                 464 drivers/net/ethernet/chelsio/cxgb/sge.c 			s->p[s->port].avail -= len;
p                 555 drivers/net/ethernet/chelsio/cxgb/sge.c static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
p                 564 drivers/net/ethernet/chelsio/cxgb/sge.c 		q->size = p->freelQ_size[i];
p                 675 drivers/net/ethernet/chelsio/cxgb/sge.c static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
p                 685 drivers/net/ethernet/chelsio/cxgb/sge.c 		q->size = p->cmdQ_size[i];
p                 748 drivers/net/ethernet/chelsio/cxgb/sge.c static void configure_sge(struct sge *sge, struct sge_params *p)
p                 783 drivers/net/ethernet/chelsio/cxgb/sge.c 	t1_sge_set_coalesce_params(sge, p);
p                1355 drivers/net/ethernet/chelsio/cxgb/sge.c 	const struct cpl_rx_pkt *p;
p                1366 drivers/net/ethernet/chelsio/cxgb/sge.c 	p = (const struct cpl_rx_pkt *) skb->data;
p                1367 drivers/net/ethernet/chelsio/cxgb/sge.c 	if (p->iff >= adapter->params.nports) {
p                1371 drivers/net/ethernet/chelsio/cxgb/sge.c 	__skb_pull(skb, sizeof(*p));
p                1373 drivers/net/ethernet/chelsio/cxgb/sge.c 	st = this_cpu_ptr(sge->port_stats[p->iff]);
p                1374 drivers/net/ethernet/chelsio/cxgb/sge.c 	dev = adapter->port[p->iff].dev;
p                1377 drivers/net/ethernet/chelsio/cxgb/sge.c 	if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
p                1385 drivers/net/ethernet/chelsio/cxgb/sge.c 	if (p->vlan_valid) {
p                1387 drivers/net/ethernet/chelsio/cxgb/sge.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
p                1907 drivers/net/ethernet/chelsio/cxgb/sge.c int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
p                1909 drivers/net/ethernet/chelsio/cxgb/sge.c 	sge->fixed_intrtimer = p->rx_coalesce_usecs *
p                1919 drivers/net/ethernet/chelsio/cxgb/sge.c int t1_sge_configure(struct sge *sge, struct sge_params *p)
p                1921 drivers/net/ethernet/chelsio/cxgb/sge.c 	if (alloc_rx_resources(sge, p))
p                1923 drivers/net/ethernet/chelsio/cxgb/sge.c 	if (alloc_tx_resources(sge, p)) {
p                1927 drivers/net/ethernet/chelsio/cxgb/sge.c 	configure_sge(sge, p);
p                1935 drivers/net/ethernet/chelsio/cxgb/sge.c 	p->large_buf_capacity = jumbo_payload_capacity(sge);
p                2058 drivers/net/ethernet/chelsio/cxgb/sge.c struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p)
p                2094 drivers/net/ethernet/chelsio/cxgb/sge.c 	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
p                2095 drivers/net/ethernet/chelsio/cxgb/sge.c 	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
p                2096 drivers/net/ethernet/chelsio/cxgb/sge.c 	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
p                2097 drivers/net/ethernet/chelsio/cxgb/sge.c 	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
p                2100 drivers/net/ethernet/chelsio/cxgb/sge.c 			p->rx_coalesce_usecs = 15;
p                2102 drivers/net/ethernet/chelsio/cxgb/sge.c 			p->rx_coalesce_usecs = 50;
p                2104 drivers/net/ethernet/chelsio/cxgb/sge.c 		p->rx_coalesce_usecs = 50;
p                2106 drivers/net/ethernet/chelsio/cxgb/sge.c 	p->coalesce_enable = 0;
p                2107 drivers/net/ethernet/chelsio/cxgb/sge.c 	p->sample_interval_usecs = 0;
p                 195 drivers/net/ethernet/chelsio/cxgb/subr.c 	int p;
p                 198 drivers/net/ethernet/chelsio/cxgb/subr.c 	for_each_port(adapter, p)
p                 199 drivers/net/ethernet/chelsio/cxgb/subr.c 		if (cause & (1 << p)) {
p                 200 drivers/net/ethernet/chelsio/cxgb/subr.c 			struct cphy *phy = adapter->port[p].phy;
p                 204 drivers/net/ethernet/chelsio/cxgb/subr.c 				t1_link_changed(adapter, p);
p                 703 drivers/net/ethernet/chelsio/cxgb/subr.c 		int p;
p                 710 drivers/net/ethernet/chelsio/cxgb/subr.c 		for_each_port(adapter, p) {
p                 711 drivers/net/ethernet/chelsio/cxgb/subr.c 			phy = adapter->port[p].phy;
p                 714 drivers/net/ethernet/chelsio/cxgb/subr.c 			    t1_link_changed(adapter, p);
p                 895 drivers/net/ethernet/chelsio/cxgb/subr.c 		     struct adapter_params *p)
p                 897 drivers/net/ethernet/chelsio/cxgb/subr.c 	p->chip_version = bi->chip_term;
p                 898 drivers/net/ethernet/chelsio/cxgb/subr.c 	p->is_asic = (p->chip_version != CHBT_TERM_FPGA);
p                 899 drivers/net/ethernet/chelsio/cxgb/subr.c 	if (p->chip_version == CHBT_TERM_T1 ||
p                 900 drivers/net/ethernet/chelsio/cxgb/subr.c 	    p->chip_version == CHBT_TERM_T2 ||
p                 901 drivers/net/ethernet/chelsio/cxgb/subr.c 	    p->chip_version == CHBT_TERM_FPGA) {
p                 906 drivers/net/ethernet/chelsio/cxgb/subr.c 			p->chip_revision = TERM_T1B;
p                 908 drivers/net/ethernet/chelsio/cxgb/subr.c 			p->chip_revision = TERM_T2;
p                 994 drivers/net/ethernet/chelsio/cxgb/subr.c static void get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p)
p                1000 drivers/net/ethernet/chelsio/cxgb/subr.c 	p->speed = speed_map[G_PCI_MODE_CLK(pci_mode)];
p                1001 drivers/net/ethernet/chelsio/cxgb/subr.c 	p->width = (pci_mode & F_PCI_MODE_64BIT) ? 64 : 32;
p                1002 drivers/net/ethernet/chelsio/cxgb/subr.c 	p->is_pcix = (pci_mode & F_PCI_MODE_PCIX) != 0;
p                  18 drivers/net/ethernet/chelsio/cxgb/tp.c static void tp_init(adapter_t * ap, const struct tp_params *p,
p                  28 drivers/net/ethernet/chelsio/cxgb/tp.c 	if (!p->pm_size)
p                  39 drivers/net/ethernet/chelsio/cxgb/tp.c 	       V_5TUPLE_LOOKUP(p->use_5tuple_mode) |
p                  59 drivers/net/ethernet/chelsio/cxgb/tp.c struct petp *t1_tp_create(adapter_t *adapter, struct tp_params *p)
p                 165 drivers/net/ethernet/chelsio/cxgb/tp.c int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk)
p                 169 drivers/net/ethernet/chelsio/cxgb/tp.c 	tp_init(adapter, p, tp_clk);
p                  60 drivers/net/ethernet/chelsio/cxgb/tp.h struct petp *t1_tp_create(adapter_t *adapter, struct tp_params *p);
p                  72 drivers/net/ethernet/chelsio/cxgb/tp.h int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk);
p                 617 drivers/net/ethernet/chelsio/cxgb/vsc7326.c 	}, *p = hw_stats;
p                 623 drivers/net/ethernet/chelsio/cxgb/vsc7326.c 		rmon_update(mac, CRA(0x4, port, p->reg), stats + p->offset);
p                 324 drivers/net/ethernet/chelsio/cxgb3/adapter.h void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
p                 326 drivers/net/ethernet/chelsio/cxgb3/adapter.h 		      int irq_vec_idx, const struct qset_params *p,
p                 451 drivers/net/ethernet/chelsio/cxgb3/common.h static inline unsigned int t3_mc5_size(const struct mc5 *p)
p                 453 drivers/net/ethernet/chelsio/cxgb3/common.h 	return p->tcam_size;
p                 465 drivers/net/ethernet/chelsio/cxgb3/common.h static inline unsigned int t3_mc7_size(const struct mc7 *p)
p                 467 drivers/net/ethernet/chelsio/cxgb3/common.h 	return p->size;
p                 644 drivers/net/ethernet/chelsio/cxgb3/common.h void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
p                 734 drivers/net/ethernet/chelsio/cxgb3/common.h void t3_sge_prep(struct adapter *adap, struct sge_params *p);
p                 735 drivers/net/ethernet/chelsio/cxgb3/common.h void t3_sge_init(struct adapter *adap, struct sge_params *p);
p                 162 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		const struct port_info *p = netdev_priv(dev);
p                 164 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		switch (p->link_config.speed) {
p                 177 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			    s, p->link_config.duplex == DUPLEX_FULL
p                1033 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	const __be32 *p;
p                1055 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	p = (const __be32 *)fw->data;
p                1057 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		csum += ntohl(p[i]);
p                1066 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
p                1067 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		*cache++ = be32_to_cpu(p[i]) & 0xffff;
p                1168 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
p                1172 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
p                1654 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 					    struct port_info *p, int idx)
p                1659 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
p                1732 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	u32 *p = buf + start;
p                1735 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		*p++ = t3_read_reg(ap, start);
p                1771 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	struct port_info *p = netdev_priv(dev);
p                1775 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	if (p->link_config.autoneg != AUTONEG_ENABLE)
p                1777 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	p->phy.ops->autoneg_restart(&p->phy);
p                1807 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	struct port_info *p = netdev_priv(dev);
p                1811 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 						p->link_config.supported);
p                1813 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 						p->link_config.advertising);
p                1816 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		cmd->base.speed = p->link_config.speed;
p                1817 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		cmd->base.duplex = p->link_config.duplex;
p                1827 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	cmd->base.phy_address = p->phy.mdio.prtad;
p                1828 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	cmd->base.autoneg = p->link_config.autoneg;
p                1870 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	struct port_info *p = netdev_priv(dev);
p                1871 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	struct link_config *lc = &p->link_config;
p                1911 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t3_link_start(&p->phy, &p->mac, lc);
p                1918 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	struct port_info *p = netdev_priv(dev);
p                1920 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
p                1921 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
p                1922 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
p                1928 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	struct port_info *p = netdev_priv(dev);
p                1929 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	struct link_config *lc = &p->link_config;
p                1944 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			t3_link_start(&p->phy, &p->mac, lc);
p                1948 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
p                2059 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	__le32 *p;
p                2088 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
p                2089 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		err = t3_seeprom_write(adapter, aligned_offset, *p);
p                2394 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		struct tp_params *p = &adapter->params.tp;
p                2399 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		m.tx_pg_sz = p->tx_pg_size;
p                2400 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		m.tx_num_pg = p->tx_num_pgs;
p                2401 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		m.rx_pg_sz = p->rx_pg_size;
p                2402 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		m.rx_num_pg = p->rx_num_pgs;
p                2403 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
p                2410 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		struct tp_params *p = &adapter->params.tp;
p                2430 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			m.tx_num_pg = p->tx_num_pgs;
p                2432 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			m.rx_num_pg = p->rx_num_pgs;
p                2435 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
p                2436 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
p                2438 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		p->rx_pg_size = m.rx_pg_sz;
p                2439 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		p->tx_pg_size = m.tx_pg_sz;
p                2440 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		p->rx_num_pgs = m.rx_num_pg;
p                2441 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		p->tx_num_pgs = m.tx_num_pg;
p                2574 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c static int cxgb_set_mac_addr(struct net_device *dev, void *p)
p                2578 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	struct sockaddr *addr = p;
p                2645 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		struct port_info *p = netdev_priv(dev);
p                2649 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			t3_mac_update_stats(&p->mac);
p                2661 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		struct port_info *p = netdev_priv(dev);
p                2665 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		link_fault = p->link_fault;
p                2673 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
p                2675 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
p                2692 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		struct port_info *p = netdev_priv(dev);
p                2700 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			status = t3b2_mac_watchdog_task(&p->mac);
p                2702 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			p->mac.stats.num_toggled++;
p                2704 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			struct cmac *mac = &p->mac;
p                2709 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			t3_link_start(&p->phy, mac, &p->link_config);
p                2711 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			t3_port_intr_enable(adapter, p->port_id);
p                2712 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			p->mac.stats.num_resets++;
p                2723 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	const struct adapter_params *p = &adapter->params;
p                2732 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	if (!p->linkpoll_period ||
p                2733 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
p                2734 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	    p->stats_update_period) {
p                2739 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	if (p->rev == T3_REV_B2)
p                2845 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		struct port_info *p = netdev_priv(dev);
p                2848 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
p                 455 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		struct iscsi_ipv4addr *p = data;
p                 456 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		struct port_info *pi = netdev_priv(p->dev);
p                 457 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		p->ipv4addr = pi->iscsi_ipv4addr;
p                 504 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	union active_open_entry *p = atid2entry(t, atid);
p                 505 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	void *ctx = p->t3c_tid.ctx;
p                 508 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	p->next = t->afree;
p                 509 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->afree = p;
p                 524 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	union listen_entry *p = stid2entry(t, stid);
p                 527 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	p->next = t->sfree;
p                 528 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	t->sfree = p;
p                 570 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		struct t3c_tid_entry *p = td->tid_release_list;
p                 572 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		td->tid_release_list = p->ctx;
p                 581 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 			p->ctx = (void *)td->tid_release_list;
p                 582 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 			td->tid_release_list = p;
p                 585 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		mk_tid_release(skb, p - td->tid_maps.tid_tab);
p                 587 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		p->ctx = NULL;
p                 607 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];
p                 610 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	p->ctx = (void *)td->tid_release_list;
p                 611 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	p->client = NULL;
p                 612 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	td->tid_release_list = p;
p                 613 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	if (!p->ctx || td->release_list_incomplete)
p                 660 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		union active_open_entry *p = t->afree;
p                 662 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		atid = (p - t->atid_tab) + t->atid_base;
p                 663 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		t->afree = p->next;
p                 664 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		p->t3c_tid.ctx = ctx;
p                 665 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		p->t3c_tid.client = client;
p                 682 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		union listen_entry *p = t->sfree;
p                 684 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		stid = (p - t->stid_tab) + t->stid_base;
p                 685 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		t->sfree = p->next;
p                 686 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		p->t3c_tid.ctx = ctx;
p                 687 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		p->t3c_tid.client = client;
p                 761 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	union opcode_tid *p = cplhdr(skb);
p                 762 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	unsigned int stid = G_TID(ntohl(p->opcode_tid));
p                 767 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	    t3c_tid->client->handlers[p->opcode]) {
p                 768 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		return t3c_tid->client->handlers[p->opcode] (dev, skb,
p                 772 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		       dev->name, p->opcode);
p                 779 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	union opcode_tid *p = cplhdr(skb);
p                 780 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
p                 785 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	    t3c_tid->client->handlers[p->opcode]) {
p                 786 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		return t3c_tid->client->handlers[p->opcode]
p                 790 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		       dev->name, p->opcode);
p                 846 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	union opcode_tid *p = cplhdr(skb);
p                 847 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
p                 852 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	    t3c_tid->client->handlers[p->opcode]) {
p                 853 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		return t3c_tid->client->handlers[p->opcode]
p                 918 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct cpl_trace_pkt *p = cplhdr(skb);
p                 922 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	skb_pull(skb, sizeof(*p));
p                1028 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 			union opcode_tid *p = cplhdr(skb);
p                1031 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 			       dev->name, opcode, G_TID(ntohl(p->opcode_tid)));
p                 221 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	struct l2t_entry *end, *e, **p;
p                 243 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
p                 244 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			if (*p == e) {
p                 245 drivers/net/ethernet/chelsio/cxgb3/l2t.c 				*p = e->next;
p                 307 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	struct port_info *p;
p                 324 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	p = netdev_priv(dev);
p                 325 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	smt_idx = p->port_id;
p                 623 drivers/net/ethernet/chelsio/cxgb3/sge.c 	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
p                 625 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (!p)
p                 631 drivers/net/ethernet/chelsio/cxgb3/sge.c 			dma_free_coherent(&pdev->dev, len, p, *phys);
p                 636 drivers/net/ethernet/chelsio/cxgb3/sge.c 	return p;
p                1584 drivers/net/ethernet/chelsio/cxgb3/sge.c 	const dma_addr_t *p;
p                1589 drivers/net/ethernet/chelsio/cxgb3/sge.c 	p = dui->addr;
p                1592 drivers/net/ethernet/chelsio/cxgb3/sge.c 		pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) -
p                1597 drivers/net/ethernet/chelsio/cxgb3/sge.c 		pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
p                1604 drivers/net/ethernet/chelsio/cxgb3/sge.c 	dma_addr_t *p;
p                1609 drivers/net/ethernet/chelsio/cxgb3/sge.c 	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
p                1610 drivers/net/ethernet/chelsio/cxgb3/sge.c 		*p++ = be64_to_cpu(sgl->addr[0]);
p                1611 drivers/net/ethernet/chelsio/cxgb3/sge.c 		*p++ = be64_to_cpu(sgl->addr[1]);
p                1614 drivers/net/ethernet/chelsio/cxgb3/sge.c 		*p = be64_to_cpu(sgl->addr[0]);
p                2081 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
p                2085 drivers/net/ethernet/chelsio/cxgb3/sge.c 	skb_pull(skb, sizeof(*p) + pad);
p                2086 drivers/net/ethernet/chelsio/cxgb3/sge.c 	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
p                2088 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
p                2089 drivers/net/ethernet/chelsio/cxgb3/sge.c 	    p->csum == htons(0xffff) && !p->fragment) {
p                2096 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (p->vlan_valid) {
p                2098 drivers/net/ethernet/chelsio/cxgb3/sge.c 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
p                3012 drivers/net/ethernet/chelsio/cxgb3/sge.c void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
p                3014 drivers/net/ethernet/chelsio/cxgb3/sge.c 	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
p                3015 drivers/net/ethernet/chelsio/cxgb3/sge.c 	qs->rspq.polling = p->polling;
p                3016 drivers/net/ethernet/chelsio/cxgb3/sge.c 	qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
p                3036 drivers/net/ethernet/chelsio/cxgb3/sge.c 		      int irq_vec_idx, const struct qset_params *p,
p                3047 drivers/net/ethernet/chelsio/cxgb3/sge.c 	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
p                3054 drivers/net/ethernet/chelsio/cxgb3/sge.c 	q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
p                3061 drivers/net/ethernet/chelsio/cxgb3/sge.c 	q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
p                3074 drivers/net/ethernet/chelsio/cxgb3/sge.c 		q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
p                3082 drivers/net/ethernet/chelsio/cxgb3/sge.c 		q->txq[i].size = p->txq_size[i];
p                3093 drivers/net/ethernet/chelsio/cxgb3/sge.c 	q->fl[0].size = p->fl_size;
p                3094 drivers/net/ethernet/chelsio/cxgb3/sge.c 	q->fl[1].size = p->jumbo_size;
p                3097 drivers/net/ethernet/chelsio/cxgb3/sge.c 	q->rspq.size = p->rspq_size;
p                3137 drivers/net/ethernet/chelsio/cxgb3/sge.c 					  p->cong_thres, 1, 0);
p                3173 drivers/net/ethernet/chelsio/cxgb3/sge.c 	t3_update_qset_coalesce(q, p);
p                3311 drivers/net/ethernet/chelsio/cxgb3/sge.c void t3_sge_init(struct adapter *adap, struct sge_params *p)
p                3350 drivers/net/ethernet/chelsio/cxgb3/sge.c void t3_sge_prep(struct adapter *adap, struct sge_params *p)
p                3354 drivers/net/ethernet/chelsio/cxgb3/sge.c 	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
p                3358 drivers/net/ethernet/chelsio/cxgb3/sge.c 		struct qset_params *q = p->qset + i;
p                  84 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
p                  88 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		t3_write_reg(adapter, p->reg_addr + offset, p->val);
p                  89 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p++;
p                 709 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
p                 730 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk);
p                 733 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	ret = vpdstrtouint(vpd.mclk_data, vpd.mclk_len, 10, &p->mclk);
p                 736 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	ret = vpdstrtouint(vpd.uclk_data, vpd.uclk_len, 10, &p->uclk);
p                 739 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	ret = vpdstrtouint(vpd.mdc_data, vpd.mdc_len, 10, &p->mdc);
p                 742 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	ret = vpdstrtouint(vpd.mt_data, vpd.mt_len, 10, &p->mem_timing);
p                 745 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
p                 749 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
p                 750 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
p                 752 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
p                 753 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
p                 755 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 				  &p->xauicfg[0]);
p                 759 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 				  &p->xauicfg[1]);
p                 764 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	ret = hex2bin(p->eth_base, vpd.na_data, 6);
p                1022 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	const __be32 *p = (const __be32 *)tp_sram;
p                1026 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		csum += ntohl(p[i]);
p                1128 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	const __be32 *p = (const __be32 *)fw_data;
p                1137 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		csum += ntohl(p[i]);
p                1898 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		struct port_info *p = adap2pinfo(adapter, i);
p                1900 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		if (!(p->phy.caps & SUPPORTED_IRQ))
p                1904 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 			int phy_cause = p->phy.ops->intr_handler(&p->phy);
p                1909 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 				p->phy.fifo_errors++;
p                2605 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c static void partition_mem(struct adapter *adap, const struct tp_params *p)
p                2624 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		     p->chan_rx_size | (p->chan_tx_size >> 16));
p                2627 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
p                2628 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
p                2630 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
p                2633 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
p                2634 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
p                2636 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	pstructs = p->rx_num_pgs + p->tx_num_pgs;
p                2646 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
p                2649 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
p                2650 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
p                2654 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
p                2656 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
p                2670 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c static void tp_config(struct adapter *adap, const struct tp_params *p)
p                2953 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c static void ulp_config(struct adapter *adap, const struct tp_params *p)
p                2955 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	unsigned int m = p->chan_rx_size;
p                2957 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
p                2958 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
p                2959 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
p                2960 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
p                2961 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
p                2962 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
p                2963 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
p                3072 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c static int tp_init(struct adapter *adap, const struct tp_params *p)
p                3076 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	tp_config(adap, p);
p                3210 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	const struct mc7_timing_params *p = &mc7_timings[mem_type];
p                3237 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		     V_ACTTOPREDLY(p->ActToPreDly) |
p                3238 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
p                3239 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
p                3240 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
p                3455 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
p                3463 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->variant = PCI_VARIANT_PCIE;
p                3465 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->width = (val >> 4) & 0x3f;
p                3470 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
p                3471 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	p->width = (pci_mode & F_64BIT) ? 64 : 32;
p                3474 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->variant = PCI_VARIANT_PCI;
p                3476 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
p                3478 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
p                3480 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->variant = PCI_VARIANT_PCIX_266_MODE2;
p                3690 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		struct tp_params *p = &adapter->params.tp;
p                3696 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
p                3697 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
p                3698 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
p                3699 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->cm_size = t3_mc7_size(&adapter->cm);
p                3700 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
p                3701 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->chan_tx_size = p->pmtx_size / p->nchan;
p                3702 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->rx_pg_size = 64 * 1024;
p                3703 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
p                3704 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
p                3705 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
p                3706 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->ntimer_qs = p->cm_size >= (128 << 20) ||
p                3733 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		struct port_info *p = adap2pinfo(adapter, i);
p                3745 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->phy.mdio.dev = adapter->port[i];
p                3746 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
p                3750 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		mac_prep(&p->mac, adapter, j);
p                3762 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		init_link_config(&p->link_config, p->phy.caps);
p                3763 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->phy.ops->power_down(&p->phy, 1);
p                3770 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		if (!(p->phy.caps & SUPPORTED_IRQ) &&
p                3797 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		struct port_info *p = adap2pinfo(adapter, i);
p                3803 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
p                3806 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		p->phy.ops->power_down(&p->phy, 1);
p                1634 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p);
p                1635 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
p                1723 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
p                1727 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
p                  99 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	struct seq_tab *p;
p                 101 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	p = __seq_open_private(f, &seq_tab_ops, sizeof(*p) + rows * width);
p                 102 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (p) {
p                 103 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		p->show = show;
p                 104 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		p->rows = rows;
p                 105 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		p->width = width;
p                 106 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		p->skip_first = have_header != 0;
p                 108 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	return p;
p                 114 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c static int seq_tab_trim(struct seq_tab *p, unsigned int new_rows)
p                 116 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (new_rows > p->rows)
p                 118 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	p->rows = new_rows;
p                 128 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		const u32 *p = v;
p                 132 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
p                 133 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
p                 134 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   p[6], p[7]);
p                 144 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		const u32 *p = v;
p                 146 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		seq_printf(seq, "  %02x   %08x %08x\n", p[5] & 0xff, p[6],
p                 147 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   p[7]);
p                 149 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
p                 150 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   p[4] & 0xff, p[5] >> 8);
p                 151 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		seq_printf(seq, "  %02x   %x%07x %x%07x\n", (p[0] >> 4) & 0xff,
p                 152 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4);
p                 163 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		const u32 *p = v;
p                 166 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   (p[9] >> 16) & 0xff,       /* Status */
p                 167 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   p[9] & 0xffff, p[8] >> 16, /* Inst */
p                 168 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   p[8] & 0xffff, p[7] >> 16, /* Data */
p                 169 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   p[7] & 0xffff, p[6] >> 16, /* PC */
p                 170 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   p[2], p[1], p[0],      /* LS0 Stat, Addr and Data */
p                 171 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   p[5], p[4], p[3]);     /* LS1 Stat, Addr and Data */
p                 181 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		const u32 *p = v;
p                 184 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   p[3] & 0xff, p[2], p[1], p[0]);
p                 186 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
p                 187 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
p                 189 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
p                 190 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
p                 191 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   p[6] >> 16);
p                 200 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	struct seq_tab *p;
p                 209 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		p = seq_open_tab(file, (adap->params.cim_la_size / 10) + 1,
p                 214 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		p = seq_open_tab(file, adap->params.cim_la_size / 8,
p                 219 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (!p)
p                 222 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	ret = t4_cim_read_la(adap, (u32 *)p->data, NULL);
p                 238 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	const u32 *p = v;
p                 244 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f,
p                 245 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   p[5] & 0xffff, p[4], p[3], p[2], p[1], p[0]);
p                 250 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   (p[4] >> 6) & 0xff, p[4] & 0x3f,
p                 251 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   p[3], p[2], p[1], p[0]);
p                 258 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	struct seq_tab *p;
p                 261 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	p = seq_open_tab(file, 2 * CIM_PIFLA_SIZE, 6 * sizeof(u32), 1,
p                 263 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (!p)
p                 266 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	t4_cim_read_pif_la(adap, (u32 *)p->data,
p                 267 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   (u32 *)p->data + 6 * CIM_PIFLA_SIZE, NULL, NULL);
p                 281 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	const u32 *p = v;
p                 287 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   p[4], p[3], p[2], p[1], p[0]);
p                 293 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
p                 294 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
p                 295 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   (p[1] >> 2) | ((p[2] & 3) << 30),
p                 296 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
p                 297 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   p[0] & 1);
p                 304 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	struct seq_tab *p;
p                 307 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	p = seq_open_tab(file, 2 * CIM_MALA_SIZE, 5 * sizeof(u32), 1,
p                 309 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (!p)
p                 312 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	t4_cim_read_ma_la(adap, (u32 *)p->data,
p                 313 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			  (u32 *)p->data + 5 * CIM_MALA_SIZE);
p                 341 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	u32 *p = stat;
p                 366 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
p                 369 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   IBQRDADDR_G(p[0]), IBQWRADDR_G(p[1]),
p                 370 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]),
p                 371 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   QUEREMFLITS_G(p[2]) * 16);
p                 372 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	for ( ; i < CIM_NUM_IBQ + cim_num_obq; i++, p += 4, wr += 2)
p                 375 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   QUERDADDR_G(p[0]) & 0x3fff, wr[0] - base[i],
p                 376 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]),
p                 377 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   QUEREMFLITS_G(p[2]) * 16);
p                 384 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	const u32 *p = v;
p                 386 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	seq_printf(seq, "%#06x: %08x %08x %08x %08x\n", idx * 16, p[0], p[1],
p                 387 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		   p[2], p[3]);
p                 394 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	struct seq_tab *p;
p                 398 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	p = seq_open_tab(file, CIM_IBQ_SIZE, 4 * sizeof(u32), 0, cimq_show);
p                 399 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (!p)
p                 402 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	ret = t4_read_cim_ibq(adap, qid, (u32 *)p->data, CIM_IBQ_SIZE * 4);
p                 421 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	struct seq_tab *p;
p                 425 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	p = seq_open_tab(file, 6 * CIM_OBQ_SIZE, 4 * sizeof(u32), 0, cimq_show);
p                 426 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (!p)
p                 429 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	ret = t4_read_cim_obq(adap, qid, (u32 *)p->data, 6 * CIM_OBQ_SIZE * 4);
p                 433 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		seq_tab_trim(p, ret / 4);
p                 454 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			    const struct field_desc *p)
p                 459 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	while (p->name) {
p                 460 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		u64 mask = (1ULL << p->width) - 1;
p                 461 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		int len = scnprintf(buf, sizeof(buf), "%s: %llu", p->name,
p                 462 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 				    ((unsigned long long)v >> p->start) & mask);
p                 470 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		p++;
p                 514 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	const u64 *p = v;
p                 516 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	field_desc_show(seq, *p, tp_la0);
p                 522 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	const u64 *p = v;
p                 526 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	field_desc_show(seq, p[0], tp_la0);
p                 527 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
p                 528 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		field_desc_show(seq, p[1], tp_la0);
p                 628 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	const u64 *p = v;
p                 632 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	field_desc_show(seq, p[0], tp_la0);
p                 633 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
p                 634 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		field_desc_show(seq, p[1], (p[0] & BIT(17)) ? tp_la2 : tp_la1);
p                 640 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	struct seq_tab *p;
p                 645 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		p = seq_open_tab(file, TPLA_SIZE / 2, 2 * sizeof(u64), 0,
p                 649 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		p = seq_open_tab(file, TPLA_SIZE / 2, 2 * sizeof(u64), 0,
p                 653 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		p = seq_open_tab(file, TPLA_SIZE, sizeof(u64), 0, tp_la_show);
p                 655 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (!p)
p                 658 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	t4_tp_read_la(adap, (u64 *)p->data, NULL);
p                 696 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	const u32 *p = v;
p                 703 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
p                 709 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	struct seq_tab *p;
p                 712 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	p = seq_open_tab(file, ULPRX_LA_SIZE, 8 * sizeof(u32), 1,
p                 714 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (!p)
p                 717 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	t4_ulprx_read_la(adap, (u32 *)p->data);
p                1418 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	char *s, *p, *word, *end;
p                1431 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	p = s = kzalloc(count + 1, GFP_USER);
p                1456 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	while (p) {
p                1457 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		while (isspace(*p))
p                1458 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			p++;
p                1459 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		word = strsep(&p, " ");
p                2000 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	struct seq_tab *p;
p                2003 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	p = seq_open_tab(file, nentries / 8, 8 * sizeof(u16), 0, rss_show);
p                2004 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (!p)
p                2007 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	ret = t4_read_rss(adap, (u16 *)p->data);
p                2209 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	char s[100], *p;
p                2220 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	for (p = s, i = 9; i >= 0; i--) {
p                2222 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		for (j = 0; j < 8; j++, p++) {
p                2223 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			if (!isxdigit(*p))
p                2225 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			key[i] = (key[i] << 4) | hex2val(*p);
p                2296 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	struct seq_tab *p;
p                2301 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	p = seq_open_tab(file, 8, sizeof(*pfconf), 1, rss_pf_config_show);
p                2302 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (!p)
p                2305 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	pfconf = (struct rss_pf_conf *)p->data;
p                2364 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	struct seq_tab *p;
p                2368 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	p = seq_open_tab(file, vfcount, sizeof(*vfconf), 1, rss_vf_config_show);
p                2369 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	if (!p)
p                2372 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 	vfconf = (struct rss_vf_conf *)p->data;
p                 240 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 				   const struct port_info *p,
p                 244 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
p                 245 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
p                 248 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
p                 323 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	struct port_info *p = netdev_priv(dev);
p                 327 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
p                 329 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan);
p                 793 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	struct port_info *p = netdev_priv(dev);
p                 795 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
p                 796 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0;
p                 797 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0;
p                 803 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	struct port_info *p = netdev_priv(dev);
p                 804 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	struct link_config *lc = &p->link_cfg;
p                 818 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		return t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan,
p                1158 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	u32 aligned_offset, aligned_len, *p;
p                1197 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
p                1198 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		err = eeprom_wr_phys(adapter, aligned_offset, *p);
p                1290 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
p                1297 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	if (!p)
p                1300 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		p[n] = pi->rss[n];
p                1304 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
p                1316 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	if (!p)
p                1322 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 			pi->rss[i] = p[i];
p                 195 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		const struct port_info *p = netdev_priv(dev);
p                 197 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		switch (p->link_cfg.speed) {
p                 221 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 				dev->name, p->link_cfg.speed);
p                 226 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 			    fc[p->link_cfg.fc]);
p                 574 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		const struct cpl_sge_egr_update *p = (void *)rsp;
p                 575 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
p                 592 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		const struct cpl_fw6_msg *p = (void *)rsp;
p                 595 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		const struct fw_port_cmd *pcmd = (const void *)p->data;
p                 625 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 			if (p->type == 0)
p                 626 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 				t4_handle_fw_rpl(q->adap, p->data);
p                 628 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		const struct cpl_l2t_write_rpl *p = (void *)rsp;
p                 630 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		do_l2t_write_rpl(q->adap, p);
p                 632 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		const struct cpl_smt_write_rpl *p = (void *)rsp;
p                 634 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		do_smt_write_rpl(q->adap, p);
p                 636 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		const struct cpl_set_tcb_rpl *p = (void *)rsp;
p                 638 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		filter_rpl(q->adap, p);
p                 640 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		const struct cpl_act_open_rpl *p = (void *)rsp;
p                 642 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		hash_filter_rpl(q->adap, p);
p                 644 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		const struct cpl_abort_rpl_rss *p = (void *)rsp;
p                 646 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		hash_del_filter_rpl(q->adap, p);
p                 648 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		const struct cpl_srq_table_rpl *p = (void *)rsp;
p                 650 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		do_srq_table_rpl(q->adap, p);
p                 896 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
p                 898 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	qid -= p->ingr_start;
p                 899 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
p                1221 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		union aopen_entry *p = t->afree;
p                1223 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		atid = (p - t->atid_tab) + t->atid_base;
p                1224 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		t->afree = p->next;
p                1225 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		p->data = data;
p                1238 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
p                1241 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	p->next = t->afree;
p                1242 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	t->afree = p;
p                1368 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	void **p = &t->tid_tab[tid];
p                1372 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	*p = adap->tid_release_head;
p                1374 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	adap->tid_release_head = (void **)((uintptr_t)p | chan);
p                1394 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		void **p = adap->tid_release_head;
p                1395 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		unsigned int chan = (uintptr_t)p & 3;
p                1396 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		p = (void *)p - chan;
p                1398 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		adap->tid_release_head = *p;
p                1399 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		*p = NULL;
p                1406 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
p                2570 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct port_info *p = netdev_priv(dev);
p                2571 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct adapter *adapter = p->adapter;
p                2582 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
p                2583 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 				 &p->stats_base);
p                3020 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static int cxgb_set_mac_addr(struct net_device *dev, void *p)
p                3023 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct sockaddr *addr = p;
p                3060 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct ch_sched_params p;
p                3107 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	memset(&p, 0, sizeof(p));
p                3108 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	p.type = SCHED_CLASS_TYPE_PACKET;
p                3109 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	p.u.params.level    = SCHED_CLASS_LEVEL_CL_RL;
p                3110 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	p.u.params.mode     = SCHED_CLASS_MODE_CLASS;
p                3111 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
p                3112 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
p                3113 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	p.u.params.channel  = pi->tx_chan;
p                3114 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	p.u.params.class    = SCHED_CLS_NONE;
p                3115 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	p.u.params.minrate  = 0;
p                3116 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	p.u.params.maxrate  = req_rate;
p                3117 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	p.u.params.weight   = 0;
p                3118 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	p.u.params.pktsize  = dev->mtu;
p                3120 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	e = cxgb4_sched_class_alloc(dev, &p);
p                 731 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 			const struct cxgb4_uld_info *p)
p                 746 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 		ret = cfg_queues_uld(adap, type, p);
p                 749 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 		ret = setup_sge_queues_uld(adap, type, p->lro);
p                 762 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 		ret = setup_sge_txq_uld(adap, type, p);
p                 765 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c 		adap->uld[type] = *p;
p                 376 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 	void *(*add)(const struct cxgb4_lld_info *p);
p                 389 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
p                 266 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	struct l2t_entry *end, *e, **p;
p                 287 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
p                 288 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			if (*p == e) {
p                 289 drivers/net/ethernet/chelsio/cxgb4/l2t.c 				*p = e->next;
p                 301 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	struct l2t_entry *end, *e, **p;
p                 329 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
p                 330 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			if (*p == e) {
p                 331 drivers/net/ethernet/chelsio/cxgb4/l2t.c 				*p = e->next;
p                 124 drivers/net/ethernet/chelsio/cxgb4/l2t.h void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
p                  42 drivers/net/ethernet/chelsio/cxgb4/sched.c 				 struct ch_sched_params *p,
p                  50 drivers/net/ethernet/chelsio/cxgb4/sched.c 	e = &s->tab[p->u.params.class];
p                  53 drivers/net/ethernet/chelsio/cxgb4/sched.c 		err = t4_sched_params(adap, p->type,
p                  54 drivers/net/ethernet/chelsio/cxgb4/sched.c 				      p->u.params.level, p->u.params.mode,
p                  55 drivers/net/ethernet/chelsio/cxgb4/sched.c 				      p->u.params.rateunit,
p                  56 drivers/net/ethernet/chelsio/cxgb4/sched.c 				      p->u.params.ratemode,
p                  57 drivers/net/ethernet/chelsio/cxgb4/sched.c 				      p->u.params.channel, e->idx,
p                  58 drivers/net/ethernet/chelsio/cxgb4/sched.c 				      p->u.params.minrate, p->u.params.maxrate,
p                  59 drivers/net/ethernet/chelsio/cxgb4/sched.c 				      p->u.params.weight, p->u.params.pktsize);
p                 143 drivers/net/ethernet/chelsio/cxgb4/sched.c static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
p                 153 drivers/net/ethernet/chelsio/cxgb4/sched.c 	if (p->queue < 0 || p->queue >= pi->nqsets)
p                 156 drivers/net/ethernet/chelsio/cxgb4/sched.c 	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
p                 184 drivers/net/ethernet/chelsio/cxgb4/sched.c static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
p                 194 drivers/net/ethernet/chelsio/cxgb4/sched.c 	if (p->queue < 0 || p->queue >= pi->nqsets)
p                 201 drivers/net/ethernet/chelsio/cxgb4/sched.c 	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
p                 205 drivers/net/ethernet/chelsio/cxgb4/sched.c 	err = t4_sched_queue_unbind(pi, p);
p                 211 drivers/net/ethernet/chelsio/cxgb4/sched.c 	memcpy(&qe->param, p, sizeof(qe->param));
p                 355 drivers/net/ethernet/chelsio/cxgb4/sched.c 						const struct ch_sched_params *p)
p                 361 drivers/net/ethernet/chelsio/cxgb4/sched.c 	if (!p) {
p                 375 drivers/net/ethernet/chelsio/cxgb4/sched.c 		memcpy(&tp, p, sizeof(tp));
p                 401 drivers/net/ethernet/chelsio/cxgb4/sched.c 						struct ch_sched_params *p)
p                 407 drivers/net/ethernet/chelsio/cxgb4/sched.c 	if (!p)
p                 410 drivers/net/ethernet/chelsio/cxgb4/sched.c 	class_id = p->u.params.class;
p                 421 drivers/net/ethernet/chelsio/cxgb4/sched.c 	e = t4_sched_class_lookup(pi, p);
p                 430 drivers/net/ethernet/chelsio/cxgb4/sched.c 		memcpy(&np, p, sizeof(np));
p                 455 drivers/net/ethernet/chelsio/cxgb4/sched.c 					    struct ch_sched_params *p)
p                 463 drivers/net/ethernet/chelsio/cxgb4/sched.c 	class_id = p->u.params.class;
p                 467 drivers/net/ethernet/chelsio/cxgb4/sched.c 	return t4_sched_class_alloc(pi, p);
p                 104 drivers/net/ethernet/chelsio/cxgb4/sched.h 					    struct ch_sched_params *p);
p                 304 drivers/net/ethernet/chelsio/cxgb4/sge.c 	const struct ulptx_sge_pair *p;
p                 320 drivers/net/ethernet/chelsio/cxgb4/sge.c 	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
p                 321 drivers/net/ethernet/chelsio/cxgb4/sge.c 		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
p                 322 drivers/net/ethernet/chelsio/cxgb4/sge.c unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
p                 323 drivers/net/ethernet/chelsio/cxgb4/sge.c 				       ntohl(p->len[0]), DMA_TO_DEVICE);
p                 324 drivers/net/ethernet/chelsio/cxgb4/sge.c 			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
p                 325 drivers/net/ethernet/chelsio/cxgb4/sge.c 				       ntohl(p->len[1]), DMA_TO_DEVICE);
p                 326 drivers/net/ethernet/chelsio/cxgb4/sge.c 			p++;
p                 327 drivers/net/ethernet/chelsio/cxgb4/sge.c 		} else if ((u8 *)p == (u8 *)q->stat) {
p                 328 drivers/net/ethernet/chelsio/cxgb4/sge.c 			p = (const struct ulptx_sge_pair *)q->desc;
p                 330 drivers/net/ethernet/chelsio/cxgb4/sge.c 		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
p                 334 drivers/net/ethernet/chelsio/cxgb4/sge.c 				       ntohl(p->len[0]), DMA_TO_DEVICE);
p                 336 drivers/net/ethernet/chelsio/cxgb4/sge.c 				       ntohl(p->len[1]), DMA_TO_DEVICE);
p                 337 drivers/net/ethernet/chelsio/cxgb4/sge.c 			p = (const struct ulptx_sge_pair *)&addr[2];
p                 341 drivers/net/ethernet/chelsio/cxgb4/sge.c 			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
p                 342 drivers/net/ethernet/chelsio/cxgb4/sge.c 				       ntohl(p->len[0]), DMA_TO_DEVICE);
p                 344 drivers/net/ethernet/chelsio/cxgb4/sge.c 				       ntohl(p->len[1]), DMA_TO_DEVICE);
p                 345 drivers/net/ethernet/chelsio/cxgb4/sge.c 			p = (const struct ulptx_sge_pair *)&addr[1];
p                 351 drivers/net/ethernet/chelsio/cxgb4/sge.c 		if ((u8 *)p == (u8 *)q->stat)
p                 352 drivers/net/ethernet/chelsio/cxgb4/sge.c 			p = (const struct ulptx_sge_pair *)q->desc;
p                 353 drivers/net/ethernet/chelsio/cxgb4/sge.c 		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
p                 355 drivers/net/ethernet/chelsio/cxgb4/sge.c 		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
p                 719 drivers/net/ethernet/chelsio/cxgb4/sge.c 	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
p                 721 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if (!p)
p                 727 drivers/net/ethernet/chelsio/cxgb4/sge.c 			dma_free_coherent(dev, len, p, *phys);
p                 733 drivers/net/ethernet/chelsio/cxgb4/sge.c 	return p;
p                1044 drivers/net/ethernet/chelsio/cxgb4/sge.c 	u64 *p;
p                1059 drivers/net/ethernet/chelsio/cxgb4/sge.c 	p = PTR_ALIGN(pos, 8);
p                1060 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if ((uintptr_t)p & 8)
p                1061 drivers/net/ethernet/chelsio/cxgb4/sge.c 		*p = 0;
p                1069 drivers/net/ethernet/chelsio/cxgb4/sge.c 	u64 *p;
p                1081 drivers/net/ethernet/chelsio/cxgb4/sge.c 	p = PTR_ALIGN(pos, 8);
p                1082 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if ((uintptr_t)p & 8) {
p                1083 drivers/net/ethernet/chelsio/cxgb4/sge.c 		*p = 0;
p                1084 drivers/net/ethernet/chelsio/cxgb4/sge.c 		return p + 1;
p                1086 drivers/net/ethernet/chelsio/cxgb4/sge.c 	return p;
p                2489 drivers/net/ethernet/chelsio/cxgb4/sge.c 	u64 *p;
p                2500 drivers/net/ethernet/chelsio/cxgb4/sge.c 	p = PTR_ALIGN(pos, 8);
p                2501 drivers/net/ethernet/chelsio/cxgb4/sge.c 	if ((uintptr_t)p & 8) {
p                2502 drivers/net/ethernet/chelsio/cxgb4/sge.c 		*p = 0;
p                2503 drivers/net/ethernet/chelsio/cxgb4/sge.c 		return p + 1;
p                2505 drivers/net/ethernet/chelsio/cxgb4/sge.c 	return p;
p                2684 drivers/net/ethernet/chelsio/cxgb4/sge.c 	const struct page_frag *p;
p                2686 drivers/net/ethernet/chelsio/cxgb4/sge.c 	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
p                2687 drivers/net/ethernet/chelsio/cxgb4/sge.c 		put_page(p->page);
p                  75 drivers/net/ethernet/chelsio/cxgb4/smt.h void do_smt_write_rpl(struct adapter *p, const struct cpl_smt_write_rpl *rpl);
p                 295 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	const __be64 *p = cmd;
p                 381 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
p                2750 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
p                2832 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
p                2833 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	strim(p->id);
p                2834 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	memcpy(p->ec, vpd + ec, EC_LEN);
p                2835 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	strim(p->ec);
p                2837 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
p                2838 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	strim(p->sn);
p                2840 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
p                2841 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	strim(p->pn);
p                2842 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
p                2843 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	strim((char *)p->na);
p                2859 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
p                2866 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	ret = t4_get_raw_vpd_params(adapter, p);
p                2880 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->cclk = cclk_val;
p                3657 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	const __be32 *p = (const __be32 *)fw_data;
p                3687 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		csum += be32_to_cpu(p[i]);
p                3957 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		u32 *p = la_buf + i;
p                3962 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
p                3963 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			*p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
p                6192 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			int p;
p                6197 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
p                6198 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 				mps_bg_map[p] = val & 0xff;
p                6357 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
p                6368 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
p                6369 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
p                6370 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
p                6371 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
p                6372 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
p                6373 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
p                6374 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
p                6375 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
p                6376 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
p                6377 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
p                6378 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
p                6379 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
p                6380 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
p                6381 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_drop             = GET_STAT(TX_PORT_DROP);
p                6382 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
p                6383 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
p                6384 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
p                6385 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
p                6386 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
p                6387 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
p                6388 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
p                6389 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
p                6390 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
p                6394 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			p->tx_frames_64 -= p->tx_pause;
p                6396 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			p->tx_mcast_frames -= p->tx_pause;
p                6398 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
p                6399 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
p                6400 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
p                6401 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
p                6402 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
p                6403 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
p                6404 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
p                6405 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
p                6406 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
p                6407 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
p                6408 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
p                6409 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
p                6410 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
p                6411 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
p                6412 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
p                6413 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
p                6414 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
p                6415 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
p                6416 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
p                6417 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
p                6418 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
p                6419 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
p                6420 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
p                6421 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
p                6422 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
p                6423 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
p                6424 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
p                6428 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			p->rx_frames_64 -= p->rx_pause;
p                6430 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			p->rx_mcast_frames -= p->rx_pause;
p                6433 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
p                6434 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
p                6435 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
p                6436 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
p                6437 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
p                6438 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
p                6439 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
p                6440 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
p                6454 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
p                6465 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->octets           = GET_STAT(BYTES);
p                6466 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->frames           = GET_STAT(FRAMES);
p                6467 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->bcast_frames     = GET_STAT(BCAST);
p                6468 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->mcast_frames     = GET_STAT(MCAST);
p                6469 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->ucast_frames     = GET_STAT(UCAST);
p                6470 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->error_frames     = GET_STAT(ERROR);
p                6472 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->frames_64        = GET_STAT(64B);
p                6473 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->frames_65_127    = GET_STAT(65B_127B);
p                6474 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->frames_128_255   = GET_STAT(128B_255B);
p                6475 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->frames_256_511   = GET_STAT(256B_511B);
p                6476 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->frames_512_1023  = GET_STAT(512B_1023B);
p                6477 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->frames_1024_1518 = GET_STAT(1024B_1518B);
p                6478 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->frames_1519_max  = GET_STAT(1519B_MAX);
p                6479 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->drop             = GET_STAT(DROP_FRAMES);
p                6481 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
p                6482 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
p                6483 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
p                6484 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
p                6485 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
p                6486 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
p                6487 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
p                6488 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
p                7462 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	__be32 *p = &c.param[0].mnem;
p                7475 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		*p++ = cpu_to_be32(*params++);
p                7477 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			*p = cpu_to_be32(*(val + i));
p                7478 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		p++;
p                7483 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
p                7484 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			*val++ = be32_to_cpu(*p);
p                7524 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	__be32 *p = &c.param[0].mnem;
p                7537 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		*p++ = cpu_to_be32(*params++);
p                7538 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		*p++ = cpu_to_be32(*val++);
p                7764 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	struct fw_vi_mac_exact *p;
p                7779 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p = c.u.exact;
p                7780 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
p                7782 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
p                7807 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	struct fw_vi_mac_raw *p = &c.u.raw;
p                7820 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) |
p                7824 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
p                7827 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
p                7831 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
p                7832 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
p                7859 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	struct fw_vi_mac_vni *p = c.u.exact_vni;
p                7870 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
p                7872 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
p                7873 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));
p                7875 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->lookup_type_to_vni =
p                7879 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->vni_mask_pkd = cpu_to_be32(FW_VI_MAC_CMD_VNI_MASK_V(vni_mask));
p                7882 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
p                7907 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	struct fw_vi_mac_raw *p = &c.u.raw;
p                7919 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));
p                7922 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
p                7925 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
p                7929 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
p                7930 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
p                7934 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
p                7982 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		struct fw_vi_mac_exact *p;
p                7995 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
p                7996 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			p->valid_to_idx =
p                8000 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			memcpy(p->macaddr, addr[offset + i],
p                8001 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			       sizeof(p->macaddr));
p                8012 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
p                8014 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 					be16_to_cpu(p->valid_to_idx));
p                8070 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		struct fw_vi_mac_exact *p;
p                8083 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
p                8084 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			p->valid_to_idx = cpu_to_be16(
p                8087 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
p                8094 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
p                8096 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 						be16_to_cpu(p->valid_to_idx));
p                8135 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	struct fw_vi_mac_exact *p = c.u.exact;
p                8147 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
p                8150 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
p                8154 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
p                8844 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	const struct fw_port_cmd *p = (const void *)rpl;
p                8846 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
p                8852 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
p                8870 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
p                8876 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		p->speed = val & PCI_EXP_LNKSTA_CLS;
p                8877 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
p                9564 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		struct port_info *p = adap2pinfo(adap, i);
p                9569 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 				    FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
p                9574 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
p                 513 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
p                 514 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
p                 520 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		cpl = (void *)p;
p                 534 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		const struct cpl_sge_egr_update *p = cpl;
p                 535 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 		unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
p                 309 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	const struct ulptx_sge_pair *p;
p                 325 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
p                 326 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
p                 328 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
p                 329 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
p                 330 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
p                 331 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
p                 332 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			p++;
p                 333 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		} else if ((u8 *)p == (u8 *)tq->stat) {
p                 334 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			p = (const struct ulptx_sge_pair *)tq->desc;
p                 336 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		} else if ((u8 *)p + 8 == (u8 *)tq->stat) {
p                 340 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
p                 342 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
p                 343 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			p = (const struct ulptx_sge_pair *)&addr[2];
p                 347 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
p                 348 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
p                 350 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
p                 351 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			p = (const struct ulptx_sge_pair *)&addr[1];
p                 357 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		if ((u8 *)p == (u8 *)tq->stat)
p                 358 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			p = (const struct ulptx_sge_pair *)tq->desc;
p                 359 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		addr = ((u8 *)p + 16 <= (u8 *)tq->stat
p                 360 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			? p->addr[0]
p                 362 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
p                1055 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	u64 *p;
p                1071 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	p = PTR_ALIGN(pos, 8);
p                1072 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	if ((uintptr_t)p & 8)
p                1073 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		*p = 0;
p                 138 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	const __be64 *p;
p                 236 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	for (i = 0, p = cmd; i < size; i += 8)
p                 237 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
p                 628 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	struct fw_params_param *p;
p                 641 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
p                 642 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		p->mnem = htonl(*params++);
p                 646 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
p                 647 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 			*vals++ = be32_to_cpu(p->val);
p                 666 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	struct fw_params_param *p;
p                 679 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
p                 680 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		p->mnem = cpu_to_be32(*params++);
p                 681 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		p->val = cpu_to_be32(*vals++);
p                1546 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		struct fw_vi_mac_exact *p;
p                1559 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
p                1560 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 			p->valid_to_idx = cpu_to_be16(
p                1563 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
p                1572 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
p                1574 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 				be16_to_cpu(p->valid_to_idx));
p                1630 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		struct fw_vi_mac_exact *p;
p                1643 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		for (i = 0, p = cmd.u.exact; i < (int)fw_naddr; i++, p++) {
p                1644 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 			p->valid_to_idx = cpu_to_be16(
p                1647 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
p                1655 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
p                1657 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 						be16_to_cpu(p->valid_to_idx));
p                1695 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	struct fw_vi_mac_exact *p = &cmd.u.exact[0];
p                1713 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
p                1715 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
p                1719 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		p = &rpl.u.exact[0];
p                1720 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
p                1222 drivers/net/ethernet/cirrus/cs89x0.c static int set_mac_address(struct net_device *dev, void *p)
p                1225 drivers/net/ethernet/cirrus/cs89x0.c 	struct sockaddr *addr = p;
p                1025 drivers/net/ethernet/cisco/enic/enic_main.c static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
p                1028 drivers/net/ethernet/cisco/enic/enic_main.c 	struct sockaddr *saddr = p;
p                1051 drivers/net/ethernet/cisco/enic/enic_main.c static int enic_set_mac_address(struct net_device *netdev, void *p)
p                1053 drivers/net/ethernet/cisco/enic/enic_main.c 	struct sockaddr *saddr = p;
p                2038 drivers/net/ethernet/cortina/gemini.c 	u64 *p;
p                2045 drivers/net/ethernet/cortina/gemini.c 		p = values;
p                2049 drivers/net/ethernet/cortina/gemini.c 			*p++ = port->hw_stats[i];
p                2052 drivers/net/ethernet/cortina/gemini.c 	values = p;
p                2056 drivers/net/ethernet/cortina/gemini.c 		p = values;
p                2060 drivers/net/ethernet/cortina/gemini.c 			*p++ = port->rx_stats[i];
p                2062 drivers/net/ethernet/cortina/gemini.c 			*p++ = port->rx_csum_stats[i];
p                2063 drivers/net/ethernet/cortina/gemini.c 		*p++ = port->rx_napi_exits;
p                2066 drivers/net/ethernet/cortina/gemini.c 	values = p;
p                2070 drivers/net/ethernet/cortina/gemini.c 		p = values;
p                 969 drivers/net/ethernet/dec/tulip/de4x5.c static void    srom_exec(struct net_device *dev, u_char *p);
p                 996 drivers/net/ethernet/dec/tulip/de4x5.c static void    de4x5_dbg_srom(struct de4x5_srom *p);
p                1002 drivers/net/ethernet/dec/tulip/de4x5.c static int     type0_infoblock(struct net_device *dev, u_char count, u_char *p);
p                1003 drivers/net/ethernet/dec/tulip/de4x5.c static int     type1_infoblock(struct net_device *dev, u_char count, u_char *p);
p                1004 drivers/net/ethernet/dec/tulip/de4x5.c static int     type2_infoblock(struct net_device *dev, u_char count, u_char *p);
p                1005 drivers/net/ethernet/dec/tulip/de4x5.c static int     type3_infoblock(struct net_device *dev, u_char count, u_char *p);
p                1006 drivers/net/ethernet/dec/tulip/de4x5.c static int     type4_infoblock(struct net_device *dev, u_char count, u_char *p);
p                1007 drivers/net/ethernet/dec/tulip/de4x5.c static int     type5_infoblock(struct net_device *dev, u_char count, u_char *p);
p                1008 drivers/net/ethernet/dec/tulip/de4x5.c static int     compact_infoblock(struct net_device *dev, u_char count, u_char *p);
p                3599 drivers/net/ethernet/dec/tulip/de4x5.c     struct sk_buff *p;
p                3605 drivers/net/ethernet/dec/tulip/de4x5.c     p = netdev_alloc_skb(dev, IEEE802_3_SZ + DE4X5_ALIGN + 2);
p                3606 drivers/net/ethernet/dec/tulip/de4x5.c     if (!p) return NULL;
p                3608 drivers/net/ethernet/dec/tulip/de4x5.c     tmp = virt_to_bus(p->data);
p                3610 drivers/net/ethernet/dec/tulip/de4x5.c     skb_reserve(p, i);
p                3614 drivers/net/ethernet/dec/tulip/de4x5.c     lp->rx_skb[index] = p;
p                3625 drivers/net/ethernet/dec/tulip/de4x5.c     p = netdev_alloc_skb(dev, len + 2);
p                3626 drivers/net/ethernet/dec/tulip/de4x5.c     if (!p) return NULL;
p                3628 drivers/net/ethernet/dec/tulip/de4x5.c     skb_reserve(p, 2);	                               /* Align */
p                3631 drivers/net/ethernet/dec/tulip/de4x5.c 	skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, tlen);
p                3632 drivers/net/ethernet/dec/tulip/de4x5.c 	skb_put_data(p, lp->rx_bufs, len - tlen);
p                3634 drivers/net/ethernet/dec/tulip/de4x5.c 	skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, len);
p                3637 drivers/net/ethernet/dec/tulip/de4x5.c     return p;
p                3964 drivers/net/ethernet/dec/tulip/de4x5.c 	__le16 *p = (__le16 *)((char *)&lp->srom + SROM_HWADD);
p                3968 drivers/net/ethernet/dec/tulip/de4x5.c 	    *p = cpu_to_le16(tmp);
p                3975 drivers/net/ethernet/dec/tulip/de4x5.c 	p = (__le16 *)&lp->srom;
p                3978 drivers/net/ethernet/dec/tulip/de4x5.c 	    *p++ = cpu_to_le16(tmp);
p                4299 drivers/net/ethernet/dec/tulip/de4x5.c     u_char *p;
p                4316 drivers/net/ethernet/dec/tulip/de4x5.c     p  = (u_char *)&lp->srom + 26;
p                4319 drivers/net/ethernet/dec/tulip/de4x5.c 	for (i=count; i; --i, p+=3) {
p                4320 drivers/net/ethernet/dec/tulip/de4x5.c 	    if (lp->device == *p) break;
p                4330 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->infoleaf_offset = get_unaligned_le16(p + 1);
p                4346 drivers/net/ethernet/dec/tulip/de4x5.c     u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
p                4349 drivers/net/ethernet/dec/tulip/de4x5.c     p+=2;
p                4351 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->cache.gepc = (*p++ | GEP_CTRL);
p                4356 drivers/net/ethernet/dec/tulip/de4x5.c     count = *p++;
p                4360 drivers/net/ethernet/dec/tulip/de4x5.c 	if (*p < 128) {
p                4361 drivers/net/ethernet/dec/tulip/de4x5.c 	    p += COMPACT_LEN;
p                4362 drivers/net/ethernet/dec/tulip/de4x5.c 	} else if (*(p+1) == 5) {
p                4363 drivers/net/ethernet/dec/tulip/de4x5.c 	    type5_infoblock(dev, 1, p);
p                4364 drivers/net/ethernet/dec/tulip/de4x5.c 	    p += ((*p & BLOCK_LEN) + 1);
p                4365 drivers/net/ethernet/dec/tulip/de4x5.c 	} else if (*(p+1) == 4) {
p                4366 drivers/net/ethernet/dec/tulip/de4x5.c 	    p += ((*p & BLOCK_LEN) + 1);
p                4367 drivers/net/ethernet/dec/tulip/de4x5.c 	} else if (*(p+1) == 3) {
p                4368 drivers/net/ethernet/dec/tulip/de4x5.c 	    type3_infoblock(dev, 1, p);
p                4369 drivers/net/ethernet/dec/tulip/de4x5.c 	    p += ((*p & BLOCK_LEN) + 1);
p                4370 drivers/net/ethernet/dec/tulip/de4x5.c 	} else if (*(p+1) == 2) {
p                4371 drivers/net/ethernet/dec/tulip/de4x5.c 	    p += ((*p & BLOCK_LEN) + 1);
p                4372 drivers/net/ethernet/dec/tulip/de4x5.c 	} else if (*(p+1) == 1) {
p                4373 drivers/net/ethernet/dec/tulip/de4x5.c 	    type1_infoblock(dev, 1, p);
p                4374 drivers/net/ethernet/dec/tulip/de4x5.c 	    p += ((*p & BLOCK_LEN) + 1);
p                4376 drivers/net/ethernet/dec/tulip/de4x5.c 	    p += ((*p & BLOCK_LEN) + 1);
p                4386 drivers/net/ethernet/dec/tulip/de4x5.c srom_exec(struct net_device *dev, u_char *p)
p                4390 drivers/net/ethernet/dec/tulip/de4x5.c     u_char count = (p ? *p++ : 0);
p                4391 drivers/net/ethernet/dec/tulip/de4x5.c     u_short *w = (u_short *)p;
p                4399 drivers/net/ethernet/dec/tulip/de4x5.c 		                                   *p++ : get_unaligned_le16(w++)), dev);
p                4425 drivers/net/ethernet/dec/tulip/de4x5.c     u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
p                4429 drivers/net/ethernet/dec/tulip/de4x5.c     p+=2;
p                4432 drivers/net/ethernet/dec/tulip/de4x5.c     lp->cache.gepc = (*p++ | GEP_CTRL);
p                4435 drivers/net/ethernet/dec/tulip/de4x5.c     count = *p++;
p                4438 drivers/net/ethernet/dec/tulip/de4x5.c     if (*p < 128) {
p                4439 drivers/net/ethernet/dec/tulip/de4x5.c 	next_tick = dc_infoblock[COMPACT](dev, count, p);
p                4441 drivers/net/ethernet/dec/tulip/de4x5.c 	next_tick = dc_infoblock[*(p+1)](dev, count, p);
p                4463 drivers/net/ethernet/dec/tulip/de4x5.c     u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
p                4467 drivers/net/ethernet/dec/tulip/de4x5.c     p+=2;
p                4470 drivers/net/ethernet/dec/tulip/de4x5.c     count = *p++;
p                4473 drivers/net/ethernet/dec/tulip/de4x5.c     if (*p < 128) {
p                4474 drivers/net/ethernet/dec/tulip/de4x5.c 	next_tick = dc_infoblock[COMPACT](dev, count, p);
p                4476 drivers/net/ethernet/dec/tulip/de4x5.c 	next_tick = dc_infoblock[*(p+1)](dev, count, p);
p                4498 drivers/net/ethernet/dec/tulip/de4x5.c     u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
p                4502 drivers/net/ethernet/dec/tulip/de4x5.c     p+=2;
p                4505 drivers/net/ethernet/dec/tulip/de4x5.c     count = *p++;
p                4508 drivers/net/ethernet/dec/tulip/de4x5.c     if (*p < 128) {
p                4509 drivers/net/ethernet/dec/tulip/de4x5.c 	next_tick = dc_infoblock[COMPACT](dev, count, p);
p                4511 drivers/net/ethernet/dec/tulip/de4x5.c 	next_tick = dc_infoblock[*(p+1)](dev, count, p);
p                4532 drivers/net/ethernet/dec/tulip/de4x5.c compact_infoblock(struct net_device *dev, u_char count, u_char *p)
p                4539 drivers/net/ethernet/dec/tulip/de4x5.c 	if (*(p+COMPACT_LEN) < 128) {
p                4540 drivers/net/ethernet/dec/tulip/de4x5.c 	    return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN);
p                4542 drivers/net/ethernet/dec/tulip/de4x5.c 	    return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN);
p                4550 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->infoblock_media = (*p++) & COMPACT_MC;
p                4551 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->cache.gep = *p++;
p                4552 drivers/net/ethernet/dec/tulip/de4x5.c 	csr6 = *p++;
p                4553 drivers/net/ethernet/dec/tulip/de4x5.c 	flags = *p++;
p                4572 drivers/net/ethernet/dec/tulip/de4x5.c type0_infoblock(struct net_device *dev, u_char count, u_char *p)
p                4575 drivers/net/ethernet/dec/tulip/de4x5.c     u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
p                4579 drivers/net/ethernet/dec/tulip/de4x5.c 	if (*(p+len) < 128) {
p                4580 drivers/net/ethernet/dec/tulip/de4x5.c 	    return dc_infoblock[COMPACT](dev, count, p+len);
p                4582 drivers/net/ethernet/dec/tulip/de4x5.c 	    return dc_infoblock[*(p+len+1)](dev, count, p+len);
p                4590 drivers/net/ethernet/dec/tulip/de4x5.c 	p+=2;
p                4591 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->infoblock_media = (*p++) & BLOCK0_MC;
p                4592 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->cache.gep = *p++;
p                4593 drivers/net/ethernet/dec/tulip/de4x5.c 	csr6 = *p++;
p                4594 drivers/net/ethernet/dec/tulip/de4x5.c 	flags = *p++;
p                4612 drivers/net/ethernet/dec/tulip/de4x5.c type1_infoblock(struct net_device *dev, u_char count, u_char *p)
p                4615 drivers/net/ethernet/dec/tulip/de4x5.c     u_char len = (*p & BLOCK_LEN)+1;
p                4619 drivers/net/ethernet/dec/tulip/de4x5.c 	if (*(p+len) < 128) {
p                4620 drivers/net/ethernet/dec/tulip/de4x5.c 	    return dc_infoblock[COMPACT](dev, count, p+len);
p                4622 drivers/net/ethernet/dec/tulip/de4x5.c 	    return dc_infoblock[*(p+len+1)](dev, count, p+len);
p                4626 drivers/net/ethernet/dec/tulip/de4x5.c     p += 2;
p                4629 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->active = *p++;
p                4630 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1);
p                4631 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1);
p                4632 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->phy[lp->active].mc  = get_unaligned_le16(p); p += 2;
p                4633 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
p                4634 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
p                4635 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->phy[lp->active].ttm = get_unaligned_le16(p);
p                4639 drivers/net/ethernet/dec/tulip/de4x5.c         lp->active = *p;
p                4651 drivers/net/ethernet/dec/tulip/de4x5.c type2_infoblock(struct net_device *dev, u_char count, u_char *p)
p                4654 drivers/net/ethernet/dec/tulip/de4x5.c     u_char len = (*p & BLOCK_LEN)+1;
p                4658 drivers/net/ethernet/dec/tulip/de4x5.c 	if (*(p+len) < 128) {
p                4659 drivers/net/ethernet/dec/tulip/de4x5.c 	    return dc_infoblock[COMPACT](dev, count, p+len);
p                4661 drivers/net/ethernet/dec/tulip/de4x5.c 	    return dc_infoblock[*(p+len+1)](dev, count, p+len);
p                4668 drivers/net/ethernet/dec/tulip/de4x5.c 	p += 2;
p                4669 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->infoblock_media = (*p) & MEDIA_CODE;
p                4671 drivers/net/ethernet/dec/tulip/de4x5.c         if ((*p++) & EXT_FIELD) {
p                4672 drivers/net/ethernet/dec/tulip/de4x5.c 	    lp->cache.csr13 = get_unaligned_le16(p); p += 2;
p                4673 drivers/net/ethernet/dec/tulip/de4x5.c 	    lp->cache.csr14 = get_unaligned_le16(p); p += 2;
p                4674 drivers/net/ethernet/dec/tulip/de4x5.c 	    lp->cache.csr15 = get_unaligned_le16(p); p += 2;
p                4680 drivers/net/ethernet/dec/tulip/de4x5.c         lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
p                4681 drivers/net/ethernet/dec/tulip/de4x5.c         lp->cache.gep  = ((s32)(get_unaligned_le16(p)) << 16);
p                4692 drivers/net/ethernet/dec/tulip/de4x5.c type3_infoblock(struct net_device *dev, u_char count, u_char *p)
p                4695 drivers/net/ethernet/dec/tulip/de4x5.c     u_char len = (*p & BLOCK_LEN)+1;
p                4699 drivers/net/ethernet/dec/tulip/de4x5.c 	if (*(p+len) < 128) {
p                4700 drivers/net/ethernet/dec/tulip/de4x5.c 	    return dc_infoblock[COMPACT](dev, count, p+len);
p                4702 drivers/net/ethernet/dec/tulip/de4x5.c 	    return dc_infoblock[*(p+len+1)](dev, count, p+len);
p                4706 drivers/net/ethernet/dec/tulip/de4x5.c     p += 2;
p                4709 drivers/net/ethernet/dec/tulip/de4x5.c         lp->active = *p++;
p                4711 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
p                4712 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
p                4713 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->phy[lp->active].mc  = get_unaligned_le16(p); p += 2;
p                4714 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
p                4715 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
p                4716 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2;
p                4717 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->phy[lp->active].mci = *p;
p                4721 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->active = *p;
p                4734 drivers/net/ethernet/dec/tulip/de4x5.c type4_infoblock(struct net_device *dev, u_char count, u_char *p)
p                4737 drivers/net/ethernet/dec/tulip/de4x5.c     u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
p                4741 drivers/net/ethernet/dec/tulip/de4x5.c 	if (*(p+len) < 128) {
p                4742 drivers/net/ethernet/dec/tulip/de4x5.c 	    return dc_infoblock[COMPACT](dev, count, p+len);
p                4744 drivers/net/ethernet/dec/tulip/de4x5.c 	    return dc_infoblock[*(p+len+1)](dev, count, p+len);
p                4751 drivers/net/ethernet/dec/tulip/de4x5.c 	p+=2;
p                4752 drivers/net/ethernet/dec/tulip/de4x5.c 	lp->infoblock_media = (*p++) & MEDIA_CODE;
p                4756 drivers/net/ethernet/dec/tulip/de4x5.c         lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
p                4757 drivers/net/ethernet/dec/tulip/de4x5.c         lp->cache.gep  = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
p                4758 drivers/net/ethernet/dec/tulip/de4x5.c 	csr6 = *p++;
p                4759 drivers/net/ethernet/dec/tulip/de4x5.c 	flags = *p++;
p                4779 drivers/net/ethernet/dec/tulip/de4x5.c type5_infoblock(struct net_device *dev, u_char count, u_char *p)
p                4782 drivers/net/ethernet/dec/tulip/de4x5.c     u_char len = (*p & BLOCK_LEN)+1;
p                4786 drivers/net/ethernet/dec/tulip/de4x5.c 	if (*(p+len) < 128) {
p                4787 drivers/net/ethernet/dec/tulip/de4x5.c 	    return dc_infoblock[COMPACT](dev, count, p+len);
p                4789 drivers/net/ethernet/dec/tulip/de4x5.c 	    return dc_infoblock[*(p+len+1)](dev, count, p+len);
p                4795 drivers/net/ethernet/dec/tulip/de4x5.c 	p+=2;
p                4796 drivers/net/ethernet/dec/tulip/de4x5.c         lp->rst = p;
p                5183 drivers/net/ethernet/dec/tulip/de4x5.c     char *p, *q, t;
p                5190 drivers/net/ethernet/dec/tulip/de4x5.c     if ((p = strstr(args, dev->name))) {
p                5191 drivers/net/ethernet/dec/tulip/de4x5.c 	if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p);
p                5195 drivers/net/ethernet/dec/tulip/de4x5.c 	if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
p                5197 drivers/net/ethernet/dec/tulip/de4x5.c 	if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
p                5198 drivers/net/ethernet/dec/tulip/de4x5.c 	    if (strstr(p, "TP_NW")) {
p                5200 drivers/net/ethernet/dec/tulip/de4x5.c 	    } else if (strstr(p, "TP")) {
p                5202 drivers/net/ethernet/dec/tulip/de4x5.c 	    } else if (strstr(p, "BNC_AUI")) {
p                5204 drivers/net/ethernet/dec/tulip/de4x5.c 	    } else if (strstr(p, "BNC")) {
p                5206 drivers/net/ethernet/dec/tulip/de4x5.c 	    } else if (strstr(p, "AUI")) {
p                5208 drivers/net/ethernet/dec/tulip/de4x5.c 	    } else if (strstr(p, "10Mb")) {
p                5210 drivers/net/ethernet/dec/tulip/de4x5.c 	    } else if (strstr(p, "100Mb")) {
p                5212 drivers/net/ethernet/dec/tulip/de4x5.c 	    } else if (strstr(p, "AUTO")) {
p                5316 drivers/net/ethernet/dec/tulip/de4x5.c de4x5_dbg_srom(struct de4x5_srom *p)
p                5321 drivers/net/ethernet/dec/tulip/de4x5.c 	printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id));
p                5322 drivers/net/ethernet/dec/tulip/de4x5.c 	printk("Sub-system ID:        %04x\n", *((u_short *)p->sub_system_id));
p                5323 drivers/net/ethernet/dec/tulip/de4x5.c 	printk("ID Block CRC:         %02x\n", (u_char)(p->id_block_crc));
p                5324 drivers/net/ethernet/dec/tulip/de4x5.c 	printk("SROM version:         %02x\n", (u_char)(p->version));
p                5325 drivers/net/ethernet/dec/tulip/de4x5.c 	printk("# controllers:        %02x\n", (u_char)(p->num_controllers));
p                5327 drivers/net/ethernet/dec/tulip/de4x5.c 	printk("Hardware Address:     %pM\n", p->ieee_addr);
p                5328 drivers/net/ethernet/dec/tulip/de4x5.c 	printk("CRC checksum:         %04x\n", (u_short)(p->chksum));
p                5330 drivers/net/ethernet/dec/tulip/de4x5.c 	    printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
p                 208 drivers/net/ethernet/dec/tulip/eeprom.c 		unsigned char *p = (void *)ee_data + ee_data[27];
p                 212 drivers/net/ethernet/dec/tulip/eeprom.c 		u16 media = get_u16(p);
p                 214 drivers/net/ethernet/dec/tulip/eeprom.c 		p += 2;
p                 216 drivers/net/ethernet/dec/tulip/eeprom.c 			csr12dir = *p++;
p                 217 drivers/net/ethernet/dec/tulip/eeprom.c 		count = *p++;
p                 244 drivers/net/ethernet/dec/tulip/eeprom.c 			if ((p[0] & 0x80) == 0) { /* 21140 Compact block. */
p                 246 drivers/net/ethernet/dec/tulip/eeprom.c 				leaf->media = p[0] & 0x3f;
p                 247 drivers/net/ethernet/dec/tulip/eeprom.c 				leaf->leafdata = p;
p                 248 drivers/net/ethernet/dec/tulip/eeprom.c 				if ((p[2] & 0x61) == 0x01)	/* Bogus, but Znyx boards do it. */
p                 250 drivers/net/ethernet/dec/tulip/eeprom.c 				p += 4;
p                 252 drivers/net/ethernet/dec/tulip/eeprom.c 				leaf->type = p[1];
p                 253 drivers/net/ethernet/dec/tulip/eeprom.c 				if (p[1] == 0x05) {
p                 255 drivers/net/ethernet/dec/tulip/eeprom.c 					leaf->media = p[2] & 0x0f;
p                 256 drivers/net/ethernet/dec/tulip/eeprom.c 				} else if (tp->chip_id == DM910X && p[1] == 0x80) {
p                 261 drivers/net/ethernet/dec/tulip/eeprom.c 					leaf->leafdata = p + 2;
p                 262 drivers/net/ethernet/dec/tulip/eeprom.c 					p += (p[0] & 0x3f) + 1;
p                 264 drivers/net/ethernet/dec/tulip/eeprom.c 				} else if (p[1] & 1) {
p                 269 drivers/net/ethernet/dec/tulip/eeprom.c 					gpr_len=p[3]*2;
p                 270 drivers/net/ethernet/dec/tulip/eeprom.c 					reset_len=p[4+gpr_len]*2;
p                 271 drivers/net/ethernet/dec/tulip/eeprom.c 					new_advertise |= get_u16(&p[7+gpr_len+reset_len]);
p                 274 drivers/net/ethernet/dec/tulip/eeprom.c 					leaf->media = p[2] & MEDIA_MASK;
p                 285 drivers/net/ethernet/dec/tulip/eeprom.c 					if (p[1] == 2  &&  leaf->media == 0) {
p                 286 drivers/net/ethernet/dec/tulip/eeprom.c 						if (p[2] & 0x40) {
p                 287 drivers/net/ethernet/dec/tulip/eeprom.c 							u32 base15 = get_unaligned((u16*)&p[7]);
p                 289 drivers/net/ethernet/dec/tulip/eeprom.c 								(get_unaligned((u16*)&p[9])<<16) + base15;
p                 291 drivers/net/ethernet/dec/tulip/eeprom.c 								(get_unaligned((u16*)&p[11])<<16) + base15;
p                 293 drivers/net/ethernet/dec/tulip/eeprom.c 							mtable->csr15dir = get_unaligned((u16*)&p[3])<<16;
p                 294 drivers/net/ethernet/dec/tulip/eeprom.c 							mtable->csr15val = get_unaligned((u16*)&p[5])<<16;
p                 298 drivers/net/ethernet/dec/tulip/eeprom.c 				leaf->leafdata = p + 2;
p                 299 drivers/net/ethernet/dec/tulip/eeprom.c 				p += (p[0] & 0x3f) + 1;
p                 177 drivers/net/ethernet/dec/tulip/media.c 		unsigned char *p = mleaf->leafdata;
p                 182 drivers/net/ethernet/dec/tulip/media.c 					   p[1]);
p                 183 drivers/net/ethernet/dec/tulip/media.c 			dev->if_port = p[0];
p                 186 drivers/net/ethernet/dec/tulip/media.c 			iowrite32(p[1], ioaddr + CSR12);
p                 187 drivers/net/ethernet/dec/tulip/media.c 			new_csr6 = 0x02000000 | ((p[2] & 0x71) << 18);
p                 193 drivers/net/ethernet/dec/tulip/media.c 				setup[i] = get_u16(&p[i*2 + 1]);
p                 195 drivers/net/ethernet/dec/tulip/media.c 			dev->if_port = p[0] & MEDIA_MASK;
p                 211 drivers/net/ethernet/dec/tulip/media.c 			if (p[0] & 0x40) {	/* SIA (CSR13-15) setup values are provided. */
p                 246 drivers/net/ethernet/dec/tulip/media.c 			int phy_num = p[0];
p                 247 drivers/net/ethernet/dec/tulip/media.c 			int init_length = p[1];
p                 253 drivers/net/ethernet/dec/tulip/media.c 				u16 *init_sequence = (u16*)(p+2);
p                 254 drivers/net/ethernet/dec/tulip/media.c 				u16 *reset_sequence = &((u16*)(p+3))[init_length];
p                 255 drivers/net/ethernet/dec/tulip/media.c 				int reset_length = p[2 + init_length*2];
p                 279 drivers/net/ethernet/dec/tulip/media.c 				u8 *init_sequence = p + 2;
p                 280 drivers/net/ethernet/dec/tulip/media.c 				u8 *reset_sequence = p + 3 + init_length;
p                 281 drivers/net/ethernet/dec/tulip/media.c 				int reset_length = p[2 + init_length];
p                 327 drivers/net/ethernet/dec/tulip/media.c 				setup[i] = get_u16(&p[i*2 + 1]);
p                  42 drivers/net/ethernet/dec/tulip/timer.c 		unsigned char *p;
p                  54 drivers/net/ethernet/dec/tulip/timer.c 		p = mleaf->leafdata;
p                  59 drivers/net/ethernet/dec/tulip/timer.c 			s8 bitnum = p[offset];
p                  60 drivers/net/ethernet/dec/tulip/timer.c 			if (p[offset+1] & 0x80) {
p                  81 drivers/net/ethernet/dec/tulip/timer.c 				if ((p[2] & 0x61) == 0x01)	/* Bogus Znyx board. */
p                 342 drivers/net/ethernet/dnet.c 	u32 *p = &bp->hw_stats.rx_pkt_ignr;
p                 345 drivers/net/ethernet/dnet.c 	WARN_ON((unsigned long)(end - p - 1) !=
p                 348 drivers/net/ethernet/dnet.c 	for (; p < end; p++, reg++)
p                 349 drivers/net/ethernet/dnet.c 		*p += readl(reg);
p                 352 drivers/net/ethernet/dnet.c 	p = &bp->hw_stats.tx_unicast;
p                 355 drivers/net/ethernet/dnet.c 	WARN_ON((unsigned long)(end - p - 1) !=
p                 358 drivers/net/ethernet/dnet.c 	for (; p < end; p++, reg++)
p                 359 drivers/net/ethernet/dnet.c 		*p += readl(reg);
p                2649 drivers/net/ethernet/emulex/benet/be_cmds.c 	const u8 *p = fw->data;
p                2651 drivers/net/ethernet/emulex/benet/be_cmds.c 	p += header_size;
p                2652 drivers/net/ethernet/emulex/benet/be_cmds.c 	while (p < (fw->data + fw->size)) {
p                2653 drivers/net/ethernet/emulex/benet/be_cmds.c 		fsec = (struct flash_section_info *)p;
p                2656 drivers/net/ethernet/emulex/benet/be_cmds.c 		p += 32;
p                2661 drivers/net/ethernet/emulex/benet/be_cmds.c static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
p                2677 drivers/net/ethernet/emulex/benet/be_cmds.c 	if (!memcmp(crc, p + crc_offset, 4))
p                2757 drivers/net/ethernet/emulex/benet/be_cmds.c 	const u8 *p;
p                2850 drivers/net/ethernet/emulex/benet/be_cmds.c 		p = fw->data + filehdr_size + pflashcomp[i].offset +
p                2852 drivers/net/ethernet/emulex/benet/be_cmds.c 		if (p + pflashcomp[i].size > fw->data + fw->size)
p                2855 drivers/net/ethernet/emulex/benet/be_cmds.c 		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
p                2929 drivers/net/ethernet/emulex/benet/be_cmds.c 	const u8 *p;
p                2995 drivers/net/ethernet/emulex/benet/be_cmds.c 		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
p                2996 drivers/net/ethernet/emulex/benet/be_cmds.c 		if (p + img_size > fw->data + fw->size)
p                2999 drivers/net/ethernet/emulex/benet/be_cmds.c 		status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
p                 376 drivers/net/ethernet/emulex/benet/be_ethtool.c 	void *p;
p                 380 drivers/net/ethernet/emulex/benet/be_ethtool.c 		p = (u8 *)&adapter->drv_stats + et_stats[i].offset;
p                 381 drivers/net/ethernet/emulex/benet/be_ethtool.c 		data[i] = *(u32 *)p;
p                 395 drivers/net/ethernet/emulex/benet/be_ethtool.c 			p = (u8 *)stats + et_rx_stats[i].offset;
p                 396 drivers/net/ethernet/emulex/benet/be_ethtool.c 			data[base + i] = *(u32 *)p;
p                 412 drivers/net/ethernet/emulex/benet/be_ethtool.c 				p = (u8 *)stats + et_tx_stats[i].offset;
p                 415 drivers/net/ethernet/emulex/benet/be_ethtool.c 						*(u64 *)p : *(u32 *)p;
p                 307 drivers/net/ethernet/emulex/benet/be_main.c static int be_mac_addr_set(struct net_device *netdev, void *p)
p                 311 drivers/net/ethernet/emulex/benet/be_main.c 	struct sockaddr *addr = p;
p                 813 drivers/net/ethernet/ethoc.c static int ethoc_set_mac_address(struct net_device *dev, void *p)
p                 815 drivers/net/ethernet/ethoc.c 	const struct sockaddr *addr = p;
p                 936 drivers/net/ethernet/ethoc.c 			   void *p)
p                 939 drivers/net/ethernet/ethoc.c 	u32 *regs_buff = p;
p                 414 drivers/net/ethernet/ezchip/nps_enet.c static s32 nps_enet_set_mac_address(struct net_device *ndev, void *p)
p                 416 drivers/net/ethernet/ezchip/nps_enet.c 	struct sockaddr *addr = p;
p                 422 drivers/net/ethernet/ezchip/nps_enet.c 	res = eth_mac_addr(ndev, p);
p                 213 drivers/net/ethernet/faraday/ftgmac100.c static int ftgmac100_set_mac_addr(struct net_device *dev, void *p)
p                 217 drivers/net/ethernet/faraday/ftgmac100.c 	ret = eth_prepare_mac_addr_change(dev, p);
p                 221 drivers/net/ethernet/faraday/ftgmac100.c 	eth_commit_mac_addr_change(dev, p);
p                2177 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	int cleaned = qman_p_poll_dqrr(np->p, budget);
p                2181 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
p                2183 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
p                2220 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		percpu_priv->np.p = portal;
p                 127 drivers/net/ethernet/freescale/dpaa/dpaa_eth.h 	struct qman_portal *p;
p                 152 drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c 	u8 *p = data;
p                 158 drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c 			strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
p                 159 drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c 			p += ETH_GSTRING_LEN;
p                 162 drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c 			strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
p                 163 drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c 			p += ETH_GSTRING_LEN;
p                1709 drivers/net/ethernet/freescale/enetc/enetc.c 	char *p = (char *)si - si->pad;
p                1711 drivers/net/ethernet/freescale/enetc/enetc.c 	kfree(p);
p                1723 drivers/net/ethernet/freescale/enetc/enetc.c 	struct enetc_si *si, *p;
p                1763 drivers/net/ethernet/freescale/enetc/enetc.c 	p = kzalloc(alloc_size, GFP_KERNEL);
p                1764 drivers/net/ethernet/freescale/enetc/enetc.c 	if (!p) {
p                1769 drivers/net/ethernet/freescale/enetc/enetc.c 	si = PTR_ALIGN(p, ENETC_SI_ALIGN);
p                1770 drivers/net/ethernet/freescale/enetc/enetc.c 	si->pad = (char *)si - (char *)p;
p                 212 drivers/net/ethernet/freescale/enetc/enetc_ethtool.c 	u8 *p = data;
p                 218 drivers/net/ethernet/freescale/enetc/enetc_ethtool.c 			strlcpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
p                 219 drivers/net/ethernet/freescale/enetc/enetc_ethtool.c 			p += ETH_GSTRING_LEN;
p                 223 drivers/net/ethernet/freescale/enetc/enetc_ethtool.c 				snprintf(p, ETH_GSTRING_LEN, tx_ring_stats[j],
p                 225 drivers/net/ethernet/freescale/enetc/enetc_ethtool.c 				p += ETH_GSTRING_LEN;
p                 230 drivers/net/ethernet/freescale/enetc/enetc_ethtool.c 				snprintf(p, ETH_GSTRING_LEN, rx_ring_stats[j],
p                 232 drivers/net/ethernet/freescale/enetc/enetc_ethtool.c 				p += ETH_GSTRING_LEN;
p                 240 drivers/net/ethernet/freescale/enetc/enetc_ethtool.c 			strlcpy(p, enetc_port_counters[i].name,
p                 242 drivers/net/ethernet/freescale/enetc/enetc_ethtool.c 			p += ETH_GSTRING_LEN;
p                3112 drivers/net/ethernet/freescale/fec_main.c fec_set_mac_address(struct net_device *ndev, void *p)
p                3115 drivers/net/ethernet/freescale/fec_main.c 	struct sockaddr *addr = p;
p                 804 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 			 void *p)
p                 813 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	r = (*fep->ops->get_regs)(dev, p, &len);
p                  93 drivers/net/ethernet/freescale/fs_enet/fs_enet.h 	int (*get_regs)(struct net_device *dev, void *p, int *sizep);
p                 483 drivers/net/ethernet/freescale/fs_enet/mac-fcc.c static int get_regs(struct net_device *dev, void *p, int *sizep)
p                 490 drivers/net/ethernet/freescale/fs_enet/mac-fcc.c 	memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t));
p                 491 drivers/net/ethernet/freescale/fs_enet/mac-fcc.c 	p = (char *)p + sizeof(fcc_t);
p                 493 drivers/net/ethernet/freescale/fs_enet/mac-fcc.c 	memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t));
p                 494 drivers/net/ethernet/freescale/fs_enet/mac-fcc.c 	p = (char *)p + sizeof(fcc_enet_t);
p                 496 drivers/net/ethernet/freescale/fs_enet/mac-fcc.c 	memcpy_fromio(p, fep->fcc.fcccp, 1);
p                 442 drivers/net/ethernet/freescale/fs_enet/mac-fec.c static int get_regs(struct net_device *dev, void *p, int *sizep)
p                 449 drivers/net/ethernet/freescale/fs_enet/mac-fec.c 	memcpy_fromio(p, fep->fec.fecp, sizeof(struct fec));
p                 429 drivers/net/ethernet/freescale/fs_enet/mac-scc.c static int get_regs(struct net_device *dev, void *p, int *sizep)
p                 436 drivers/net/ethernet/freescale/fs_enet/mac-scc.c 	memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
p                 437 drivers/net/ethernet/freescale/fs_enet/mac-scc.c 	p = (char *)p + sizeof(scc_t);
p                 439 drivers/net/ethernet/freescale/fs_enet/mac-scc.c 	memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *));
p                  43 drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c static inline void bb_set(u32 __iomem *p, u32 m)
p                  45 drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c 	out_be32(p, in_be32(p) | m);
p                  48 drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c static inline void bb_clr(u32 __iomem *p, u32 m)
p                  50 drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c 	out_be32(p, in_be32(p) & ~m);
p                  53 drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c static inline int bb_read(u32 __iomem *p, u32 m)
p                  55 drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c 	return (in_be32(p) & m) != 0;
p                  83 drivers/net/ethernet/freescale/fsl_pq_mdio.c 	uint32_t __iomem * (*get_tbipa)(void __iomem *p);
p                 201 drivers/net/ethernet/freescale/fsl_pq_mdio.c static uint32_t __iomem *get_gfar_tbipa_from_mdio(void __iomem *p)
p                 203 drivers/net/ethernet/freescale/fsl_pq_mdio.c 	struct gfar __iomem *enet_regs = p;
p                 212 drivers/net/ethernet/freescale/fsl_pq_mdio.c static uint32_t __iomem *get_gfar_tbipa_from_mii(void __iomem *p)
p                 214 drivers/net/ethernet/freescale/fsl_pq_mdio.c 	return get_gfar_tbipa_from_mdio(container_of(p, struct gfar, gfar_mii_regs));
p                 220 drivers/net/ethernet/freescale/fsl_pq_mdio.c static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
p                 222 drivers/net/ethernet/freescale/fsl_pq_mdio.c 	return p;
p                 231 drivers/net/ethernet/freescale/fsl_pq_mdio.c static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
p                 233 drivers/net/ethernet/freescale/fsl_pq_mdio.c 	struct fsl_pq_mdio __iomem *mdio = container_of(p, struct fsl_pq_mdio, mii);
p                 367 drivers/net/ethernet/freescale/gianfar.c static int gfar_set_mac_addr(struct net_device *dev, void *p)
p                 369 drivers/net/ethernet/freescale/gianfar.c 	eth_mac_addr(dev, p);
p                3364 drivers/net/ethernet/freescale/ucc_geth.c static int ucc_geth_set_mac_addr(struct net_device *dev, void *p)
p                3367 drivers/net/ethernet/freescale/ucc_geth.c 	struct sockaddr *addr = p;
p                 197 drivers/net/ethernet/freescale/ucc_geth_ethtool.c                struct ethtool_regs *regs, void *p)
p                 202 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	u32 *buff = p;
p                 550 drivers/net/ethernet/hisilicon/hisi_femac.c static int hisi_femac_set_mac_address(struct net_device *dev, void *p)
p                 553 drivers/net/ethernet/hisilicon/hisi_femac.c 	struct sockaddr *skaddr = p;
p                 442 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c static int hix5hd2_net_set_mac_address(struct net_device *dev, void *p)
p                 446 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c 	ret = eth_mac_addr(dev, p);
p                  39 drivers/net/ethernet/hisilicon/hns/hnae.c 	struct page *p = dev_alloc_pages(order);
p                  41 drivers/net/ethernet/hisilicon/hns/hnae.c 	if (!p)
p                  44 drivers/net/ethernet/hisilicon/hns/hnae.c 	cb->priv = p;
p                  47 drivers/net/ethernet/hisilicon/hns/hnae.c 	cb->buf  = page_address(p);
p                 300 drivers/net/ethernet/hisilicon/hns/hnae.h #define ring_ptr_move_fw(ring, p) \
p                 301 drivers/net/ethernet/hisilicon/hns/hnae.h 	((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
p                 302 drivers/net/ethernet/hisilicon/hns/hnae.h #define ring_ptr_move_bw(ring, p) \
p                 303 drivers/net/ethernet/hisilicon/hns/hnae.h 	((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
p                 507 drivers/net/ethernet/hisilicon/hns/hnae.h 	int (*get_mac_addr)(struct hnae_handle *handle, void **p);
p                 508 drivers/net/ethernet/hisilicon/hns/hnae.h 	int (*set_mac_addr)(struct hnae_handle *handle, void *p);
p                 212 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c static int hns_ae_set_mac_address(struct hnae_handle *handle, void *p)
p                 217 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	if (!p || !is_valid_ether_addr((const u8 *)p)) {
p                 222 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	ret = hns_mac_change_vf_addr(mac_cb, handle->vf_id, p);
p                 729 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	u64 *p = data;
p                 742 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		hns_rcb_get_stats(handle->qs[idx], p);
p                 743 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		p += hns_rcb_get_ring_sset_count((int)ETH_SS_STATS);
p                 746 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	hns_ppe_get_stats(ppe_cb, p);
p                 747 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	p += hns_ppe_get_sset_count((int)ETH_SS_STATS);
p                 749 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	hns_mac_get_stats(mac_cb, p);
p                 750 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	p += hns_mac_get_sset_count(mac_cb, (int)ETH_SS_STATS);
p                 753 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index);
p                 764 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	u8 *p = data;
p                 775 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		hns_rcb_get_strings(stringset, p, idx);
p                 776 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		p += ETH_GSTRING_LEN * hns_rcb_get_ring_sset_count(stringset);
p                 779 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	hns_ppe_get_strings(ppe_cb, stringset, p);
p                 780 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	p += ETH_GSTRING_LEN * hns_ppe_get_sset_count(stringset);
p                 782 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	hns_mac_get_strings(mac_cb, stringset, p);
p                 783 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset);
p                 786 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		hns_dsaf_get_strings(stringset, p, port, dsaf_dev);
p                 861 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	u32 *p = data;
p                 866 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	hns_ppe_get_regs(ppe_cb, p);
p                 867 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	p += hns_ppe_get_regs_count();
p                 869 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[0], p);
p                 870 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	p += hns_rcb_get_common_regs_count();
p                 873 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		hns_rcb_get_ring_regs(handle->qs[i], p);
p                 874 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		p += hns_rcb_get_ring_regs_count();
p                 877 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	hns_mac_get_regs(vf_cb->mac_cb, p);
p                 878 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	p += hns_mac_get_regs_count(vf_cb->mac_cb);
p                 881 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		hns_dsaf_get_regs(vf_cb->dsaf_dev, vf_cb->port_index, p);
p                  38 drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h #define MAC_IS_BROADCAST(p)	((*(p) == 0xff) && (*((p) + 1) == 0xff) && \
p                  39 drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h 		(*((p) + 2) == 0xff) &&  (*((p) + 3) == 0xff)  && \
p                  40 drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h 		(*((p) + 4) == 0xff) && (*((p) + 5) == 0xff))
p                  43 drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h #define MAC_IS_L3_MULTICAST(p) ((*((p) + 0) == 0x01) && \
p                  44 drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h 			(*((p) + 1) == 0x00)   && \
p                  45 drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h 			(*((p) + 2) == 0x5e))
p                  48 drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h #define MAC_IS_ALL_ZEROS(p)   ((*(p) == 0) && (*((p) + 1) == 0) && \
p                  49 drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h 	(*((p) + 2) == 0) && (*((p) + 3) == 0) && \
p                  50 drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h 	(*((p) + 4) == 0) && (*((p) + 5) == 0))
p                  53 drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h #define MAC_IS_MULTICAST(p)	((*((u8 *)((p) + 0)) & 0x01) ? (1) : (0))
p                2263 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	u32 *p = data;
p                2268 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[0] = dsaf_read_dev(ddev, DSAF_SRAM_INIT_OVER_0_REG);
p                2269 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[1] = dsaf_read_dev(ddev, DSAF_CFG_0_REG);
p                2270 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[2] = dsaf_read_dev(ddev, DSAF_ECC_ERR_INVERT_0_REG);
p                2271 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[3] = dsaf_read_dev(ddev, DSAF_ABNORMAL_TIMEOUT_0_REG);
p                2272 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[4] = dsaf_read_dev(ddev, DSAF_FSM_TIMEOUT_0_REG);
p                2273 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[5] = dsaf_read_dev(ddev, DSAF_DSA_REG_CNT_CLR_CE_REG);
p                2274 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[6] = dsaf_read_dev(ddev, DSAF_DSA_SBM_INF_FIFO_THRD_REG);
p                2275 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[7] = dsaf_read_dev(ddev, DSAF_DSA_SRAM_1BIT_ECC_SEL_REG);
p                2276 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[8] = dsaf_read_dev(ddev, DSAF_DSA_SRAM_1BIT_ECC_CNT_REG);
p                2278 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[9] = dsaf_read_dev(ddev, DSAF_PFC_EN_0_REG + port * 4);
p                2279 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[10] = dsaf_read_dev(ddev, DSAF_PFC_UNIT_CNT_0_REG + port * 4);
p                2280 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[11] = dsaf_read_dev(ddev, DSAF_XGE_INT_MSK_0_REG + port * 4);
p                2281 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[12] = dsaf_read_dev(ddev, DSAF_XGE_INT_SRC_0_REG + port * 4);
p                2282 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[13] = dsaf_read_dev(ddev, DSAF_XGE_INT_STS_0_REG + port * 4);
p                2283 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[14] = dsaf_read_dev(ddev, DSAF_XGE_INT_MSK_0_REG + port * 4);
p                2284 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[15] = dsaf_read_dev(ddev, DSAF_PPE_INT_MSK_0_REG + port * 4);
p                2285 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[16] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_MSK_0_REG + port * 4);
p                2286 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[17] = dsaf_read_dev(ddev, DSAF_XGE_INT_SRC_0_REG + port * 4);
p                2287 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[18] = dsaf_read_dev(ddev, DSAF_PPE_INT_SRC_0_REG + port * 4);
p                2288 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[19] =  dsaf_read_dev(ddev, DSAF_ROCEE_INT_SRC_0_REG + port * 4);
p                2289 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[20] = dsaf_read_dev(ddev, DSAF_XGE_INT_STS_0_REG + port * 4);
p                2290 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[21] = dsaf_read_dev(ddev, DSAF_PPE_INT_STS_0_REG + port * 4);
p                2291 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[22] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_STS_0_REG + port * 4);
p                2292 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[23] = dsaf_read_dev(ddev, DSAF_PPE_QID_CFG_0_REG + port * 4);
p                2295 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[24 + i] = dsaf_read_dev(ddev,
p                2298 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[32] = dsaf_read_dev(ddev, DSAF_MIX_DEF_QID_0_REG + port * 4);
p                2301 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[33 + i] = dsaf_read_dev(ddev,
p                2305 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[41 + i] = dsaf_read_dev(ddev,
p                2309 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[170] = dsaf_read_dev(ddev, DSAF_INODE_CUT_THROUGH_CFG_0_REG);
p                2311 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[171] = dsaf_read_dev(ddev,
p                2316 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[172 + i] = dsaf_read_dev(ddev,
p                2318 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[175 + i] = dsaf_read_dev(ddev,
p                2320 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[178 + i] = dsaf_read_dev(ddev,
p                2322 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[181 + i] = dsaf_read_dev(ddev,
p                2324 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[184 + i] = dsaf_read_dev(ddev,
p                2326 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[187 + i] = dsaf_read_dev(ddev,
p                2328 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[190 + i] = dsaf_read_dev(ddev,
p                2332 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[193 + i] = dsaf_read_dev(ddev, reg_tmp + j * 0x80);
p                2333 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[196 + i] = dsaf_read_dev(ddev,
p                2335 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[199 + i] = dsaf_read_dev(ddev,
p                2337 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[202 + i] = dsaf_read_dev(ddev,
p                2339 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[205 + i] = dsaf_read_dev(ddev,
p                2341 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[208 + i] = dsaf_read_dev(ddev,
p                2343 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[211 + i] = dsaf_read_dev(ddev,
p                2345 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[214 + i] = dsaf_read_dev(ddev,
p                2347 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[217 + i] = dsaf_read_dev(ddev,
p                2349 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[220 + i] = dsaf_read_dev(ddev,
p                2351 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[223 + i] = dsaf_read_dev(ddev,
p                2353 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[226 + i] = dsaf_read_dev(ddev,
p                2357 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[229] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
p                2361 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[230 + i] = dsaf_read_dev(ddev,
p                2365 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[233] = dsaf_read_dev(ddev,
p                2371 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[234 + i] = dsaf_read_dev(ddev,
p                2373 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[237 + i] = dsaf_read_dev(ddev,
p                2375 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[240 + i] = dsaf_read_dev(ddev,
p                2377 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[243 + i] = dsaf_read_dev(ddev,
p                2379 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[246 + i] = dsaf_read_dev(ddev,
p                2381 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[249 + i] = dsaf_read_dev(ddev,
p                2383 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[252 + i] = dsaf_read_dev(ddev,
p                2385 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[255 + i] = dsaf_read_dev(ddev,
p                2387 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[258 + i] = dsaf_read_dev(ddev,
p                2389 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[261 + i] = dsaf_read_dev(ddev,
p                2391 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[264 + i] = dsaf_read_dev(ddev,
p                2393 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[267 + i] = dsaf_read_dev(ddev,
p                2395 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[270 + i] = dsaf_read_dev(ddev,
p                2397 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[273 + i] = dsaf_read_dev(ddev,
p                2399 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[276 + i] = dsaf_read_dev(ddev,
p                2401 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[279 + i] = dsaf_read_dev(ddev,
p                2403 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[282 + i] = dsaf_read_dev(ddev,
p                2405 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[285 + i] = dsaf_read_dev(ddev,
p                2407 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[288 + i] = dsaf_read_dev(ddev,
p                2409 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[291 + i] = dsaf_read_dev(ddev,
p                2411 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[294 + i] = dsaf_read_dev(ddev,
p                2413 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[297 + i] = dsaf_read_dev(ddev,
p                2415 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[300 + i] = dsaf_read_dev(ddev,
p                2417 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[303 + i] = dsaf_read_dev(ddev,
p                2419 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[306 + i] = dsaf_read_dev(ddev,
p                2421 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[309 + i] = dsaf_read_dev(ddev,
p                2423 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[312 + i] = dsaf_read_dev(ddev,
p                2429 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[315 + i] = dsaf_read_dev(ddev,
p                2431 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[323 + i] = dsaf_read_dev(ddev,
p                2433 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[331 + i] = dsaf_read_dev(ddev,
p                2435 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[339 + i] = dsaf_read_dev(ddev,
p                2437 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[347 + i] = dsaf_read_dev(ddev,
p                2439 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[355 + i] = dsaf_read_dev(ddev,
p                2443 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[363] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
p                2444 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[364] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
p                2445 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[365] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
p                2449 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[366 + i] = dsaf_read_dev(ddev,
p                2451 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[369 + i] = dsaf_read_dev(ddev,
p                2453 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[372 + i] = dsaf_read_dev(ddev,
p                2455 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[375 + i] = dsaf_read_dev(ddev,
p                2457 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[378 + i] = dsaf_read_dev(ddev,
p                2459 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[381 + i] = dsaf_read_dev(ddev,
p                2461 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[384 + i] = dsaf_read_dev(ddev,
p                2463 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[387 + i] = dsaf_read_dev(ddev,
p                2465 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[390 + i] = dsaf_read_dev(ddev,
p                2467 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[393 + i] = dsaf_read_dev(ddev,
p                2471 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[396] = dsaf_read_dev(ddev,
p                2473 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[397] = dsaf_read_dev(ddev,
p                2475 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[398] = dsaf_read_dev(ddev,
p                2477 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[399] = dsaf_read_dev(ddev,
p                2479 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[400] = dsaf_read_dev(ddev,
p                2481 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[401] = dsaf_read_dev(ddev,
p                2483 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[402] = dsaf_read_dev(ddev,
p                2485 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[403] = dsaf_read_dev(ddev,
p                2487 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[404] = dsaf_read_dev(ddev,
p                2489 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[405] = dsaf_read_dev(ddev,
p                2491 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[406] = dsaf_read_dev(ddev,
p                2493 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[407] = dsaf_read_dev(ddev,
p                2495 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[408] = dsaf_read_dev(ddev,
p                2501 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[409 + i] = dsaf_read_dev(ddev,
p                2503 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[412 + i] = dsaf_read_dev(ddev,
p                2505 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[415 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
p                2506 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[418 + i] = dsaf_read_dev(ddev,
p                2508 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[421 + i] = dsaf_read_dev(ddev,
p                2510 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[424 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
p                2511 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[427 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
p                2512 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[430 + i] = dsaf_read_dev(ddev,
p                2514 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[433 + i] = dsaf_read_dev(ddev,
p                2516 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[436 + i] = dsaf_read_dev(ddev,
p                2518 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[439 + i] = dsaf_read_dev(ddev,
p                2520 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[442 + i] = dsaf_read_dev(ddev,
p                2525 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[445] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
p                2526 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[446] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
p                2527 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[447] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
p                2528 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[448] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
p                2529 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
p                2530 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[450] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
p                2531 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
p                2532 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
p                2533 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
p                2534 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
p                2535 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[455] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
p                2536 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
p                2537 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
p                2538 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
p                2539 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[459] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
p                2540 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
p                2541 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
p                2542 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
p                2543 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[463] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
p                2544 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[464] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
p                2545 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[465] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
p                2546 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[466] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
p                2547 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[467] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
p                2551 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[468 + 2 * i] = dsaf_read_dev(ddev,
p                2553 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[469 + 2 * i] = dsaf_read_dev(ddev,
p                2557 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[484] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
p                2558 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[485] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
p                2559 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[486] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
p                2560 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[487] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
p                2561 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[488] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
p                2562 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[489] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
p                2563 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[490] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
p                2564 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[491] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
p                2565 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[492] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
p                2566 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[493] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
p                2567 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[494] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
p                2568 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[495] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
p                2571 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[496] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
p                2572 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[497] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
p                2573 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[498] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
p                2574 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[499] = dsaf_read_dev(ddev,
p                2576 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[500] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
p                2577 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[501] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
p                2580 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[502] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
p                2584 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[i] = 0xdddddddd;
p                2641 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	u64 *p = data;
p                2646 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[0] = hw_stats->pad_drop;
p                2647 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[1] = hw_stats->man_pkts;
p                2648 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[2] = hw_stats->rx_pkts;
p                2649 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[3] = hw_stats->rx_pkt_id;
p                2650 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[4] = hw_stats->rx_pause_frame;
p                2651 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[5] = hw_stats->release_buf_num;
p                2652 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[6] = hw_stats->sbm_drop;
p                2653 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[7] = hw_stats->crc_false;
p                2654 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[8] = hw_stats->bp_drop;
p                2655 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[9] = hw_stats->rslt_drop;
p                2656 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[10] = hw_stats->local_addr_false;
p                2657 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[11] = hw_stats->vlan_drop;
p                2658 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[12] = hw_stats->stp_drop;
p                2661 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 			p[13 + i + 0 * DSAF_PRIO_NR] = hw_stats->rx_pfc[i];
p                2662 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 			p[13 + i + 1 * DSAF_PRIO_NR] = hw_stats->tx_pfc[i];
p                2664 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		p[29] = hw_stats->tx_pkts;
p                2665 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 		return &p[30];
p                2668 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p[13] = hw_stats->tx_pkts;
p                2669 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	return &p[14];
p                2680 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	u64 *p = data;
p                2684 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	p = hns_dsaf_get_node_stats(ddev, p, node_num);
p                2688 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c 	(void)hns_dsaf_get_node_stats(ddev, p, node_num);
p                  68 drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h #define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
p                 583 drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c #define hns_xgmac_cpy_q(p, q) \
p                 585 drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c 		*(p) = (u32)(q);\
p                 586 drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c 		*((p) + 1) = (u32)((q) >> 32);\
p                1181 drivers/net/ethernet/hisilicon/hns/hns_enet.c static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
p                1185 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct sockaddr *mac_addr = p;
p                 835 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	u64 *p = data;
p                 851 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[0] = net_stats->rx_packets;
p                 852 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[1] = net_stats->tx_packets;
p                 853 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[2] = net_stats->rx_bytes;
p                 854 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[3] = net_stats->tx_bytes;
p                 855 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[4] = net_stats->rx_errors;
p                 856 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[5] = net_stats->tx_errors;
p                 857 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[6] = net_stats->rx_dropped;
p                 858 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[7] = net_stats->tx_dropped;
p                 859 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[8] = net_stats->multicast;
p                 860 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[9] = net_stats->collisions;
p                 861 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[10] = net_stats->rx_over_errors;
p                 862 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[11] = net_stats->rx_crc_errors;
p                 863 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[12] = net_stats->rx_frame_errors;
p                 864 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[13] = net_stats->rx_fifo_errors;
p                 865 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[14] = net_stats->rx_missed_errors;
p                 866 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[15] = net_stats->tx_aborted_errors;
p                 867 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[16] = net_stats->tx_carrier_errors;
p                 868 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[17] = net_stats->tx_fifo_errors;
p                 869 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[18] = net_stats->tx_heartbeat_errors;
p                 870 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[19] = net_stats->rx_length_errors;
p                 871 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[20] = net_stats->tx_window_errors;
p                 872 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[21] = net_stats->rx_compressed;
p                 873 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[22] = net_stats->tx_compressed;
p                 875 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[23] = netdev->rx_dropped.counter;
p                 876 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[24] = netdev->tx_dropped.counter;
p                 878 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	p[25] = priv->tx_timeout_count;
p                 881 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	h->dev->ops->get_stats(h, &p[26]);
p                  74 drivers/net/ethernet/hisilicon/hns3/hnae3.h #define ring_ptr_move_fw(ring, p) \
p                  75 drivers/net/ethernet/hisilicon/hns3/hnae3.h 	((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
p                  76 drivers/net/ethernet/hisilicon/hns3/hnae3.h #define ring_ptr_move_bw(ring, p) \
p                  77 drivers/net/ethernet/hisilicon/hns3/hnae3.h 	((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
p                 427 drivers/net/ethernet/hisilicon/hns3/hnae3.h 	void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p);
p                 428 drivers/net/ethernet/hisilicon/hns3/hnae3.h 	int (*set_mac_addr)(struct hnae3_handle *handle, void *p,
p                1407 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
p                1410 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct sockaddr *mac_addr = p;
p                2124 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct page *p;
p                2126 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	p = dev_alloc_pages(order);
p                2127 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (!p)
p                2130 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	cb->priv = p;
p                2133 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	cb->buf  = page_address(p);
p                 517 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	u64 *p = data;
p                 532 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	p = hns3_get_stats_tqps(h, p);
p                 535 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	h->ae_algo->ops->get_stats(h, p);
p                  27 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
p                 765 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	u8 *p = (char *)data;
p                 770 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
p                 771 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 					   size, p);
p                 772 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		p = hclge_tqps_get_strings(handle, p);
p                 775 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
p                 777 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			p += ETH_GSTRING_LEN;
p                 780 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
p                 782 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			p += ETH_GSTRING_LEN;
p                 785 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			memcpy(p,
p                 788 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			p += ETH_GSTRING_LEN;
p                 791 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
p                 793 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 			p += ETH_GSTRING_LEN;
p                 802 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	u64 *p;
p                 804 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
p                 806 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	p = hclge_tqps_get_stats(handle, p);
p                7489 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
p                7494 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
p                7497 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
p                7500 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	const unsigned char *new_addr = (const unsigned char *)p;
p                 219 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	u8 *p = (char *)data;
p                 222 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 		p = hclgevf_tqps_get_strings(handle, p);
p                1177 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
p                1181 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
p                1184 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
p                1189 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 	u8 *new_mac_addr = (u8 *)p;
p                1250 drivers/net/ethernet/hp/hp100.c 	u_int *p;
p                1296 drivers/net/ethernet/hp/hp100.c 		for (p = (ringptr->pdl); p < (ringptr->pdl + 5); p++)
p                1297 drivers/net/ethernet/hp/hp100.c 			printk("hp100: %s: Adr 0x%.8x = 0x%.8x\n", dev->name, (u_int) p, (u_int) * p);
p                2812 drivers/net/ethernet/hp/hp100.c 	struct hp100_private *p = netdev_priv(d);
p                2817 drivers/net/ethernet/hp/hp100.c 	if (p->mode == 1)	/* busmaster */
p                2818 drivers/net/ethernet/hp/hp100.c 		pci_free_consistent(p->pci_dev, MAX_RINGSIZE + 0x0f,
p                2819 drivers/net/ethernet/hp/hp100.c 				    p->page_vaddr_algn,
p                2820 drivers/net/ethernet/hp/hp100.c 				    virt_to_whatever(d, p->page_vaddr_algn));
p                2821 drivers/net/ethernet/hp/hp100.c 	if (p->mem_ptr_virt)
p                2822 drivers/net/ethernet/hp/hp100.c 		iounmap(p->mem_ptr_virt);
p                 606 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 	char *p;
p                 614 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 			p = (char *)&txq_stats +
p                 617 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 					sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
p                 627 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 			p = (char *)&rxq_stats +
p                 630 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 					sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
p                 642 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 	char *p;
p                 651 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 		p = (char *)&vport_stats + hinic_function_stats[j].offset;
p                 653 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 				sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
p                 670 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 		p = (char *)port_stats + hinic_port_stats[j].offset;
p                 672 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 				sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
p                 705 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 	char *p = (char *)data;
p                 711 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 			memcpy(p, hinic_function_stats[i].name,
p                 713 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 			p += ETH_GSTRING_LEN;
p                 717 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 			memcpy(p, hinic_port_stats[i].name,
p                 719 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 			p += ETH_GSTRING_LEN;
p                 724 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 				sprintf(p, hinic_tx_queue_stats[j].name, i);
p                 725 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 				p += ETH_GSTRING_LEN;
p                 731 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 				sprintf(p, hinic_rx_queue_stats[j].name, i);
p                 732 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 				p += ETH_GSTRING_LEN;
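The hinic entries above (and the e1000, e1000e, igb, ice entries later) use p in two related ways: as a byte pointer computed from a stats structure plus a per-stat offset, dereferenced as u64 or u32 depending on the recorded field width, and as a cursor that writes one fixed-width ETH_GSTRING_LEN name after another. A minimal standalone sketch of both passes, with an illustrative stat table rather than any driver's real one:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

#define ETH_GSTRING_LEN 32
#define NUM_STATS (sizeof(descs) / sizeof(descs[0]))

struct queue_stats {                 /* illustrative stats block */
	uint64_t packets;
	uint64_t bytes;
	uint32_t dropped;
};

struct stat_desc {
	const char *name;
	size_t size;                 /* sizeof the field */
	size_t offset;               /* offsetof the field in queue_stats */
};

static const struct stat_desc descs[] = {
	{ "packets", sizeof(uint64_t), offsetof(struct queue_stats, packets) },
	{ "bytes",   sizeof(uint64_t), offsetof(struct queue_stats, bytes)   },
	{ "dropped", sizeof(uint32_t), offsetof(struct queue_stats, dropped) },
};

/* stats pass: widen each field to u64 based on its recorded size */
static void get_stats(const struct queue_stats *s, uint64_t *data)
{
	for (size_t i = 0; i < NUM_STATS; i++) {
		const char *p = (const char *)s + descs[i].offset;

		data[i] = (descs[i].size == sizeof(uint64_t)) ?
			  *(const uint64_t *)p : *(const uint32_t *)p;
	}
}

/* strings pass: copy names into fixed-width slots, advancing the cursor */
static void get_strings(uint8_t *data)
{
	uint8_t *p = data;

	for (size_t i = 0; i < NUM_STATS; i++) {
		strncpy((char *)p, descs[i].name, ETH_GSTRING_LEN - 1);
		p += ETH_GSTRING_LEN;
	}
}

int main(void)
{
	struct queue_stats s = { 10, 2048, 1 };
	uint64_t data[NUM_STATS];
	uint8_t names[NUM_STATS * ETH_GSTRING_LEN] = { 0 };

	get_stats(&s, data);
	get_strings(names);
	for (size_t i = 0; i < NUM_STATS; i++)
		printf("%-10s %llu\n", (const char *)&names[i * ETH_GSTRING_LEN],
		       (unsigned long long)data[i]);
	return 0;
}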
p                 396 drivers/net/ethernet/i825xx/82596.c 		struct i596_reg *p = (struct i596_reg *) (dev->base_addr);
p                 397 drivers/net/ethernet/i825xx/82596.c 		p->porthi = ((c) | (u32) (x)) & 0xffff;
p                 398 drivers/net/ethernet/i825xx/82596.c 		p->portlo = ((c) | (u32) (x)) >> 16;
p                  64 drivers/net/ethernet/i825xx/sun3_82586.c #define make32(ptr16) (p->memtop + (swab16((unsigned short) (ptr16))) )
p                  65 drivers/net/ethernet/i825xx/sun3_82586.c #define make24(ptr32) (char *)swab32(( ((unsigned long) (ptr32)) - p->base))
p                  66 drivers/net/ethernet/i825xx/sun3_82586.c #define make16(ptr32) (swab16((unsigned short) ((unsigned long)(ptr32) - (unsigned long) p->memtop )))
p                 101 drivers/net/ethernet/i825xx/sun3_82586.c     if(!p->scb->cmd_cuc) break; \
p                 104 drivers/net/ethernet/i825xx/sun3_82586.c       printk("%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_cuc,p->scb->cus); \
p                 105 drivers/net/ethernet/i825xx/sun3_82586.c        if(!p->reseted) { p->reseted = 1; sun3_reset586(); } } } }
p                 109 drivers/net/ethernet/i825xx/sun3_82586.c     if(!p->scb->cmd_ruc) break; \
p                 112 drivers/net/ethernet/i825xx/sun3_82586.c       printk("%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_ruc,p->scb->rus); \
p                 113 drivers/net/ethernet/i825xx/sun3_82586.c        if(!p->reseted) { p->reseted = 1; sun3_reset586(); } } } }
p                 210 drivers/net/ethernet/i825xx/sun3_82586.c 	struct priv *p = &pb;
p                 214 drivers/net/ethernet/i825xx/sun3_82586.c 	p->base = (unsigned long) dvma_btov(0);
p                 215 drivers/net/ethernet/i825xx/sun3_82586.c 	p->memtop = (char *)dvma_btov((unsigned long)where);
p                 216 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS);
p                 217 drivers/net/ethernet/i825xx/sun3_82586.c 	memset((char *)p->scp,0, sizeof(struct scp_struct));
p                 219 drivers/net/ethernet/i825xx/sun3_82586.c 		if(((char *)p->scp)[i])
p                 221 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scp->sysbus = SYSBUSVAL;				/* 1 = 8Bit-Bus, 0 = 16 Bit */
p                 222 drivers/net/ethernet/i825xx/sun3_82586.c 	if(p->scp->sysbus != SYSBUSVAL)
p                 227 drivers/net/ethernet/i825xx/sun3_82586.c 	p->iscp = (struct iscp_struct *) iscp_addr;
p                 228 drivers/net/ethernet/i825xx/sun3_82586.c 	memset((char *)p->iscp,0, sizeof(struct iscp_struct));
p                 230 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scp->iscp = make24(p->iscp);
p                 231 drivers/net/ethernet/i825xx/sun3_82586.c 	p->iscp->busy = 1;
p                 237 drivers/net/ethernet/i825xx/sun3_82586.c 	if(p->iscp->busy) /* i82586 clears 'busy' after successful init */
p                 248 drivers/net/ethernet/i825xx/sun3_82586.c 	struct priv *p = netdev_priv(dev);
p                 253 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scp	= (struct scp_struct *)	(p->base + SCP_DEFAULT_ADDRESS);
p                 254 drivers/net/ethernet/i825xx/sun3_82586.c 	p->iscp	= (struct iscp_struct *) dvma_btov(dev->mem_start);
p                 255 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb  = (struct scb_struct *)  ((char *)p->iscp + sizeof(struct iscp_struct));
p                 257 drivers/net/ethernet/i825xx/sun3_82586.c 	memset((char *) p->iscp,0,sizeof(struct iscp_struct));
p                 258 drivers/net/ethernet/i825xx/sun3_82586.c 	memset((char *) p->scp ,0,sizeof(struct scp_struct));
p                 260 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scp->iscp = make24(p->iscp);
p                 261 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scp->sysbus = SYSBUSVAL;
p                 262 drivers/net/ethernet/i825xx/sun3_82586.c 	p->iscp->scb_offset = make16(p->scb);
p                 263 drivers/net/ethernet/i825xx/sun3_82586.c 	p->iscp->scb_base = make24(dvma_btov(dev->mem_start));
p                 265 drivers/net/ethernet/i825xx/sun3_82586.c 	p->iscp->busy = 1;
p                 271 drivers/net/ethernet/i825xx/sun3_82586.c 	if(p->iscp->busy)
p                 274 drivers/net/ethernet/i825xx/sun3_82586.c 	p->reseted = 0;
p                 276 drivers/net/ethernet/i825xx/sun3_82586.c 	memset((char *)p->scb,0,sizeof(struct scb_struct));
p                 408 drivers/net/ethernet/i825xx/sun3_82586.c 	struct priv *p = netdev_priv(dev);
p                 416 drivers/net/ethernet/i825xx/sun3_82586.c 	ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
p                 433 drivers/net/ethernet/i825xx/sun3_82586.c 		int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
p                 443 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->cbl_offset	= make16(cfg_cmd);
p                 444 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->cmd_ruc		= 0;
p                 446 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->cmd_cuc		= CUC_START; /* cmd.-unit start */
p                 469 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->cbl_offset = make16(ias_cmd);
p                 471 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
p                 492 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->cbl_offset = make16(tdr_cmd);
p                 493 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
p                 507 drivers/net/ethernet/i825xx/sun3_82586.c 		p->scb->cmd_cuc = p->scb->cus & STAT_MASK;
p                 541 drivers/net/ethernet/i825xx/sun3_82586.c 		p->scb->cbl_offset = make16(mc_cmd);
p                 542 drivers/net/ethernet/i825xx/sun3_82586.c 		p->scb->cmd_cuc = CUC_START;
p                 557 drivers/net/ethernet/i825xx/sun3_82586.c 		p->nop_cmds[i] 			= (struct nop_cmd_struct *)ptr;
p                 558 drivers/net/ethernet/i825xx/sun3_82586.c 		p->nop_cmds[i]->cmd_cmd		= swab16(CMD_NOP);
p                 559 drivers/net/ethernet/i825xx/sun3_82586.c 		p->nop_cmds[i]->cmd_status 	= 0;
p                 560 drivers/net/ethernet/i825xx/sun3_82586.c 		p->nop_cmds[i]->cmd_link	= make16((p->nop_cmds[i]));
p                 566 drivers/net/ethernet/i825xx/sun3_82586.c 		p->nop_cmds[i]			= (struct nop_cmd_struct *)ptr;
p                 567 drivers/net/ethernet/i825xx/sun3_82586.c 		p->nop_cmds[i]->cmd_cmd		= swab16(CMD_NOP);
p                 568 drivers/net/ethernet/i825xx/sun3_82586.c 		p->nop_cmds[i]->cmd_status	= 0;
p                 569 drivers/net/ethernet/i825xx/sun3_82586.c 		p->nop_cmds[i]->cmd_link	= make16((p->nop_cmds[i]));
p                 581 drivers/net/ethernet/i825xx/sun3_82586.c 		p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr; /*transmit cmd/buff 0*/
p                 583 drivers/net/ethernet/i825xx/sun3_82586.c 		p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */
p                 585 drivers/net/ethernet/i825xx/sun3_82586.c 		p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */
p                 592 drivers/net/ethernet/i825xx/sun3_82586.c 		memset((char *)(p->xmit_cmds[i]) ,0, sizeof(struct transmit_cmd_struct));
p                 593 drivers/net/ethernet/i825xx/sun3_82586.c 		memset((char *)(p->xmit_buffs[i]),0, sizeof(struct tbd_struct));
p                 594 drivers/net/ethernet/i825xx/sun3_82586.c 		p->xmit_cmds[i]->cmd_link = make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]);
p                 595 drivers/net/ethernet/i825xx/sun3_82586.c 		p->xmit_cmds[i]->cmd_status = swab16(STAT_COMPL);
p                 596 drivers/net/ethernet/i825xx/sun3_82586.c 		p->xmit_cmds[i]->cmd_cmd = swab16(CMD_XMIT | CMD_INT);
p                 597 drivers/net/ethernet/i825xx/sun3_82586.c 		p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i]));
p                 598 drivers/net/ethernet/i825xx/sun3_82586.c 		p->xmit_buffs[i]->next = 0xffff;
p                 599 drivers/net/ethernet/i825xx/sun3_82586.c 		p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i]));
p                 602 drivers/net/ethernet/i825xx/sun3_82586.c 	p->xmit_count = 0;
p                 603 drivers/net/ethernet/i825xx/sun3_82586.c 	p->xmit_last	= 0;
p                 605 drivers/net/ethernet/i825xx/sun3_82586.c 	p->nop_point	= 0;
p                 612 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->cbl_offset = make16(p->nop_cmds[0]);
p                 613 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->cmd_cuc = CUC_START;
p                 617 drivers/net/ethernet/i825xx/sun3_82586.c 	p->xmit_cmds[0]->cmd_link = make16(p->xmit_cmds[0]);
p                 618 drivers/net/ethernet/i825xx/sun3_82586.c 	p->xmit_cmds[0]->cmd_cmd	= swab16(CMD_XMIT | CMD_SUSPEND | CMD_INT);
p                 624 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->cmd_cuc = p->scb->cus & STAT_MASK;
p                 644 drivers/net/ethernet/i825xx/sun3_82586.c 	struct priv *p = netdev_priv(dev);
p                 646 drivers/net/ethernet/i825xx/sun3_82586.c 	memset((char *) rfd,0,sizeof(struct rfd_struct)*(p->num_recv_buffs+rfdadd));
p                 647 drivers/net/ethernet/i825xx/sun3_82586.c 	p->rfd_first = rfd;
p                 649 drivers/net/ethernet/i825xx/sun3_82586.c 	for(i = 0; i < (p->num_recv_buffs+rfdadd); i++) {
p                 650 drivers/net/ethernet/i825xx/sun3_82586.c 		rfd[i].next = make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd) );
p                 653 drivers/net/ethernet/i825xx/sun3_82586.c 	rfd[p->num_recv_buffs-1+rfdadd].last = RFD_SUSP;	 /* RU suspend */
p                 655 drivers/net/ethernet/i825xx/sun3_82586.c 	ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd) );
p                 658 drivers/net/ethernet/i825xx/sun3_82586.c 	ptr = (void *) (rbd + p->num_recv_buffs);
p                 661 drivers/net/ethernet/i825xx/sun3_82586.c 	memset((char *) rbd,0,sizeof(struct rbd_struct)*(p->num_recv_buffs));
p                 663 drivers/net/ethernet/i825xx/sun3_82586.c 	for(i=0;i<p->num_recv_buffs;i++)
p                 665 drivers/net/ethernet/i825xx/sun3_82586.c 		rbd[i].next = make16((rbd + (i+1) % p->num_recv_buffs));
p                 671 drivers/net/ethernet/i825xx/sun3_82586.c 	p->rfd_top	= p->rfd_first;
p                 672 drivers/net/ethernet/i825xx/sun3_82586.c 	p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd);
p                 674 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->rfa_offset		= make16(p->rfd_first);
p                 675 drivers/net/ethernet/i825xx/sun3_82586.c 	p->rfd_first->rbd_offset	= make16(rbd);
p                 690 drivers/net/ethernet/i825xx/sun3_82586.c 	struct priv *p;
p                 696 drivers/net/ethernet/i825xx/sun3_82586.c 	p = netdev_priv(dev);
p                 703 drivers/net/ethernet/i825xx/sun3_82586.c 	while((stat=p->scb->cus & STAT_MASK))
p                 705 drivers/net/ethernet/i825xx/sun3_82586.c 		p->scb->cmd_cuc = stat;
p                 714 drivers/net/ethernet/i825xx/sun3_82586.c 			if(p->scb->rus & RU_SUSPEND) /* special case: RU_SUSPEND */
p                 717 drivers/net/ethernet/i825xx/sun3_82586.c 				p->scb->cmd_ruc = RUC_RESUME;
p                 723 drivers/net/ethernet/i825xx/sun3_82586.c 				printk("%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->rus);
p                 735 drivers/net/ethernet/i825xx/sun3_82586.c 				printk("%s: oops! CU has left active state. stat: %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->cus);
p                 743 drivers/net/ethernet/i825xx/sun3_82586.c 		if(p->scb->cmd_cuc)	 /* timed out? */
p                 766 drivers/net/ethernet/i825xx/sun3_82586.c 	struct priv *p = netdev_priv(dev);
p                 771 drivers/net/ethernet/i825xx/sun3_82586.c 	for(;(status = p->rfd_top->stat_high) & RFD_COMPL;)
p                 773 drivers/net/ethernet/i825xx/sun3_82586.c 			rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);
p                 786 drivers/net/ethernet/i825xx/sun3_82586.c 						skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen);
p                 821 drivers/net/ethernet/i825xx/sun3_82586.c 		p->rfd_top->stat_high = 0;
p                 822 drivers/net/ethernet/i825xx/sun3_82586.c 		p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */
p                 823 drivers/net/ethernet/i825xx/sun3_82586.c 		p->rfd_top->rbd_offset = 0xffff;
p                 824 drivers/net/ethernet/i825xx/sun3_82586.c 		p->rfd_last->last = 0;				/* delete RFD_SUSP	*/
p                 825 drivers/net/ethernet/i825xx/sun3_82586.c 		p->rfd_last = p->rfd_top;
p                 826 drivers/net/ethernet/i825xx/sun3_82586.c 		p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */
p                 827 drivers/net/ethernet/i825xx/sun3_82586.c 		p->scb->rfa_offset = make16(p->rfd_top);
p                 836 drivers/net/ethernet/i825xx/sun3_82586.c 		p->scb->cmd_ruc = RUC_RESUME;
p                 846 drivers/net/ethernet/i825xx/sun3_82586.c 			if(p->rfd_top->status)
p                 859 drivers/net/ethernet/i825xx/sun3_82586.c 		volatile struct rfd_struct *rfds=p->rfd_top;
p                 862 drivers/net/ethernet/i825xx/sun3_82586.c 		for(i=0;i< (p->num_recv_buffs+4);i++)
p                 868 drivers/net/ethernet/i825xx/sun3_82586.c 		printk("\nerrs: %04x %04x stat: %04x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->status);
p                 869 drivers/net/ethernet/i825xx/sun3_82586.c 		printk("\nerrs: %04x %04x rus: %02x, cus: %02x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->rus,(int)p->scb->cus);
p                 884 drivers/net/ethernet/i825xx/sun3_82586.c 	struct priv *p = netdev_priv(dev);
p                 889 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->cmd_ruc = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */
p                 893 drivers/net/ethernet/i825xx/sun3_82586.c 	alloc_rfa(dev,(char *)p->rfd_first);
p                 897 drivers/net/ethernet/i825xx/sun3_82586.c 	printk("%s: Receive-Unit restarted. Status: %04x\n",dev->name,p->scb->rus);
p                 908 drivers/net/ethernet/i825xx/sun3_82586.c 	struct priv *p = netdev_priv(dev);
p                 913 drivers/net/ethernet/i825xx/sun3_82586.c 	status = swab16(p->xmit_cmds[p->xmit_last]->cmd_status);
p                 946 drivers/net/ethernet/i825xx/sun3_82586.c 	if( (++p->xmit_last) == NUM_XMIT_BUFFS)
p                 947 drivers/net/ethernet/i825xx/sun3_82586.c 		p->xmit_last = 0;
p                 958 drivers/net/ethernet/i825xx/sun3_82586.c 	struct priv *p = netdev_priv(dev);
p                 962 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->rfa_offset = make16(p->rfd_first);
p                 963 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->cmd_ruc = RUC_START;
p                 970 drivers/net/ethernet/i825xx/sun3_82586.c 	struct priv *p = netdev_priv(dev);
p                 972 drivers/net/ethernet/i825xx/sun3_82586.c 	if(p->scb->cus & CU_ACTIVE) /* COMMAND-UNIT active? */
p                 977 drivers/net/ethernet/i825xx/sun3_82586.c 		printk("%s: X0: %04x N0: %04x N1: %04x %d\n",dev->name,(int)swab16(p->xmit_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[1]->cmd_status),(int)p->nop_point);
p                 979 drivers/net/ethernet/i825xx/sun3_82586.c 		p->scb->cmd_cuc = CUC_ABORT;
p                 982 drivers/net/ethernet/i825xx/sun3_82586.c 		p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]);
p                 983 drivers/net/ethernet/i825xx/sun3_82586.c 		p->scb->cmd_cuc = CUC_START;
p                 992 drivers/net/ethernet/i825xx/sun3_82586.c 		printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus);
p                 993 drivers/net/ethernet/i825xx/sun3_82586.c 		printk("%s: command-stats: %04x %04x\n",dev->name,swab16(p->xmit_cmds[0]->cmd_status),swab16(p->xmit_cmds[1]->cmd_status));
p                1013 drivers/net/ethernet/i825xx/sun3_82586.c 	struct priv *p = netdev_priv(dev);
p                1024 drivers/net/ethernet/i825xx/sun3_82586.c 	if(test_and_set_bit(0,(void *) &p->lock)) {
p                1033 drivers/net/ethernet/i825xx/sun3_82586.c 			memset((void *)p->xmit_cbuffs[p->xmit_count], 0,
p                1037 drivers/net/ethernet/i825xx/sun3_82586.c 		skb_copy_from_linear_data(skb, (void *)p->xmit_cbuffs[p->xmit_count], skb->len);
p                1043 drivers/net/ethernet/i825xx/sun3_82586.c 		if(p->scb->cus & CU_ACTIVE)
p                1046 drivers/net/ethernet/i825xx/sun3_82586.c 			printk("%s: stat: %04x %04x\n",dev->name,p->scb->cus,swab16(p->xmit_cmds[0]->cmd_status));
p                1050 drivers/net/ethernet/i825xx/sun3_82586.c 		p->xmit_buffs[0]->size = swab16(TBD_LAST | len);
p                1053 drivers/net/ethernet/i825xx/sun3_82586.c 			p->xmit_cmds[0]->cmd_status = 0;
p                1055 drivers/net/ethernet/i825xx/sun3_82586.c 			if( (p->scb->cus & CU_STATUS) == CU_SUSPEND)
p                1056 drivers/net/ethernet/i825xx/sun3_82586.c 				p->scb->cmd_cuc = CUC_RESUME;
p                1059 drivers/net/ethernet/i825xx/sun3_82586.c 				p->scb->cbl_offset = make16(p->xmit_cmds[0]);
p                1060 drivers/net/ethernet/i825xx/sun3_82586.c 				p->scb->cmd_cuc = CUC_START;
p                1067 drivers/net/ethernet/i825xx/sun3_82586.c 			if( (p->scb->cus & CU_ACTIVE)) /* test it, because CU sometimes doesn't start immediately */
p                1069 drivers/net/ethernet/i825xx/sun3_82586.c 			if(p->xmit_cmds[0]->cmd_status)
p                1075 drivers/net/ethernet/i825xx/sun3_82586.c 		next_nop = (p->nop_point + 1) & 0x1;
p                1076 drivers/net/ethernet/i825xx/sun3_82586.c 		p->xmit_buffs[0]->size = swab16(TBD_LAST | len);
p                1078 drivers/net/ethernet/i825xx/sun3_82586.c 		p->xmit_cmds[0]->cmd_link	 = p->nop_cmds[next_nop]->cmd_link
p                1079 drivers/net/ethernet/i825xx/sun3_82586.c 			= make16((p->nop_cmds[next_nop]));
p                1080 drivers/net/ethernet/i825xx/sun3_82586.c 		p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
p                1082 drivers/net/ethernet/i825xx/sun3_82586.c 		p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
p                1083 drivers/net/ethernet/i825xx/sun3_82586.c 		p->nop_point = next_nop;
p                1087 drivers/net/ethernet/i825xx/sun3_82586.c 		p->xmit_buffs[p->xmit_count]->size = swab16(TBD_LAST | len);
p                1088 drivers/net/ethernet/i825xx/sun3_82586.c 		if( (next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS )
p                1091 drivers/net/ethernet/i825xx/sun3_82586.c 		p->xmit_cmds[p->xmit_count]->cmd_status	= 0;
p                1093 drivers/net/ethernet/i825xx/sun3_82586.c 		p->nop_cmds[next_nop]->cmd_link = make16((p->nop_cmds[next_nop]));
p                1094 drivers/net/ethernet/i825xx/sun3_82586.c 		p->nop_cmds[next_nop]->cmd_status = 0;
p                1096 drivers/net/ethernet/i825xx/sun3_82586.c 		p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
p                1097 drivers/net/ethernet/i825xx/sun3_82586.c 		p->xmit_count = next_nop;
p                1102 drivers/net/ethernet/i825xx/sun3_82586.c 			if(p->xmit_count != p->xmit_last)
p                1104 drivers/net/ethernet/i825xx/sun3_82586.c 			p->lock = 0;
p                1119 drivers/net/ethernet/i825xx/sun3_82586.c 	struct priv *p = netdev_priv(dev);
p                1122 drivers/net/ethernet/i825xx/sun3_82586.c 	crc = swab16(p->scb->crc_errs); /* get error-statistic from the ni82586 */
p                1123 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->crc_errs = 0;
p                1124 drivers/net/ethernet/i825xx/sun3_82586.c 	aln = swab16(p->scb->aln_errs);
p                1125 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->aln_errs = 0;
p                1126 drivers/net/ethernet/i825xx/sun3_82586.c 	rsc = swab16(p->scb->rsc_errs);
p                1127 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->rsc_errs = 0;
p                1128 drivers/net/ethernet/i825xx/sun3_82586.c 	ovrn = swab16(p->scb->ovrn_errs);
p                1129 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->ovrn_errs = 0;
p                1160 drivers/net/ethernet/i825xx/sun3_82586.c 	struct priv *p = netdev_priv(dev);
p                1164 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->cmd_cuc = CUC_ABORT;
p                1174 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->cbl_offset = make16(dump_cmd);
p                1175 drivers/net/ethernet/i825xx/sun3_82586.c 	p->scb->cmd_cuc = CUC_START;
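The make16/make24/make32 macros indexed above from sun3_82586.c translate between CPU pointers into the DVMA-mapped shared memory and the byte-swapped 16-bit and 24-bit offsets the on-board i82586 stores in its descriptors, using p->memtop and p->base as the two reference points. A standalone sketch of the same translation under simplified assumptions (a flat buffer standing in for the DVMA window, plain 16/32-bit byte swaps, and illustrative function names):

#include <stdio.h>
#include <stdint.h>

static uint16_t swab16(uint16_t v) { return (uint16_t)((v << 8) | (v >> 8)); }
static uint32_t swab32(uint32_t v)
{
	return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}

static uint8_t shared_mem[0x4000];        /* fake DVMA-mapped shared memory */
static uint8_t *base   = shared_mem;      /* mirrors priv->base             */
static uint8_t *memtop = shared_mem;      /* mirrors priv->memtop           */

/* CPU pointer -> byte-swapped 16-bit offset the chip stores in descriptors */
static uint16_t make16(const void *ptr)
{
	return swab16((uint16_t)((const uint8_t *)ptr - memtop));
}

/* byte-swapped 16-bit descriptor offset -> CPU pointer (the reverse mapping) */
static void *make32(uint16_t ptr16)
{
	return memtop + swab16(ptr16);
}

/* CPU pointer -> byte-swapped 24/32-bit bus address relative to base */
static uint32_t make24(const void *ptr)
{
	return swab32((uint32_t)((const uint8_t *)ptr - base));
}

int main(void)
{
	void *obj = &shared_mem[0x120];
	uint16_t off16 = make16(obj);

	printf("off16=%#06x round-trip=%p orig=%p off24=%#010x\n",
	       off16, make32(off16), obj, make24(obj));
	return 0;
}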
p                 213 drivers/net/ethernet/ibm/emac/core.c 	struct emac_regs __iomem *p = dev->emacp;
p                 218 drivers/net/ethernet/ibm/emac/core.c 	r = in_be32(&p->mr0);
p                 220 drivers/net/ethernet/ibm/emac/core.c 		out_be32(&p->mr0, r | EMAC_MR0_TXE);
p                 225 drivers/net/ethernet/ibm/emac/core.c 	struct emac_regs __iomem *p = dev->emacp;
p                 230 drivers/net/ethernet/ibm/emac/core.c 	r = in_be32(&p->mr0);
p                 233 drivers/net/ethernet/ibm/emac/core.c 		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
p                 234 drivers/net/ethernet/ibm/emac/core.c 		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
p                 245 drivers/net/ethernet/ibm/emac/core.c 	struct emac_regs __iomem *p = dev->emacp;
p                 253 drivers/net/ethernet/ibm/emac/core.c 	r = in_be32(&p->mr0);
p                 258 drivers/net/ethernet/ibm/emac/core.c 			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
p                 266 drivers/net/ethernet/ibm/emac/core.c 		out_be32(&p->mr0, r | EMAC_MR0_RXE);
p                 274 drivers/net/ethernet/ibm/emac/core.c 	struct emac_regs __iomem *p = dev->emacp;
p                 279 drivers/net/ethernet/ibm/emac/core.c 	r = in_be32(&p->mr0);
p                 282 drivers/net/ethernet/ibm/emac/core.c 		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
p                 283 drivers/net/ethernet/ibm/emac/core.c 		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
p                 326 drivers/net/ethernet/ibm/emac/core.c 	struct emac_regs __iomem *p = dev->emacp;
p                 331 drivers/net/ethernet/ibm/emac/core.c 	r = in_be32(&p->mr0);
p                 333 drivers/net/ethernet/ibm/emac/core.c 		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
p                 338 drivers/net/ethernet/ibm/emac/core.c 	struct emac_regs __iomem *p = dev->emacp;
p                 385 drivers/net/ethernet/ibm/emac/core.c 	out_be32(&p->mr0, EMAC_MR0_SRST);
p                 386 drivers/net/ethernet/ibm/emac/core.c 	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
p                 575 drivers/net/ethernet/ibm/emac/core.c 	struct emac_regs __iomem *p = dev->emacp;
p                 583 drivers/net/ethernet/ibm/emac/core.c 		out_be32(&p->mr1, in_be32(&p->mr1)
p                 619 drivers/net/ethernet/ibm/emac/core.c 			out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
p                 663 drivers/net/ethernet/ibm/emac/core.c 	out_be32(&p->mr1, mr1);
p                 666 drivers/net/ethernet/ibm/emac/core.c 	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
p                 667 drivers/net/ethernet/ibm/emac/core.c 	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
p                 672 drivers/net/ethernet/ibm/emac/core.c 	out_be32(&p->vtpid, 0x8100);
p                 678 drivers/net/ethernet/ibm/emac/core.c 	out_be32(&p->rmr, r);
p                 687 drivers/net/ethernet/ibm/emac/core.c 	out_be32(&p->tmr1, r);
p                 688 drivers/net/ethernet/ibm/emac/core.c 	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
p                 711 drivers/net/ethernet/ibm/emac/core.c 	out_be32(&p->rwmr, r);
p                 714 drivers/net/ethernet/ibm/emac/core.c 	out_be32(&p->ptr, 0xffff);
p                 723 drivers/net/ethernet/ibm/emac/core.c 	out_be32(&p->iser,  r);
p                 801 drivers/net/ethernet/ibm/emac/core.c 	struct emac_regs __iomem *p = dev->emacp;
p                 817 drivers/net/ethernet/ibm/emac/core.c 	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
p                 838 drivers/net/ethernet/ibm/emac/core.c 	out_be32(&p->stacr, r);
p                 842 drivers/net/ethernet/ibm/emac/core.c 	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
p                 873 drivers/net/ethernet/ibm/emac/core.c 	struct emac_regs __iomem *p = dev->emacp;
p                 889 drivers/net/ethernet/ibm/emac/core.c 	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
p                 911 drivers/net/ethernet/ibm/emac/core.c 	out_be32(&p->stacr, r);
p                 915 drivers/net/ethernet/ibm/emac/core.c 	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
p                 956 drivers/net/ethernet/ibm/emac/core.c 	struct emac_regs __iomem *p = dev->emacp;
p                 982 drivers/net/ethernet/ibm/emac/core.c 	out_be32(&p->rmr, rmr);
p                1009 drivers/net/ethernet/ibm/emac/core.c 	struct emac_regs __iomem *p = dev->emacp;
p                1020 drivers/net/ethernet/ibm/emac/core.c 	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
p                1021 drivers/net/ethernet/ibm/emac/core.c 	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
p                1430 drivers/net/ethernet/ibm/emac/core.c 	struct emac_regs __iomem *p = dev->emacp;
p                1438 drivers/net/ethernet/ibm/emac/core.c 		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
p                1440 drivers/net/ethernet/ibm/emac/core.c 		out_be32(&p->tmr0, EMAC_TMR0_XMIT);
p                1923 drivers/net/ethernet/ibm/emac/core.c 	struct emac_regs __iomem *p = dev->emacp;
p                1929 drivers/net/ethernet/ibm/emac/core.c 	isr = in_be32(&p->isr);
p                1930 drivers/net/ethernet/ibm/emac/core.c 	out_be32(&p->isr, isr);
p                2851 drivers/net/ethernet/ibm/emac/core.c 	const void *p;
p                2978 drivers/net/ethernet/ibm/emac/core.c 	p = of_get_property(np, "local-mac-address", NULL);
p                2979 drivers/net/ethernet/ibm/emac/core.c 	if (p == NULL) {
p                2984 drivers/net/ethernet/ibm/emac/core.c 	memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
p                 406 drivers/net/ethernet/ibm/emac/core.h 	struct emac_regs __iomem *p = dev->emacp;
p                 417 drivers/net/ethernet/ibm/emac/core.h 	return (u32 *)((ptrdiff_t)p + offset);
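The ibm/emac core.c entries above access the controller through a register block pointer, doing a read-modify-write of mr0 with in_be32/out_be32 and then polling a status bit with a bounded retry count. A standalone sketch of that enable, disable-and-poll shape against a fake in-memory register, with made-up bit names standing in for EMAC_MR0_TXE/EMAC_MR0_TXI and a trivial device model instead of real MMIO:

#include <stdio.h>
#include <stdint.h>

#define MR0_TXE  0x80000000u   /* transmitter enable (made-up value) */
#define MR0_TXI  0x40000000u   /* transmitter idle, status (made-up value) */

static volatile uint32_t fake_mr0 = MR0_TXI;   /* fake register, idle at start */

static uint32_t reg_read(volatile uint32_t *r) { return *r; }

static void reg_write(volatile uint32_t *r, uint32_t v)
{
	/* crude device model: busy while TXE is set, idle (TXI) once cleared */
	if (v & MR0_TXE)
		*r = v & ~MR0_TXI;
	else
		*r = v | MR0_TXI;
}

/* read-modify-write enable, mirroring the emac tx-enable shape */
static void tx_enable(void)
{
	uint32_t r = reg_read(&fake_mr0);

	if (!(r & MR0_TXE))
		reg_write(&fake_mr0, r | MR0_TXE);
}

/* disable and poll for idle with a bounded retry count */
static int tx_disable(void)
{
	uint32_t r = reg_read(&fake_mr0);
	int n = 300;                          /* bounded wait, as in the driver */

	if (r & MR0_TXE) {
		reg_write(&fake_mr0, r & ~MR0_TXE);
		while (!(reg_read(&fake_mr0) & MR0_TXI) && n)
			--n;                  /* the driver delays between polls */
	}
	return n ? 0 : -1;                    /* -1: timed out waiting for idle */
}

int main(void)
{
	tx_enable();
	printf("disable %s\n", tx_disable() ? "timed out" : "ok");
	return 0;
}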
p                  81 drivers/net/ethernet/ibm/emac/rgmii.c 	struct rgmii_regs __iomem *p = dev->base;
p                  95 drivers/net/ethernet/ibm/emac/rgmii.c 	out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input));
p                 110 drivers/net/ethernet/ibm/emac/rgmii.c 	struct rgmii_regs __iomem *p = dev->base;
p                 115 drivers/net/ethernet/ibm/emac/rgmii.c 	ssr = in_be32(&p->ssr) & ~RGMII_SSR_MASK(input);
p                 126 drivers/net/ethernet/ibm/emac/rgmii.c 	out_be32(&p->ssr, ssr);
p                 134 drivers/net/ethernet/ibm/emac/rgmii.c 	struct rgmii_regs __iomem *p = dev->base;
p                 144 drivers/net/ethernet/ibm/emac/rgmii.c 	fer = in_be32(&p->fer);
p                 146 drivers/net/ethernet/ibm/emac/rgmii.c 	out_be32(&p->fer, fer);
p                 147 drivers/net/ethernet/ibm/emac/rgmii.c 	(void)in_be32(&p->fer);
p                 155 drivers/net/ethernet/ibm/emac/rgmii.c 	struct rgmii_regs __iomem *p = dev->base;
p                 163 drivers/net/ethernet/ibm/emac/rgmii.c 	fer = in_be32(&p->fer);
p                 165 drivers/net/ethernet/ibm/emac/rgmii.c 	out_be32(&p->fer, fer);
p                 166 drivers/net/ethernet/ibm/emac/rgmii.c 	(void)in_be32(&p->fer);
p                 176 drivers/net/ethernet/ibm/emac/rgmii.c 	struct rgmii_regs __iomem *p;
p                 179 drivers/net/ethernet/ibm/emac/rgmii.c 	p = dev->base;
p                 186 drivers/net/ethernet/ibm/emac/rgmii.c 	out_be32(&p->fer, in_be32(&p->fer) & ~RGMII_FER_MASK(input));
p                  47 drivers/net/ethernet/ibm/emac/tah.c 	struct tah_regs __iomem *p = dev->base;
p                  51 drivers/net/ethernet/ibm/emac/tah.c 	out_be32(&p->mr, TAH_MR_SR);
p                  53 drivers/net/ethernet/ibm/emac/tah.c 	while ((in_be32(&p->mr) & TAH_MR_SR) && n)
p                  60 drivers/net/ethernet/ibm/emac/tah.c 	out_be32(&p->mr,
p                  84 drivers/net/ethernet/ibm/emac/zmii.c 	struct zmii_regs __iomem *p = dev->base;
p                 139 drivers/net/ethernet/ibm/emac/zmii.c 	out_be32(&p->fer, in_be32(&p->fer) | zmii_mode_mask(dev->mode, input));
p                 302 drivers/net/ethernet/ibm/ibmveth.c 	__be64 *p = adapter->buffer_list_addr + 4096 - 8;
p                 304 drivers/net/ethernet/ibm/ibmveth.c 	adapter->rx_no_buffer = be64_to_cpup(p);
p                1564 drivers/net/ethernet/ibm/ibmveth.c static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
p                1567 drivers/net/ethernet/ibm/ibmveth.c 	struct sockaddr *addr = p;
p                1712 drivers/net/ethernet/ibm/ibmvnic.c static int ibmvnic_set_mac(struct net_device *netdev, void *p)
p                1715 drivers/net/ethernet/ibm/ibmvnic.c 	struct sockaddr *addr = p;
p                2251 drivers/net/ethernet/intel/e100.c static int e100_set_mac_address(struct net_device *netdev, void *p)
p                2254 drivers/net/ethernet/intel/e100.c 	struct sockaddr *addr = p;
p                2446 drivers/net/ethernet/intel/e100.c 	struct ethtool_regs *regs, void *p)
p                2449 drivers/net/ethernet/intel/e100.c 	u32 *buff = p;
p                 325 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 			   void *p)
p                 329 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	u32 *regs_buff = p;
p                 332 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	memset(p, 0, E1000_REGS_LEN * sizeof(u32));
p                1810 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 		char *p;
p                1814 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 			p = (char *)netdev + stat->stat_offset;
p                1817 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 			p = (char *)adapter + stat->stat_offset;
p                1826 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 			data[i] = *(u64 *)p;
p                1828 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 			data[i] = *(u32 *)p;
p                1836 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	u8 *p = data;
p                1845 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 			memcpy(p, e1000_gstrings_stats[i].stat_string,
p                1847 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 			p += ETH_GSTRING_LEN;
p                 110 drivers/net/ethernet/intel/e1000/e1000_main.c static int e1000_set_mac(struct net_device *netdev, void *p);
p                2192 drivers/net/ethernet/intel/e1000/e1000_main.c static int e1000_set_mac(struct net_device *netdev, void *p)
p                2196 drivers/net/ethernet/intel/e1000/e1000_main.c 	struct sockaddr *addr = p;
p                4215 drivers/net/ethernet/intel/e1000/e1000_main.c 				struct page *p;
p                4219 drivers/net/ethernet/intel/e1000/e1000_main.c 				p = buffer_info->rxbuf.page;
p                4230 drivers/net/ethernet/intel/e1000/e1000_main.c 					vaddr = kmap_atomic(p);
p                4254 drivers/net/ethernet/intel/e1000/e1000_main.c 					skb_fill_page_desc(skb, 0, p, 0,
p                 178 drivers/net/ethernet/intel/e1000/e1000_param.c 			const struct e1000_opt_list { int i; char *str; } *p;
p                 214 drivers/net/ethernet/intel/e1000/e1000_param.c 			ent = &opt->arg.l.p[i];
p                 344 drivers/net/ethernet/intel/e1000/e1000_param.c 					 .p = fc_list }}
p                 556 drivers/net/ethernet/intel/e1000/e1000_param.c 					 .p = speed_list }}
p                 578 drivers/net/ethernet/intel/e1000/e1000_param.c 					 .p = dplx_list }}
p                 634 drivers/net/ethernet/intel/e1000/e1000_param.c 					 .p = an_list }}
p                 429 drivers/net/ethernet/intel/e1000e/ethtool.c 			   struct ethtool_regs *regs, void *p)
p                 433 drivers/net/ethernet/intel/e1000e/ethtool.c 	u32 *regs_buff = p;
p                 438 drivers/net/ethernet/intel/e1000e/ethtool.c 	memset(p, 0, E1000_REGS_LEN * sizeof(u32));
p                2052 drivers/net/ethernet/intel/e1000e/ethtool.c 	char *p = NULL;
p                2063 drivers/net/ethernet/intel/e1000e/ethtool.c 			p = (char *)&net_stats +
p                2067 drivers/net/ethernet/intel/e1000e/ethtool.c 			p = (char *)adapter +
p                2076 drivers/net/ethernet/intel/e1000e/ethtool.c 			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
p                2083 drivers/net/ethernet/intel/e1000e/ethtool.c 	u8 *p = data;
p                2092 drivers/net/ethernet/intel/e1000e/ethtool.c 			memcpy(p, e1000_gstrings_stats[i].stat_string,
p                2094 drivers/net/ethernet/intel/e1000e/ethtool.c 			p += ETH_GSTRING_LEN;
p                4757 drivers/net/ethernet/intel/e1000e/netdev.c static int e1000_set_mac(struct net_device *netdev, void *p)
p                4761 drivers/net/ethernet/intel/e1000e/netdev.c 	struct sockaddr *addr = p;
p                 158 drivers/net/ethernet/intel/e1000e/param.c 			} *p;
p                 197 drivers/net/ethernet/intel/e1000e/param.c 			ent = &opt->arg.l.p[i];
p                 138 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c static void __fm10k_add_stat_strings(u8 **p, const struct fm10k_stats stats[],
p                 147 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 		vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
p                 148 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 		*p += ETH_GSTRING_LEN;
p                 153 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c #define fm10k_add_stat_strings(p, stats, ...) \
p                 154 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 	__fm10k_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
p                 234 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 		char *p = (char *)pointer + stats[i].stat_offset;
p                 238 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 			*((*data)++) = *(u64 *)p;
p                 241 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 			*((*data)++) = *(u32 *)p;
p                 244 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 			*((*data)++) = *(u16 *)p;
p                 247 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 			*((*data)++) = *(u8 *)p;
p                 352 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 			   struct ethtool_regs *regs, void *p)
p                 356 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 	u32 *buff = p;
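The fm10k helper indexed above (and the i40e/iavf equivalents below) takes the string cursor by reference as u8 **p and formats each name with vsnprintf, so a queue index or similar argument can be spliced into the template; a wrapper macro supplies ARRAY_SIZE(stats). A standalone sketch of that double-pointer, varargs formatting shape with illustrative templates and names:

#include <stdio.h>
#include <stdarg.h>
#include <stdint.h>

#define ETH_GSTRING_LEN 32

/* illustrative name templates; "%u" is filled in per queue below */
static const char *const queue_stat_templates[] = {
	"queue_%u_packets",
	"queue_%u_bytes",
};

/* format one group of names at *p and advance the caller's cursor */
static void add_stat_strings(uint8_t **p, const char *const templates[],
			     unsigned int count, ...)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		va_list args;

		/* restart the list per template so each sees the same arguments */
		va_start(args, count);
		vsnprintf((char *)*p, ETH_GSTRING_LEN, templates[i], args);
		va_end(args);
		*p += ETH_GSTRING_LEN;
	}
}

int main(void)
{
	uint8_t names[4 * ETH_GSTRING_LEN] = { 0 };
	uint8_t *p = names;
	unsigned int q;

	for (q = 0; q < 2; q++)              /* one group of strings per queue */
		add_stat_strings(&p, queue_stat_templates, 2, q);

	for (q = 0; q < 4; q++)
		printf("%s\n", (const char *)&names[q * ETH_GSTRING_LEN]);
	return 0;
}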
p                1084 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c static int fm10k_set_mac(struct net_device *dev, void *p)
p                1088 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	struct sockaddr *addr = p;
p                3202 drivers/net/ethernet/intel/i40e/i40e_common.c 	struct i40e_hw_capabilities *p;
p                3211 drivers/net/ethernet/intel/i40e/i40e_common.c 		p = &hw->dev_caps;
p                3213 drivers/net/ethernet/intel/i40e/i40e_common.c 		p = &hw->func_caps;
p                3226 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->switch_mode = number;
p                3229 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->management_mode = number;
p                3231 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->mng_protocols_over_mctp = logical_id;
p                3234 drivers/net/ethernet/intel/i40e/i40e_common.c 					   p->mng_protocols_over_mctp);
p                3236 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->mng_protocols_over_mctp = 0;
p                3240 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->npar_enable = number;
p                3243 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->os2bmc = number;
p                3246 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->valid_functions = number;
p                3250 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->sr_iov_1_1 = true;
p                3253 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->num_vfs = number;
p                3254 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->vf_base_id = logical_id;
p                3258 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->vmdq = true;
p                3262 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->evb_802_1_qbg = true;
p                3266 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->evb_802_1_qbh = true;
p                3269 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->num_vsis = number;
p                3273 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->dcb = true;
p                3274 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->enabled_tcmap = logical_id;
p                3275 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->maxtc = phys_id;
p                3280 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->fcoe = true;
p                3284 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->iscsi = true;
p                3287 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->rss = true;
p                3288 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->rss_table_size = number;
p                3289 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->rss_table_entry_width = logical_id;
p                3292 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->num_rx_qp = number;
p                3293 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->base_queue = phys_id;
p                3296 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->num_tx_qp = number;
p                3297 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->base_queue = phys_id;
p                3300 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->num_msix_vectors = number;
p                3303 drivers/net/ethernet/intel/i40e/i40e_common.c 				   p->num_msix_vectors);
p                3306 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->num_msix_vectors_vf = number;
p                3311 drivers/net/ethernet/intel/i40e/i40e_common.c 					p->flex10_enable = true;
p                3312 drivers/net/ethernet/intel/i40e/i40e_common.c 					p->flex10_capable = true;
p                3317 drivers/net/ethernet/intel/i40e/i40e_common.c 					p->flex10_enable = true;
p                3319 drivers/net/ethernet/intel/i40e/i40e_common.c 					p->flex10_capable = true;
p                3321 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->flex10_mode = logical_id;
p                3322 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->flex10_status = phys_id;
p                3326 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->mgmt_cem = true;
p                3330 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->iwarp = true;
p                3334 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->led[phys_id] = true;
p                3338 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->sdp[phys_id] = true;
p                3342 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->mdio_port_num = phys_id;
p                3343 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->mdio_port_mode = logical_id;
p                3348 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->ieee_1588 = true;
p                3351 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->fd = true;
p                3352 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->fd_filters_guaranteed = number;
p                3353 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->fd_filters_best_effort = logical_id;
p                3356 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->wr_csr_prot = (u64)number;
p                3357 drivers/net/ethernet/intel/i40e/i40e_common.c 			p->wr_csr_prot |= (u64)logical_id << 32;
p                3361 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->sec_rev_disabled = true;
p                3363 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->update_disabled = true;
p                3370 drivers/net/ethernet/intel/i40e/i40e_common.c 	if (p->fcoe)
p                3376 drivers/net/ethernet/intel/i40e/i40e_common.c 	if (p->npar_enable || p->flex10_enable)
p                3377 drivers/net/ethernet/intel/i40e/i40e_common.c 		p->fcoe = false;
p                3413 drivers/net/ethernet/intel/i40e/i40e_common.c 	valid_functions = p->valid_functions;
p                3432 drivers/net/ethernet/intel/i40e/i40e_common.c 	p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
p                5510 drivers/net/ethernet/intel/i40e/i40e_common.c 		struct i40e_profile_segment *p = (profile);		\
p                5513 drivers/net/ethernet/intel/i40e/i40e_common.c 		count = p->device_table_count;				\
p                5514 drivers/net/ethernet/intel/i40e/i40e_common.c 		nvm = (u32 *)&p->device_table[count];			\
p                  80 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	char *p;
p                  90 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	p = (char *)pointer + stat->stat_offset;
p                  93 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 		*data = *((u64 *)p);
p                  96 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 		*data = *((u32 *)p);
p                  99 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 		*data = *((u16 *)p);
p                 102 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 		*data = *((u8 *)p);
p                 199 drivers/net/ethernet/intel/i40e/i40e_ethtool.c static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
p                 208 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 		vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
p                 209 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 		*p += ETH_GSTRING_LEN;
p                 226 drivers/net/ethernet/intel/i40e/i40e_ethtool.c #define i40e_add_stat_strings(p, stats, ...) \
p                 227 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	__i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
p                1710 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 			  void *p)
p                1715 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	u32 *reg_buf = p;
p                2263 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	u64 *p = data;
p                2311 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	WARN_ONCE(data - p != i40e_get_stats_count(netdev),
p                2331 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	u8 *p = data;
p                2358 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
p                2367 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	char *p = (char *)data;
p                2371 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 		snprintf(p, ETH_GSTRING_LEN, "%s",
p                2373 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 		p += ETH_GSTRING_LEN;
p                2378 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 		snprintf(p, ETH_GSTRING_LEN, "%s",
p                2380 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 		p += ETH_GSTRING_LEN;
p                 778 drivers/net/ethernet/intel/i40e/i40e_main.c 	struct i40e_ring *p;
p                 808 drivers/net/ethernet/intel/i40e/i40e_main.c 		p = READ_ONCE(vsi->tx_rings[q]);
p                 811 drivers/net/ethernet/intel/i40e/i40e_main.c 			start = u64_stats_fetch_begin_irq(&p->syncp);
p                 812 drivers/net/ethernet/intel/i40e/i40e_main.c 			packets = p->stats.packets;
p                 813 drivers/net/ethernet/intel/i40e/i40e_main.c 			bytes = p->stats.bytes;
p                 814 drivers/net/ethernet/intel/i40e/i40e_main.c 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
p                 817 drivers/net/ethernet/intel/i40e/i40e_main.c 		tx_restart += p->tx_stats.restart_queue;
p                 818 drivers/net/ethernet/intel/i40e/i40e_main.c 		tx_busy += p->tx_stats.tx_busy;
p                 819 drivers/net/ethernet/intel/i40e/i40e_main.c 		tx_linearize += p->tx_stats.tx_linearize;
p                 820 drivers/net/ethernet/intel/i40e/i40e_main.c 		tx_force_wb += p->tx_stats.tx_force_wb;
p                 823 drivers/net/ethernet/intel/i40e/i40e_main.c 		p = &p[1];
p                 825 drivers/net/ethernet/intel/i40e/i40e_main.c 			start = u64_stats_fetch_begin_irq(&p->syncp);
p                 826 drivers/net/ethernet/intel/i40e/i40e_main.c 			packets = p->stats.packets;
p                 827 drivers/net/ethernet/intel/i40e/i40e_main.c 			bytes = p->stats.bytes;
p                 828 drivers/net/ethernet/intel/i40e/i40e_main.c 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
p                 831 drivers/net/ethernet/intel/i40e/i40e_main.c 		rx_buf += p->rx_stats.alloc_buff_failed;
p                 832 drivers/net/ethernet/intel/i40e/i40e_main.c 		rx_page += p->rx_stats.alloc_page_failed;
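The i40e_main.c entries above show the lock-free snapshot loop for per-queue 64-bit counters: take a sequence value with u64_stats_fetch_begin_irq(), copy packets and bytes, and retry if u64_stats_fetch_retry_irq() reports that a writer raced the read. A kernel-context sketch of summing the TX rings that way, modeled on those lines; the function name and the use of vsi->num_queue_pairs are assumptions for illustration, the ring and stats field names come from the indexed lines, and this needs the driver's own headers rather than building standalone:

/* kernel-context sketch, not standalone */
static void sum_tx_stats(struct i40e_vsi *vsi, u64 *tx_packets, u64 *tx_bytes)
{
	u64 packets = 0, bytes = 0;
	int q;

	for (q = 0; q < vsi->num_queue_pairs; q++) {   /* assumed field */
		struct i40e_ring *ring = READ_ONCE(vsi->tx_rings[q]);
		unsigned int start;
		u64 p, b;

		if (!ring)
			continue;
		do {
			/* re-read if a writer updated the counters meanwhile */
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			p = ring->stats.packets;
			b = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		packets += p;
		bytes += b;
	}
	*tx_packets = packets;
	*tx_bytes = bytes;
}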
p                1532 drivers/net/ethernet/intel/i40e/i40e_main.c static int i40e_set_mac(struct net_device *netdev, void *p)
p                1538 drivers/net/ethernet/intel/i40e/i40e_main.c 	struct sockaddr *addr = p;
p                  73 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	char *p;
p                  83 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	p = (char *)pointer + stat->stat_offset;
p                  86 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 		*data = *((u64 *)p);
p                  89 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 		*data = *((u32 *)p);
p                  92 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 		*data = *((u16 *)p);
p                  95 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 		*data = *((u8 *)p);
p                 190 drivers/net/ethernet/intel/iavf/iavf_ethtool.c static void __iavf_add_stat_strings(u8 **p, const struct iavf_stats stats[],
p                 199 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 		vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
p                 200 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 		*p += ETH_GSTRING_LEN;
p                 217 drivers/net/ethernet/intel/iavf/iavf_ethtool.c #define iavf_add_stat_strings(p, stats, ...) \
p                 218 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	__iavf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
p                 779 drivers/net/ethernet/intel/iavf/iavf_main.c static int iavf_set_mac(struct net_device *netdev, void *p)
p                 784 drivers/net/ethernet/intel/iavf/iavf_main.c 	struct sockaddr *addr = p;
p                 185 drivers/net/ethernet/intel/ice/ice_ethtool.c ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
p                 190 drivers/net/ethernet/intel/ice/ice_ethtool.c 	u32 *regs_buf = (u32 *)p;
p                 846 drivers/net/ethernet/intel/ice/ice_ethtool.c 	char *p = (char *)data;
p                 852 drivers/net/ethernet/intel/ice/ice_ethtool.c 			snprintf(p, ETH_GSTRING_LEN, "%s",
p                 854 drivers/net/ethernet/intel/ice/ice_ethtool.c 			p += ETH_GSTRING_LEN;
p                 858 drivers/net/ethernet/intel/ice/ice_ethtool.c 			snprintf(p, ETH_GSTRING_LEN,
p                 860 drivers/net/ethernet/intel/ice/ice_ethtool.c 			p += ETH_GSTRING_LEN;
p                 861 drivers/net/ethernet/intel/ice/ice_ethtool.c 			snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i);
p                 862 drivers/net/ethernet/intel/ice/ice_ethtool.c 			p += ETH_GSTRING_LEN;
p                 866 drivers/net/ethernet/intel/ice/ice_ethtool.c 			snprintf(p, ETH_GSTRING_LEN,
p                 868 drivers/net/ethernet/intel/ice/ice_ethtool.c 			p += ETH_GSTRING_LEN;
p                 869 drivers/net/ethernet/intel/ice/ice_ethtool.c 			snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i);
p                 870 drivers/net/ethernet/intel/ice/ice_ethtool.c 			p += ETH_GSTRING_LEN;
p                 877 drivers/net/ethernet/intel/ice/ice_ethtool.c 			snprintf(p, ETH_GSTRING_LEN, "%s",
p                 879 drivers/net/ethernet/intel/ice/ice_ethtool.c 			p += ETH_GSTRING_LEN;
p                 883 drivers/net/ethernet/intel/ice/ice_ethtool.c 			snprintf(p, ETH_GSTRING_LEN,
p                 885 drivers/net/ethernet/intel/ice/ice_ethtool.c 			p += ETH_GSTRING_LEN;
p                 886 drivers/net/ethernet/intel/ice/ice_ethtool.c 			snprintf(p, ETH_GSTRING_LEN,
p                 888 drivers/net/ethernet/intel/ice/ice_ethtool.c 			p += ETH_GSTRING_LEN;
p                 891 drivers/net/ethernet/intel/ice/ice_ethtool.c 			snprintf(p, ETH_GSTRING_LEN,
p                 893 drivers/net/ethernet/intel/ice/ice_ethtool.c 			p += ETH_GSTRING_LEN;
p                 894 drivers/net/ethernet/intel/ice/ice_ethtool.c 			snprintf(p, ETH_GSTRING_LEN,
p                 896 drivers/net/ethernet/intel/ice/ice_ethtool.c 			p += ETH_GSTRING_LEN;
p                 904 drivers/net/ethernet/intel/ice/ice_ethtool.c 			snprintf(p, ETH_GSTRING_LEN, "%s",
p                 906 drivers/net/ethernet/intel/ice/ice_ethtool.c 			p += ETH_GSTRING_LEN;
p                1305 drivers/net/ethernet/intel/ice/ice_ethtool.c 	char *p;
p                1311 drivers/net/ethernet/intel/ice/ice_ethtool.c 		p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;
p                1313 drivers/net/ethernet/intel/ice/ice_ethtool.c 			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
p                1347 drivers/net/ethernet/intel/ice/ice_ethtool.c 		p = (char *)pf + ice_gstrings_pf_stats[j].stat_offset;
p                1349 drivers/net/ethernet/intel/ice/ice_ethtool.c 			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
p                2293 drivers/net/ethernet/intel/ice/ice_ethtool.c ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
p                2305 drivers/net/ethernet/intel/ice/ice_ethtool.c 		if (!(p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)) {
p                2320 drivers/net/ethernet/intel/ice/ice_ethtool.c 		if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) {
p                2358 drivers/net/ethernet/intel/ice/ice_ethtool.c 	struct ice_port_info *p;
p                2366 drivers/net/ethernet/intel/ice/ice_ethtool.c 	p = np->vsi->port_info;
p                2368 drivers/net/ethernet/intel/ice/ice_ethtool.c 	if (!p)
p                2379 drivers/net/ethernet/intel/ice/ice_ethtool.c 	if (p->phy.media_type != ICE_MEDIA_BASET &&
p                2380 drivers/net/ethernet/intel/ice/ice_ethtool.c 	    p->phy.media_type != ICE_MEDIA_FIBER &&
p                2381 drivers/net/ethernet/intel/ice/ice_ethtool.c 	    p->phy.media_type != ICE_MEDIA_BACKPLANE &&
p                2382 drivers/net/ethernet/intel/ice/ice_ethtool.c 	    p->phy.media_type != ICE_MEDIA_DA &&
p                2383 drivers/net/ethernet/intel/ice/ice_ethtool.c 	    p->phy.link_info.link_info & ICE_AQ_LINK_UP)
p                2436 drivers/net/ethernet/intel/ice/ice_ethtool.c 	status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities,
p                2450 drivers/net/ethernet/intel/ice/ice_ethtool.c 	err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed,
p                2457 drivers/net/ethernet/intel/ice/ice_ethtool.c 	p->phy.get_link_info = true;
p                2458 drivers/net/ethernet/intel/ice/ice_ethtool.c 	status = ice_get_link_status(p, &linkup);
p                2464 drivers/net/ethernet/intel/ice/ice_ethtool.c 	curr_link_speed = p->phy.link_info.link_speed;
p                2489 drivers/net/ethernet/intel/ice/ice_ethtool.c 	p->phy.link_info.req_speeds = adv_link_speed;
p                2506 drivers/net/ethernet/intel/ice/ice_ethtool.c 	if (p->phy.link_info.link_info & ICE_AQ_LINK_UP) {
p                 821 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 	struct ice_ptg_ptype *p;
p                 834 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
p                 836 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 	while (p) {
p                 837 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
p                 838 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 			*ch = p->next_ptype;
p                 842 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		ch = &p->next_ptype;
p                 843 drivers/net/ethernet/intel/ice/ice_flex_pipe.c 		p = p->next_ptype;
p                 329 drivers/net/ethernet/intel/ice/ice_sched.c 		struct ice_sched_node *p;
p                 341 drivers/net/ethernet/intel/ice/ice_sched.c 		p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
p                 342 drivers/net/ethernet/intel/ice/ice_sched.c 		while (p) {
p                 343 drivers/net/ethernet/intel/ice/ice_sched.c 			if (p->sibling == node) {
p                 344 drivers/net/ethernet/intel/ice/ice_sched.c 				p->sibling = node->sibling;
p                 347 drivers/net/ethernet/intel/ice/ice_sched.c 			p = p->sibling;
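Both ice_flex_pipe.c (the ptg_tbl first_ptype/next_ptype chain) and ice_sched.c (the sibling chain) above walk a singly linked list with p until the element before the victim is reached, then splice the victim out by rewriting a single link. A minimal standalone sketch of that unlink-by-walking pattern, with an illustrative node type that also frees the removed element:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

/* unlink (and free) the node with the given id, walking p to its predecessor */
static void unlink_node(struct node **head, int id)
{
	struct node *p = *head;

	if (!p)
		return;
	if (p->id == id) {                    /* victim is the first element */
		*head = p->next;
		free(p);
		return;
	}
	while (p->next) {
		if (p->next->id == id) {
			struct node *victim = p->next;

			p->next = victim->next;   /* splice it out */
			free(victim);
			return;
		}
		p = p->next;
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 3; i >= 1; i--) {        /* build 1 -> 2 -> 3 */
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		n->next = head;
		head = n;
	}
	unlink_node(&head, 2);
	for (struct node *p = head; p; p = p->next)
		printf("%d ", p->id);         /* prints: 1 3 */
	printf("\n");
	return 0;
}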
p                 456 drivers/net/ethernet/intel/igb/igb_ethtool.c 			 struct ethtool_regs *regs, void *p)
p                 460 drivers/net/ethernet/intel/igb/igb_ethtool.c 	u32 *regs_buff = p;
p                 463 drivers/net/ethernet/intel/igb/igb_ethtool.c 	memset(p, 0, IGB_REGS_LEN * sizeof(u32));
p                2299 drivers/net/ethernet/intel/igb/igb_ethtool.c 	char *p;
p                2305 drivers/net/ethernet/intel/igb/igb_ethtool.c 		p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
p                2307 drivers/net/ethernet/intel/igb/igb_ethtool.c 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
p                2310 drivers/net/ethernet/intel/igb/igb_ethtool.c 		p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset;
p                2312 drivers/net/ethernet/intel/igb/igb_ethtool.c 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
p                2350 drivers/net/ethernet/intel/igb/igb_ethtool.c 	u8 *p = data;
p                2360 drivers/net/ethernet/intel/igb/igb_ethtool.c 			memcpy(p, igb_gstrings_stats[i].stat_string,
p                2362 drivers/net/ethernet/intel/igb/igb_ethtool.c 			p += ETH_GSTRING_LEN;
p                2365 drivers/net/ethernet/intel/igb/igb_ethtool.c 			memcpy(p, igb_gstrings_net_stats[i].stat_string,
p                2367 drivers/net/ethernet/intel/igb/igb_ethtool.c 			p += ETH_GSTRING_LEN;
p                2370 drivers/net/ethernet/intel/igb/igb_ethtool.c 			sprintf(p, "tx_queue_%u_packets", i);
p                2371 drivers/net/ethernet/intel/igb/igb_ethtool.c 			p += ETH_GSTRING_LEN;
p                2372 drivers/net/ethernet/intel/igb/igb_ethtool.c 			sprintf(p, "tx_queue_%u_bytes", i);
p                2373 drivers/net/ethernet/intel/igb/igb_ethtool.c 			p += ETH_GSTRING_LEN;
p                2374 drivers/net/ethernet/intel/igb/igb_ethtool.c 			sprintf(p, "tx_queue_%u_restart", i);
p                2375 drivers/net/ethernet/intel/igb/igb_ethtool.c 			p += ETH_GSTRING_LEN;
p                2378 drivers/net/ethernet/intel/igb/igb_ethtool.c 			sprintf(p, "rx_queue_%u_packets", i);
p                2379 drivers/net/ethernet/intel/igb/igb_ethtool.c 			p += ETH_GSTRING_LEN;
p                2380 drivers/net/ethernet/intel/igb/igb_ethtool.c 			sprintf(p, "rx_queue_%u_bytes", i);
p                2381 drivers/net/ethernet/intel/igb/igb_ethtool.c 			p += ETH_GSTRING_LEN;
p                2382 drivers/net/ethernet/intel/igb/igb_ethtool.c 			sprintf(p, "rx_queue_%u_drops", i);
p                2383 drivers/net/ethernet/intel/igb/igb_ethtool.c 			p += ETH_GSTRING_LEN;
p                2384 drivers/net/ethernet/intel/igb/igb_ethtool.c 			sprintf(p, "rx_queue_%u_csum_err", i);
p                2385 drivers/net/ethernet/intel/igb/igb_ethtool.c 			p += ETH_GSTRING_LEN;
p                2386 drivers/net/ethernet/intel/igb/igb_ethtool.c 			sprintf(p, "rx_queue_%u_alloc_failed", i);
p                2387 drivers/net/ethernet/intel/igb/igb_ethtool.c 			p += ETH_GSTRING_LEN;
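The igb get_strings entries above emit stat names into one flat buffer of fixed-width slots, advancing the cursor by ETH_GSTRING_LEN after every entry. A minimal userspace sketch of that fixed-stride layout; the 32-byte slot width mirrors ETH_GSTRING_LEN and the queue count is arbitrary:

    #include <stdio.h>
    #include <string.h>

    #define GSTRING_LEN 32  /* same fixed slot width as ETH_GSTRING_LEN */

    int main(void)
    {
        char buf[4 * 2 * GSTRING_LEN];  /* 4 queues x 2 strings per queue */
        char *p = buf;

        memset(buf, 0, sizeof(buf));
        for (unsigned int i = 0; i < 4; i++) {
            sprintf(p, "tx_queue_%u_packets", i);
            p += GSTRING_LEN;
            sprintf(p, "tx_queue_%u_bytes", i);
            p += GSTRING_LEN;
        }

        /* Consumers index the table by slot, not by scanning for NULs. */
        for (unsigned int slot = 0; slot < 8; slot++)
            printf("%s\n", buf + slot * GSTRING_LEN);
        return 0;
    }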
p                4791 drivers/net/ethernet/intel/igb/igb_main.c static int igb_set_mac(struct net_device *netdev, void *p)
p                4795 drivers/net/ethernet/intel/igb/igb_main.c 	struct sockaddr *addr = p;
p                6707 drivers/net/ethernet/intel/igb/igb_main.c 			  void *p)
p                 126 drivers/net/ethernet/intel/igbvf/ethtool.c 			   struct ethtool_regs *regs, void *p)
p                 130 drivers/net/ethernet/intel/igbvf/ethtool.c 	u32 *regs_buff = p;
p                 132 drivers/net/ethernet/intel/igbvf/ethtool.c 	memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
p                 384 drivers/net/ethernet/intel/igbvf/ethtool.c 		char *p = (char *)adapter +
p                 389 drivers/net/ethernet/intel/igbvf/ethtool.c 			    sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) :
p                 390 drivers/net/ethernet/intel/igbvf/ethtool.c 			    (*(u32 *)p - *(u32 *)b));
p                 409 drivers/net/ethernet/intel/igbvf/ethtool.c 	u8 *p = data;
p                 418 drivers/net/ethernet/intel/igbvf/ethtool.c 			memcpy(p, igbvf_gstrings_stats[i].stat_string,
p                 420 drivers/net/ethernet/intel/igbvf/ethtool.c 			p += ETH_GSTRING_LEN;
p                1792 drivers/net/ethernet/intel/igbvf/netdev.c static int igbvf_set_mac(struct net_device *netdev, void *p)
p                1796 drivers/net/ethernet/intel/igbvf/netdev.c 	struct sockaddr *addr = p;
p                 147 drivers/net/ethernet/intel/igc/igc_ethtool.c 			 struct ethtool_regs *regs, void *p)
p                 151 drivers/net/ethernet/intel/igc/igc_ethtool.c 	u32 *regs_buff = p;
p                 154 drivers/net/ethernet/intel/igc/igc_ethtool.c 	memset(p, 0, IGC_REGS_LEN * sizeof(u32));
p                 661 drivers/net/ethernet/intel/igc/igc_ethtool.c 	u8 *p = data;
p                 671 drivers/net/ethernet/intel/igc/igc_ethtool.c 			memcpy(p, igc_gstrings_stats[i].stat_string,
p                 673 drivers/net/ethernet/intel/igc/igc_ethtool.c 			p += ETH_GSTRING_LEN;
p                 676 drivers/net/ethernet/intel/igc/igc_ethtool.c 			memcpy(p, igc_gstrings_net_stats[i].stat_string,
p                 678 drivers/net/ethernet/intel/igc/igc_ethtool.c 			p += ETH_GSTRING_LEN;
p                 681 drivers/net/ethernet/intel/igc/igc_ethtool.c 			sprintf(p, "tx_queue_%u_packets", i);
p                 682 drivers/net/ethernet/intel/igc/igc_ethtool.c 			p += ETH_GSTRING_LEN;
p                 683 drivers/net/ethernet/intel/igc/igc_ethtool.c 			sprintf(p, "tx_queue_%u_bytes", i);
p                 684 drivers/net/ethernet/intel/igc/igc_ethtool.c 			p += ETH_GSTRING_LEN;
p                 685 drivers/net/ethernet/intel/igc/igc_ethtool.c 			sprintf(p, "tx_queue_%u_restart", i);
p                 686 drivers/net/ethernet/intel/igc/igc_ethtool.c 			p += ETH_GSTRING_LEN;
p                 689 drivers/net/ethernet/intel/igc/igc_ethtool.c 			sprintf(p, "rx_queue_%u_packets", i);
p                 690 drivers/net/ethernet/intel/igc/igc_ethtool.c 			p += ETH_GSTRING_LEN;
p                 691 drivers/net/ethernet/intel/igc/igc_ethtool.c 			sprintf(p, "rx_queue_%u_bytes", i);
p                 692 drivers/net/ethernet/intel/igc/igc_ethtool.c 			p += ETH_GSTRING_LEN;
p                 693 drivers/net/ethernet/intel/igc/igc_ethtool.c 			sprintf(p, "rx_queue_%u_drops", i);
p                 694 drivers/net/ethernet/intel/igc/igc_ethtool.c 			p += ETH_GSTRING_LEN;
p                 695 drivers/net/ethernet/intel/igc/igc_ethtool.c 			sprintf(p, "rx_queue_%u_csum_err", i);
p                 696 drivers/net/ethernet/intel/igc/igc_ethtool.c 			p += ETH_GSTRING_LEN;
p                 697 drivers/net/ethernet/intel/igc/igc_ethtool.c 			sprintf(p, "rx_queue_%u_alloc_failed", i);
p                 698 drivers/net/ethernet/intel/igc/igc_ethtool.c 			p += ETH_GSTRING_LEN;
p                 731 drivers/net/ethernet/intel/igc/igc_ethtool.c 	char *p;
p                 737 drivers/net/ethernet/intel/igc/igc_ethtool.c 		p = (char *)adapter + igc_gstrings_stats[i].stat_offset;
p                 739 drivers/net/ethernet/intel/igc/igc_ethtool.c 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
p                 742 drivers/net/ethernet/intel/igc/igc_ethtool.c 		p = (char *)net_stats + igc_gstrings_net_stats[j].stat_offset;
p                 744 drivers/net/ethernet/intel/igc/igc_ethtool.c 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
p                 780 drivers/net/ethernet/intel/igc/igc_main.c static int igc_set_mac(struct net_device *netdev, void *p)
p                 784 drivers/net/ethernet/intel/igc/igc_main.c 	struct sockaddr *addr = p;
p                 203 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 		   struct ethtool_regs *regs, void *p)
p                 207 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 	u32 *reg = p;
p                 579 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 	char *p = NULL;
p                 585 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 			p = (char *) netdev +
p                 589 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 			p = (char *) adapter +
p                 595 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
p                  65 drivers/net/ethernet/intel/ixgb/ixgb_main.c static int ixgb_set_mac(struct net_device *netdev, void *p);
p                1029 drivers/net/ethernet/intel/ixgb/ixgb_main.c ixgb_set_mac(struct net_device *netdev, void *p)
p                1032 drivers/net/ethernet/intel/ixgb/ixgb_main.c 	struct sockaddr *addr = p;
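The igb/igbvf/igc/ixgb set_mac entries above share one shape: treat the opaque void * as a struct sockaddr, validate the requested MAC, then copy it into the device address. A standalone sketch of that validate-and-copy step; the checks approximate what is_valid_ether_addr() enforces and all names are illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define MAC_LEN 6

    /* Roughly the is_valid_ether_addr() rules: not multicast, not all-zero. */
    static bool mac_is_valid_unicast(const uint8_t *mac)
    {
        static const uint8_t zero[MAC_LEN];

        if (mac[0] & 0x01)                      /* multicast/broadcast bit */
            return false;
        if (memcmp(mac, zero, MAC_LEN) == 0)    /* all zeroes */
            return false;
        return true;
    }

    static int set_mac(uint8_t *dev_addr, const void *p)
    {
        const uint8_t *requested = p;   /* drivers cast the opaque pointer */

        if (!mac_is_valid_unicast(requested))
            return -1;                  /* kernel code returns -EADDRNOTAVAIL */

        memcpy(dev_addr, requested, MAC_LEN);
        return 0;
    }

    int main(void)
    {
        uint8_t dev_addr[MAC_LEN] = { 0 };
        const uint8_t want[MAC_LEN] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };

        if (set_mac(dev_addr, want) == 0)
            printf("MAC set to %02x:%02x:%02x:%02x:%02x:%02x\n",
                   dev_addr[0], dev_addr[1], dev_addr[2],
                   dev_addr[3], dev_addr[4], dev_addr[5]);
        return 0;
    }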
p                 172 drivers/net/ethernet/intel/ixgb/ixgb_param.c 			} *p;
p                 207 drivers/net/ethernet/intel/ixgb/ixgb_param.c 			ent = &opt->arg.l.p[i];
p                 314 drivers/net/ethernet/intel/ixgb/ixgb_param.c 					 .p = fc_list }}
p                 166 drivers/net/ethernet/intel/ixgbe/ixgbe.h #define VMDQ_P(p)   ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
p                  66 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c 	struct tc_bw_alloc *p;
p                  85 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c 		p = &dcb_config->tc_config[i].path[direction];
p                  86 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c 		bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
p                  87 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c 		link_percentage = p->bwg_percent;
p                 107 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c 		p = &dcb_config->tc_config[i].path[direction];
p                 108 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c 		bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
p                 110 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c 		link_percentage = p->bwg_percent;
p                 113 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c 		if (p->bwg_percent > 0 && link_percentage == 0)
p                 117 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c 		p->link_percent = (u8)link_percentage;
p                 127 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c 		p->data_credits_refill = (u16)credit_refill;
p                 156 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c 		p->data_credits_max = (u16)credit_max;
p                 523 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			   struct ethtool_regs *regs, void *p)
p                 527 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	u32 *regs_buff = p;
p                 530 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
p                1201 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	char *p = NULL;
p                1208 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			p = (char *) net_stats +
p                1212 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			p = (char *) adapter +
p                1221 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
p                1269 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	char *p = (char *)data;
p                1281 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
p                1283 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			p += ETH_GSTRING_LEN;
p                1286 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			sprintf(p, "tx_queue_%u_packets", i);
p                1287 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			p += ETH_GSTRING_LEN;
p                1288 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			sprintf(p, "tx_queue_%u_bytes", i);
p                1289 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			p += ETH_GSTRING_LEN;
p                1292 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			sprintf(p, "rx_queue_%u_packets", i);
p                1293 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			p += ETH_GSTRING_LEN;
p                1294 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			sprintf(p, "rx_queue_%u_bytes", i);
p                1295 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			p += ETH_GSTRING_LEN;
p                1298 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			sprintf(p, "tx_pb_%u_pxon", i);
p                1299 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			p += ETH_GSTRING_LEN;
p                1300 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			sprintf(p, "tx_pb_%u_pxoff", i);
p                1301 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			p += ETH_GSTRING_LEN;
p                1304 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			sprintf(p, "rx_pb_%u_pxon", i);
p                1305 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			p += ETH_GSTRING_LEN;
p                1306 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			sprintf(p, "rx_pb_%u_pxoff", i);
p                1307 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			p += ETH_GSTRING_LEN;
p                 140 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			    void *p);
p                 540 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		char *p = buf;
p                 544 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			p += sprintf(p, " %08x", regs[i++]);
p                8791 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static int ixgbe_set_mac(struct net_device *netdev, void *p)
p                8795 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct sockaddr *addr = p;
p                9951 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	unsigned int p, num_pools;
p                9971 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		for (p = 0; p < num_pools; p++) {
p                9975 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 								       p);
p                9995 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		for (p = 0; p < num_pools; p++) {
p                9999 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 								       p);
p                11556 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			    void *p)
p                 143 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			     void *p)
p                 147 drivers/net/ethernet/intel/ixgbevf/ethtool.c 	u32 *regs_buff = p;
p                 151 drivers/net/ethernet/intel/ixgbevf/ethtool.c 	memset(p, 0, regs_len);
p                 430 drivers/net/ethernet/intel/ixgbevf/ethtool.c 	char *p;
p                 437 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			p = (char *)net_stats +
p                 441 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			p = (char *)adapter +
p                 450 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
p                 509 drivers/net/ethernet/intel/ixgbevf/ethtool.c 	char *p = (char *)data;
p                 519 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
p                 521 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			p += ETH_GSTRING_LEN;
p                 525 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			sprintf(p, "tx_queue_%u_packets", i);
p                 526 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			p += ETH_GSTRING_LEN;
p                 527 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			sprintf(p, "tx_queue_%u_bytes", i);
p                 528 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			p += ETH_GSTRING_LEN;
p                 531 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			sprintf(p, "xdp_queue_%u_packets", i);
p                 532 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			p += ETH_GSTRING_LEN;
p                 533 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			sprintf(p, "xdp_queue_%u_bytes", i);
p                 534 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			p += ETH_GSTRING_LEN;
p                 537 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			sprintf(p, "rx_queue_%u_packets", i);
p                 538 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			p += ETH_GSTRING_LEN;
p                 539 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			sprintf(p, "rx_queue_%u_bytes", i);
p                 540 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			p += ETH_GSTRING_LEN;
p                4218 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static int ixgbevf_set_mac(struct net_device *netdev, void *p)
p                4222 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	struct sockaddr *addr = p;
p                 321 drivers/net/ethernet/jme.c jme_set_rx_pcc(struct jme_adapter *jme, int p)
p                 323 drivers/net/ethernet/jme.c 	switch (p) {
p                 350 drivers/net/ethernet/jme.c 		netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p);
p                2275 drivers/net/ethernet/jme.c jme_set_macaddr(struct net_device *netdev, void *p)
p                2278 drivers/net/ethernet/jme.c 	struct sockaddr *addr = p;
p                2373 drivers/net/ethernet/jme.c mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len)
p                2378 drivers/net/ethernet/jme.c 		p[i >> 2] = jread32(jme, reg + i);
p                2382 drivers/net/ethernet/jme.c mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr)
p                2385 drivers/net/ethernet/jme.c 	u16 *p16 = (u16 *)p;
p                2392 drivers/net/ethernet/jme.c jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
p                2395 drivers/net/ethernet/jme.c 	u32 *p32 = (u32 *)p;
p                2397 drivers/net/ethernet/jme.c 	memset(p, 0xFF, JME_REG_LEN);
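The jme register-dump helpers above copy a window of 32-bit registers into the ethtool buffer, turning each byte offset into a word index with i >> 2. A tiny userspace sketch of that offset-to-index copy; the "register file" is just an array standing in for MMIO space:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for an MMIO read at a byte offset. */
    static uint32_t fake_read32(const uint32_t *regs, unsigned int byte_off)
    {
        return regs[byte_off >> 2];
    }

    int main(void)
    {
        uint32_t regs[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
        uint32_t dump[4];

        /* Walk byte offsets in steps of 4; i >> 2 is the destination word. */
        for (unsigned int i = 0; i < sizeof(regs); i += 4)
            dump[i >> 2] = fake_read32(regs, i);

        for (unsigned int w = 0; w < 4; w++)
            printf("reg[%u] = 0x%08x\n", w, dump[w]);
        return 0;
    }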
p                 520 drivers/net/ethernet/lantiq_etop.c ltq_etop_set_mac_address(struct net_device *dev, void *p)
p                 522 drivers/net/ethernet/lantiq_etop.c 	int ret = eth_mac_addr(dev, p);
p                 143 drivers/net/ethernet/marvell/mv643xx_eth.c #define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
p                 144 drivers/net/ethernet/marvell/mv643xx_eth.c #define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
p                 145 drivers/net/ethernet/marvell/mv643xx_eth.c #define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
p                 146 drivers/net/ethernet/marvell/mv643xx_eth.c #define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))
p                1298 drivers/net/ethernet/marvell/mv643xx_eth.c 	struct mib_counters *p = &mp->mib_counters;
p                1301 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->good_octets_received += mib_read(mp, 0x00);
p                1302 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->bad_octets_received += mib_read(mp, 0x08);
p                1303 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
p                1304 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->good_frames_received += mib_read(mp, 0x10);
p                1305 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->bad_frames_received += mib_read(mp, 0x14);
p                1306 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->broadcast_frames_received += mib_read(mp, 0x18);
p                1307 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->multicast_frames_received += mib_read(mp, 0x1c);
p                1308 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->frames_64_octets += mib_read(mp, 0x20);
p                1309 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->frames_65_to_127_octets += mib_read(mp, 0x24);
p                1310 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->frames_128_to_255_octets += mib_read(mp, 0x28);
p                1311 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
p                1312 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
p                1313 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
p                1314 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->good_octets_sent += mib_read(mp, 0x38);
p                1315 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->good_frames_sent += mib_read(mp, 0x40);
p                1316 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->excessive_collision += mib_read(mp, 0x44);
p                1317 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->multicast_frames_sent += mib_read(mp, 0x48);
p                1318 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->broadcast_frames_sent += mib_read(mp, 0x4c);
p                1319 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->unrec_mac_control_received += mib_read(mp, 0x50);
p                1320 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->fc_sent += mib_read(mp, 0x54);
p                1321 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->good_fc_received += mib_read(mp, 0x58);
p                1322 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->bad_fc_received += mib_read(mp, 0x5c);
p                1323 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->undersize_received += mib_read(mp, 0x60);
p                1324 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->fragments_received += mib_read(mp, 0x64);
p                1325 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->oversize_received += mib_read(mp, 0x68);
p                1326 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->jabber_received += mib_read(mp, 0x6c);
p                1327 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->mac_receive_error += mib_read(mp, 0x70);
p                1328 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->bad_crc_event += mib_read(mp, 0x74);
p                1329 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->collision += mib_read(mp, 0x78);
p                1330 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->late_collision += mib_read(mp, 0x7c);
p                1332 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
p                1333 drivers/net/ethernet/marvell/mv643xx_eth.c 	p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
p                1717 drivers/net/ethernet/marvell/mv643xx_eth.c 		void *p;
p                1722 drivers/net/ethernet/marvell/mv643xx_eth.c 			p = ((void *)mp->dev) + stat->netdev_off;
p                1724 drivers/net/ethernet/marvell/mv643xx_eth.c 			p = ((void *)mp) + stat->mp_off;
p                1727 drivers/net/ethernet/marvell/mv643xx_eth.c 				*(uint64_t *)p : *(uint32_t *)p;
p                  78 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define     MVPP22_RSS_TABLE_POINTER(p)		(p)
p                 525 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define     GENCONF_PORT_CTRL1_EN(p)			BIT(p)
p                 526 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define     GENCONF_PORT_CTRL1_RESET(p)			(BIT(p) << 28)
p                 150 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h #define MVPP22_CLS_C2_PORT_FIRST(p)	((p) * MVPP22_CLS_C2_PORT_RANGE)
p                 151 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h #define MVPP22_CLS_C2_RSS_ENTRY(p)	(MVPP22_CLS_C2_PORT_FIRST((p) + 1) - 1)
p                 153 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h #define MVPP22_CLS_C2_PORT_FLOW_FIRST(p)	(MVPP22_CLS_C2_PORT_FIRST(p))
p                 155 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h #define MVPP22_CLS_C2_RFS_LOC(p, loc)	(MVPP22_CLS_C2_PORT_FLOW_FIRST(p) + (loc))
p                 907 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	struct mvpp2_bm_pool *p;
p                 911 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
p                 913 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		if (!p)
p                 921 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
p                 923 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		if (!p)
p                3820 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c static int mvpp2_set_mac_address(struct net_device *dev, void *p)
p                3822 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	const struct sockaddr *addr = p;
p                  64 drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h #define MVPP2_PRS_TCAM_PORT(p)		((p) << 8)
p                  65 drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h #define MVPP2_PRS_TCAM_PORT_EN(p)	MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_PORT(p))
p                 134 drivers/net/ethernet/marvell/skge.c 			  void *p)
p                 140 drivers/net/ethernet/marvell/skge.c 	memset(p, 0, regs->len);
p                 141 drivers/net/ethernet/marvell/skge.c 	memcpy_fromio(p, io, B3_RAM_ADDR);
p                 144 drivers/net/ethernet/marvell/skge.c 		memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
p                 495 drivers/net/ethernet/marvell/skge.c 				struct ethtool_ringparam *p)
p                 499 drivers/net/ethernet/marvell/skge.c 	p->rx_max_pending = MAX_RX_RING_SIZE;
p                 500 drivers/net/ethernet/marvell/skge.c 	p->tx_max_pending = MAX_TX_RING_SIZE;
p                 502 drivers/net/ethernet/marvell/skge.c 	p->rx_pending = skge->rx_ring.count;
p                 503 drivers/net/ethernet/marvell/skge.c 	p->tx_pending = skge->tx_ring.count;
p                 507 drivers/net/ethernet/marvell/skge.c 			       struct ethtool_ringparam *p)
p                 512 drivers/net/ethernet/marvell/skge.c 	if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
p                 513 drivers/net/ethernet/marvell/skge.c 	    p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE)
p                 516 drivers/net/ethernet/marvell/skge.c 	skge->rx_ring.count = p->rx_pending;
p                 517 drivers/net/ethernet/marvell/skge.c 	skge->tx_ring.count = p->tx_pending;
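The skge ringparam entries above validate the requested descriptor counts against fixed bounds before committing them. A minimal sketch of that bounds check; the limits and names (MAX_RX_RING, TX_LOW_WATER, etc.) are placeholders chosen for the example, not the driver's real values:

    #include <stdio.h>

    #define MAX_RX_RING  4096
    #define MAX_TX_RING  4096
    #define TX_LOW_WATER 64

    struct ring_cfg { unsigned int rx_pending, tx_pending; };

    /* Reject out-of-range requests before touching the stored config. */
    static int set_ringparam(struct ring_cfg *cfg, unsigned int rx, unsigned int tx)
    {
        if (rx == 0 || rx > MAX_RX_RING || tx < TX_LOW_WATER || tx > MAX_TX_RING)
            return -1;      /* kernel code returns -EINVAL */
        cfg->rx_pending = rx;
        cfg->tx_pending = tx;
        return 0;
    }

    int main(void)
    {
        struct ring_cfg cfg = { 0 };

        printf("%d\n", set_ringparam(&cfg, 512, 256));  /* 0: accepted */
        printf("%d\n", set_ringparam(&cfg, 0, 256));    /* -1: rejected */
        return 0;
    }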
p                3449 drivers/net/ethernet/marvell/skge.c static int skge_set_mac_address(struct net_device *dev, void *p)
p                3454 drivers/net/ethernet/marvell/skge.c 	const struct sockaddr *addr = p;
p                3808 drivers/net/ethernet/marvell/sky2.c static int sky2_set_mac_address(struct net_device *dev, void *p)
p                3813 drivers/net/ethernet/marvell/sky2.c 	const struct sockaddr *addr = p;
p                4231 drivers/net/ethernet/marvell/sky2.c 			  void *p)
p                4242 drivers/net/ethernet/marvell/sky2.c 			memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
p                4244 drivers/net/ethernet/marvell/sky2.c 			memcpy_fromio(p, io, 128);
p                4246 drivers/net/ethernet/marvell/sky2.c 			memset(p, 0, 128);
p                4248 drivers/net/ethernet/marvell/sky2.c 		p += 128;
p                 634 drivers/net/ethernet/mediatek/mtk_eth_soc.c static int mtk_set_mac_address(struct net_device *dev, void *p)
p                 636 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	int ret = eth_mac_addr(dev, p);
p                2520 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		pinctrl_select_state(eth->dev->pins->p,
p                4048 drivers/net/ethernet/mellanox/mlx4/main.c 	int p, i;
p                4065 drivers/net/ethernet/mellanox/mlx4/main.c 	for (p = 1; p <= dev->caps.num_ports; p++) {
p                4066 drivers/net/ethernet/mellanox/mlx4/main.c 		mlx4_cleanup_port_info(&priv->port[p]);
p                4067 drivers/net/ethernet/mellanox/mlx4/main.c 		mlx4_CLOSE_PORT(dev, p);
p                5030 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	struct rb_node *p;
p                5035 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	for (p = rb_first(root); p; p = rb_next(p)) {
p                5036 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 		fs_rule = rb_entry(p, struct res_fs_rule, com.node);
p                 252 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	__be32 *p = buf;
p                 256 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
p                 257 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
p                 258 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 			 be32_to_cpu(p[3]));
p                 259 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 		p += 4;
p                  42 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c const char *parse_action(struct trace_seq *p,
p                  46 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  51 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c 			trace_seq_printf(p, "%s ", FLOWACT2STR[ids[i]]);
p                  53 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c 			trace_seq_printf(p, "UNKNOWN ");
p                  56 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c 	trace_seq_putc(p, 0);
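The mlx5 en_tc_tracepoint entries above show the usual trace_seq helper shape: save the current buffer position, append the formatted fields, terminate with a NUL, and return the saved position so the tracepoint format string prints only this helper's output. A userspace approximation of that append-and-return-start idiom; the seq structure here is invented for the sketch:

    #include <stdio.h>
    #include <string.h>

    struct seq {
        char buf[256];
        size_t len;
    };

    /* Append the named actions, return where this helper's text starts. */
    static const char *parse_actions(struct seq *s, const char *const *ids,
                                     unsigned int num)
    {
        const char *ret = s->buf + s->len;

        for (unsigned int i = 0; i < num; i++)
            s->len += snprintf(s->buf + s->len, sizeof(s->buf) - s->len,
                               "%s ", ids[i]);
        s->buf[s->len++] = '\0';    /* terminate, like trace_seq_putc(p, 0) */
        return ret;
    }

    int main(void)
    {
        struct seq s = { .len = 0 };
        const char *acts[] = { "CSUM", "REDIRECT" };

        printf("actions: %s\n", parse_actions(&s, acts, 2));
        return 0;
    }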
p                  15 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h #define __parse_action(ids, num) parse_action(p, ids, num)
p                  21 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h const char *parse_action(struct trace_seq *p,
p                  53 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c #define PRINT_MASKED_VAL(name, p, format) {		\
p                  55 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 		trace_seq_printf(p, __stringify(name) "=" format " ", name.v); \
p                  57 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c #define PRINT_MASKED_VALP(name, cast, p, format) {	\
p                  59 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 		trace_seq_printf(p, __stringify(name) "=" format " ",	       \
p                  63 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c static void print_lyr_2_4_hdrs(struct trace_seq *p,
p                  80 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VALP(smac, u8 *, p, "%pM");
p                  81 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VALP(dmac, u8 *, p, "%pM");
p                  82 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL(ethertype, p, "%04x");
p                  93 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 			PRINT_MASKED_VALP(src_ipv4, typeof(&src_ipv4.v), p,
p                  95 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 			PRINT_MASKED_VALP(dst_ipv4, typeof(&dst_ipv4.v), p,
p                 125 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 				trace_seq_printf(p, "src_ipv6=%pI6 ",
p                 128 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 				trace_seq_printf(p, "dst_ipv6=%pI6 ",
p                 133 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c #define PRINT_MASKED_VAL_L2(type, name, fld, p, format) {\
p                 135 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL(name, p, format);		 \
p                 138 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_L2(u8, ip_protocol, ip_protocol, p, "%02x");
p                 139 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_L2(u16, tcp_flags, tcp_flags, p, "%x");
p                 140 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_L2(u16, tcp_sport, tcp_sport, p, "%u");
p                 141 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_L2(u16, tcp_dport, tcp_dport, p, "%u");
p                 142 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_L2(u16, udp_sport, udp_sport, p, "%u");
p                 143 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_L2(u16, udp_dport, udp_dport, p, "%u");
p                 144 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_L2(u16, first_vid, first_vid, p, "%04x");
p                 145 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_L2(u8, first_prio, first_prio, p, "%x");
p                 146 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_L2(u8, first_cfi, first_cfi, p, "%d");
p                 147 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_L2(u8, ip_dscp, ip_dscp, p, "%02x");
p                 148 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_L2(u8, ip_ecn, ip_ecn, p, "%x");
p                 149 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_L2(u8, cvlan_tag, cvlan_tag, p, "%d");
p                 150 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_L2(u8, svlan_tag, svlan_tag, p, "%d");
p                 151 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_L2(u8, frag, frag, p, "%d");
p                 154 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c static void print_misc_parameters_hdrs(struct trace_seq *p,
p                 159 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c #define PRINT_MASKED_VAL_MISC(type, name, fld, p, format) {\
p                 161 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL(name, p, format);		   \
p                 169 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL(gre_key, p, "%llu");
p                 170 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_MISC(u32, source_sqn, source_sqn, p, "%u");
p                 171 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_MISC(u16, source_port, source_port, p, "%u");
p                 173 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 			      p, "%u");
p                 174 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_MISC(u8, outer_second_cfi, outer_second_cfi, p, "%u");
p                 175 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_MISC(u16, outer_second_vid, outer_second_vid, p, "%u");
p                 177 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 			      p, "%u");
p                 178 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_MISC(u8, inner_second_cfi, inner_second_cfi, p, "%u");
p                 179 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_MISC(u16, inner_second_vid, inner_second_vid, p, "%u");
p                 182 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 			      outer_second_cvlan_tag, p, "%u");
p                 184 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 			      inner_second_cvlan_tag, p, "%u");
p                 186 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 			      outer_second_svlan_tag, p, "%u");
p                 188 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 			      inner_second_svlan_tag, p, "%u");
p                 190 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_MISC(u8, gre_protocol, gre_protocol, p, "%u");
p                 192 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	PRINT_MASKED_VAL_MISC(u32, vxlan_vni, vxlan_vni, p, "%u");
p                 194 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 			      p, "%x");
p                 196 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 			      p, "%x");
p                 199 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c const char *parse_fs_hdrs(struct trace_seq *p,
p                 208 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 212 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 		trace_seq_printf(p, "[outer] ");
p                 213 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 		print_lyr_2_4_hdrs(p, mask_outer, value_outer);
p                 218 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 		trace_seq_printf(p, "[misc] ");
p                 219 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 		print_misc_parameters_hdrs(p, mask_misc, value_misc);
p                 223 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 		trace_seq_printf(p, "[inner] ");
p                 224 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 		print_lyr_2_4_hdrs(p, mask_inner, value_inner);
p                 226 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	trace_seq_putc(p, 0);
p                 230 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c const char *parse_fs_dst(struct trace_seq *p,
p                 234 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 238 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 		trace_seq_printf(p, "vport=%u\n", dst->vport.num);
p                 241 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 		trace_seq_printf(p, "ft=%p\n", dst->ft);
p                 244 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 		trace_seq_printf(p, "ft_num=%u\n", dst->ft_num);
p                 247 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 		trace_seq_printf(p, "tir=%u\n", dst->tir_num);
p                 250 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 		trace_seq_printf(p, "counter_id=%u\n", counter_id);
p                 253 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 		trace_seq_printf(p, "port\n");
p                 257 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c 	trace_seq_putc(p, 0);
p                  45 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h 	parse_fs_hdrs(p, match_criteria_enable, mouter, mmisc, minner, vouter,\
p                  48 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h const char *parse_fs_hdrs(struct trace_seq *p,
p                  58 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h 	parse_fs_dst(p, (const struct mlx5_flow_destination *)dst, counter_id)
p                  60 drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h const char *parse_fs_dst(struct trace_seq *p,
p                  65 drivers/net/ethernet/mellanox/mlx5/core/en.h #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
p                1069 drivers/net/ethernet/mellanox/mlx5/core/en.h 		    struct mlx5e_modify_sq_param *p);
p                1251 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		    struct mlx5e_modify_sq_param *p)
p                1265 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
p                1266 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	MLX5_SET(sqc, sqc, state, p->next_state);
p                1267 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
p                1269 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, p->rl_index);
p                 915 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct neigh_parms *p;
p                 943 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		p = ptr;
p                 950 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
p                 952 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		if (!p->dev || p->tbl != &arp_tbl)
p                 959 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 			if (p->dev == nhe->m_neigh.dev) {
p                 969 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 						   NEIGH_VAR(p, DELAY_PROBE_TIME),
p                2523 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 	#define MLX5_GET_CTR(p, x) \
p                2524 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 		MLX5_GET64(query_vport_counter_out, p, x)
p                 543 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c static bool is_full_mask(const void *p, size_t len)
p                 547 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	return !memchr_inv(p, 0xff, len);
p                 126 drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c #define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
p                 346 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	struct rb_node *p;
p                 358 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	p = rb_first(&dev->priv.page_root);
p                 359 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	while (p && i < npages) {
p                 360 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 		fwp = rb_entry(p, struct fw_page, rb_node);
p                 361 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 		p = rb_next(p);
p                 524 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	struct rb_node *p;
p                 529 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 		p = rb_first(&dev->priv.page_root);
p                 530 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 		if (p) {
p                 531 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 			fwp = rb_entry(p, struct fw_page, rb_node);
p                 548 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	} while (p);
p                 179 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	void (*get_stats_strings)(u8 **p);
p                 859 drivers/net/ethernet/mellanox/mlxsw/spectrum.c static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
p                 862 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	struct sockaddr *addr = p;
p                1018 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	struct mlxsw_sp_port_pcpu_stats *p;
p                1025 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
p                1027 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 			start = u64_stats_fetch_begin_irq(&p->syncp);
p                1028 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 			rx_packets	= p->rx_packets;
p                1029 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 			rx_bytes	= p->rx_bytes;
p                1030 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 			tx_packets	= p->tx_packets;
p                1031 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 			tx_bytes	= p->tx_bytes;
p                1032 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
p                1039 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 		tx_dropped	+= p->tx_dropped;
p                2308 drivers/net/ethernet/mellanox/mlxsw/spectrum.c static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
p                2313 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
p                2315 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 		*p += ETH_GSTRING_LEN;
p                2319 drivers/net/ethernet/mellanox/mlxsw/spectrum.c static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
p                2324 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
p                2326 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 		*p += ETH_GSTRING_LEN;
p                2334 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 	u8 *p = data;
p                2340 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
p                2342 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 			p += ETH_GSTRING_LEN;
p                2346 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 			memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
p                2348 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 			p += ETH_GSTRING_LEN;
p                2352 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 			memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
p                2354 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 			p += ETH_GSTRING_LEN;
p                2358 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 			memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
p                2360 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 			p += ETH_GSTRING_LEN;
p                2364 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 			memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
p                2366 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 			p += ETH_GSTRING_LEN;
p                2370 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 			mlxsw_sp_port_get_prio_strings(&p, i);
p                2373 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 			mlxsw_sp_port_get_tc_strings(&p, i);
p                2375 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 		mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p);
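The mlxsw spectrum get_strings entries above hand the string cursor to per-prio and per-tc helpers by reference (u8 **p), so each helper advances the caller's position as it fills slots. A small sketch of that cursor-by-reference style, reusing the "%.29s_%.1d" slot format seen above; the counter names are placeholders:

    #include <stdio.h>
    #include <string.h>

    #define GSTRING_LEN 32

    /* Fill one slot per counter for a given priority and advance the
     * caller's cursor, mirroring the "u8 **p" helpers above. */
    static void get_prio_strings(char **p, int prio)
    {
        static const char *const names[] = { "rx_frames_prio", "tx_frames_prio" };

        for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
            snprintf(*p, GSTRING_LEN, "%.29s_%.1d", names[i], prio);
            *p += GSTRING_LEN;
        }
    }

    int main(void)
    {
        char buf[8 * 2 * GSTRING_LEN] = { 0 };
        char *p = buf;

        for (int prio = 0; prio < 8; prio++)
            get_prio_strings(&p, prio);

        printf("%s\n", buf);                /* first slot */
        printf("%s\n", buf + GSTRING_LEN);  /* second slot */
        return 0;
    }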
p                 842 drivers/net/ethernet/mellanox/mlxsw/spectrum.h 			  struct tc_red_qopt_offload *p);
p                 844 drivers/net/ethernet/mellanox/mlxsw/spectrum.h 			   struct tc_prio_qopt_offload *p);
p                1144 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c void mlxsw_sp1_get_stats_strings(u8 **p)
p                1149 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 		memcpy(*p, mlxsw_sp_ptp_port_stats[i].str,
p                1151 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c 		*p += ETH_GSTRING_LEN;
p                  63 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h void mlxsw_sp1_get_stats_strings(u8 **p);
p                 138 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h static inline void mlxsw_sp1_get_stats_strings(u8 **p)
p                 209 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h static inline void mlxsw_sp2_get_stats_strings(u8 **p)
p                 314 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	struct tc_red_qopt_offload_params *p = params;
p                 316 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	if (p->min > p->max) {
p                 318 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 			"spectrum: RED: min %u is bigger then max %u\n", p->min,
p                 319 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 			p->max);
p                 322 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
p                 324 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 			"spectrum: RED: max value %u is too big\n", p->max);
p                 327 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	if (p->min == 0 || p->max == 0) {
p                 341 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	struct tc_red_qopt_offload_params *p = params;
p                 347 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	prob = p->probability;
p                 351 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
p                 352 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
p                 354 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 						 max, prob, p->is_ecn);
p                 362 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	struct tc_red_qopt_offload_params *p = params;
p                 367 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	p->qstats->backlog -= backlog;
p                 456 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 			  struct tc_red_qopt_offload *p)
p                 460 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
p                 464 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	if (p->command == TC_RED_REPLACE)
p                 465 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
p                 468 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 					      &p->set);
p                 470 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
p                 474 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	switch (p->command) {
p                 479 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 						 p->xstats);
p                 482 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 						&p->stats);
p                 510 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	struct tc_prio_qopt_offload_params *p = params;
p                 512 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	if (p->bands > IEEE_8021QAZ_MAX_TCS)
p                 523 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	struct tc_prio_qopt_offload_params *p = params;
p                 529 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	for (band = 0; band < p->bands; band++) {
p                 535 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 			if (p->priomap[i] == band) {
p                 567 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	struct tc_prio_qopt_offload_params *p = params;
p                 572 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	p->qstats->backlog -= backlog;
p                 657 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 			  struct tc_prio_qopt_offload_graft_params *p)
p                 659 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->band);
p                 665 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	if (p->band < IEEE_8021QAZ_MAX_TCS &&
p                 666 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	    mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
p                 669 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	if (!p->child_handle) {
p                 680 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 						  p->child_handle);
p                 690 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 			   struct tc_prio_qopt_offload *p)
p                 694 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
p                 698 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	if (p->command == TC_PRIO_REPLACE)
p                 699 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
p                 702 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 					      &p->replace_params);
p                 704 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
p                 708 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	switch (p->command) {
p                 713 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 						&p->stats);
p                 716 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 						 &p->graft_params);
p                2558 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 	struct neigh_parms *p;
p                2563 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		p = ptr;
p                2566 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		if (!p->dev || (p->tbl->family != AF_INET &&
p                2567 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 				p->tbl->family != AF_INET6))
p                2573 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
p                2578 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
p                 715 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 	struct mlxsw_sp_span_inspected_port *p;
p                 721 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		list_for_each_entry(p, &curr->bound_ports_list, list)
p                 722 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 			if (p->local_port == port->local_port &&
p                 723 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 			    p->type == MLXSW_SP_SPAN_EGRESS)
p                 765 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 	struct mlxsw_sp_span_inspected_port *p;
p                 767 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 	list_for_each_entry(p, &span_entry->bound_ports_list, list)
p                 768 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		if (type == p->type &&
p                 769 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		    port->local_port == p->local_port &&
p                 770 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 		    bind == p->bound)
p                 771 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 			return p;
p                 353 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 	struct mlxsw_sx_port_pcpu_stats *p;
p                 360 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 		p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
p                 362 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 			start = u64_stats_fetch_begin_irq(&p->syncp);
p                 363 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 			rx_packets	= p->rx_packets;
p                 364 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 			rx_bytes	= p->rx_bytes;
p                 365 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 			tx_packets	= p->tx_packets;
p                 366 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 			tx_bytes	= p->tx_bytes;
p                 367 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
p                 374 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 		tx_dropped	+= p->tx_dropped;
p                 505 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 	u8 *p = data;
p                 511 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 			memcpy(p, mlxsw_sx_port_hw_stats[i].str,
p                 513 drivers/net/ethernet/mellanox/mlxsw/switchx2.c 			p += ETH_GSTRING_LEN;
p                1057 drivers/net/ethernet/micrel/ks8842.c static int ks8842_set_mac(struct net_device *netdev, void *p)
p                1060 drivers/net/ethernet/micrel/ks8842.c 	struct sockaddr *addr = p;
p                2114 drivers/net/ethernet/micrel/ksz884x.c static inline void port_cfg_broad_storm(struct ksz_hw *hw, int p, int set)
p                2116 drivers/net/ethernet/micrel/ksz884x.c 	port_cfg(hw, p,
p                2120 drivers/net/ethernet/micrel/ksz884x.c static inline int port_chk_broad_storm(struct ksz_hw *hw, int p)
p                2122 drivers/net/ethernet/micrel/ksz884x.c 	return port_chk(hw, p,
p                2272 drivers/net/ethernet/micrel/ksz884x.c static inline void port_cfg_back_pressure(struct ksz_hw *hw, int p, int set)
p                2274 drivers/net/ethernet/micrel/ksz884x.c 	port_cfg(hw, p,
p                2278 drivers/net/ethernet/micrel/ksz884x.c static inline void port_cfg_force_flow_ctrl(struct ksz_hw *hw, int p, int set)
p                2280 drivers/net/ethernet/micrel/ksz884x.c 	port_cfg(hw, p,
p                2284 drivers/net/ethernet/micrel/ksz884x.c static inline int port_chk_back_pressure(struct ksz_hw *hw, int p)
p                2286 drivers/net/ethernet/micrel/ksz884x.c 	return port_chk(hw, p,
p                2290 drivers/net/ethernet/micrel/ksz884x.c static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
p                2292 drivers/net/ethernet/micrel/ksz884x.c 	return port_chk(hw, p,
p                2298 drivers/net/ethernet/micrel/ksz884x.c static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
p                2300 drivers/net/ethernet/micrel/ksz884x.c 	port_cfg(hw, p,
p                2304 drivers/net/ethernet/micrel/ksz884x.c static inline void port_cfg_tx(struct ksz_hw *hw, int p, int set)
p                2306 drivers/net/ethernet/micrel/ksz884x.c 	port_cfg(hw, p,
p                2326 drivers/net/ethernet/micrel/ksz884x.c static inline void port_cfg_ins_tag(struct ksz_hw *hw, int p, int insert)
p                2328 drivers/net/ethernet/micrel/ksz884x.c 	port_cfg(hw, p,
p                2332 drivers/net/ethernet/micrel/ksz884x.c static inline void port_cfg_rmv_tag(struct ksz_hw *hw, int p, int remove)
p                2334 drivers/net/ethernet/micrel/ksz884x.c 	port_cfg(hw, p,
p                2338 drivers/net/ethernet/micrel/ksz884x.c static inline int port_chk_ins_tag(struct ksz_hw *hw, int p)
p                2340 drivers/net/ethernet/micrel/ksz884x.c 	return port_chk(hw, p,
p                2344 drivers/net/ethernet/micrel/ksz884x.c static inline int port_chk_rmv_tag(struct ksz_hw *hw, int p)
p                2346 drivers/net/ethernet/micrel/ksz884x.c 	return port_chk(hw, p,
p                2350 drivers/net/ethernet/micrel/ksz884x.c static inline void port_cfg_dis_non_vid(struct ksz_hw *hw, int p, int set)
p                2352 drivers/net/ethernet/micrel/ksz884x.c 	port_cfg(hw, p,
p                2356 drivers/net/ethernet/micrel/ksz884x.c static inline void port_cfg_in_filter(struct ksz_hw *hw, int p, int set)
p                2358 drivers/net/ethernet/micrel/ksz884x.c 	port_cfg(hw, p,
p                2362 drivers/net/ethernet/micrel/ksz884x.c static inline int port_chk_dis_non_vid(struct ksz_hw *hw, int p)
p                2364 drivers/net/ethernet/micrel/ksz884x.c 	return port_chk(hw, p,
p                2368 drivers/net/ethernet/micrel/ksz884x.c static inline int port_chk_in_filter(struct ksz_hw *hw, int p)
p                2370 drivers/net/ethernet/micrel/ksz884x.c 	return port_chk(hw, p,
p                2376 drivers/net/ethernet/micrel/ksz884x.c static inline void port_cfg_mirror_sniffer(struct ksz_hw *hw, int p, int set)
p                2378 drivers/net/ethernet/micrel/ksz884x.c 	port_cfg(hw, p,
p                2382 drivers/net/ethernet/micrel/ksz884x.c static inline void port_cfg_mirror_rx(struct ksz_hw *hw, int p, int set)
p                2384 drivers/net/ethernet/micrel/ksz884x.c 	port_cfg(hw, p,
p                2388 drivers/net/ethernet/micrel/ksz884x.c static inline void port_cfg_mirror_tx(struct ksz_hw *hw, int p, int set)
p                2390 drivers/net/ethernet/micrel/ksz884x.c 	port_cfg(hw, p,
p                2435 drivers/net/ethernet/micrel/ksz884x.c static inline void port_cfg_diffserv(struct ksz_hw *hw, int p, int set)
p                2437 drivers/net/ethernet/micrel/ksz884x.c 	port_cfg(hw, p,
p                2441 drivers/net/ethernet/micrel/ksz884x.c static inline void port_cfg_802_1p(struct ksz_hw *hw, int p, int set)
p                2443 drivers/net/ethernet/micrel/ksz884x.c 	port_cfg(hw, p,
p                2447 drivers/net/ethernet/micrel/ksz884x.c static inline void port_cfg_replace_vid(struct ksz_hw *hw, int p, int set)
p                2449 drivers/net/ethernet/micrel/ksz884x.c 	port_cfg(hw, p,
p                2453 drivers/net/ethernet/micrel/ksz884x.c static inline void port_cfg_prio(struct ksz_hw *hw, int p, int set)
p                2455 drivers/net/ethernet/micrel/ksz884x.c 	port_cfg(hw, p,
p                2459 drivers/net/ethernet/micrel/ksz884x.c static inline int port_chk_diffserv(struct ksz_hw *hw, int p)
p                2461 drivers/net/ethernet/micrel/ksz884x.c 	return port_chk(hw, p,
p                2465 drivers/net/ethernet/micrel/ksz884x.c static inline int port_chk_802_1p(struct ksz_hw *hw, int p)
p                2467 drivers/net/ethernet/micrel/ksz884x.c 	return port_chk(hw, p,
p                2471 drivers/net/ethernet/micrel/ksz884x.c static inline int port_chk_replace_vid(struct ksz_hw *hw, int p)
p                2473 drivers/net/ethernet/micrel/ksz884x.c 	return port_chk(hw, p,
p                2477 drivers/net/ethernet/micrel/ksz884x.c static inline int port_chk_prio(struct ksz_hw *hw, int p)
p                2479 drivers/net/ethernet/micrel/ksz884x.c 	return port_chk(hw, p,
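
The ksz884x wrappers above all funnel into one read-modify-write pair: port_cfg() sets or clears bits in a per-port register and port_chk() tests them; the calls only look truncated because the index records just the line containing `p`. A self-contained sketch of that set/check pattern follows — the struct, accessors and register layout are invented for illustration and are not the driver's actual implementation.

    #include <stdint.h>
    #include <stdbool.h>

    /* Stand-in for a switch's per-port register file. */
    struct hw { uint8_t regs[8][256]; };

    static uint8_t port_r8(struct hw *hw, int p, int off)            { return hw->regs[p][off]; }
    static void    port_w8(struct hw *hw, int p, int off, uint8_t v) { hw->regs[p][off] = v; }

    /* Set or clear `bits` in one per-port register (read-modify-write). */
    static void port_cfg(struct hw *hw, int p, int off, uint8_t bits, bool set)
    {
            uint8_t v = port_r8(hw, p, off);

            if (set)
                    v |= bits;
            else
                    v &= ~bits;
            port_w8(hw, p, off, v);
    }

    /* Non-zero when all of `bits` are currently set for port `p`. */
    static int port_chk(struct hw *hw, int p, int off, uint8_t bits)
    {
            return (port_r8(hw, p, off) & bits) == bits;
    }
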
p                3288 drivers/net/ethernet/micrel/ksz884x.c 	int p;
p                3293 drivers/net/ethernet/micrel/ksz884x.c 	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
p                3294 drivers/net/ethernet/micrel/ksz884x.c 		info = &hw->port_info[p];
p                3295 drivers/net/ethernet/micrel/ksz884x.c 		port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
p                3296 drivers/net/ethernet/micrel/ksz884x.c 		port_r16(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
p                3327 drivers/net/ethernet/micrel/ksz884x.c 				hw_r_phy(hw, p, KS884X_PHY_AUTO_NEG_OFFSET,
p                3329 drivers/net/ethernet/micrel/ksz884x.c 				hw_r_phy(hw, p, KS884X_PHY_REMOTE_CAP_OFFSET,
p                3333 drivers/net/ethernet/micrel/ksz884x.c 					port_cfg_back_pressure(hw, p,
p                3345 drivers/net/ethernet/micrel/ksz884x.c 				hw->port_mib[p].link_down = 1;
p                3349 drivers/net/ethernet/micrel/ksz884x.c 		hw->port_mib[p].state = (u8) info->state;
p                3373 drivers/net/ethernet/micrel/ksz884x.c 	int p;
p                3375 drivers/net/ethernet/micrel/ksz884x.c 	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
p                3376 drivers/net/ethernet/micrel/ksz884x.c 		port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
p                3377 drivers/net/ethernet/micrel/ksz884x.c 		port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
p                3406 drivers/net/ethernet/micrel/ksz884x.c 			port_w16(hw, p, KS884X_PORT_CTRL_4_OFFSET, data);
p                3423 drivers/net/ethernet/micrel/ksz884x.c 	int p;
p                3425 drivers/net/ethernet/micrel/ksz884x.c 	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
p                3426 drivers/net/ethernet/micrel/ksz884x.c 		phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL;
p                3447 drivers/net/ethernet/micrel/ksz884x.c 	int p;
p                3449 drivers/net/ethernet/micrel/ksz884x.c 	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++)
p                3450 drivers/net/ethernet/micrel/ksz884x.c 		port_cfg(hw, p,
p                5087 drivers/net/ethernet/micrel/ksz884x.c 			int p = HW_TO_DEV_PORT(status.rx.src_port);
p                5089 drivers/net/ethernet/micrel/ksz884x.c 			dev = hw->port_info[p].pdev;
p                5132 drivers/net/ethernet/micrel/ksz884x.c 			int p = HW_TO_DEV_PORT(status.rx.src_port);
p                5134 drivers/net/ethernet/micrel/ksz884x.c 			dev = hw->port_info[p].pdev;
p                5494 drivers/net/ethernet/micrel/ksz884x.c 	int p;
p                5528 drivers/net/ethernet/micrel/ksz884x.c 	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
p                5533 drivers/net/ethernet/micrel/ksz884x.c 		hw->port_info[p].partner = 0xFF;
p                5534 drivers/net/ethernet/micrel/ksz884x.c 		hw->port_info[p].state = media_disconnected;
p                5599 drivers/net/ethernet/micrel/ksz884x.c 	int p;
p                5612 drivers/net/ethernet/micrel/ksz884x.c 	for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
p                5613 drivers/net/ethernet/micrel/ksz884x.c 		mib = &hw->port_mib[p];
p                6519 drivers/net/ethernet/micrel/ksz884x.c 	int p;
p                6525 drivers/net/ethernet/micrel/ksz884x.c 	for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
p                6526 drivers/net/ethernet/micrel/ksz884x.c 		if (media_connected == hw->port_mib[p].state) {
p                6527 drivers/net/ethernet/micrel/ksz884x.c 			hw_priv->counter[p].read = 1;
p                6531 drivers/net/ethernet/micrel/ksz884x.c 				n = p;
p                6540 drivers/net/ethernet/micrel/ksz884x.c 		p = n;
p                6542 drivers/net/ethernet/micrel/ksz884x.c 			hw_priv->counter[p].counter,
p                6543 drivers/net/ethernet/micrel/ksz884x.c 			2 == hw_priv->counter[p].read,
p                6546 drivers/net/ethernet/micrel/ksz884x.c 		for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
p                6549 drivers/net/ethernet/micrel/ksz884x.c 					hw_priv->counter[p].counter,
p                6550 drivers/net/ethernet/micrel/ksz884x.c 					2 == hw_priv->counter[p].read,
p                6552 drivers/net/ethernet/micrel/ksz884x.c 			} else if (hw->port_mib[p].cnt_ptr) {
p                6554 drivers/net/ethernet/micrel/ksz884x.c 					hw_priv->counter[p].counter,
p                6555 drivers/net/ethernet/micrel/ksz884x.c 					2 == hw_priv->counter[p].read,
p                 912 drivers/net/ethernet/microchip/encx24j600.c 				struct ethtool_regs *regs, void *p)
p                 915 drivers/net/ethernet/microchip/encx24j600.c 	u16 *buff = p;
p                 176 drivers/net/ethernet/mscc/ocelot.c 	u8 p = port->chip_port;
p                 182 drivers/net/ethernet/mscc/ocelot.c 		val |= BIT(p);
p                 184 drivers/net/ethernet/mscc/ocelot.c 		val &= ~BIT(p);
p                 369 drivers/net/ethernet/mscc/ocelot.c 	u8 p = port->chip_port;
p                 454 drivers/net/ethernet/mscc/ocelot.c 			 ANA_PFC_PFC_CFG, p);
p                 462 drivers/net/ethernet/mscc/ocelot.c 			 SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, p);
p                 468 drivers/net/ethernet/mscc/ocelot.c 			 QSYS_SWITCH_PORT_MODE, p);
p                 476 drivers/net/ethernet/mscc/ocelot.c 			 SYS_MAC_FC_CFG, p);
p                 477 drivers/net/ethernet/mscc/ocelot.c 	ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, p);
p                 482 drivers/net/ethernet/mscc/ocelot.c 			 SYS_ATOP, p);
p                 714 drivers/net/ethernet/mscc/ocelot.c static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
p                 718 drivers/net/ethernet/mscc/ocelot.c 	const struct sockaddr *addr = p;
p                1583 drivers/net/ethernet/mscc/ocelot.c 	unsigned int p;
p                1585 drivers/net/ethernet/mscc/ocelot.c 	for_each_set_bit(p, &bond_mask, ocelot->num_phys_ports) {
p                1586 drivers/net/ethernet/mscc/ocelot.c 		u32 port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p);
p                1593 drivers/net/ethernet/mscc/ocelot.c 				 ANA_PORT_PORT_CFG, p);
p                1601 drivers/net/ethernet/mscc/ocelot.c 	int p = ocelot_port->chip_port;
p                1619 drivers/net/ethernet/mscc/ocelot.c 	if (p == lp) {
p                1620 drivers/net/ethernet/mscc/ocelot.c 		lag = p;
p                1621 drivers/net/ethernet/mscc/ocelot.c 		ocelot->lags[p] = bond_mask;
p                1622 drivers/net/ethernet/mscc/ocelot.c 		bond_mask &= ~BIT(p);
p                1629 drivers/net/ethernet/mscc/ocelot.c 		ocelot->lags[lp] |= BIT(p);
p                1642 drivers/net/ethernet/mscc/ocelot.c 	int p = ocelot_port->chip_port;
p                1653 drivers/net/ethernet/mscc/ocelot.c 	if (ocelot->lags[p]) {
p                1654 drivers/net/ethernet/mscc/ocelot.c 		int n = __ffs(ocelot->lags[p]);
p                1656 drivers/net/ethernet/mscc/ocelot.c 		ocelot->lags[n] = ocelot->lags[p];
p                1657 drivers/net/ethernet/mscc/ocelot.c 		ocelot->lags[p] = 0;
p                1662 drivers/net/ethernet/mscc/ocelot.c 	port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p);
p                1664 drivers/net/ethernet/mscc/ocelot.c 	ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(p),
p                1665 drivers/net/ethernet/mscc/ocelot.c 			 ANA_PORT_PORT_CFG, p);
p                 360 drivers/net/ethernet/myricom/myri10ge/myri10ge.c static inline void put_be32(__be32 val, __be32 __iomem * p)
p                 362 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	__raw_writel((__force __u32) val, (__force void __iomem *)p);
p                4046 drivers/net/ethernet/myricom/myri10ge/myri10ge.c myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
p                5182 drivers/net/ethernet/neterion/s2io.c static int s2io_set_mac_addr(struct net_device *dev, void *p)
p                5184 drivers/net/ethernet/neterion/s2io.c 	struct sockaddr *addr = p;
p                1097 drivers/net/ethernet/neterion/vxge/vxge-config.c 	struct list_head *p, *n;
p                1104 drivers/net/ethernet/neterion/vxge/vxge-config.c 	list_for_each_safe(p, n, &blockpool->free_block_list) {
p                1106 drivers/net/ethernet/neterion/vxge/vxge-config.c 			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
p                1107 drivers/net/ethernet/neterion/vxge/vxge-config.c 			((struct __vxge_hw_blockpool_entry *)p)->length,
p                1111 drivers/net/ethernet/neterion/vxge/vxge-config.c 			((struct __vxge_hw_blockpool_entry *)p)->memblock,
p                1112 drivers/net/ethernet/neterion/vxge/vxge-config.c 			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
p                1114 drivers/net/ethernet/neterion/vxge/vxge-config.c 		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
p                1115 drivers/net/ethernet/neterion/vxge/vxge-config.c 		kfree(p);
p                1119 drivers/net/ethernet/neterion/vxge/vxge-config.c 	list_for_each_safe(p, n, &blockpool->free_entry_list) {
p                1120 drivers/net/ethernet/neterion/vxge/vxge-config.c 		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
p                1121 drivers/net/ethernet/neterion/vxge/vxge-config.c 		kfree((void *)p);
p                2406 drivers/net/ethernet/neterion/vxge/vxge-config.c 	struct list_head *p, *n;
p                2408 drivers/net/ethernet/neterion/vxge/vxge-config.c 	list_for_each_safe(p, n, &blockpool->free_block_list) {
p                2415 drivers/net/ethernet/neterion/vxge/vxge-config.c 			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
p                2416 drivers/net/ethernet/neterion/vxge/vxge-config.c 			((struct __vxge_hw_blockpool_entry *)p)->length,
p                2421 drivers/net/ethernet/neterion/vxge/vxge-config.c 			((struct __vxge_hw_blockpool_entry *)p)->memblock,
p                2422 drivers/net/ethernet/neterion/vxge/vxge-config.c 			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
p                2424 drivers/net/ethernet/neterion/vxge/vxge-config.c 		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
p                2426 drivers/net/ethernet/neterion/vxge/vxge-config.c 		list_add(p, &blockpool->free_entry_list);
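
The vxge-config blockpool teardown iterates with list_for_each_safe(p, n, ...), which keeps a second cursor `n` so the current entry can be unlinked and freed without losing the walk. A minimal stand-alone version of that "safe iteration" idea on a plain singly linked list (not the kernel's list API):

    #include <stdlib.h>

    struct node { struct node *next; int payload; };

    /* Free every element of a singly linked list.  The "safe" part is the
     * same idea as list_for_each_safe(): remember the successor before the
     * current entry is freed. */
    static void free_all(struct node **head)
    {
            struct node *p = *head, *n;

            while (p) {
                    n = p->next;   /* grab the successor before p goes away */
                    free(p);
                    p = n;
            }
            *head = NULL;
    }
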
p                1280 drivers/net/ethernet/neterion/vxge/vxge-main.c static int vxge_set_mac_addr(struct net_device *dev, void *p)
p                1282 drivers/net/ethernet/neterion/vxge/vxge-main.c 	struct sockaddr *addr = p;
p                 415 drivers/net/ethernet/neterion/vxge/vxge-main.h #define VXGE_MODULE_PARAM_INT(p, val) \
p                 416 drivers/net/ethernet/neterion/vxge/vxge-main.h 	static int p = val; \
p                 417 drivers/net/ethernet/neterion/vxge/vxge-main.h 	module_param(p, int, 0)
p                 109 drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c 	void *p;		/* current point in dump buffer */
p                 122 drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c 	void *p = data;
p                 126 drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c 		tl = p;
p                 139 drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c 		p += total_tlv_size;
p                 331 drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c 	struct nfp_dump_tl *tl = dump->p;
p                 343 drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c 	dump->p += total_tlv_sz;
p                 352 drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c 	struct nfp_dump_error *dump_header = dump->p;
p                 371 drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c 	struct nfp_dump_tl *dump_header = dump->p;
p                 393 drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c 	struct nfp_dump_tl *dump_header = dump->p;
p                 414 drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c 	struct nfp_dump_tl *dump_header = dump->p;
p                 452 drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c 	struct nfp_dump_csr *dump_header = dump->p;
p                 467 drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c 	dest = dump->p + header_size;
p                 555 drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c 	struct nfp_dump_csr *dump_header = dump->p;
p                 570 drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c 	dest = dump->p + header_size;
p                 600 drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c 	struct nfp_dump_rtsym *dump_header = dump->p;
p                 623 drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c 	dest = dump->p + header_size;
p                 722 drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c 	struct nfp_dump_prolog *prolog = dump->p;
p                 745 drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c 	dump.p = dest;
p                 979 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c 			     struct ethtool_regs *regs, void *p)
p                 982 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c 	u32 *regs_buf = p;
p                 969 drivers/net/ethernet/ni/nixge.c static int nixge_net_set_mac_address(struct net_device *ndev, void *p)
p                 973 drivers/net/ethernet/ni/nixge.c 	err = eth_mac_addr(ndev, p);
p                1077 drivers/net/ethernet/nxp/lpc_eth.c static int lpc_set_mac_address(struct net_device *ndev, void *p)
p                1079 drivers/net/ethernet/nxp/lpc_eth.c 	struct sockaddr *addr = p;
p                 183 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 				struct ethtool_regs *regs, void *p)
p                 188 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 	u32 *regs_buff = p;
p                 437 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 	u8 *p = data;
p                 443 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 			memcpy(p, pch_gbe_gstrings_stats[i].string,
p                 445 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 			p += ETH_GSTRING_LEN;
p                 467 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 		char *p = hw_stats + gstats->offset;
p                 468 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 		data[i] = gstats->size == sizeof(u64) ? *(u64 *)p:(*(u32 *)p);
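
The ethtool stats paths indexed here (pch_gbe above, and netxen, qlcnic and sxgbe further down) share one idiom: each stat descriptor stores an offset and a size, the driver adds the offset to the base of its private struct, then copies out either a u64 or a u32. A compact sketch of that idiom with a made-up stats struct and descriptor table:

    #include <stdint.h>
    #include <stddef.h>

    struct demo_stats { uint64_t rx_packets; uint32_t rx_errors; };

    struct stat_desc { size_t offset; size_t size; };

    static const struct stat_desc descs[] = {
            { offsetof(struct demo_stats, rx_packets), sizeof(uint64_t) },
            { offsetof(struct demo_stats, rx_errors),  sizeof(uint32_t) },
    };

    /* `data` must have room for one entry per descriptor. */
    static void fill_stats(const struct demo_stats *s, uint64_t *data)
    {
            for (size_t i = 0; i < sizeof(descs) / sizeof(descs[0]); i++) {
                    const char *p = (const char *)s + descs[i].offset;

                    data[i] = descs[i].size == sizeof(uint64_t) ? *(const uint64_t *)p
                                                                : *(const uint32_t *)p;
            }
    }
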
p                 146 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c 			const struct pch_gbe_opt_list { int i; char *str; } *p;
p                 247 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c 			ent = &opt->arg.l.p[i];
p                 283 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c 					 .p = speed_list } }
p                 295 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c 					 .p = dplx_list } }
p                 308 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c 					 .p = an_list} }
p                 501 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c 					 .p = fc_list } }
p                1525 drivers/net/ethernet/packetengines/hamachi.c 						u32 *p = (u32 *) &buf_addr[data_size - 20];
p                1530 drivers/net/ethernet/packetengines/hamachi.c 							--p;
p                1532 drivers/net/ethernet/packetengines/hamachi.c 						p_r = *p;
p                1533 drivers/net/ethernet/packetengines/hamachi.c 						p_r1 = *(p-1);
p                 215 drivers/net/ethernet/pasemi/pasemi_mac.c static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p)
p                 218 drivers/net/ethernet/pasemi/pasemi_mac.c 	struct sockaddr *addr = p;
p                 102 drivers/net/ethernet/pensando/ionic/ionic_ethtool.c 			   void *p)
p                 110 drivers/net/ethernet/pensando/ionic/ionic_ethtool.c 	memcpy_fromio(p, lif->ionic->idev.dev_info_regs->words, size);
p                 113 drivers/net/ethernet/pensando/ionic/ionic_ethtool.c 	memcpy_fromio(p, lif->ionic->idev.dev_cmd_regs->words, size);
p                 284 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
p                 289 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 	u32 *regs_buff = p;
p                 293 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 	memset(p, 0, NETXEN_NIC_REGS_LEN);
p                 672 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 		char *p =
p                 677 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 		     sizeof(u64)) ? *(u64 *) p : *(u32 *) p;
p                  76 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c static int netxen_nic_set_mac(struct net_device *netdev, void *p);
p                 462 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	unsigned char *p;
p                 475 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	p = (unsigned char *)&mac_addr;
p                 477 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		netdev->dev_addr[i] = *(p + 5 - i);
p                 489 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c static int netxen_nic_set_mac(struct net_device *netdev, void *p)
p                 492 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	struct sockaddr *addr = p;
p                 306 drivers/net/ethernet/qlogic/qed/qed_iscsi.c 	u16 *p;
p                 361 drivers/net/ethernet/qlogic/qed/qed_iscsi.c 		p = (u16 *)p_conn->local_mac;
p                 362 drivers/net/ethernet/qlogic/qed/qed_iscsi.c 		p_tcp->local_mac_addr_hi = swab16(get_unaligned(p));
p                 363 drivers/net/ethernet/qlogic/qed/qed_iscsi.c 		p_tcp->local_mac_addr_mid = swab16(get_unaligned(p + 1));
p                 364 drivers/net/ethernet/qlogic/qed/qed_iscsi.c 		p_tcp->local_mac_addr_lo = swab16(get_unaligned(p + 2));
p                 366 drivers/net/ethernet/qlogic/qed/qed_iscsi.c 		p = (u16 *)p_conn->remote_mac;
p                 367 drivers/net/ethernet/qlogic/qed/qed_iscsi.c 		p_tcp->remote_mac_addr_hi = swab16(get_unaligned(p));
p                 368 drivers/net/ethernet/qlogic/qed/qed_iscsi.c 		p_tcp->remote_mac_addr_mid = swab16(get_unaligned(p + 1));
p                 369 drivers/net/ethernet/qlogic/qed/qed_iscsi.c 		p_tcp->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
p                 427 drivers/net/ethernet/qlogic/qed/qed_iscsi.c 		p = (u16 *)p_conn->local_mac;
p                 428 drivers/net/ethernet/qlogic/qed/qed_iscsi.c 		p_tcp2->local_mac_addr_hi = swab16(get_unaligned(p));
p                 429 drivers/net/ethernet/qlogic/qed/qed_iscsi.c 		p_tcp2->local_mac_addr_mid = swab16(get_unaligned(p + 1));
p                 430 drivers/net/ethernet/qlogic/qed/qed_iscsi.c 		p_tcp2->local_mac_addr_lo = swab16(get_unaligned(p + 2));
p                 432 drivers/net/ethernet/qlogic/qed/qed_iscsi.c 		p = (u16 *)p_conn->remote_mac;
p                 433 drivers/net/ethernet/qlogic/qed/qed_iscsi.c 		p_tcp2->remote_mac_addr_hi = swab16(get_unaligned(p));
p                 434 drivers/net/ethernet/qlogic/qed/qed_iscsi.c 		p_tcp2->remote_mac_addr_mid = swab16(get_unaligned(p + 1));
p                 435 drivers/net/ethernet/qlogic/qed/qed_iscsi.c 		p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
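
The qed_iscsi lines split a 6-byte MAC address into hi/mid/lo 16-bit firmware fields, doing an unaligned 16-bit load and a byte swap for each pair (the swab16(get_unaligned(p)) pattern). A user-space sketch of that packing, with invented names and no claim to match the firmware structures:

    #include <stdint.h>
    #include <string.h>

    struct mac_words { uint16_t hi, mid, lo; };

    /* Host-order, unaligned-safe 16-bit load followed by a byte swap,
     * mirroring swab16(get_unaligned(p)) in the indexed lines. */
    static uint16_t load_swab16(const uint8_t *p)
    {
            uint16_t v;

            memcpy(&v, p, sizeof(v));
            return (uint16_t)((v << 8) | (v >> 8));
    }

    static void mac_to_words(const uint8_t mac[6], struct mac_words *w)
    {
            w->hi  = load_swab16(mac);      /* bytes 0-1 */
            w->mid = load_swab16(mac + 2);  /* bytes 2-3 */
            w->lo  = load_swab16(mac + 4);  /* bytes 4-5 */
    }
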
p                1634 drivers/net/ethernet/qlogic/qed/qed_main.c 	void *p;
p                1644 drivers/net/ethernet/qlogic/qed/qed_main.c 	p = qed_mcp_get_link_params(hwfn);
p                1645 drivers/net/ethernet/qlogic/qed/qed_main.c 	if (!p)
p                1647 drivers/net/ethernet/qlogic/qed/qed_main.c 	memcpy(params, p, sizeof(*params));
p                1649 drivers/net/ethernet/qlogic/qed/qed_main.c 	p = qed_mcp_get_link_state(hwfn);
p                1650 drivers/net/ethernet/qlogic/qed/qed_main.c 	if (!p)
p                1652 drivers/net/ethernet/qlogic/qed/qed_main.c 	memcpy(link, p, sizeof(*link));
p                1654 drivers/net/ethernet/qlogic/qed/qed_main.c 	p = qed_mcp_get_link_capabilities(hwfn);
p                1655 drivers/net/ethernet/qlogic/qed/qed_main.c 	if (!p)
p                1657 drivers/net/ethernet/qlogic/qed/qed_main.c 	memcpy(link_caps, p, sizeof(*link_caps));
p                  15 drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c #define TLV_TYPE(p)     (p[0])
p                  16 drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c #define TLV_LENGTH(p)   (p[1])
p                  17 drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c #define TLV_FLAGS(p)    (p[3])
p                 534 drivers/net/ethernet/qlogic/qede/qede.h int qede_set_mac_addr(struct net_device *ndev, void *p);
p                1147 drivers/net/ethernet/qlogic/qede/qede_filter.c int qede_set_mac_addr(struct net_device *ndev, void *p)
p                1150 drivers/net/ethernet/qlogic/qede/qede_filter.c 	struct sockaddr *addr = p;
p                 513 drivers/net/ethernet/qlogic/qla3xxx.c 	__le16 *p = (__le16 *)ndev->dev_addr;
p                 514 drivers/net/ethernet/qlogic/qla3xxx.c 	p[0] = cpu_to_le16(addr[0]);
p                 515 drivers/net/ethernet/qlogic/qla3xxx.c 	p[1] = cpu_to_le16(addr[1]);
p                 516 drivers/net/ethernet/qlogic/qla3xxx.c 	p[2] = cpu_to_le16(addr[2]);
p                3570 drivers/net/ethernet/qlogic/qla3xxx.c static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
p                3575 drivers/net/ethernet/qlogic/qla3xxx.c 	struct sockaddr *addr = p;
p                 574 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 	char *p;
p                 593 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 	p = memcpy(buf, &mbx_out, sizeof(u32));
p                 595 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 	p += sizeof(u32);
p                 615 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 		memcpy(p, &each->hdr_prio_pfc_map[0], size);
p                 616 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 		p += size;
p                 530 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
p                 537 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	u32 *regs_buff = p;
p                 540 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	memset(p, 0, qlcnic_get_regs_len(dev));
p                1350 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	char *p;
p                1366 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset;
p                1368 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		*data++ = (size == sizeof(u64)) ? (*(u64 *)p) : ((*(u32 *)p));
p                 337 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c static int qlcnic_set_mac(struct net_device *netdev, void *p)
p                 340 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	struct sockaddr *addr = p;
p                 162 drivers/net/ethernet/qualcomm/qca_debug.c qcaspi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *p)
p                 166 drivers/net/ethernet/qualcomm/qca_debug.c 	strlcpy(p->driver, QCASPI_DRV_NAME, sizeof(p->driver));
p                 167 drivers/net/ethernet/qualcomm/qca_debug.c 	strlcpy(p->version, QCASPI_DRV_VERSION, sizeof(p->version));
p                 168 drivers/net/ethernet/qualcomm/qca_debug.c 	strlcpy(p->fw_version, "QCA7000", sizeof(p->fw_version));
p                 169 drivers/net/ethernet/qualcomm/qca_debug.c 	strlcpy(p->bus_info, dev_name(&qca->spi_dev->dev),
p                 170 drivers/net/ethernet/qualcomm/qca_debug.c 		sizeof(p->bus_info));
p                 229 drivers/net/ethernet/qualcomm/qca_debug.c qcaspi_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
p                 232 drivers/net/ethernet/qualcomm/qca_debug.c 	u32 *regs_buff = p;
p                1486 drivers/net/ethernet/realtek/8139cp.c 		        void *p)
p                1497 drivers/net/ethernet/realtek/8139cp.c 	memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
p                1619 drivers/net/ethernet/realtek/8139cp.c static int cp_set_mac_address(struct net_device *dev, void *p)
p                1622 drivers/net/ethernet/realtek/8139cp.c 	struct sockaddr *addr = p;
p                 652 drivers/net/ethernet/realtek/8139too.c static int rtl8139_set_mac_address(struct net_device *dev, void *p);
p                2232 drivers/net/ethernet/realtek/8139too.c static int rtl8139_set_mac_address(struct net_device *dev, void *p)
p                2236 drivers/net/ethernet/realtek/8139too.c 	struct sockaddr *addr = p;
p                 803 drivers/net/ethernet/realtek/atp.c static void read_block(long ioaddr, int length, unsigned char *p, int data_mode)
p                 810 drivers/net/ethernet/realtek/atp.c 			do { *p++ = read_byte_mode0(ioaddr); } while (--length > 0);
p                 812 drivers/net/ethernet/realtek/atp.c 			do { *p++ = read_byte_mode2(ioaddr); } while (--length > 0);
p                 815 drivers/net/ethernet/realtek/atp.c 		do { *p++ = read_byte_mode4(ioaddr); } while (--length > 0);
p                 817 drivers/net/ethernet/realtek/atp.c 		do { *p++ = read_byte_mode6(ioaddr); } while (--length > 0);
p                1088 drivers/net/ethernet/realtek/r8169_main.c static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
p                1093 drivers/net/ethernet/realtek/r8169_main.c 	rtl_writephy(tp, reg_addr, (val & ~m) | p);
p                1153 drivers/net/ethernet/realtek/r8169_main.c static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
p                1159 drivers/net/ethernet/realtek/r8169_main.c 	rtl_eri_write(tp, addr, mask, (val & ~m) | p);
p                1163 drivers/net/ethernet/realtek/r8169_main.c 			     u32 p)
p                1165 drivers/net/ethernet/realtek/r8169_main.c 	rtl_w0w1_eri(tp, addr, mask, p, 0);
p                1639 drivers/net/ethernet/realtek/r8169_main.c 			     void *p)
p                1643 drivers/net/ethernet/realtek/r8169_main.c 	u32 *dw = p;
p                1909 drivers/net/ethernet/realtek/r8169_main.c 	}, *p = coal_settings;
p                1926 drivers/net/ethernet/realtek/r8169_main.c 	for (w = RTL_R16(tp, IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) {
p                1927 drivers/net/ethernet/realtek/r8169_main.c 		*p->max_frames = (w & RTL_COALESCE_MASK) << 2;
p                1929 drivers/net/ethernet/realtek/r8169_main.c 		*p->usecs = w & RTL_COALESCE_MASK;
p                1933 drivers/net/ethernet/realtek/r8169_main.c 		p = coal_settings + i;
p                1934 drivers/net/ethernet/realtek/r8169_main.c 		*p->usecs = (*p->usecs * scale->nsecs[i]) / 1000;
p                1940 drivers/net/ethernet/realtek/r8169_main.c 		if (!*p->usecs && !*p->max_frames)
p                1941 drivers/net/ethernet/realtek/r8169_main.c 			*p->max_frames = 1;
p                1980 drivers/net/ethernet/realtek/r8169_main.c 	}, *p = coal_settings;
p                1988 drivers/net/ethernet/realtek/r8169_main.c 			max(p[0].usecs, p[1].usecs) * 1000, &cp01);
p                1992 drivers/net/ethernet/realtek/r8169_main.c 	for (i = 0; i < 2; i++, p++) {
p                2007 drivers/net/ethernet/realtek/r8169_main.c 		if (p->frames == 1) {
p                2008 drivers/net/ethernet/realtek/r8169_main.c 			p->frames = 0;
p                2011 drivers/net/ethernet/realtek/r8169_main.c 		units = p->usecs * 1000 / scale->nsecs[i];
p                2012 drivers/net/ethernet/realtek/r8169_main.c 		if (p->frames > RTL_COALESCE_FRAME_MAX || p->frames % 4)
p                2018 drivers/net/ethernet/realtek/r8169_main.c 		w |= p->frames >> 2;
p                2226 drivers/net/ethernet/realtek/r8169_main.c 	const struct rtl_mac_info *p = mac_info;
p                2229 drivers/net/ethernet/realtek/r8169_main.c 	while ((reg & p->mask) != p->val)
p                2230 drivers/net/ethernet/realtek/r8169_main.c 		p++;
p                2231 drivers/net/ethernet/realtek/r8169_main.c 	tp->mac_version = p->mac_version;
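
The r8169 chip-detection lines scan a mask/val table until an entry matches the chip's ID bits; the real table is expected to end in a catch-all entry so the walk always terminates. The lookup pattern in isolation, with illustrative field names:

    #include <stdint.h>

    struct mac_info {
            uint32_t mask;
            uint32_t val;
            int      version;
    };

    /* Return the version of the first entry whose masked bits match `reg`.
     * The table must end with a { 0, 0, <default> } catch-all so the scan
     * always stops. */
    static int lookup_mac_version(const struct mac_info *p, uint32_t reg)
    {
            while ((reg & p->mask) != p->val)
                    p++;
            return p->version;
    }
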
p                3896 drivers/net/ethernet/realtek/r8169_main.c static int rtl_set_mac_address(struct net_device *dev, void *p)
p                3902 drivers/net/ethernet/realtek/r8169_main.c 	ret = eth_mac_addr(dev, p);
p                 460 drivers/net/ethernet/renesas/sh_eth.c 	u32 *p = (u32 *)src;
p                 461 drivers/net/ethernet/renesas/sh_eth.c 	u32 *maxp = p + DIV_ROUND_UP(len, sizeof(u32));
p                 463 drivers/net/ethernet/renesas/sh_eth.c 	for (; p < maxp; p++)
p                 464 drivers/net/ethernet/renesas/sh_eth.c 		*p = swab32(*p);
p                1961 drivers/net/ethernet/rocker/rocker_main.c static int rocker_port_set_mac_address(struct net_device *dev, void *p)
p                1963 drivers/net/ethernet/rocker/rocker_main.c 	struct sockaddr *addr = p;
p                2291 drivers/net/ethernet/rocker/rocker_main.c 	u8 *p = data;
p                2297 drivers/net/ethernet/rocker/rocker_main.c 			memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
p                2298 drivers/net/ethernet/rocker/rocker_main.c 			p += ETH_GSTRING_LEN;
p                1446 drivers/net/ethernet/rocker/rocker_ofdpa.c 	struct ofdpa_port *p;
p                1465 drivers/net/ethernet/rocker/rocker_ofdpa.c 		p = ofdpa_port_get(ofdpa, i);
p                1466 drivers/net/ethernet/rocker/rocker_ofdpa.c 		if (!p)
p                1468 drivers/net/ethernet/rocker/rocker_ofdpa.c 		if (!ofdpa_port_is_bridged(p))
p                1470 drivers/net/ethernet/rocker/rocker_ofdpa.c 		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
p                1472 drivers/net/ethernet/rocker/rocker_ofdpa.c 				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
p                1495 drivers/net/ethernet/rocker/rocker_ofdpa.c 	struct ofdpa_port *p;
p                1524 drivers/net/ethernet/rocker/rocker_ofdpa.c 		p = ofdpa_port_get(ofdpa, i);
p                1525 drivers/net/ethernet/rocker/rocker_ofdpa.c 		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
p                  23 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_init_tx_desc(struct sxgbe_tx_norm_desc *p)
p                  25 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->tdes23.tx_rd_des23.own_bit = 0;
p                  28 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_tx_desc_enable_tse(struct sxgbe_tx_norm_desc *p, u8 is_tse,
p                  32 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->tdes23.tx_rd_des23.tse_bit = is_tse;
p                  33 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->tdes23.tx_rd_des23.buf1_size = total_hdr_len;
p                  34 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->tdes23.tx_rd_des23.tcp_hdr_len = tcp_hdr_len / 4;
p                  35 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->tdes23.tx_rd_des23.tx_pkt_len.tcp_payload_len  = tcp_payload_len;
p                  39 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_prepare_tx_desc(struct sxgbe_tx_norm_desc *p, u8 is_fd,
p                  42 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->tdes23.tx_rd_des23.first_desc = is_fd;
p                  43 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->tdes23.tx_rd_des23.buf1_size = buf1_len;
p                  45 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->tdes23.tx_rd_des23.tx_pkt_len.pkt_len.total_pkt_len = pkt_len;
p                  48 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 		p->tdes23.tx_rd_des23.cksum_ctl = cic_full;
p                  52 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_tx_vlanctl_desc(struct sxgbe_tx_norm_desc *p, int vlan_ctl)
p                  54 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->tdes23.tx_rd_des23.vlan_tag_ctl = vlan_ctl;
p                  58 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_set_tx_owner(struct sxgbe_tx_norm_desc *p)
p                  60 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->tdes23.tx_rd_des23.own_bit = 1;
p                  64 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static int sxgbe_get_tx_owner(struct sxgbe_tx_norm_desc *p)
p                  66 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	return p->tdes23.tx_rd_des23.own_bit;
p                  70 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_close_tx_desc(struct sxgbe_tx_norm_desc *p)
p                  72 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->tdes23.tx_rd_des23.last_desc = 1;
p                  73 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->tdes23.tx_rd_des23.int_on_com = 1;
p                  77 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_release_tx_desc(struct sxgbe_tx_norm_desc *p)
p                  79 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	memset(p, 0, sizeof(*p));
p                  85 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_clear_tx_ic(struct sxgbe_tx_norm_desc *p)
p                  87 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->tdes23.tx_rd_des23.int_on_com = 0;
p                  91 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static int sxgbe_get_tx_ls(struct sxgbe_tx_norm_desc *p)
p                  93 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	return p->tdes23.tx_rd_des23.last_desc;
p                  97 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static int sxgbe_get_tx_len(struct sxgbe_tx_norm_desc *p)
p                  99 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	return p->tdes23.tx_rd_des23.buf1_size;
p                 103 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_tx_enable_tstamp(struct sxgbe_tx_norm_desc *p)
p                 105 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->tdes23.tx_rd_des23.timestmp_enable = 1;
p                 109 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static int sxgbe_get_tx_timestamp_status(struct sxgbe_tx_norm_desc *p)
p                 111 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	return p->tdes23.tx_rd_des23.timestmp_enable;
p                 115 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_tx_ctxt_desc_set_ctxt(struct sxgbe_tx_ctxt_desc *p)
p                 117 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->ctxt_bit = 1;
p                 121 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_tx_ctxt_desc_set_owner(struct sxgbe_tx_ctxt_desc *p)
p                 123 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->own_bit = 1;
p                 127 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static int sxgbe_tx_ctxt_desc_get_owner(struct sxgbe_tx_ctxt_desc *p)
p                 129 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	return p->own_bit;
p                 133 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_tx_ctxt_desc_set_mss(struct sxgbe_tx_ctxt_desc *p, u16 mss)
p                 135 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->maxseg_size = mss;
p                 139 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static int sxgbe_tx_ctxt_desc_get_mss(struct sxgbe_tx_ctxt_desc *p)
p                 141 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	return p->maxseg_size;
p                 145 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_tx_ctxt_desc_set_tcmssv(struct sxgbe_tx_ctxt_desc *p)
p                 147 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->tcmssv = 1;
p                 151 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_tx_ctxt_desc_reset_ostc(struct sxgbe_tx_ctxt_desc *p)
p                 153 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->ostc = 0;
p                 157 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_tx_ctxt_desc_set_ivlantag(struct sxgbe_tx_ctxt_desc *p,
p                 162 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 		p->ivlan_tag_valid = is_ivlanvalid;
p                 163 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 		p->ivlan_tag = ivlan_tag;
p                 164 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 		p->ivlan_tag_ctl = ivlan_ctl;
p                 169 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static int sxgbe_tx_ctxt_desc_get_ivlantag(struct sxgbe_tx_ctxt_desc *p)
p                 171 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	return p->ivlan_tag;
p                 175 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_tx_ctxt_desc_set_vlantag(struct sxgbe_tx_ctxt_desc *p,
p                 179 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 		p->vltag_valid = is_vlanvalid;
p                 180 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 		p->vlan_tag = vlan_tag;
p                 185 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static int sxgbe_tx_ctxt_desc_get_vlantag(struct sxgbe_tx_ctxt_desc *p)
p                 187 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	return p->vlan_tag;
p                 191 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_tx_ctxt_desc_set_tstamp(struct sxgbe_tx_ctxt_desc *p,
p                 195 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 		p->ostc = ostc_enable;
p                 196 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 		p->tstamp_lo = (u32) tstamp;
p                 197 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 		p->tstamp_hi = (u32) (tstamp>>32);
p                 201 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_tx_ctxt_desc_close(struct sxgbe_tx_ctxt_desc *p)
p                 203 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->own_bit = 1;
p                 207 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static int sxgbe_tx_ctxt_desc_get_cde(struct sxgbe_tx_ctxt_desc *p)
p                 209 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	return p->ctxt_desc_err;
p                 213 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_init_rx_desc(struct sxgbe_rx_norm_desc *p, int disable_rx_ic,
p                 216 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->rdes23.rx_rd_des23.own_bit = 1;
p                 218 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 		p->rdes23.rx_rd_des23.int_on_com = disable_rx_ic;
p                 222 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static int sxgbe_get_rx_owner(struct sxgbe_rx_norm_desc *p)
p                 224 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	return p->rdes23.rx_rd_des23.own_bit;
p                 228 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_set_rx_owner(struct sxgbe_rx_norm_desc *p)
p                 230 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->rdes23.rx_rd_des23.own_bit = 1;
p                 234 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_set_rx_int_on_com(struct sxgbe_rx_norm_desc *p)
p                 236 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->rdes23.rx_rd_des23.int_on_com = 1;
p                 240 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p)
p                 242 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	return p->rdes23.rx_wb_des23.pkt_len;
p                 246 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static int sxgbe_get_rx_fd_status(struct sxgbe_rx_norm_desc *p)
p                 248 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	return p->rdes23.rx_wb_des23.first_desc;
p                 252 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static int sxgbe_get_rx_ld_status(struct sxgbe_rx_norm_desc *p)
p                 254 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	return p->rdes23.rx_wb_des23.last_desc;
p                 259 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static int sxgbe_rx_wbstatus(struct sxgbe_rx_norm_desc *p,
p                 265 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	if (p->rdes23.rx_wb_des23.err_summary) {
p                 266 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 		switch (p->rdes23.rx_wb_des23.err_l2_type) {
p                 300 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 		switch (p->rdes23.rx_wb_des23.err_l2_type) {
p                 347 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	switch (p->rdes23.rx_wb_des23.layer34_pkt_type) {
p                 381 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	if (p->rdes23.rx_wb_des23.vlan_filter_match)
p                 384 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	if (p->rdes23.rx_wb_des23.sa_filter_fail) {
p                 388 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	if (p->rdes23.rx_wb_des23.da_filter_fail) {
p                 392 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	if (p->rdes23.rx_wb_des23.hash_filter_pass)
p                 395 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	if (p->rdes23.rx_wb_des23.l3_filter_match)
p                 398 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	if (p->rdes23.rx_wb_des23.l4_filter_match)
p                 405 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static int sxgbe_get_rx_ctxt_owner(struct sxgbe_rx_ctxt_desc *p)
p                 407 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	return p->own_bit;
p                 411 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_set_ctxt_rx_owner(struct sxgbe_rx_ctxt_desc *p)
p                 413 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	p->own_bit = 1;
p                 418 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static void sxgbe_rx_ctxt_wbstatus(struct sxgbe_rx_ctxt_desc *p,
p                 421 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	if (p->tstamp_dropped)
p                 425 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	if (p->ptp_msgtype == RX_NO_PTP)
p                 427 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	else if (p->ptp_msgtype == RX_PTP_SYNC)
p                 429 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	else if (p->ptp_msgtype == RX_PTP_FOLLOW_UP)
p                 431 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	else if (p->ptp_msgtype == RX_PTP_DELAY_REQ)
p                 433 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	else if (p->ptp_msgtype == RX_PTP_DELAY_RESP)
p                 435 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	else if (p->ptp_msgtype == RX_PTP_PDELAY_REQ)
p                 437 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	else if (p->ptp_msgtype == RX_PTP_PDELAY_RESP)
p                 439 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	else if (p->ptp_msgtype == RX_PTP_PDELAY_FOLLOW_UP)
p                 441 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	else if (p->ptp_msgtype == RX_PTP_ANNOUNCE)
p                 443 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	else if (p->ptp_msgtype == RX_PTP_MGMT)
p                 445 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	else if (p->ptp_msgtype == RX_PTP_SIGNAL)
p                 447 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	else if (p->ptp_msgtype == RX_PTP_RESV_MSG)
p                 452 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static int sxgbe_get_rx_ctxt_tstamp_status(struct sxgbe_rx_ctxt_desc *p)
p                 454 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	if ((p->tstamp_hi == 0xffffffff) && (p->tstamp_lo == 0xffffffff)) {
p                 459 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	return p->tstamp_available;
p                 463 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c static u64 sxgbe_get_rx_timestamp(struct sxgbe_rx_ctxt_desc *p)
p                 467 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	ns = p->tstamp_lo;
p                 468 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c 	ns |= ((u64)p->tstamp_hi) << 32;
p                 159 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*init_tx_desc)(struct sxgbe_tx_norm_desc *p);
p                 162 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*tx_desc_enable_tse)(struct sxgbe_tx_norm_desc *p, u8 is_tse,
p                 167 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*prepare_tx_desc)(struct sxgbe_tx_norm_desc *p, u8 is_fd,
p                 171 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*tx_vlanctl_desc)(struct sxgbe_tx_norm_desc *p, int vlan_ctl);
p                 174 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*set_tx_owner)(struct sxgbe_tx_norm_desc *p);
p                 177 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	int (*get_tx_owner)(struct sxgbe_tx_norm_desc *p);
p                 180 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*close_tx_desc)(struct sxgbe_tx_norm_desc *p);
p                 183 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*release_tx_desc)(struct sxgbe_tx_norm_desc *p);
p                 188 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*clear_tx_ic)(struct sxgbe_tx_norm_desc *p);
p                 191 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	int (*get_tx_ls)(struct sxgbe_tx_norm_desc *p);
p                 194 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	int (*get_tx_len)(struct sxgbe_tx_norm_desc *p);
p                 197 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*tx_enable_tstamp)(struct sxgbe_tx_norm_desc *p);
p                 200 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	int (*get_tx_timestamp_status)(struct sxgbe_tx_norm_desc *p);
p                 203 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*tx_ctxt_desc_set_ctxt)(struct sxgbe_tx_ctxt_desc *p);
p                 206 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*tx_ctxt_desc_set_owner)(struct sxgbe_tx_ctxt_desc *p);
p                 209 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	int (*get_tx_ctxt_owner)(struct sxgbe_tx_ctxt_desc *p);
p                 212 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*tx_ctxt_desc_set_mss)(struct sxgbe_tx_ctxt_desc *p, u16 mss);
p                 215 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	int (*tx_ctxt_desc_get_mss)(struct sxgbe_tx_ctxt_desc *p);
p                 218 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*tx_ctxt_desc_set_tcmssv)(struct sxgbe_tx_ctxt_desc *p);
p                 221 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*tx_ctxt_desc_reset_ostc)(struct sxgbe_tx_ctxt_desc *p);
p                 224 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*tx_ctxt_desc_set_ivlantag)(struct sxgbe_tx_ctxt_desc *p,
p                 229 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	int (*tx_ctxt_desc_get_ivlantag)(struct sxgbe_tx_ctxt_desc *p);
p                 232 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*tx_ctxt_desc_set_vlantag)(struct sxgbe_tx_ctxt_desc *p,
p                 236 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	int (*tx_ctxt_desc_get_vlantag)(struct sxgbe_tx_ctxt_desc *p);
p                 239 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*tx_ctxt_set_tstamp)(struct sxgbe_tx_ctxt_desc *p,
p                 243 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*close_tx_ctxt_desc)(struct sxgbe_tx_ctxt_desc *p);
p                 246 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	int (*get_tx_ctxt_cde)(struct sxgbe_tx_ctxt_desc *p);
p                 249 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*init_rx_desc)(struct sxgbe_rx_norm_desc *p, int disable_rx_ic,
p                 253 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	int (*get_rx_owner)(struct sxgbe_rx_norm_desc *p);
p                 256 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p);
p                 259 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*set_rx_int_on_com)(struct sxgbe_rx_norm_desc *p);
p                 262 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p);
p                 265 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	int (*get_rx_fd_status)(struct sxgbe_rx_norm_desc *p);
p                 268 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	int (*get_rx_ld_status)(struct sxgbe_rx_norm_desc *p);
p                 271 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	int (*rx_wbstatus)(struct sxgbe_rx_norm_desc *p,
p                 275 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	int (*get_rx_ctxt_owner)(struct sxgbe_rx_ctxt_desc *p);
p                 278 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*set_rx_ctxt_owner)(struct sxgbe_rx_ctxt_desc *p);
p                 281 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	void (*rx_ctxt_wbstatus)(struct sxgbe_rx_ctxt_desc *p,
p                 285 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	int (*get_rx_ctxt_tstamp_status)(struct sxgbe_rx_ctxt_desc *p);
p                 288 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h 	u64 (*get_timestamp)(struct sxgbe_rx_ctxt_desc *p);
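
The sxgbe_desc.h block is a table of function pointers: every descriptor getter/setter defined in sxgbe_desc.c is reached through this ops struct, so the rest of the driver never touches descriptor bitfields directly (see priv->hw->desc->get_tx_owner(p) in the sxgbe_main.c entries below). A stripped-down sketch of that indirection with invented types:

    #include <stdio.h>

    struct tx_desc { unsigned own_bit : 1; unsigned last_desc : 1; };

    struct desc_ops {
            void (*set_tx_owner)(struct tx_desc *p);
            int  (*get_tx_owner)(struct tx_desc *p);
    };

    static void demo_set_tx_owner(struct tx_desc *p) { p->own_bit = 1; }
    static int  demo_get_tx_owner(struct tx_desc *p) { return p->own_bit; }

    static const struct desc_ops demo_ops = {
            .set_tx_owner = demo_set_tx_owner,
            .get_tx_owner = demo_get_tx_owner,
    };

    int main(void)
    {
            struct tx_desc d = { 0 };

            /* Core code only sees the ops table, never the bit layout. */
            demo_ops.set_tx_owner(&d);
            printf("owned by hw: %d\n", demo_ops.get_tx_owner(&d));
            return 0;
    }
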
p                 197 drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c 	u8 *p = data;
p                 202 drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c 			memcpy(p, sxgbe_gstrings_stats[i].stat_string,
p                 204 drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c 			p += ETH_GSTRING_LEN;
p                 231 drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c 	char *p;
p                 241 drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c 		p = (char *)priv + sxgbe_gstrings_stats[i].stat_offset;
p                 243 drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c 			? (*(u64 *)p) : (*(u32 *)p);
p                 338 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 				 struct sxgbe_rx_norm_desc *p, int i,
p                 359 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i];
p                 372 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 				  struct sxgbe_rx_norm_desc *p, int i,
p                 503 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		struct sxgbe_rx_norm_desc *p;
p                 504 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		p = rx_ring->dma_rx + desc_index;
p                 505 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		ret = sxgbe_init_rx_buffers(dev, p, desc_index,
p                 520 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		struct sxgbe_rx_norm_desc *p;
p                 522 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		p = rx_ring->dma_rx + desc_index;
p                 523 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		sxgbe_free_rx_buffers(dev, p, desc_index, bfsize, rx_ring);
p                 745 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		struct sxgbe_tx_norm_desc *p;
p                 747 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		p = tqueue->dma_tx + entry;
p                 750 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		if (priv->hw->desc->get_tx_owner(p))
p                 760 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 					 priv->hw->desc->get_tx_len(p),
p                 770 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		priv->hw->desc->release_tx_desc(p);
p                1007 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	struct sxgbe_tx_queue *p = from_timer(p, t, txtimer);
p                1008 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	sxgbe_tx_queue_clean(p);
p                1024 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		struct sxgbe_tx_queue *p = priv->txq[queue_num];
p                1025 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		p->tx_coal_frames =  SXGBE_TX_FRAMES;
p                1026 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
p                1027 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		timer_setup(&p->txtimer, sxgbe_tx_timer, 0);
p                1028 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
p                1029 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		add_timer(&p->txtimer);
p                1038 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		struct sxgbe_tx_queue *p = priv->txq[queue_num];
p                1039 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		del_timer_sync(&p->txtimer);
p                1438 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		struct sxgbe_rx_norm_desc *p;
p                1440 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		p = priv->rxq[qnum]->dma_rx + entry;
p                1455 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 			p->rdes23.rx_rd_des23.buf2_addr =
p                1461 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		priv->hw->desc->set_rx_owner(p);
p                1462 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		priv->hw->desc->set_rx_int_on_com(p);
p                1486 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		struct sxgbe_rx_norm_desc *p;
p                1490 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		p = priv->rxq[qnum]->dma_rx + entry;
p                1492 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		if (priv->hw->desc->get_rx_owner(p))
p                1504 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		status = priv->hw->desc->rx_wbstatus(p, &priv->xstats,
p                1521 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		frame_len = priv->hw->desc->get_rx_frame_len(p);
p                  34 drivers/net/ethernet/sfc/falcon/phy.h void falcon_qt202x_set_led(struct ef4_nic *p, int led, int state);
p                  50 drivers/net/ethernet/sfc/falcon/qt202x_phy.c void falcon_qt202x_set_led(struct ef4_nic *p, int led, int mode)
p                  53 drivers/net/ethernet/sfc/falcon/qt202x_phy.c 	ef4_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode);
p                 959 drivers/net/ethernet/sis/sis190.c 		}, *p = NULL;
p                 977 drivers/net/ethernet/sis/sis190.c 				p = reg31;
p                 979 drivers/net/ethernet/sis/sis190.c 				p = reg31 + 1;
p                 981 drivers/net/ethernet/sis/sis190.c 		if (!p) {
p                 984 drivers/net/ethernet/sis/sis190.c 			for (p = reg31; p->val; p++) {
p                 985 drivers/net/ethernet/sis/sis190.c 				if ((val & p->val) == p->val)
p                 990 drivers/net/ethernet/sis/sis190.c 		p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
p                 998 drivers/net/ethernet/sis/sis190.c 			p->ctl |= 0x03000000;
p                1001 drivers/net/ethernet/sis/sis190.c 		SIS_W32(StationControl, p->ctl);
p                1008 drivers/net/ethernet/sis/sis190.c 		tp->negotiated_lpa = p->val;
p                1010 drivers/net/ethernet/sis/sis190.c 		netif_info(tp, link, dev, "link on %s mode\n", p->msg);
p                1320 drivers/net/ethernet/sis/sis190.c 	struct mii_chip_info *p;
p                1329 drivers/net/ethernet/sis/sis190.c 	for (p = mii_chip_table; p->type; p++) {
p                1330 drivers/net/ethernet/sis/sis190.c 		if ((p->id[0] == phy->id[0]) &&
p                1331 drivers/net/ethernet/sis/sis190.c 		    (p->id[1] == (phy->id[1] & 0xfff0))) {
p                1336 drivers/net/ethernet/sis/sis190.c 	if (p->id[1]) {
p                1337 drivers/net/ethernet/sis/sis190.c 		phy->type = (p->type == MIX) ?
p                1339 drivers/net/ethernet/sis/sis190.c 				LAN : HOME) : p->type;
p                1340 drivers/net/ethernet/sis/sis190.c 		tp->features |= p->feature;
p                1343 drivers/net/ethernet/sis/sis190.c 				pci_name(tp->pci_dev), p->name, phy_id);
p                1361 drivers/net/ethernet/sis/sis190.c 		}, *p;
p                1363 drivers/net/ethernet/sis/sis190.c 		p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
p                1365 drivers/net/ethernet/sis/sis190.c 		mdio_write(ioaddr, phy_id, 0x1b, p[0]);
p                1367 drivers/net/ethernet/sis/sis190.c 		mdio_write(ioaddr, phy_id, 0x14, p[1]);
p                1770 drivers/net/ethernet/sis/sis190.c 			    void *p)
p                1776 drivers/net/ethernet/sis/sis190.c 	memcpy_fromio(p, tp->mmio_addr, regs->len);
p                1821 drivers/net/ethernet/sis/sis190.c static int sis190_mac_addr(struct net_device  *dev, void *p)
p                1825 drivers/net/ethernet/sis/sis190.c 	rc = eth_mac_addr(dev, p);
p                 187 drivers/net/ethernet/smsc/smc911x.h #define SMC_insl(lp, r, p, l)	 ioread16_rep((short*)((lp)->base + (r)), p, l*2)
p                 188 drivers/net/ethernet/smsc/smc911x.h #define SMC_outsl(lp, r, p, l)	 iowrite16_rep((short*)((lp)->base + (r)), p, l*2)
p                 193 drivers/net/ethernet/smsc/smc911x.h #define SMC_insl(lp, r, p, l)	 ioread32_rep((int*)((lp)->base + (r)), p, l)
p                 194 drivers/net/ethernet/smsc/smc911x.h #define SMC_outsl(lp, r, p, l)	 iowrite32_rep((int*)((lp)->base + (r)), p, l)
p                 214 drivers/net/ethernet/smsc/smc911x.h #define SMC_insl(lp, r, p, l) \
p                 215 drivers/net/ethernet/smsc/smc911x.h 	smc_pxa_dma_insl(lp, lp->physaddr, r, lp->rxdma, p, l)
p                 246 drivers/net/ethernet/smsc/smc911x.h #define SMC_outsl(lp, r, p, l) \
p                 247 drivers/net/ethernet/smsc/smc911x.h 	 smc_pxa_dma_outsl(lp, lp->physaddr, r, lp->txdma, p, l)
p                 692 drivers/net/ethernet/smsc/smc911x.h #define SMC_PUSH_DATA(lp, p, l)	SMC_outsl( lp, TX_DATA_FIFO, p, (l) >> 2 )
p                 693 drivers/net/ethernet/smsc/smc911x.h #define SMC_PULL_DATA(lp, p, l)	SMC_insl ( lp, RX_DATA_FIFO, p, (l) >> 2 )
p                 262 drivers/net/ethernet/smsc/smc91c92_cs.c #define set_bits(v, p) outw(inw(p)|(v), (p))
p                 263 drivers/net/ethernet/smsc/smc91c92_cs.c #define mask_bits(v, p) outw(inw(p)&(v), (p))
p                1778 drivers/net/ethernet/smsc/smc91c92_cs.c 	    u_short p = mdio_read(dev, smc->mii_if.phy_id, 5);
p                1780 drivers/net/ethernet/smsc/smc91c92_cs.c 	    smc->duplex = (((p & 0x0100) || ((p & 0x1c0) == 0x40))
p                1785 drivers/net/ethernet/smsc/smc91c92_cs.c 			    (p & 0x0180) ? 100 : 10, smc->duplex ? 'F' : 'H');
p                  90 drivers/net/ethernet/smsc/smc91x.h #define SMC_insb(a, r, p, l)	readsb((a) + (r), p, l)
p                  91 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsb(a, r, p, l)	writesb((a) + (r), p, l)
p                  92 drivers/net/ethernet/smsc/smc91x.h #define SMC_insw(a, r, p, l)	readsw((a) + (r), p, l)
p                  93 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsw(a, r, p, l)	writesw((a) + (r), p, l)
p                  94 drivers/net/ethernet/smsc/smc91x.h #define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
p                  95 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
p                 129 drivers/net/ethernet/smsc/smc91x.h #define SMC_insl(a, r, p, l)	insl((a) + (r) - 0xa0000000, p, l)
p                 130 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsl(a, r, p, l)	outsl((a) + (r) - 0xa0000000, p, l)
p                 131 drivers/net/ethernet/smsc/smc91x.h #define SMC_insw(a, r, p, l)	insw((a) + (r) - 0xa0000000, p, l)
p                 132 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsw(a, r, p, l)	outsw((a) + (r) - 0xa0000000, p, l)
p                 149 drivers/net/ethernet/smsc/smc91x.h #define SMC_insw(a, r, p, l)    readsw((a) + (r), p, l)
p                 150 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsw(a, r, p, l)   writesw((a) + (r), p, l)
p                 151 drivers/net/ethernet/smsc/smc91x.h #define SMC_insl(a, r, p, l)    readsl((a) + (r), p, l)
p                 152 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsl(a, r, p, l)   writesl((a) + (r), p, l)
p                 164 drivers/net/ethernet/smsc/smc91x.h static inline void mcf_insw(void *a, unsigned char *p, int l)
p                 166 drivers/net/ethernet/smsc/smc91x.h 	u16 *wp = (u16 *) p;
p                 171 drivers/net/ethernet/smsc/smc91x.h static inline void mcf_outsw(void *a, unsigned char *p, int l)
p                 173 drivers/net/ethernet/smsc/smc91x.h 	u16 *wp = (u16 *) p;
p                 180 drivers/net/ethernet/smsc/smc91x.h #define SMC_insw(a, r, p, l)	mcf_insw(a + r, p, l)
p                 181 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsw(a, r, p, l)	mcf_outsw(a + r, p, l)
p                 193 drivers/net/ethernet/smsc/smc91x.h #define SMC_insb(a, r, p, l)	ioread8_rep((a) + (r), p, l)
p                 194 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsb(a, r, p, l)	iowrite8_rep((a) + (r), p, l)
p                 215 drivers/net/ethernet/smsc/smc91x.h #define SMC_insw(a, r, p, l)	ioread16_rep((a) + (r), p, l)
p                 216 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsw(a, r, p, l)	iowrite16_rep((a) + (r), p, l)
p                 217 drivers/net/ethernet/smsc/smc91x.h #define SMC_insl(a, r, p, l)	ioread32_rep((a) + (r), p, l)
p                 218 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsl(a, r, p, l)	iowrite32_rep((a) + (r), p, l)
p                 281 drivers/net/ethernet/smsc/smc91x.h #define SMC_8BIT(p)	((p)->cfg.flags & SMC91X_USE_8BIT)
p                 282 drivers/net/ethernet/smsc/smc91x.h #define SMC_16BIT(p)	((p)->cfg.flags & SMC91X_USE_16BIT)
p                 283 drivers/net/ethernet/smsc/smc91x.h #define SMC_32BIT(p)	((p)->cfg.flags & SMC91X_USE_32BIT)
p                 296 drivers/net/ethernet/smsc/smc91x.h #define SMC_insl(a, r, p, l) \
p                 297 drivers/net/ethernet/smsc/smc91x.h 	smc_pxa_dma_insl(a, lp, r, dev->dma, p, l)
p                 365 drivers/net/ethernet/smsc/smc91x.h #define SMC_insw(a, r, p, l) \
p                 366 drivers/net/ethernet/smsc/smc91x.h 	smc_pxa_dma_insw(a, lp, r, dev->dma, p, l)
p                 419 drivers/net/ethernet/smsc/smc91x.h #define SMC_insl(a, r, p, l)		BUG()
p                 420 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsl(a, r, p, l)		BUG()
p                 424 drivers/net/ethernet/smsc/smc91x.h #define SMC_insl(a, r, p, l)		BUG()
p                 425 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsl(a, r, p, l)		BUG()
p                 432 drivers/net/ethernet/smsc/smc91x.h #define SMC_insw(a, r, p, l)		BUG()
p                 433 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsw(a, r, p, l)		BUG()
p                 438 drivers/net/ethernet/smsc/smc91x.h #define SMC_insw(a, r, p, l)		BUG()
p                 439 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsw(a, r, p, l)		BUG()
p                 447 drivers/net/ethernet/smsc/smc91x.h #define SMC_insb(a, r, p, l)		BUG()
p                 448 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsb(a, r, p, l)		BUG()
p                 452 drivers/net/ethernet/smsc/smc91x.h #define SMC_insb(a, r, p, l)		BUG()
p                 453 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsb(a, r, p, l)		BUG()
p                1059 drivers/net/ethernet/smsc/smc91x.h #define SMC_PUSH_DATA(lp, p, l)					\
p                1062 drivers/net/ethernet/smsc/smc91x.h 			void *__ptr = (p);				\
p                1078 drivers/net/ethernet/smsc/smc91x.h 			SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1);	\
p                1080 drivers/net/ethernet/smsc/smc91x.h 			SMC_outsb(ioaddr, DATA_REG(lp), p, l);	\
p                1083 drivers/net/ethernet/smsc/smc91x.h #define SMC_PULL_DATA(lp, p, l)					\
p                1086 drivers/net/ethernet/smsc/smc91x.h 			void *__ptr = (p);				\
p                1113 drivers/net/ethernet/smsc/smc91x.h 			SMC_insw(ioaddr, DATA_REG(lp), p, (l) >> 1);	\
p                1115 drivers/net/ethernet/smsc/smc91x.h 			SMC_insb(ioaddr, DATA_REG(lp), p, l);		\
p                1921 drivers/net/ethernet/smsc/smsc911x.c static int smsc911x_set_mac_address(struct net_device *dev, void *p)
p                1924 drivers/net/ethernet/smsc/smsc911x.c 	struct sockaddr *addr = p;
p                 208 drivers/net/ethernet/socionext/sni_ave.c #define IS_DESC_64BIT(p)	((p)->data->is_desc_64bit)
p                1528 drivers/net/ethernet/socionext/sni_ave.c static int ave_set_mac_address(struct net_device *ndev, void *p)
p                1530 drivers/net/ethernet/socionext/sni_ave.c 	int ret = eth_mac_addr(ndev, p);
p                  17 drivers/net/ethernet/stmicro/stmmac/chain_mode.c static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
p                  19 drivers/net/ethernet/stmicro/stmmac/chain_mode.c 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
p                 109 drivers/net/ethernet/stmicro/stmmac/chain_mode.c 		struct dma_extended_desc *p = (struct dma_extended_desc *)des;
p                 112 drivers/net/ethernet/stmicro/stmmac/chain_mode.c 			p->basic.des3 = cpu_to_le32((unsigned int)dma_phy);
p                 113 drivers/net/ethernet/stmicro/stmmac/chain_mode.c 			p++;
p                 115 drivers/net/ethernet/stmicro/stmmac/chain_mode.c 		p->basic.des3 = cpu_to_le32((unsigned int)phy_addr);
p                 118 drivers/net/ethernet/stmicro/stmmac/chain_mode.c 		struct dma_desc *p = (struct dma_desc *)des;
p                 121 drivers/net/ethernet/stmicro/stmmac/chain_mode.c 			p->des3 = cpu_to_le32((unsigned int)dma_phy);
p                 122 drivers/net/ethernet/stmicro/stmmac/chain_mode.c 			p++;
p                 124 drivers/net/ethernet/stmicro/stmmac/chain_mode.c 		p->des3 = cpu_to_le32((unsigned int)phy_addr);
p                 128 drivers/net/ethernet/stmicro/stmmac/chain_mode.c static void refill_desc3(void *priv_ptr, struct dma_desc *p)
p                 138 drivers/net/ethernet/stmicro/stmmac/chain_mode.c 		p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
p                 144 drivers/net/ethernet/stmicro/stmmac/chain_mode.c static void clean_desc3(void *priv_ptr, struct dma_desc *p)
p                 156 drivers/net/ethernet/stmicro/stmmac/chain_mode.c 		p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
p                  22 drivers/net/ethernet/stmicro/stmmac/descs_com.h static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
p                  26 drivers/net/ethernet/stmicro/stmmac/descs_com.h 		p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
p                  31 drivers/net/ethernet/stmicro/stmmac/descs_com.h 		p->des1 |= cpu_to_le32(ERDES1_END_RING);
p                  34 drivers/net/ethernet/stmicro/stmmac/descs_com.h static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int end)
p                  37 drivers/net/ethernet/stmicro/stmmac/descs_com.h 		p->des0 |= cpu_to_le32(ETDES0_END_RING);
p                  39 drivers/net/ethernet/stmicro/stmmac/descs_com.h 		p->des0 &= cpu_to_le32(~ETDES0_END_RING);
p                  42 drivers/net/ethernet/stmicro/stmmac/descs_com.h static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
p                  45 drivers/net/ethernet/stmicro/stmmac/descs_com.h 		p->des1 |= cpu_to_le32((((len - BUF_SIZE_4KiB)
p                  50 drivers/net/ethernet/stmicro/stmmac/descs_com.h 		p->des1 |= cpu_to_le32((len & ETDES1_BUFFER1_SIZE_MASK));
p                  54 drivers/net/ethernet/stmicro/stmmac/descs_com.h static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
p                  60 drivers/net/ethernet/stmicro/stmmac/descs_com.h 		p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
p                  65 drivers/net/ethernet/stmicro/stmmac/descs_com.h 		p->des1 |= cpu_to_le32(RDES1_END_RING);
p                  68 drivers/net/ethernet/stmicro/stmmac/descs_com.h static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int end)
p                  71 drivers/net/ethernet/stmicro/stmmac/descs_com.h 		p->des1 |= cpu_to_le32(TDES1_END_RING);
p                  73 drivers/net/ethernet/stmicro/stmmac/descs_com.h 		p->des1 &= cpu_to_le32(~TDES1_END_RING);
p                  76 drivers/net/ethernet/stmicro/stmmac/descs_com.h static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
p                  81 drivers/net/ethernet/stmicro/stmmac/descs_com.h 		p->des1 |= cpu_to_le32((((len - buffer1)
p                  85 drivers/net/ethernet/stmicro/stmmac/descs_com.h 		p->des1 |= cpu_to_le32((len & TDES1_BUFFER1_SIZE_MASK));
p                  91 drivers/net/ethernet/stmicro/stmmac/descs_com.h static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p)
p                  93 drivers/net/ethernet/stmicro/stmmac/descs_com.h 	p->des1 |= cpu_to_le32(ERDES1_SECOND_ADDRESS_CHAINED);
p                  96 drivers/net/ethernet/stmicro/stmmac/descs_com.h static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p)
p                  98 drivers/net/ethernet/stmicro/stmmac/descs_com.h 	p->des0 |= cpu_to_le32(ETDES0_SECOND_ADDRESS_CHAINED);
p                 101 drivers/net/ethernet/stmicro/stmmac/descs_com.h static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
p                 103 drivers/net/ethernet/stmicro/stmmac/descs_com.h 	p->des1 |= cpu_to_le32(len & ETDES1_BUFFER1_SIZE_MASK);
p                 107 drivers/net/ethernet/stmicro/stmmac/descs_com.h static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end)
p                 109 drivers/net/ethernet/stmicro/stmmac/descs_com.h 	p->des1 |= cpu_to_le32(RDES1_SECOND_ADDRESS_CHAINED);
p                 112 drivers/net/ethernet/stmicro/stmmac/descs_com.h static inline void ndesc_tx_set_on_chain(struct dma_desc *p)
p                 114 drivers/net/ethernet/stmicro/stmmac/descs_com.h 	p->des1 |= cpu_to_le32(TDES1_SECOND_ADDRESS_CHAINED);
p                 117 drivers/net/ethernet/stmicro/stmmac/descs_com.h static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
p                 119 drivers/net/ethernet/stmicro/stmmac/descs_com.h 	p->des1 |= cpu_to_le32(len & TDES1_BUFFER1_SIZE_MASK);
p                  16 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 				       struct dma_desc *p,
p                  23 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	tdes3 = le32_to_cpu(p->des3);
p                  74 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 				       struct dma_desc *p)
p                  77 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	unsigned int rdes1 = le32_to_cpu(p->des1);
p                  78 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	unsigned int rdes2 = le32_to_cpu(p->des2);
p                  79 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	unsigned int rdes3 = le32_to_cpu(p->des3);
p                 174 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static int dwmac4_rd_get_tx_len(struct dma_desc *p)
p                 176 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	return (le32_to_cpu(p->des2) & TDES2_BUFFER1_SIZE_MASK);
p                 179 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static int dwmac4_get_tx_owner(struct dma_desc *p)
p                 181 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	return (le32_to_cpu(p->des3) & TDES3_OWN) >> TDES3_OWN_SHIFT;
p                 184 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static void dwmac4_set_tx_owner(struct dma_desc *p)
p                 186 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des3 |= cpu_to_le32(TDES3_OWN);
p                 189 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
p                 191 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
p                 194 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 		p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
p                 197 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static int dwmac4_get_tx_ls(struct dma_desc *p)
p                 199 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	return (le32_to_cpu(p->des3) & TDES3_LAST_DESCRIPTOR)
p                 203 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
p                 205 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK);
p                 208 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
p                 210 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des2 |= cpu_to_le32(TDES2_TIMESTAMP_ENABLE);
p                 213 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
p                 216 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
p                 220 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
p                 228 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	struct dma_desc *p = (struct dma_desc *)desc;
p                 231 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	ns = le32_to_cpu(p->des0);
p                 233 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	ns += le32_to_cpu(p->des1) * 1000000000ULL;
p                 240 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	struct dma_desc *p = (struct dma_desc *)desc;
p                 241 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	unsigned int rdes0 = le32_to_cpu(p->des0);
p                 242 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	unsigned int rdes1 = le32_to_cpu(p->des1);
p                 243 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	unsigned int rdes3 = le32_to_cpu(p->des3);
p                 267 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	struct dma_desc *p = (struct dma_desc *)desc;
p                 271 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	if (likely(le32_to_cpu(p->des3) & RDES3_RDES1_VALID)) {
p                 272 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 		if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) {
p                 295 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
p                 298 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	dwmac4_set_rx_owner(p, disable_rx_ic);
p                 301 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
p                 303 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des0 = 0;
p                 304 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des1 = 0;
p                 305 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des2 = 0;
p                 306 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des3 = 0;
p                 309 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
p                 313 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	unsigned int tdes3 = le32_to_cpu(p->des3);
p                 315 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);
p                 344 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des3 = cpu_to_le32(tdes3);
p                 347 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
p                 352 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	unsigned int tdes3 = le32_to_cpu(p->des3);
p                 355 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 		p->des2 |= cpu_to_le32((len1 & TDES2_BUFFER1_SIZE_MASK));
p                 358 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 		p->des2 |= cpu_to_le32((len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
p                 387 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des3 = cpu_to_le32(tdes3);
p                 390 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
p                 392 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des0 = 0;
p                 393 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des1 = 0;
p                 394 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des2 = 0;
p                 395 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des3 = 0;
p                 398 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
p                 400 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION);
p                 405 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	struct dma_desc *p = (struct dma_desc *)head;
p                 412 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 			i, (unsigned int)virt_to_phys(p),
p                 413 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 			le32_to_cpu(p->des0), le32_to_cpu(p->des1),
p                 414 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 			le32_to_cpu(p->des2), le32_to_cpu(p->des3));
p                 415 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 		p++;
p                 419 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
p                 421 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des0 = 0;
p                 422 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des1 = 0;
p                 423 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des2 = cpu_to_le32(mss);
p                 424 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV);
p                 427 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static void dwmac4_get_addr(struct dma_desc *p, unsigned int *addr)
p                 429 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	*addr = le32_to_cpu(p->des0);
p                 432 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr)
p                 434 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des0 = cpu_to_le32(addr);
p                 435 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des1 = 0;
p                 438 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static void dwmac4_clear(struct dma_desc *p)
p                 440 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des0 = 0;
p                 441 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des1 = 0;
p                 442 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des2 = 0;
p                 443 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des3 = 0;
p                 446 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static void dwmac4_set_sarc(struct dma_desc *p, u32 sarc_type)
p                 450 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des3 |= cpu_to_le32(sarc_type & TDES3_SA_INSERT_CTRL_MASK);
p                 462 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static void dwmac4_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
p                 465 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des0 = 0;
p                 466 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des1 = 0;
p                 467 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des2 = 0;
p                 468 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des3 = 0;
p                 475 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 		p->des2 = cpu_to_le32(des);
p                 479 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 		p->des3 = cpu_to_le32(des | TDES3_IVLTV);
p                 483 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des3 |= cpu_to_le32(tag & TDES3_VLAN_TAG);
p                 484 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des3 |= cpu_to_le32(TDES3_VLTV);
p                 486 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des3 |= cpu_to_le32(TDES3_CONTEXT_TYPE);
p                 489 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static void dwmac4_set_vlan(struct dma_desc *p, u32 type)
p                 492 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c 	p->des2 |= cpu_to_le32(type & TDES2_VLAN_TAG_MASK);
p                  12 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 				  struct dma_desc *p, void __iomem *ioaddr)
p                  14 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	unsigned int tdes3 = le32_to_cpu(p->des3);
p                  26 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 				  struct dma_desc *p)
p                  28 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	unsigned int rdes3 = le32_to_cpu(p->des3);
p                  42 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static int dwxgmac2_get_tx_len(struct dma_desc *p)
p                  44 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	return (le32_to_cpu(p->des2) & XGMAC_TDES2_B1L);
p                  47 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static int dwxgmac2_get_tx_owner(struct dma_desc *p)
p                  49 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	return (le32_to_cpu(p->des3) & XGMAC_TDES3_OWN) > 0;
p                  52 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static void dwxgmac2_set_tx_owner(struct dma_desc *p)
p                  54 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des3 |= cpu_to_le32(XGMAC_TDES3_OWN);
p                  57 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static void dwxgmac2_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
p                  59 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des3 |= cpu_to_le32(XGMAC_RDES3_OWN);
p                  62 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 		p->des3 |= cpu_to_le32(XGMAC_RDES3_IOC);
p                  65 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static int dwxgmac2_get_tx_ls(struct dma_desc *p)
p                  67 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	return (le32_to_cpu(p->des3) & XGMAC_RDES3_LD) > 0;
p                  70 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static int dwxgmac2_get_rx_frame_len(struct dma_desc *p, int rx_coe)
p                  72 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	return (le32_to_cpu(p->des3) & XGMAC_RDES3_PL);
p                  75 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static void dwxgmac2_enable_tx_timestamp(struct dma_desc *p)
p                  77 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des2 |= cpu_to_le32(XGMAC_TDES2_TTSE);
p                  80 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static int dwxgmac2_get_tx_timestamp_status(struct dma_desc *p)
p                  87 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	struct dma_desc *p = (struct dma_desc *)desc;
p                  90 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	ns += le32_to_cpu(p->des1) * 1000000000ULL;
p                  91 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	ns += le32_to_cpu(p->des0);
p                  98 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	struct dma_desc *p = (struct dma_desc *)desc;
p                  99 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	unsigned int rdes3 = le32_to_cpu(p->des3);
p                 108 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 		if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
p                 119 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	struct dma_desc *p = (struct dma_desc *)desc;
p                 120 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	unsigned int rdes3 = le32_to_cpu(p->des3);
p                 129 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
p                 132 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	dwxgmac2_set_rx_owner(p, disable_rx_ic);
p                 135 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static void dwxgmac2_init_tx_desc(struct dma_desc *p, int mode, int end)
p                 137 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des0 = 0;
p                 138 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des1 = 0;
p                 139 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des2 = 0;
p                 140 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des3 = 0;
p                 143 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static void dwxgmac2_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
p                 147 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	unsigned int tdes3 = le32_to_cpu(p->des3);
p                 149 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des2 |= cpu_to_le32(len & XGMAC_TDES2_B1L);
p                 178 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des3 = cpu_to_le32(tdes3);
p                 181 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static void dwxgmac2_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
p                 186 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	unsigned int tdes3 = le32_to_cpu(p->des3);
p                 189 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 		p->des2 |= cpu_to_le32(len1 & XGMAC_TDES2_B1L);
p                 191 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 		p->des2 |= cpu_to_le32((len2 << XGMAC_TDES2_B2L_SHIFT) &
p                 218 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des3 = cpu_to_le32(tdes3);
p                 221 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static void dwxgmac2_release_tx_desc(struct dma_desc *p, int mode)
p                 223 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des0 = 0;
p                 224 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des1 = 0;
p                 225 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des2 = 0;
p                 226 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des3 = 0;
p                 229 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static void dwxgmac2_set_tx_ic(struct dma_desc *p)
p                 231 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des2 |= cpu_to_le32(XGMAC_TDES2_IOC);
p                 234 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static void dwxgmac2_set_mss(struct dma_desc *p, unsigned int mss)
p                 236 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des0 = 0;
p                 237 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des1 = 0;
p                 238 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des2 = cpu_to_le32(mss);
p                 239 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des3 = cpu_to_le32(XGMAC_TDES3_CTXT | XGMAC_TDES3_TCMSSV);
p                 242 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static void dwxgmac2_get_addr(struct dma_desc *p, unsigned int *addr)
p                 244 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	*addr = le32_to_cpu(p->des0);
p                 247 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static void dwxgmac2_set_addr(struct dma_desc *p, dma_addr_t addr)
p                 249 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des0 = cpu_to_le32(lower_32_bits(addr));
p                 250 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des1 = cpu_to_le32(upper_32_bits(addr));
p                 253 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static void dwxgmac2_clear(struct dma_desc *p)
p                 255 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des0 = 0;
p                 256 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des1 = 0;
p                 257 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des2 = 0;
p                 258 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des3 = 0;
p                 261 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash,
p                 264 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	unsigned int rdes3 = le32_to_cpu(p->des3);
p                 282 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 		*hash = le32_to_cpu(p->des1);
p                 289 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static int dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len)
p                 291 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	if (le32_to_cpu(p->des3) & XGMAC_RDES3_L34T)
p                 292 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 		*len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL;
p                 296 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
p                 298 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des2 = cpu_to_le32(lower_32_bits(addr));
p                 299 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des3 = cpu_to_le32(upper_32_bits(addr));
p                 302 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static void dwxgmac2_set_sarc(struct dma_desc *p, u32 sarc_type)
p                 306 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des3 |= cpu_to_le32(sarc_type & XGMAC_TDES3_SAIC);
p                 309 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static void dwxgmac2_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
p                 312 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des0 = 0;
p                 313 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des1 = 0;
p                 314 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des2 = 0;
p                 315 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des3 = 0;
p                 322 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 		p->des2 = cpu_to_le32(des);
p                 326 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 		p->des3 = cpu_to_le32(des | XGMAC_TDES3_IVLTV);
p                 330 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des3 |= cpu_to_le32(tag & XGMAC_TDES3_VT);
p                 331 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des3 |= cpu_to_le32(XGMAC_TDES3_VLTV);
p                 333 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des3 |= cpu_to_le32(XGMAC_TDES3_CTXT);
p                 336 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c static void dwxgmac2_set_vlan(struct dma_desc *p, u32 type)
p                 339 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c 	p->des2 |= cpu_to_le32(type & XGMAC_TDES2_VTIR);
p                  16 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 				  struct dma_desc *p, void __iomem *ioaddr)
p                  19 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	unsigned int tdes0 = le32_to_cpu(p->des0);
p                  82 drivers/net/ethernet/stmicro/stmmac/enh_desc.c static int enh_desc_get_tx_len(struct dma_desc *p)
p                  84 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK);
p                 121 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 				    struct dma_extended_desc *p)
p                 123 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	unsigned int rdes0 = le32_to_cpu(p->basic.des0);
p                 124 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	unsigned int rdes4 = le32_to_cpu(p->des4);
p                 185 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 				  struct dma_desc *p)
p                 188 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	unsigned int rdes0 = le32_to_cpu(p->des0);
p                 257 drivers/net/ethernet/stmicro/stmmac/enh_desc.c static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
p                 262 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	p->des0 |= cpu_to_le32(RDES0_OWN);
p                 265 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
p                 268 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		ehn_desc_rx_set_on_chain(p);
p                 270 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		ehn_desc_rx_set_on_ring(p, end, bfsize);
p                 273 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
p                 276 drivers/net/ethernet/stmicro/stmmac/enh_desc.c static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
p                 278 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	p->des0 &= cpu_to_le32(~ETDES0_OWN);
p                 280 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		enh_desc_end_tx_desc_on_chain(p);
p                 282 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		enh_desc_end_tx_desc_on_ring(p, end);
p                 285 drivers/net/ethernet/stmicro/stmmac/enh_desc.c static int enh_desc_get_tx_owner(struct dma_desc *p)
p                 287 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31;
p                 290 drivers/net/ethernet/stmicro/stmmac/enh_desc.c static void enh_desc_set_tx_owner(struct dma_desc *p)
p                 292 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	p->des0 |= cpu_to_le32(ETDES0_OWN);
p                 295 drivers/net/ethernet/stmicro/stmmac/enh_desc.c static void enh_desc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
p                 297 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	p->des0 |= cpu_to_le32(RDES0_OWN);
p                 300 drivers/net/ethernet/stmicro/stmmac/enh_desc.c static int enh_desc_get_tx_ls(struct dma_desc *p)
p                 302 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29;
p                 305 drivers/net/ethernet/stmicro/stmmac/enh_desc.c static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
p                 307 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21;
p                 309 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	memset(p, 0, offsetof(struct dma_desc, des2));
p                 311 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		enh_desc_end_tx_desc_on_chain(p);
p                 313 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		enh_desc_end_tx_desc_on_ring(p, ter);
p                 316 drivers/net/ethernet/stmicro/stmmac/enh_desc.c static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
p                 320 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	unsigned int tdes0 = le32_to_cpu(p->des0);
p                 323 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		enh_set_tx_desc_len_on_chain(p, len);
p                 325 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		enh_set_tx_desc_len_on_ring(p, len);
p                 351 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	p->des0 = cpu_to_le32(tdes0);
p                 354 drivers/net/ethernet/stmicro/stmmac/enh_desc.c static void enh_desc_set_tx_ic(struct dma_desc *p)
p                 356 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	p->des0 |= cpu_to_le32(ETDES0_INTERRUPT);
p                 359 drivers/net/ethernet/stmicro/stmmac/enh_desc.c static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
p                 371 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
p                 375 drivers/net/ethernet/stmicro/stmmac/enh_desc.c static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
p                 377 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE);
p                 380 drivers/net/ethernet/stmicro/stmmac/enh_desc.c static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
p                 382 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17;
p                 390 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
p                 391 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		ns = le32_to_cpu(p->des6);
p                 393 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		ns += le32_to_cpu(p->des7) * 1000000000ULL;
p                 395 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		struct dma_desc *p = (struct dma_desc *)desc;
p                 396 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		ns = le32_to_cpu(p->des2);
p                 397 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		ns += le32_to_cpu(p->des3) * 1000000000ULL;
p                 407 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
p                 408 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7;
p                 410 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		struct dma_desc *p = (struct dma_desc *)desc;
p                 411 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		if ((le32_to_cpu(p->des2) == 0xffffffff) &&
p                 412 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 		    (le32_to_cpu(p->des3) == 0xffffffff))
p                 440 drivers/net/ethernet/stmicro/stmmac/enh_desc.c static void enh_desc_get_addr(struct dma_desc *p, unsigned int *addr)
p                 442 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	*addr = le32_to_cpu(p->des2);
p                 445 drivers/net/ethernet/stmicro/stmmac/enh_desc.c static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr)
p                 447 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	p->des2 = cpu_to_le32(addr);
p                 450 drivers/net/ethernet/stmicro/stmmac/enh_desc.c static void enh_desc_clear(struct dma_desc *p)
p                 452 drivers/net/ethernet/stmicro/stmmac/enh_desc.c 	p->des2 = 0;
p                  36 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
p                  39 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
p                  41 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*prepare_tx_desc)(struct dma_desc *p, int is_fs, int len,
p                  44 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1,
p                  48 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*set_tx_owner)(struct dma_desc *p);
p                  49 drivers/net/ethernet/stmicro/stmmac/hwif.h 	int (*get_tx_owner)(struct dma_desc *p);
p                  51 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*release_tx_desc)(struct dma_desc *p, int mode);
p                  54 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*set_tx_ic)(struct dma_desc *p);
p                  56 drivers/net/ethernet/stmicro/stmmac/hwif.h 	int (*get_tx_ls)(struct dma_desc *p);
p                  59 drivers/net/ethernet/stmicro/stmmac/hwif.h 			struct dma_desc *p, void __iomem *ioaddr);
p                  61 drivers/net/ethernet/stmicro/stmmac/hwif.h 	int (*get_tx_len)(struct dma_desc *p);
p                  63 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*set_rx_owner)(struct dma_desc *p, int disable_rx_ic);
p                  65 drivers/net/ethernet/stmicro/stmmac/hwif.h 	int (*get_rx_frame_len)(struct dma_desc *p, int rx_coe_type);
p                  68 drivers/net/ethernet/stmicro/stmmac/hwif.h 			struct dma_desc *p);
p                  70 drivers/net/ethernet/stmicro/stmmac/hwif.h 			struct dma_extended_desc *p);
p                  72 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*enable_tx_timestamp) (struct dma_desc *p);
p                  74 drivers/net/ethernet/stmicro/stmmac/hwif.h 	int (*get_tx_timestamp_status) (struct dma_desc *p);
p                  82 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*set_mss)(struct dma_desc *p, unsigned int mss);
p                  84 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*get_addr)(struct dma_desc *p, unsigned int *addr);
p                  86 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*set_addr)(struct dma_desc *p, dma_addr_t addr);
p                  88 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*clear)(struct dma_desc *p);
p                  90 drivers/net/ethernet/stmicro/stmmac/hwif.h 	int (*get_rx_hash)(struct dma_desc *p, u32 *hash,
p                  92 drivers/net/ethernet/stmicro/stmmac/hwif.h 	int (*get_rx_header_len)(struct dma_desc *p, unsigned int *len);
p                  93 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*set_sec_addr)(struct dma_desc *p, dma_addr_t addr);
p                  94 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*set_sarc)(struct dma_desc *p, u32 sarc_type);
p                  95 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag,
p                  97 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*set_vlan)(struct dma_desc *p, u32 type);
p                 493 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*init_desc3)(struct dma_desc *p);
p                 494 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*refill_desc3) (void *priv, struct dma_desc *p);
p                 495 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*clean_desc3) (void *priv, struct dma_desc *p);
p                  16 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 			       struct dma_desc *p, void __iomem *ioaddr)
p                  19 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	unsigned int tdes0 = le32_to_cpu(p->des0);
p                  20 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	unsigned int tdes1 = le32_to_cpu(p->des1);
p                  64 drivers/net/ethernet/stmicro/stmmac/norm_desc.c static int ndesc_get_tx_len(struct dma_desc *p)
p                  66 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	return (le32_to_cpu(p->des1) & RDES1_BUFFER1_SIZE_MASK);
p                  74 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 			       struct dma_desc *p)
p                  77 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	unsigned int rdes0 = le32_to_cpu(p->des0);
p                 125 drivers/net/ethernet/stmicro/stmmac/norm_desc.c static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
p                 130 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	p->des0 |= cpu_to_le32(RDES0_OWN);
p                 133 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
p                 136 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 		ndesc_rx_set_on_chain(p, end);
p                 138 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 		ndesc_rx_set_on_ring(p, end, bfsize);
p                 141 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 		p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
p                 144 drivers/net/ethernet/stmicro/stmmac/norm_desc.c static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
p                 146 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	p->des0 &= cpu_to_le32(~TDES0_OWN);
p                 148 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 		ndesc_tx_set_on_chain(p);
p                 150 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 		ndesc_end_tx_desc_on_ring(p, end);
p                 153 drivers/net/ethernet/stmicro/stmmac/norm_desc.c static int ndesc_get_tx_owner(struct dma_desc *p)
p                 155 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	return (le32_to_cpu(p->des0) & TDES0_OWN) >> 31;
p                 158 drivers/net/ethernet/stmicro/stmmac/norm_desc.c static void ndesc_set_tx_owner(struct dma_desc *p)
p                 160 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	p->des0 |= cpu_to_le32(TDES0_OWN);
p                 163 drivers/net/ethernet/stmicro/stmmac/norm_desc.c static void ndesc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
p                 165 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	p->des0 |= cpu_to_le32(RDES0_OWN);
p                 168 drivers/net/ethernet/stmicro/stmmac/norm_desc.c static int ndesc_get_tx_ls(struct dma_desc *p)
p                 170 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	return (le32_to_cpu(p->des1) & TDES1_LAST_SEGMENT) >> 30;
p                 173 drivers/net/ethernet/stmicro/stmmac/norm_desc.c static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
p                 175 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	int ter = (le32_to_cpu(p->des1) & TDES1_END_RING) >> 25;
p                 177 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	memset(p, 0, offsetof(struct dma_desc, des2));
p                 179 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 		ndesc_tx_set_on_chain(p);
p                 181 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 		ndesc_end_tx_desc_on_ring(p, ter);
p                 184 drivers/net/ethernet/stmicro/stmmac/norm_desc.c static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
p                 188 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	unsigned int tdes1 = le32_to_cpu(p->des1);
p                 203 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	p->des1 = cpu_to_le32(tdes1);
p                 206 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 		norm_set_tx_desc_len_on_chain(p, len);
p                 208 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 		norm_set_tx_desc_len_on_ring(p, len);
p                 211 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 		p->des0 |= cpu_to_le32(TDES0_OWN);
p                 214 drivers/net/ethernet/stmicro/stmmac/norm_desc.c static void ndesc_set_tx_ic(struct dma_desc *p)
p                 216 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	p->des1 |= cpu_to_le32(TDES1_INTERRUPT);
p                 219 drivers/net/ethernet/stmicro/stmmac/norm_desc.c static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
p                 232 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
p                 238 drivers/net/ethernet/stmicro/stmmac/norm_desc.c static void ndesc_enable_tx_timestamp(struct dma_desc *p)
p                 240 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	p->des1 |= cpu_to_le32(TDES1_TIME_STAMP_ENABLE);
p                 243 drivers/net/ethernet/stmicro/stmmac/norm_desc.c static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
p                 245 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17;
p                 250 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	struct dma_desc *p = (struct dma_desc *)desc;
p                 253 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	ns = le32_to_cpu(p->des2);
p                 255 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	ns += le32_to_cpu(p->des3) * 1000000000ULL;
p                 262 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	struct dma_desc *p = (struct dma_desc *)desc;
p                 264 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	if ((le32_to_cpu(p->des2) == 0xffffffff) &&
p                 265 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	    (le32_to_cpu(p->des3) == 0xffffffff))
p                 274 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	struct dma_desc *p = (struct dma_desc *)head;
p                 282 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 		x = *(u64 *)p;
p                 284 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 			i, (unsigned int)virt_to_phys(p),
p                 286 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 			p->des2, p->des3);
p                 287 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 		p++;
p                 292 drivers/net/ethernet/stmicro/stmmac/norm_desc.c static void ndesc_get_addr(struct dma_desc *p, unsigned int *addr)
p                 294 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	*addr = le32_to_cpu(p->des2);
p                 297 drivers/net/ethernet/stmicro/stmmac/norm_desc.c static void ndesc_set_addr(struct dma_desc *p, dma_addr_t addr)
p                 299 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	p->des2 = cpu_to_le32(addr);
p                 302 drivers/net/ethernet/stmicro/stmmac/norm_desc.c static void ndesc_clear(struct dma_desc *p)
p                 304 drivers/net/ethernet/stmicro/stmmac/norm_desc.c 	p->des2 = 0;
p                  17 drivers/net/ethernet/stmicro/stmmac/ring_mode.c static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
p                  19 drivers/net/ethernet/stmicro/stmmac/ring_mode.c 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
p                 104 drivers/net/ethernet/stmicro/stmmac/ring_mode.c static void refill_desc3(void *priv_ptr, struct dma_desc *p)
p                 111 drivers/net/ethernet/stmicro/stmmac/ring_mode.c 		p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
p                 115 drivers/net/ethernet/stmicro/stmmac/ring_mode.c static void init_desc3(struct dma_desc *p)
p                 117 drivers/net/ethernet/stmicro/stmmac/ring_mode.c 	p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
p                 120 drivers/net/ethernet/stmicro/stmmac/ring_mode.c static void clean_desc3(void *priv_ptr, struct dma_desc *p)
p                 130 drivers/net/ethernet/stmicro/stmmac/ring_mode.c 		p->des3 = 0;
p                 502 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c 				char *p;
p                 503 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c 				p = (char *)priv + stmmac_mmc[i].stat_offset;
p                 506 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c 					     sizeof(u64)) ? (*(u64 *)p) :
p                 507 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c 					     (*(u32 *)p);
p                 522 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c 		char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
p                 524 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c 			     sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
p                 561 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c 	u8 *p = data;
p                 572 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c 					memcpy(p, desc, ETH_GSTRING_LEN);
p                 573 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c 					p += ETH_GSTRING_LEN;
p                 579 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c 				memcpy(p, stmmac_mmc[i].stat_string,
p                 581 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c 				p += ETH_GSTRING_LEN;
p                 584 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c 			memcpy(p, stmmac_gstrings_stats[i].stat_string,
p                 586 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c 			p += ETH_GSTRING_LEN;
p                 590 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c 		stmmac_selftest_get_strings(priv, p);
p                 433 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				   struct dma_desc *p, struct sk_buff *skb)
p                 447 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	if (stmmac_get_tx_timestamp_status(priv, p)) {
p                 448 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
p                 473 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
p                 477 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	struct dma_desc *desc = p;
p                 487 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
p                1206 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
p                1222 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
p                1228 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	stmmac_set_desc_addr(priv, p, buf->addr);
p                1230 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_init_desc3(priv, p);
p                1316 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			struct dma_desc *p;
p                1319 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				p = &((rx_q->dma_erx + i)->basic);
p                1321 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				p = rx_q->dma_rx + i;
p                1323 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
p                1392 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			struct dma_desc *p;
p                1394 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				p = &((tx_q->dma_etx + i)->basic);
p                1396 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				p = tx_q->dma_tx + i;
p                1398 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			stmmac_clear_desc(priv, p);
p                1883 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct dma_desc *p;
p                1887 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
p                1889 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			p = tx_q->dma_tx + entry;
p                1892 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				&priv->xstats, p, priv->ioaddr);
p                1913 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			stmmac_get_tx_hwtstamp(priv, p, skb);
p                1932 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_clean_desc3(priv, tx_q, p);
p                1944 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_release_tx_desc(priv, p, priv->mode);
p                2816 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	struct dma_desc *p;
p                2829 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	p = tx_q->dma_tx + tx_q->cur_tx;
p                2830 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
p                2833 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	stmmac_set_tx_owner(priv, p);
p                3394 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct dma_desc *p;
p                3398 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
p                3400 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			p = rx_q->dma_rx + entry;
p                3427 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_set_desc_addr(priv, p, buf->addr);
p                3428 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
p                3429 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_refill_desc3(priv, rx_q, p);
p                3438 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_set_rx_owner(priv, p, use_rx_wd);
p                3480 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct dma_desc *np, *p;
p                3505 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
p                3507 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			p = rx_q->dma_rx + entry;
p                3511 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				&priv->xstats, p);
p                3552 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			len = stmmac_get_rx_frame_len(priv, p, coe);
p                3567 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			int ret = stmmac_get_rx_header_len(priv, p, &hlen);
p                3631 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
p                3640 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
p                4041 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	struct dma_desc *p = (struct dma_desc *)head;
p                4054 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				   i, (unsigned int)virt_to_phys(p),
p                4055 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
p                4056 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
p                4057 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			p++;
p                1927 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	u8 *p = data;
p                1931 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		snprintf(p, ETH_GSTRING_LEN, "%2d. %s", i + 1,
p                1933 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 		p += ETH_GSTRING_LEN;
p                1951 drivers/net/ethernet/sun/cassini.c 	char *p;
p                1969 drivers/net/ethernet/sun/cassini.c 	p = skb->data;
p                1983 drivers/net/ethernet/sun/cassini.c 		memcpy(p, addr + off, i);
p                1988 drivers/net/ethernet/sun/cassini.c 		p += hlen;
p                2016 drivers/net/ethernet/sun/cassini.c 		if (p == (char *) skb->data) { /* not split */
p                2018 drivers/net/ethernet/sun/cassini.c 			memcpy(p, addr + off, RX_COPY_MIN);
p                2092 drivers/net/ethernet/sun/cassini.c 		memcpy(p, addr + off, i);
p                2096 drivers/net/ethernet/sun/cassini.c 		if (p == (char *) skb->data) /* not split */
p                2103 drivers/net/ethernet/sun/cassini.c 			p += hlen;
p                2110 drivers/net/ethernet/sun/cassini.c 			memcpy(p, addr, dlen + cp->crc_size);
p                3152 drivers/net/ethernet/sun/cassini.c static int cas_vpd_match(const void __iomem *p, const char *str)
p                3158 drivers/net/ethernet/sun/cassini.c 		if (readb(p + i) != str[i])
p                3179 drivers/net/ethernet/sun/cassini.c 	void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
p                3198 drivers/net/ethernet/sun/cassini.c 	if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
p                3205 drivers/net/ethernet/sun/cassini.c 		if ((readb(p + i + 0) == 0x50) &&
p                3206 drivers/net/ethernet/sun/cassini.c 		    (readb(p + i + 1) == 0x43) &&
p                3207 drivers/net/ethernet/sun/cassini.c 		    (readb(p + i + 2) == 0x49) &&
p                3208 drivers/net/ethernet/sun/cassini.c 		    (readb(p + i + 3) == 0x52)) {
p                3209 drivers/net/ethernet/sun/cassini.c 			base = p + (readb(p + i + 8) |
p                3210 drivers/net/ethernet/sun/cassini.c 				    (readb(p + i + 9) << 8));
p                3228 drivers/net/ethernet/sun/cassini.c 		p = kstart;
p                3229 drivers/net/ethernet/sun/cassini.c 		while ((p - kstart) < len) {
p                3230 drivers/net/ethernet/sun/cassini.c 			int klen = readb(p + 2);
p                3234 drivers/net/ethernet/sun/cassini.c 			p += 3;
p                3273 drivers/net/ethernet/sun/cassini.c 			if (readb(p) != 'I')
p                3277 drivers/net/ethernet/sun/cassini.c 			type = readb(p + 3);
p                3279 drivers/net/ethernet/sun/cassini.c 				if ((klen == 29) && readb(p + 4) == 6 &&
p                3280 drivers/net/ethernet/sun/cassini.c 				    cas_vpd_match(p + 5,
p                3288 drivers/net/ethernet/sun/cassini.c 							readb(p + 23 + j);
p                3298 drivers/net/ethernet/sun/cassini.c 			    cas_vpd_match(p + 5, "entropy-dev") &&
p                3299 drivers/net/ethernet/sun/cassini.c 			    cas_vpd_match(p + 17, "vms110")) {
p                3308 drivers/net/ethernet/sun/cassini.c 			if ((klen == 18) && readb(p + 4) == 4 &&
p                3309 drivers/net/ethernet/sun/cassini.c 			    cas_vpd_match(p + 5, "phy-type")) {
p                3310 drivers/net/ethernet/sun/cassini.c 				if (cas_vpd_match(p + 14, "pcs")) {
p                3316 drivers/net/ethernet/sun/cassini.c 			if ((klen == 23) && readb(p + 4) == 4 &&
p                3317 drivers/net/ethernet/sun/cassini.c 			    cas_vpd_match(p + 5, "phy-interface")) {
p                3318 drivers/net/ethernet/sun/cassini.c 				if (cas_vpd_match(p + 19, "pcs")) {
p                3331 drivers/net/ethernet/sun/cassini.c 			p += klen;
p                4382 drivers/net/ethernet/sun/cassini.c 	u8 *p;
p                4387 drivers/net/ethernet/sun/cassini.c 	for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) {
p                4397 drivers/net/ethernet/sun/cassini.c 		memcpy(p, (u8 *)&val, sizeof(u32));
p                4699 drivers/net/ethernet/sun/cassini.c 			     void *p)
p                4704 drivers/net/ethernet/sun/cassini.c 	cas_read_regs(cp, p, regs->len / sizeof(u32));
p                3282 drivers/net/ethernet/sun/niu.c 	struct page *p, **pp;
p                3286 drivers/net/ethernet/sun/niu.c 	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
p                3287 drivers/net/ethernet/sun/niu.c 		if (p->index == addr) {
p                3295 drivers/net/ethernet/sun/niu.c 	return p;
p                6382 drivers/net/ethernet/sun/niu.c static int niu_set_mac_addr(struct net_device *dev, void *p)
p                6385 drivers/net/ethernet/sun/niu.c 	struct sockaddr *addr = p;
p                8541 drivers/net/ethernet/sun/niu.c static int phy_record(struct niu_parent *parent, struct phy_probe_info *p,
p                8567 drivers/net/ethernet/sun/niu.c 	if (p->cur[type] >= NIU_MAX_PORTS) {
p                8571 drivers/net/ethernet/sun/niu.c 	idx = p->cur[type];
p                8572 drivers/net/ethernet/sun/niu.c 	p->phy_id[type][idx] = id;
p                8573 drivers/net/ethernet/sun/niu.c 	p->phy_port[type][idx] = phy_port;
p                8574 drivers/net/ethernet/sun/niu.c 	p->cur[type] = idx + 1;
p                8578 drivers/net/ethernet/sun/niu.c static int port_has_10g(struct phy_probe_info *p, int port)
p                8582 drivers/net/ethernet/sun/niu.c 	for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
p                8583 drivers/net/ethernet/sun/niu.c 		if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
p                8586 drivers/net/ethernet/sun/niu.c 	for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
p                8587 drivers/net/ethernet/sun/niu.c 		if (p->phy_port[PHY_TYPE_PCS][i] == port)
p                8594 drivers/net/ethernet/sun/niu.c static int count_10g_ports(struct phy_probe_info *p, int *lowest)
p                8601 drivers/net/ethernet/sun/niu.c 		if (port_has_10g(p, port)) {
p                8611 drivers/net/ethernet/sun/niu.c static int count_1g_ports(struct phy_probe_info *p, int *lowest)
p                8614 drivers/net/ethernet/sun/niu.c 	if (p->cur[PHY_TYPE_MII])
p                8615 drivers/net/ethernet/sun/niu.c 		*lowest = p->phy_port[PHY_TYPE_MII][0];
p                8617 drivers/net/ethernet/sun/niu.c 	return p->cur[PHY_TYPE_MII];
p                9330 drivers/net/ethernet/sun/niu.c 	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
p                9331 drivers/net/ethernet/sun/niu.c 	u32 port_phy = p->port_phy;
p                9339 drivers/net/ethernet/sun/niu.c 	for (i = 0; i < p->num_ports; i++) {
p                9360 drivers/net/ethernet/sun/niu.c 	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
p                9363 drivers/net/ethernet/sun/niu.c 	switch (p->plat_type) {
p                9389 drivers/net/ethernet/sun/niu.c 	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
p                9394 drivers/net/ethernet/sun/niu.c 	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
p                9396 drivers/net/ethernet/sun/niu.c 	for (i = 0; i < p->num_ports; i++) {
p                9422 drivers/net/ethernet/sun/niu.c 	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
p                9424 drivers/net/ethernet/sun/niu.c 	return sprintf(buf, "%d\n", p->num_ports);
p                9440 drivers/net/ethernet/sun/niu.c 	struct niu_parent *p;
p                9455 drivers/net/ethernet/sun/niu.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                9456 drivers/net/ethernet/sun/niu.c 	if (!p)
p                9459 drivers/net/ethernet/sun/niu.c 	p->index = niu_parent_index++;
p                9461 drivers/net/ethernet/sun/niu.c 	plat_dev->dev.platform_data = p;
p                9462 drivers/net/ethernet/sun/niu.c 	p->plat_dev = plat_dev;
p                9464 drivers/net/ethernet/sun/niu.c 	memcpy(&p->id, id, sizeof(*id));
p                9465 drivers/net/ethernet/sun/niu.c 	p->plat_type = ptype;
p                9466 drivers/net/ethernet/sun/niu.c 	INIT_LIST_HEAD(&p->list);
p                9467 drivers/net/ethernet/sun/niu.c 	atomic_set(&p->refcnt, 0);
p                9468 drivers/net/ethernet/sun/niu.c 	list_add(&p->list, &niu_parent_list);
p                9469 drivers/net/ethernet/sun/niu.c 	spin_lock_init(&p->lock);
p                9471 drivers/net/ethernet/sun/niu.c 	p->rxdma_clock_divider = 7500;
p                9473 drivers/net/ethernet/sun/niu.c 	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
p                9474 drivers/net/ethernet/sun/niu.c 	if (p->plat_type == PLAT_TYPE_NIU)
p                9475 drivers/net/ethernet/sun/niu.c 		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;
p                9480 drivers/net/ethernet/sun/niu.c 		p->tcam_key[index] = TCAM_KEY_TSEL;
p                9481 drivers/net/ethernet/sun/niu.c 		p->flow_key[index] = (FLOW_KEY_IPSA |
p                9491 drivers/net/ethernet/sun/niu.c 		p->ldg_map[i] = LDG_INVALID;
p                9493 drivers/net/ethernet/sun/niu.c 	return p;
p                9503 drivers/net/ethernet/sun/niu.c 	struct niu_parent *p, *tmp;
p                9507 drivers/net/ethernet/sun/niu.c 	p = NULL;
p                9510 drivers/net/ethernet/sun/niu.c 			p = tmp;
p                9514 drivers/net/ethernet/sun/niu.c 	if (!p)
p                9515 drivers/net/ethernet/sun/niu.c 		p = niu_new_parent(np, id, ptype);
p                9517 drivers/net/ethernet/sun/niu.c 	if (p) {
p                9522 drivers/net/ethernet/sun/niu.c 		err = sysfs_create_link(&p->plat_dev->dev.kobj,
p                9526 drivers/net/ethernet/sun/niu.c 			p->ports[port] = np;
p                9527 drivers/net/ethernet/sun/niu.c 			atomic_inc(&p->refcnt);
p                9532 drivers/net/ethernet/sun/niu.c 	return p;
p                9537 drivers/net/ethernet/sun/niu.c 	struct niu_parent *p = np->parent;
p                9541 drivers/net/ethernet/sun/niu.c 	BUG_ON(!p || p->ports[port] != np);
p                9550 drivers/net/ethernet/sun/niu.c 	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);
p                9552 drivers/net/ethernet/sun/niu.c 	p->ports[port] = NULL;
p                9555 drivers/net/ethernet/sun/niu.c 	if (atomic_dec_and_test(&p->refcnt)) {
p                9556 drivers/net/ethernet/sun/niu.c 		list_del(&p->list);
p                9557 drivers/net/ethernet/sun/niu.c 		platform_device_unregister(p->plat_dev);
p                2008 drivers/net/ethernet/sun/sungem.c 		const char *p;
p                2010 drivers/net/ethernet/sun/sungem.c 		p = of_get_property(gp->of_node, "shared-pins", NULL);
p                2011 drivers/net/ethernet/sun/sungem.c 		if (p && !strcmp(p, "serdes"))
p                2736 drivers/net/ethernet/sun/sungem.c 		void __iomem *p = rom_base + this_offset;
p                2739 drivers/net/ethernet/sun/sungem.c 		if (readb(p + 0) != 0x90 ||
p                2740 drivers/net/ethernet/sun/sungem.c 		    readb(p + 1) != 0x00 ||
p                2741 drivers/net/ethernet/sun/sungem.c 		    readb(p + 2) != 0x09 ||
p                2742 drivers/net/ethernet/sun/sungem.c 		    readb(p + 3) != 0x4e ||
p                2743 drivers/net/ethernet/sun/sungem.c 		    readb(p + 4) != 0x41 ||
p                2744 drivers/net/ethernet/sun/sungem.c 		    readb(p + 5) != 0x06)
p                2748 drivers/net/ethernet/sun/sungem.c 		p += 6;
p                2751 drivers/net/ethernet/sun/sungem.c 			dev_addr[i] = readb(p + i);
p                2760 drivers/net/ethernet/sun/sungem.c 	void __iomem *p = pci_map_rom(pdev, &size);
p                2762 drivers/net/ethernet/sun/sungem.c 	if (p) {
p                2765 drivers/net/ethernet/sun/sungem.c 		found = readb(p) == 0x55 &&
p                2766 drivers/net/ethernet/sun/sungem.c 			readb(p + 1) == 0xaa &&
p                2767 drivers/net/ethernet/sun/sungem.c 			find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
p                2768 drivers/net/ethernet/sun/sungem.c 		pci_unmap_rom(pdev, p);
p                 211 drivers/net/ethernet/sun/sunhme.c static u32 sbus_hme_read_desc32(hme32 *p)
p                 213 drivers/net/ethernet/sun/sunhme.c 	return (__force u32)*p;
p                 240 drivers/net/ethernet/sun/sunhme.c static u32 pci_hme_read_desc32(hme32 *p)
p                 242 drivers/net/ethernet/sun/sunhme.c 	return le32_to_cpup((__le32 *)p);
p                 305 drivers/net/ethernet/sun/sunhme.c static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
p                 307 drivers/net/ethernet/sun/sunhme.c 	return le32_to_cpup((__le32 *)p);
p                2931 drivers/net/ethernet/sun/sunhme.c 		void __iomem *p = rom_base + this_offset;
p                2933 drivers/net/ethernet/sun/sunhme.c 		if (readb(p + 0) != 0x90 ||
p                2934 drivers/net/ethernet/sun/sunhme.c 		    readb(p + 1) != 0x00 ||
p                2935 drivers/net/ethernet/sun/sunhme.c 		    readb(p + 2) != 0x09 ||
p                2936 drivers/net/ethernet/sun/sunhme.c 		    readb(p + 3) != 0x4e ||
p                2937 drivers/net/ethernet/sun/sunhme.c 		    readb(p + 4) != 0x41 ||
p                2938 drivers/net/ethernet/sun/sunhme.c 		    readb(p + 5) != 0x06)
p                2942 drivers/net/ethernet/sun/sunhme.c 		p += 6;
p                2948 drivers/net/ethernet/sun/sunhme.c 				dev_addr[i] = readb(p + i);
p                2959 drivers/net/ethernet/sun/sunhme.c 	void __iomem *p = pci_map_rom(pdev, &size);
p                2961 drivers/net/ethernet/sun/sunhme.c 	if (p) {
p                2968 drivers/net/ethernet/sun/sunhme.c 		found = readb(p) == 0x55 &&
p                2969 drivers/net/ethernet/sun/sunhme.c 			readb(p + 1) == 0xaa &&
p                2970 drivers/net/ethernet/sun/sunhme.c 			find_eth_addr_in_vpd(p, (64 * 1024), index, dev_addr);
p                2971 drivers/net/ethernet/sun/sunhme.c 		pci_unmap_rom(pdev, p);
p                 117 drivers/net/ethernet/sun/sunvnet.c 	char *p = (char *)buf;
p                 122 drivers/net/ethernet/sun/sunvnet.c 		p += sizeof(ethtool_stats_keys);
p                 126 drivers/net/ethernet/sun/sunvnet.c 			snprintf(p, ETH_GSTRING_LEN, "p%u.%s-%pM",
p                 129 drivers/net/ethernet/sun/sunvnet.c 			p += ETH_GSTRING_LEN;
p                 130 drivers/net/ethernet/sun/sunvnet.c 			snprintf(p, ETH_GSTRING_LEN, "p%u.rx_packets",
p                 132 drivers/net/ethernet/sun/sunvnet.c 			p += ETH_GSTRING_LEN;
p                 133 drivers/net/ethernet/sun/sunvnet.c 			snprintf(p, ETH_GSTRING_LEN, "p%u.tx_packets",
p                 135 drivers/net/ethernet/sun/sunvnet.c 			p += ETH_GSTRING_LEN;
p                 136 drivers/net/ethernet/sun/sunvnet.c 			snprintf(p, ETH_GSTRING_LEN, "p%u.rx_bytes",
p                 138 drivers/net/ethernet/sun/sunvnet.c 			p += ETH_GSTRING_LEN;
p                 139 drivers/net/ethernet/sun/sunvnet.c 			snprintf(p, ETH_GSTRING_LEN, "p%u.tx_bytes",
p                 141 drivers/net/ethernet/sun/sunvnet.c 			p += ETH_GSTRING_LEN;
p                 142 drivers/net/ethernet/sun/sunvnet.c 			snprintf(p, ETH_GSTRING_LEN, "p%u.event_up",
p                 144 drivers/net/ethernet/sun/sunvnet.c 			p += ETH_GSTRING_LEN;
p                 145 drivers/net/ethernet/sun/sunvnet.c 			snprintf(p, ETH_GSTRING_LEN, "p%u.event_reset",
p                 147 drivers/net/ethernet/sun/sunvnet.c 			p += ETH_GSTRING_LEN;
p                1682 drivers/net/ethernet/sun/sunvnet_common.c int sunvnet_set_mac_addr_common(struct net_device *dev, void *p)
p                 137 drivers/net/ethernet/sun/sunvnet_common.h int sunvnet_set_mac_addr_common(struct net_device *dev, void *p);
p                 828 drivers/net/ethernet/tehuti/tehuti.c static int bdx_set_mac(struct net_device *ndev, void *p)
p                 831 drivers/net/ethernet/tehuti/tehuti.c 	struct sockaddr *addr = p;
p                2057 drivers/net/ethernet/ti/cpsw.c static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
p                2060 drivers/net/ethernet/ti/cpsw.c 	struct sockaddr *addr = (struct sockaddr *)p;
p                 234 drivers/net/ethernet/ti/cpsw_ethtool.c static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
p                 243 drivers/net/ethernet/ti/cpsw_ethtool.c 		snprintf(*p, ETH_GSTRING_LEN,
p                 247 drivers/net/ethernet/ti/cpsw_ethtool.c 		*p += ETH_GSTRING_LEN;
p                 254 drivers/net/ethernet/ti/cpsw_ethtool.c 	u8 *p = data;
p                 260 drivers/net/ethernet/ti/cpsw_ethtool.c 			memcpy(p, cpsw_gstrings_stats[i].stat_string,
p                 262 drivers/net/ethernet/ti/cpsw_ethtool.c 			p += ETH_GSTRING_LEN;
p                 265 drivers/net/ethernet/ti/cpsw_ethtool.c 		cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
p                 266 drivers/net/ethernet/ti/cpsw_ethtool.c 		cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
p                 274 drivers/net/ethernet/ti/cpsw_ethtool.c 	u8 *p;
p                 287 drivers/net/ethernet/ti/cpsw_ethtool.c 			p = (u8 *)&ch_stats +
p                 289 drivers/net/ethernet/ti/cpsw_ethtool.c 			data[l] = *(u32 *)p;
p                 296 drivers/net/ethernet/ti/cpsw_ethtool.c 			p = (u8 *)&ch_stats +
p                 298 drivers/net/ethernet/ti/cpsw_ethtool.c 			data[l] = *(u32 *)p;
p                 345 drivers/net/ethernet/ti/cpsw_ethtool.c void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p)
p                 347 drivers/net/ethernet/ti/cpsw_ethtool.c 	u32 *reg = p;
p                 417 drivers/net/ethernet/ti/cpsw_priv.h void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p);
p                1915 drivers/net/ethernet/ti/netcp_core.c 	struct netcp_stats *p = &netcp->stats;
p                1920 drivers/net/ethernet/ti/netcp_core.c 		start = u64_stats_fetch_begin_irq(&p->syncp_rx);
p                1921 drivers/net/ethernet/ti/netcp_core.c 		rxpackets       = p->rx_packets;
p                1922 drivers/net/ethernet/ti/netcp_core.c 		rxbytes         = p->rx_bytes;
p                1923 drivers/net/ethernet/ti/netcp_core.c 	} while (u64_stats_fetch_retry_irq(&p->syncp_rx, start));
p                1926 drivers/net/ethernet/ti/netcp_core.c 		start = u64_stats_fetch_begin_irq(&p->syncp_tx);
p                1927 drivers/net/ethernet/ti/netcp_core.c 		txpackets       = p->tx_packets;
p                1928 drivers/net/ethernet/ti/netcp_core.c 		txbytes         = p->tx_bytes;
p                1929 drivers/net/ethernet/ti/netcp_core.c 	} while (u64_stats_fetch_retry_irq(&p->syncp_tx, start));
p                1937 drivers/net/ethernet/ti/netcp_core.c 	stats->rx_errors = p->rx_errors;
p                1938 drivers/net/ethernet/ti/netcp_core.c 	stats->rx_dropped = p->rx_dropped;
p                1939 drivers/net/ethernet/ti/netcp_core.c 	stats->tx_dropped = p->tx_dropped;
p                 174 drivers/net/ethernet/ti/netcp_ethss.c #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
p                 176 drivers/net/ethernet/ti/netcp_ethss.c #define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
p                 178 drivers/net/ethernet/ti/netcp_ethss.c #define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
p                 180 drivers/net/ethernet/ti/netcp_ethss.c #define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
p                2535 drivers/net/ethernet/ti/netcp_ethss.c #define HAS_PHY_TXTSTAMP(p) ((p)->drv && (p)->drv->txtstamp)
p                2536 drivers/net/ethernet/ti/netcp_ethss.c #define HAS_PHY_RXTSTAMP(p) ((p)->drv && (p)->drv->rxtstamp)
p                 598 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	uint8_t *p;
p                 632 drivers/net/ethernet/toshiba/ps3_gelic_net.c 		p = ha->addr;
p                 635 drivers/net/ethernet/toshiba/ps3_gelic_net.c 			addr |= *p++;
p                1519 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	void *p;
p                1533 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	p  = kzalloc(alloc_size, GFP_KERNEL);
p                1534 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	if (!p)
p                1536 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	card = PTR_ALIGN(p, GELIC_ALIGN);
p                1537 drivers/net/ethernet/toshiba/ps3_gelic_net.c 	card->unalign = p;
p                 314 drivers/net/ethernet/toshiba/ps3_gelic_net.h static inline struct gelic_card *port_to_card(struct gelic_port *p)
p                 316 drivers/net/ethernet/toshiba/ps3_gelic_net.h 	return p->card;
p                 318 drivers/net/ethernet/toshiba/ps3_gelic_net.h static inline struct net_device *port_to_netdev(struct gelic_port *p)
p                 320 drivers/net/ethernet/toshiba/ps3_gelic_net.h 	return p->netdev;
p                1279 drivers/net/ethernet/toshiba/spider_net.c spider_net_set_mac(struct net_device *netdev, void *p)
p                1283 drivers/net/ethernet/toshiba/spider_net.c 	struct sockaddr *addr = p;
p                 487 drivers/net/ethernet/via/via-rhine.c #define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
p                 488 drivers/net/ethernet/via/via-rhine.c #define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
p                 489 drivers/net/ethernet/via/via-rhine.c #define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
p                 491 drivers/net/ethernet/via/via-rhine.c #define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
p                 492 drivers/net/ethernet/via/via-rhine.c #define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
p                 493 drivers/net/ethernet/via/via-rhine.c #define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))
p                 495 drivers/net/ethernet/via/via-rhine.c #define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
p                 496 drivers/net/ethernet/via/via-rhine.c #define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
p                 497 drivers/net/ethernet/via/via-rhine.c #define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
p                 499 drivers/net/ethernet/via/via-rhine.c #define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
p                 500 drivers/net/ethernet/via/via-rhine.c #define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
p                 501 drivers/net/ethernet/via/via-rhine.c #define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
p                3632 drivers/net/ethernet/via/via-velocity.c 		u32 *p = vptr->mib_counter;
p                3640 drivers/net/ethernet/via/via-velocity.c 			*data++ = *p++;
p                  35 drivers/net/ethernet/via/via-velocity.h #define BYTE_REG_BITS_ON(x,p)       do { writeb(readb((p))|(x),(p));} while (0)
p                  36 drivers/net/ethernet/via/via-velocity.h #define WORD_REG_BITS_ON(x,p)       do { writew(readw((p))|(x),(p));} while (0)
p                  37 drivers/net/ethernet/via/via-velocity.h #define DWORD_REG_BITS_ON(x,p)      do { writel(readl((p))|(x),(p));} while (0)
p                  39 drivers/net/ethernet/via/via-velocity.h #define BYTE_REG_BITS_IS_ON(x,p)    (readb((p)) & (x))
p                  40 drivers/net/ethernet/via/via-velocity.h #define WORD_REG_BITS_IS_ON(x,p)    (readw((p)) & (x))
p                  41 drivers/net/ethernet/via/via-velocity.h #define DWORD_REG_BITS_IS_ON(x,p)   (readl((p)) & (x))
p                  43 drivers/net/ethernet/via/via-velocity.h #define BYTE_REG_BITS_OFF(x,p)      do { writeb(readb((p)) & (~(x)),(p));} while (0)
p                  44 drivers/net/ethernet/via/via-velocity.h #define WORD_REG_BITS_OFF(x,p)      do { writew(readw((p)) & (~(x)),(p));} while (0)
p                  45 drivers/net/ethernet/via/via-velocity.h #define DWORD_REG_BITS_OFF(x,p)     do { writel(readl((p)) & (~(x)),(p));} while (0)
p                  47 drivers/net/ethernet/via/via-velocity.h #define BYTE_REG_BITS_SET(x,m,p)    do { writeb( (readb((p)) & (~(m))) |(x),(p));} while (0)
p                  48 drivers/net/ethernet/via/via-velocity.h #define WORD_REG_BITS_SET(x,m,p)    do { writew( (readw((p)) & (~(m))) |(x),(p));} while (0)
p                  49 drivers/net/ethernet/via/via-velocity.h #define DWORD_REG_BITS_SET(x,m,p)   do { writel( (readl((p)) & (~(m)))|(x),(p));}  while (0)
p                  51 drivers/net/ethernet/via/via-velocity.h #define VAR_USED(p)     do {(p)=(p);} while (0)
p                1264 drivers/net/ethernet/via/via-velocity.h #define MII_REG_BITS_ON(x,i,p) do {\
p                1266 drivers/net/ethernet/via/via-velocity.h     velocity_mii_read((p),(i),&(w));\
p                1268 drivers/net/ethernet/via/via-velocity.h     velocity_mii_write((p),(i),(w));\
p                1271 drivers/net/ethernet/via/via-velocity.h #define MII_REG_BITS_OFF(x,i,p) do {\
p                1273 drivers/net/ethernet/via/via-velocity.h     velocity_mii_read((p),(i),&(w));\
p                1275 drivers/net/ethernet/via/via-velocity.h     velocity_mii_write((p),(i),(w));\
p                1278 drivers/net/ethernet/via/via-velocity.h #define MII_REG_BITS_IS_ON(x,i,p) ({\
p                1280 drivers/net/ethernet/via/via-velocity.h     velocity_mii_read((p),(i),&(w));\
p                1283 drivers/net/ethernet/via/via-velocity.h #define MII_GET_PHY_ID(p) ({\
p                1285 drivers/net/ethernet/via/via-velocity.h     velocity_mii_read((p),MII_PHYSID2,(u16 *) &id);\
p                1286 drivers/net/ethernet/via/via-velocity.h     velocity_mii_read((p),MII_PHYSID1,((u16 *) &id)+1);\
p                1310 drivers/net/ethernet/via/via-velocity.h #define VELOCITY_DBG(p,args...) printk(p, ##args)
p                1316 drivers/net/ethernet/via/via-velocity.h #define VELOCITY_PRT(l, p, args...) do {if (l<=msglevel) printk( p ,##args);} while (0)
p                1318 drivers/net/ethernet/via/via-velocity.h #define VELOCITY_PRT_CAMMASK(p,t) {\
p                1322 drivers/net/ethernet/via/via-velocity.h 			printk("%02X",(p)->mCAMmask[i]);\
p                1326 drivers/net/ethernet/via/via-velocity.h 			printk("%02X",(p)->vCAMmask[i]);\
p                1423 drivers/net/ethernet/via/via-velocity.h #define AVAIL_TD(p,q)   ((p)->options.numtx-((p)->tx.used[(q)]))
p                 444 drivers/net/ethernet/xilinx/ll_temac_main.c static int temac_set_mac_address(struct net_device *ndev, void *p)
p                 446 drivers/net/ethernet/xilinx/ll_temac_main.c 	struct sockaddr *addr = p;
p                 737 drivers/net/ethernet/xilinx/ll_temac_main.c static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
p                 739 drivers/net/ethernet/xilinx/ll_temac_main.c 	bd->app3 = (u32)(((u64)p) >> 32);
p                 740 drivers/net/ethernet/xilinx/ll_temac_main.c 	bd->app4 = (u32)((u64)p & 0xFFFFFFFF);
p                 750 drivers/net/ethernet/xilinx/ll_temac_main.c static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
p                 752 drivers/net/ethernet/xilinx/ll_temac_main.c 	bd->app4 = (u32)p;
p                1281 drivers/net/ethernet/xilinx/ll_temac_main.c 	__be32 *p;
p                1362 drivers/net/ethernet/xilinx/ll_temac_main.c 		p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
p                1363 drivers/net/ethernet/xilinx/ll_temac_main.c 		if (p && be32_to_cpu(*p))
p                1365 drivers/net/ethernet/xilinx/ll_temac_main.c 		p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
p                1366 drivers/net/ethernet/xilinx/ll_temac_main.c 		if (p && be32_to_cpu(*p))
p                 334 drivers/net/ethernet/xilinx/xilinx_axienet_main.c static int netdev_set_mac_address(struct net_device *ndev, void *p)
p                 336 drivers/net/ethernet/xilinx/xilinx_axienet_main.c 	struct sockaddr *addr = p;
p                1064 drivers/net/ethernet/xilinx/xilinx_emaclite.c 	u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL);
p                1066 drivers/net/ethernet/xilinx/xilinx_emaclite.c 	if (!p) {
p                1071 drivers/net/ethernet/xilinx/xilinx_emaclite.c 	return (bool)*p;
p                1080 drivers/net/ethernet/xircom/xirc2ps_cs.c 		    u_long *p = skb_put(skb, pktlen);
p                1083 drivers/net/ethernet/xircom/xirc2ps_cs.c 		    for (i=0; i < len ; i += 4, p++) {
p                1088 drivers/net/ethernet/xircom/xirc2ps_cs.c 			*p = a;
p                  73 drivers/net/ethernet/xscale/ixp4xx_eth.c #define PORT2CHANNEL(p)		NPE_ID(p->id)
p                 143 drivers/net/fddi/skfp/cfm.c 	int		p ;
p                 145 drivers/net/fddi/skfp/cfm.c 	for ( p = 0,phy = smc->y ; p < NUMPHYS; p++, phy++ ) {
p                 283 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
p                 284 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
p                 285 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PA].fddiPORTMACPlacement = 0 ;
p                 286 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PB].fddiPORTMACPlacement = 0 ;
p                 318 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_CONCATENATED ;
p                 319 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
p                 320 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PA].fddiPORTMACPlacement = INDEX_MAC ;
p                 321 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PB].fddiPORTMACPlacement = 0 ;
p                 383 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
p                 384 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_CONCATENATED ;
p                 385 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PA].fddiPORTMACPlacement = 0 ;
p                 386 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PB].fddiPORTMACPlacement = INDEX_MAC ;
p                 435 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_THRU ;
p                 436 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_THRU ;
p                 437 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PA].fddiPORTMACPlacement = 0 ;
p                 438 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PB].fddiPORTMACPlacement = INDEX_MAC ;
p                 468 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_THRU ;
p                 469 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_THRU ;
p                 470 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PA].fddiPORTMACPlacement = INDEX_MAC ;
p                 471 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PB].fddiPORTMACPlacement = 0 ;
p                 501 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PS].fddiPORTCurrentPath = MIB_PATH_CONCATENATED ;
p                 502 drivers/net/fddi/skfp/cfm.c 		smc->mib.p[PS].fddiPORTMACPlacement = INDEX_MAC ;
p                 369 drivers/net/fddi/skfp/drvfbi.c void plc_clear_irq(struct s_smc *smc, int p)
p                 371 drivers/net/fddi/skfp/drvfbi.c 	SK_UNUSED(p) ;
p                 140 drivers/net/fddi/skfp/ecm.c 	int	p ;			/* ports */
p                 192 drivers/net/fddi/skfp/ecm.c 		for (p = 0 ; p < NUMPHYS ; p++)
p                 193 drivers/net/fddi/skfp/ecm.c 			if (smc->mib.p[p].fddiPORTHardwarePresent)
p                 194 drivers/net/fddi/skfp/ecm.c 				queue_event(smc,EVENT_PCMA+p,PC_START) ;
p                 247 drivers/net/fddi/skfp/ecm.c 		for (p = 0 ; p < NUMPHYS ; p++)
p                 248 drivers/net/fddi/skfp/ecm.c 			queue_event(smc,EVENT_PCMA+p,PC_STOP) ;
p                 472 drivers/net/fddi/skfp/ecm.c 	int	p ;
p                 485 drivers/net/fddi/skfp/ecm.c 			for (p = NUMPHYS-1 ; p >= 0 ; p--) {
p                 487 drivers/net/fddi/skfp/ecm.c 					ENTITY_BIT(ENTITY_PHY(p)))
p                 490 drivers/net/fddi/skfp/ecm.c 			initiator = ENTITY_PHY(p) ;
p                 491 drivers/net/fddi/skfp/ecm.c 			smc->e.trace_prop &= ~ENTITY_BIT(ENTITY_PHY(p)) ;
p                 116 drivers/net/fddi/skfp/ess.c 	void			*p ;		/* universal pointer */
p                 132 drivers/net/fddi/skfp/ess.c 	if (!(p = (void *) sm_to_para(smc,sm,SMT_P0015))) {
p                 136 drivers/net/fddi/skfp/ess.c 	msg_res_type = ((struct smt_p_0015 *)p)->res_type ;
p                 176 drivers/net/fddi/skfp/ess.c 			p = (void *) sm_to_para(smc,sm,SMT_P0019)  ;
p                 178 drivers/net/fddi/skfp/ess.c 				if (((struct smt_p_0019 *)p)->alloc_addr.a[i]) {
p                 189 drivers/net/fddi/skfp/ess.c 			p = (void *) sm_to_para(smc,sm,SMT_P320F) ;
p                 190 drivers/net/fddi/skfp/ess.c 			((struct smt_p_320f *)p)->mib_payload =
p                 192 drivers/net/fddi/skfp/ess.c 			p = (void *) sm_to_para(smc,sm,SMT_P3210) ;
p                 193 drivers/net/fddi/skfp/ess.c 			((struct smt_p_3210 *)p)->mib_overhead =
p                 247 drivers/net/fddi/skfp/ess.c 		p = (void *) sm_to_para(smc,sm,SMT_P320F) ;
p                 248 drivers/net/fddi/skfp/ess.c                 if (!p) {
p                 252 drivers/net/fddi/skfp/ess.c 		payload = ((struct smt_p_320f *)p)->mib_payload ;
p                 253 drivers/net/fddi/skfp/ess.c 		p = (void *) sm_to_para(smc,sm,SMT_P3210) ;
p                 254 drivers/net/fddi/skfp/ess.c                 if (!p) {
p                 258 drivers/net/fddi/skfp/ess.c 		overhead = ((struct smt_p_3210 *)p)->mib_overhead ;
p                 306 drivers/net/fddi/skfp/ess.c 		p = (void *) sm_to_para(smc,sm,SMT_P320F) ;
p                 307 drivers/net/fddi/skfp/ess.c 		payload = ((struct smt_p_320f *)p)->mib_payload ;
p                 308 drivers/net/fddi/skfp/ess.c 		p = (void *) sm_to_para(smc,sm,SMT_P3210) ;
p                 309 drivers/net/fddi/skfp/ess.c 		overhead = ((struct smt_p_3210 *)p)->mib_overhead ;
p                 477 drivers/net/fddi/skfp/ess.c 	void			*p ;
p                 530 drivers/net/fddi/skfp/ess.c 		p = (void *) sm_to_para(smc,sm,SMT_P001A) ;
p                 531 drivers/net/fddi/skfp/ess.c 		chg->cat.category = ((struct smt_p_001a *)p)->category ;
p                 153 drivers/net/fddi/skfp/fplustm.c 	long p ;
p                 161 drivers/net/fddi/skfp/fplustm.c 	p = (u_long)inpw(FM_A(FM_MDRU))<<16 ;
p                 162 drivers/net/fddi/skfp/fplustm.c 	p += (u_long)inpw(FM_A(FM_MDRL)) ;
p                 163 drivers/net/fddi/skfp/fplustm.c 	return p;
p                 377 drivers/net/fddi/skfp/fplustm.c static void set_int(char *p, int l)
p                 379 drivers/net/fddi/skfp/fplustm.c 	p[0] = (char)(l >> 24) ;
p                 380 drivers/net/fddi/skfp/fplustm.c 	p[1] = (char)(l >> 16) ;
p                 381 drivers/net/fddi/skfp/fplustm.c 	p[2] = (char)(l >> 8) ;
p                 382 drivers/net/fddi/skfp/fplustm.c 	p[3] = (char)(l >> 0) ;
p                 401 drivers/net/fddi/skfp/fplustm.c 	__le32	*p ;
p                 406 drivers/net/fddi/skfp/fplustm.c 	p = (__le32 *) mac ;
p                 412 drivers/net/fddi/skfp/fplustm.c 		write_mdr(smc,le32_to_cpu(*p)) ;
p                 413 drivers/net/fddi/skfp/fplustm.c 		p++ ;
p                1063 drivers/net/fddi/skfp/fplustm.c 	u_char	*p ;
p                1071 drivers/net/fddi/skfp/fplustm.c 		p = own->a ;
p                1072 drivers/net/fddi/skfp/fplustm.c 		for (i = 0 ; i < 6 ; i++, p++)
p                1073 drivers/net/fddi/skfp/fplustm.c 			*p = bitrev8(*p);
p                 351 drivers/net/fddi/skfp/h/cmtdef.h #define ENTITY_PHY(p)	(p)
p                 576 drivers/net/fddi/skfp/h/cmtdef.h void smt_set_timestamp(struct s_smc *smc, u_char *p);
p                 591 drivers/net/fddi/skfp/h/cmtdef.h void plc_clear_irq(struct s_smc *smc, int p);
p                 645 drivers/net/fddi/skfp/h/cmtdef.h void dump_hex(char *p, int len);
p                 301 drivers/net/fddi/skfp/h/fddimib.h 	} p[NUMPHYS] ;
p                 870 drivers/net/fddi/skfp/h/skfbi.h #define	DB_PLC(p,iev) {	if (debug_plc & 0x1)\
p                 874 drivers/net/fddi/skfp/h/skfbi.h 					(p == PA) ? "A" : "B", iev) ;\
p                 876 drivers/net/fddi/skfp/h/skfbi.h 				dp_plc(p,iev) ;\
p                 888 drivers/net/fddi/skfp/h/skfbi.h #define	DB_PLC(p,iev)
p                 447 drivers/net/fddi/skfp/h/smc.h 	struct s_pcm	p ;		/* pcm */
p                 110 drivers/net/fddi/skfp/h/smt.h #define SMTSETPARA(p,t)		(p)->para.p_type = (t),\
p                 111 drivers/net/fddi/skfp/h/smt.h 				(p)->para.p_len = sizeof(*(p)) - PARA_LEN
p                  28 drivers/net/fddi/skfp/h/types.h #define inp(p)  ioread8(p)
p                  29 drivers/net/fddi/skfp/h/types.h #define inpw(p)	ioread16(p)
p                  30 drivers/net/fddi/skfp/h/types.h #define inpd(p) ioread32(p)
p                  31 drivers/net/fddi/skfp/h/types.h #define outp(p,c)  iowrite8(c,p)
p                  32 drivers/net/fddi/skfp/h/types.h #define outpw(p,s) iowrite16(s,p)
p                  33 drivers/net/fddi/skfp/h/types.h #define outpd(p,l) iowrite32(l,p)
p                  63 drivers/net/fddi/skfp/pcmplc.c int p
p                 199 drivers/net/fddi/skfp/pcmplc.c static void plc_init(struct s_smc *smc, int p);
p                 409 drivers/net/fddi/skfp/pcmplc.c 	int	p ;
p                 411 drivers/net/fddi/skfp/pcmplc.c 	for (p = 0 ; p < NUMPHYS ; p++)
p                 412 drivers/net/fddi/skfp/pcmplc.c 		plc_init(smc,p) ;
p                 415 drivers/net/fddi/skfp/pcmplc.c static void plc_init(struct s_smc *smc, int p)
p                 423 drivers/net/fddi/skfp/pcmplc.c 	outpw(PLC(p,PL_CNTRL_B),0) ;
p                 424 drivers/net/fddi/skfp/pcmplc.c 	outpw(PLC(p,PL_CNTRL_B),PL_PCM_STOP) ;
p                 425 drivers/net/fddi/skfp/pcmplc.c 	outpw(PLC(p,PL_CNTRL_A),0) ;
p                 431 drivers/net/fddi/skfp/pcmplc.c 	rev = inpw(PLC(p,PL_STATUS_A)) & PLC_REV_MASK ;
p                 435 drivers/net/fddi/skfp/pcmplc.c 		if (smc->y[p].pmd_scramble) {
p                 436 drivers/net/fddi/skfp/pcmplc.c 			outpw(PLC(p,PL_CNTRL_C),PLCS_CONTROL_C_S) ;
p                 438 drivers/net/fddi/skfp/pcmplc.c 			outpw(PLC(p,PL_T_FOT_ASS),PLCS_FASSERT_S) ;
p                 439 drivers/net/fddi/skfp/pcmplc.c 			outpw(PLC(p,PL_T_FOT_DEASS),PLCS_FDEASSERT_S) ;
p                 443 drivers/net/fddi/skfp/pcmplc.c 			outpw(PLC(p,PL_CNTRL_C),PLCS_CONTROL_C_U) ;
p                 445 drivers/net/fddi/skfp/pcmplc.c 			outpw(PLC(p,PL_T_FOT_ASS),PLCS_FASSERT_U) ;
p                 446 drivers/net/fddi/skfp/pcmplc.c 			outpw(PLC(p,PL_T_FOT_DEASS),PLCS_FDEASSERT_U) ;
p                 455 drivers/net/fddi/skfp/pcmplc.c 		outpw(PLC(p,pltm[i].timer),pltm[i].para) ;
p                 457 drivers/net/fddi/skfp/pcmplc.c 	(void)inpw(PLC(p,PL_INTR_EVENT)) ;	/* clear interrupt event reg */
p                 458 drivers/net/fddi/skfp/pcmplc.c 	plc_clear_irq(smc,p) ;
p                 459 drivers/net/fddi/skfp/pcmplc.c 	outpw(PLC(p,PL_INTR_MASK),plc_imsk_na); /* enable non active irq's */
p                 469 drivers/net/fddi/skfp/pcmplc.c 	if ((smc->s.sas == SMT_SAS) && (p == PS)) {
p                 470 drivers/net/fddi/skfp/pcmplc.c 		outpw(PLC(p,PL_CNTRL_B),PL_CLASS_S) ;
p                 478 drivers/net/fddi/skfp/pcmplc.c static void plc_go_state(struct s_smc *smc, int p, int state)
p                 485 drivers/net/fddi/skfp/pcmplc.c 	port = (HW_PTR) (PLC(p,PL_CNTRL_B)) ;
p                  41 drivers/net/fddi/skfp/pmf.c static int port_to_mib(struct s_smc *smc, int p);
p                 499 drivers/net/fddi/skfp/pmf.c 	char		*p ;
p                 504 drivers/net/fddi/skfp/pmf.c 	p = (char *) &smc->mib.fddiPRPMFStation ;
p                 505 drivers/net/fddi/skfp/pmf.c 	for (i = 0 ; i < 8 && !p[i] ; i++)
p                 515 drivers/net/fddi/skfp/pmf.c 	p = (char *) smc->mib.fddiPRPMFPasswd ;
p                 516 drivers/net/fddi/skfp/pmf.c 	for (i = 0 ; i < 8 && !p[i] ; i++)
p                 630 drivers/net/fddi/skfp/pmf.c 		mib_addr = (char *) (&smc->mib.p[port_to_mib(smc,port)]) ;
p                1128 drivers/net/fddi/skfp/pmf.c 		mib_p = &smc->mib.p[port_to_mib(smc,port)] ;
p                1540 drivers/net/fddi/skfp/pmf.c static int port_to_mib(struct s_smc *smc, int p)
p                1545 drivers/net/fddi/skfp/pmf.c 	return p;
p                1549 drivers/net/fddi/skfp/pmf.c 	return p;
p                1643 drivers/net/fddi/skfp/pmf.c void dump_hex(char *p, int len)
p                1649 drivers/net/fddi/skfp/pmf.c 		printf("%x%s",*p++ & 0xff,len ? ( (n & 7) ? " " : "-") : "") ;
p                1651 drivers/net/fddi/skfp/pmf.c 		printf("%02x%s",*p++ & 0xff,len ? ( (n & 7) ? " " : "-") : "") ;
p                 321 drivers/net/fddi/skfp/skfddi.c 	struct net_device *p = pci_get_drvdata(pdev);
p                 322 drivers/net/fddi/skfp/skfddi.c 	struct s_smc *lp = netdev_priv(p);
p                 324 drivers/net/fddi/skfp/skfddi.c 	unregister_netdev(p);
p                 346 drivers/net/fddi/skfp/skfddi.c 	free_netdev(p);
p                1224 drivers/net/fddi/skfp/smt.c void smt_set_timestamp(struct s_smc *smc, u_char *p)
p                1237 drivers/net/fddi/skfp/smt.c 	p[0] = 0 ;
p                1238 drivers/net/fddi/skfp/smt.c 	p[1] = (u_char)((time>>(8+8+8+8-1)) & 1) ;
p                1239 drivers/net/fddi/skfp/smt.c 	p[2] = (u_char)(time>>(8+8+8-1)) ;
p                1240 drivers/net/fddi/skfp/smt.c 	p[3] = (u_char)(time>>(8+8-1)) ;
p                1241 drivers/net/fddi/skfp/smt.c 	p[4] = (u_char)(time>>(8-1)) ;
p                1242 drivers/net/fddi/skfp/smt.c 	p[5] = (u_char)(time<<1) ;
p                1243 drivers/net/fddi/skfp/smt.c 	p[6] = (u_char)(smc->sm.uniq_ticks>>8) ;
p                1244 drivers/net/fddi/skfp/smt.c 	p[7] = (u_char)smc->sm.uniq_ticks ;
p                1338 drivers/net/fddi/skfp/smt.c 	int	p ;
p                1350 drivers/net/fddi/skfp/smt.c 	for (p = 0,phy = path->pd_phy ; p < ALLPHYS ; p++, phy++) {
p                1351 drivers/net/fddi/skfp/smt.c 		physp = p ;
p                1358 drivers/net/fddi/skfp/smt.c 		phy->phy_mib_index = smt_swap_short((u_short)p+INDEX_PORT) ;
p                1360 drivers/net/fddi/skfp/smt.c 		phy->phy_mib_index = p+INDEX_PORT ;
p                1366 drivers/net/fddi/skfp/smt.c 		phy->phy_resource_idx = phy_con_resource_index(smc,p) ;
p                1529 drivers/net/fddi/skfp/smt.c 	u_char	*p ;
p                1534 drivers/net/fddi/skfp/smt.c 	for (p = echo->ec_data ; len ; len--) {
p                1535 drivers/net/fddi/skfp/smt.c 		*p++ = (u_char) seed ;
p                1648 drivers/net/fddi/skfp/smt.c 	const u_short		*p = list ;
p                1649 drivers/net/fddi/skfp/smt.c 	while (*p) {
p                1650 drivers/net/fddi/skfp/smt.c 		if (!sm_to_para(smc,sm,(int) *p)) {
p                1651 drivers/net/fddi/skfp/smt.c 			DB_SMT("SMT: smt_check_para - missing para %hx", *p);
p                1654 drivers/net/fddi/skfp/smt.c 		p++ ;
p                1661 drivers/net/fddi/skfp/smt.c 	char	*p ;
p                1669 drivers/net/fddi/skfp/smt.c 	p = (char *)(sm+1) ;		/* pointer to info */
p                1671 drivers/net/fddi/skfp/smt.c 		if (((struct smt_para *)p)->p_type == para)
p                1672 drivers/net/fddi/skfp/smt.c 			found = (void *) p ;
p                1673 drivers/net/fddi/skfp/smt.c 		plen = ((struct smt_para *)p)->p_len + PARA_LEN ;
p                1674 drivers/net/fddi/skfp/smt.c 		p += plen ;
p                1702 drivers/net/fddi/skfp/smt.c 	char			*p ;
p                1706 drivers/net/fddi/skfp/smt.c 	p = smtod(mb, char *) + 12 ;
p                1708 drivers/net/fddi/skfp/smt.c 		*p++ = 1 << (i&7) ;
p                1826 drivers/net/fddi/skfp/smt.c 	char	*p ;
p                1839 drivers/net/fddi/skfp/smt.c 	p = (char *) (sm + 1) ;
p                1841 drivers/net/fddi/skfp/smt.c 		pa = (struct smt_para *) p ;
p                1861 drivers/net/fddi/skfp/smt.c 			smt_string_swap(p+PARA_LEN,pd->pswap,len) ;
p                1864 drivers/net/fddi/skfp/smt.c 		p += plen ;
p                1975 drivers/net/fddi/skfp/smt.c 				if (smc->mib.p[port].fddiPORTMy_Type != TM)
p                 249 drivers/net/fddi/skfp/smtdef.c 	pm = mib->p ;
p                  67 drivers/net/fddi/skfp/smtinit.c 	int	p ;
p                  87 drivers/net/fddi/skfp/smtinit.c 	for ( p = 0; p < NUMPHYS; p ++ ) {
p                  88 drivers/net/fddi/skfp/smtinit.c 		smc->y[p].mib = & smc->mib.p[p] ;
p                 108 drivers/net/fddi/skfp/smtinit.c 	for (p = 0 ; p < NUMPHYS ; p++) {
p                 109 drivers/net/fddi/skfp/smtinit.c 		pcm(smc,p,0) ;		/* PCM A state machine */
p                 124 drivers/net/fddi/skfp/srf.c 			&smc->mib.p[i].fddiPORTLerFlag ;
p                 126 drivers/net/fddi/skfp/srf.c 			&smc->mib.p[i].fddiPORTEB_Condition ;
p                 132 drivers/net/fddi/skfp/srf.c 			&smc->mib.p[i].fddiPORTMultiple_U ;
p                 134 drivers/net/fddi/skfp/srf.c 			&smc->mib.p[i].fddiPORTMultiple_P ;
p                  49 drivers/net/fjes/fjes_ethtool.c 	char *p;
p                  53 drivers/net/fjes/fjes_ethtool.c 		p = (char *)adapter + fjes_gstrings_stats[i].stat_offset;
p                  55 drivers/net/fjes/fjes_ethtool.c 			? *(u64 *)p : *(u32 *)p;
p                  90 drivers/net/fjes/fjes_ethtool.c 	u8 *p = data;
p                  96 drivers/net/fjes/fjes_ethtool.c 			memcpy(p, fjes_gstrings_stats[i].stat_string,
p                  98 drivers/net/fjes/fjes_ethtool.c 			p += ETH_GSTRING_LEN;
p                 103 drivers/net/fjes/fjes_ethtool.c 			sprintf(p, "ep%u_com_regist_buf_exec", i);
p                 104 drivers/net/fjes/fjes_ethtool.c 			p += ETH_GSTRING_LEN;
p                 105 drivers/net/fjes/fjes_ethtool.c 			sprintf(p, "ep%u_com_unregist_buf_exec", i);
p                 106 drivers/net/fjes/fjes_ethtool.c 			p += ETH_GSTRING_LEN;
p                 107 drivers/net/fjes/fjes_ethtool.c 			sprintf(p, "ep%u_send_intr_rx", i);
p                 108 drivers/net/fjes/fjes_ethtool.c 			p += ETH_GSTRING_LEN;
p                 109 drivers/net/fjes/fjes_ethtool.c 			sprintf(p, "ep%u_send_intr_unshare", i);
p                 110 drivers/net/fjes/fjes_ethtool.c 			p += ETH_GSTRING_LEN;
p                 111 drivers/net/fjes/fjes_ethtool.c 			sprintf(p, "ep%u_send_intr_zoneupdate", i);
p                 112 drivers/net/fjes/fjes_ethtool.c 			p += ETH_GSTRING_LEN;
p                 113 drivers/net/fjes/fjes_ethtool.c 			sprintf(p, "ep%u_recv_intr_rx", i);
p                 114 drivers/net/fjes/fjes_ethtool.c 			p += ETH_GSTRING_LEN;
p                 115 drivers/net/fjes/fjes_ethtool.c 			sprintf(p, "ep%u_recv_intr_unshare", i);
p                 116 drivers/net/fjes/fjes_ethtool.c 			p += ETH_GSTRING_LEN;
p                 117 drivers/net/fjes/fjes_ethtool.c 			sprintf(p, "ep%u_recv_intr_stop", i);
p                 118 drivers/net/fjes/fjes_ethtool.c 			p += ETH_GSTRING_LEN;
p                 119 drivers/net/fjes/fjes_ethtool.c 			sprintf(p, "ep%u_recv_intr_zoneupdate", i);
p                 120 drivers/net/fjes/fjes_ethtool.c 			p += ETH_GSTRING_LEN;
p                 121 drivers/net/fjes/fjes_ethtool.c 			sprintf(p, "ep%u_tx_buffer_full", i);
p                 122 drivers/net/fjes/fjes_ethtool.c 			p += ETH_GSTRING_LEN;
p                 123 drivers/net/fjes/fjes_ethtool.c 			sprintf(p, "ep%u_tx_dropped_not_shared", i);
p                 124 drivers/net/fjes/fjes_ethtool.c 			p += ETH_GSTRING_LEN;
p                 125 drivers/net/fjes/fjes_ethtool.c 			sprintf(p, "ep%u_tx_dropped_ver_mismatch", i);
p                 126 drivers/net/fjes/fjes_ethtool.c 			p += ETH_GSTRING_LEN;
p                 127 drivers/net/fjes/fjes_ethtool.c 			sprintf(p, "ep%u_tx_dropped_buf_size_mismatch", i);
p                 128 drivers/net/fjes/fjes_ethtool.c 			p += ETH_GSTRING_LEN;
p                 129 drivers/net/fjes/fjes_ethtool.c 			sprintf(p, "ep%u_tx_dropped_vlanid_mismatch", i);
p                 130 drivers/net/fjes/fjes_ethtool.c 			p += ETH_GSTRING_LEN;
p                 183 drivers/net/fjes/fjes_ethtool.c 			  struct ethtool_regs *regs, void *p)
p                 187 drivers/net/fjes/fjes_ethtool.c 	u32 *regs_buff = p;
p                 189 drivers/net/fjes/fjes_ethtool.c 	memset(p, 0, FJES_REGS_LEN * sizeof(u32));
p                 478 drivers/net/geneve.c 	struct sk_buff *p;
p                 505 drivers/net/geneve.c 	list_for_each_entry(p, head, list) {
p                 506 drivers/net/geneve.c 		if (!NAPI_GRO_CB(p)->same_flow)
p                 509 drivers/net/geneve.c 		gh2 = (struct genevehdr *)(p->data + off_gnv);
p                 512 drivers/net/geneve.c 			NAPI_GRO_CB(p)->same_flow = 0;
p                 166 drivers/net/hamradio/6pack.c 	unsigned char *msg, *p = icp;
p                 179 drivers/net/hamradio/6pack.c 	if (p[0] > 5) {
p                 184 drivers/net/hamradio/6pack.c 	if ((p[0] != 0) && (len > 2)) {
p                 189 drivers/net/hamradio/6pack.c 	if ((p[0] == 0) && (len < 15)) {
p                 194 drivers/net/hamradio/6pack.c 	count = encode_sixpack(p, sp->xbuff, len, sp->tx_delay);
p                 197 drivers/net/hamradio/6pack.c 	switch (p[0]) {
p                 198 drivers/net/hamradio/6pack.c 	case 1:	sp->tx_delay = p[1];
p                 200 drivers/net/hamradio/6pack.c 	case 2:	sp->persistence = p[1];
p                 202 drivers/net/hamradio/6pack.c 	case 3:	sp->slottime = p[1];
p                 206 drivers/net/hamradio/6pack.c 	case 5:	sp->duplex = p[1];
p                 210 drivers/net/hamradio/6pack.c 	if (p[0] != 0)
p                 374 drivers/net/hamradio/bpqether.c 	struct list_head *p;
p                 380 drivers/net/hamradio/bpqether.c 		p = rcu_dereference(list_next_rcu(&bpq_devices));
p                 382 drivers/net/hamradio/bpqether.c 		p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list));
p                 384 drivers/net/hamradio/bpqether.c 	return (p == &bpq_devices) ? NULL 
p                 385 drivers/net/hamradio/bpqether.c 		: list_entry(p, struct bpqdev, bpq_list);
p                1287 drivers/net/hamradio/dmascc.c 	int i = priv->tx_tail, p = priv->tx_ptr;
p                1291 drivers/net/hamradio/dmascc.c 	if (p == priv->tx_len[i]) {
p                1297 drivers/net/hamradio/dmascc.c 	while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
p                1298 drivers/net/hamradio/dmascc.c 		write_scc_data(priv, priv->tx_buf[i][p++], 0);
p                1302 drivers/net/hamradio/dmascc.c 	if (!priv->tx_ptr && p && priv->chip == Z8530)
p                1305 drivers/net/hamradio/dmascc.c 	priv->tx_ptr = p;
p                 426 drivers/net/hamradio/mkiss.c 	unsigned char *p;
p                 439 drivers/net/hamradio/mkiss.c 	p = icp;
p                 442 drivers/net/hamradio/mkiss.c 	if ((*p & 0x0f) != 0) {
p                 447 drivers/net/hamradio/mkiss.c 		switch (*p & 0xff) {
p                 452 drivers/net/hamradio/mkiss.c 				int cmd = (p[1] & 0xff);
p                 477 drivers/net/hamradio/mkiss.c 			count = kiss_esc(p, ax->xbuff, len);
p                 487 drivers/net/hamradio/mkiss.c 			*p |= 0x80;
p                 488 drivers/net/hamradio/mkiss.c 			crc = swab16(crc16(0, p, len));
p                 489 drivers/net/hamradio/mkiss.c 			count = kiss_esc_crc(p, ax->xbuff, crc, len+2);
p                 496 drivers/net/hamradio/mkiss.c 			*p |= 0x20;
p                 497 drivers/net/hamradio/mkiss.c 			crc = calc_crc_flex(p, len);
p                 498 drivers/net/hamradio/mkiss.c 			count = kiss_esc_crc(p, ax->xbuff, crc, len+2);
p                 502 drivers/net/hamradio/mkiss.c 			count = kiss_esc(p, ax->xbuff, len);
p                 343 drivers/net/hamradio/yam.c 	struct yam_mcs *p;
p                 379 drivers/net/hamradio/yam.c 	p = yam_data;
p                 380 drivers/net/hamradio/yam.c 	while (p) {
p                 381 drivers/net/hamradio/yam.c 		if (p->bitrate == bitrate) {
p                 382 drivers/net/hamradio/yam.c 			memcpy(p->bits, bits, YAM_FPGA_SIZE);
p                 385 drivers/net/hamradio/yam.c 		p = p->next;
p                 389 drivers/net/hamradio/yam.c 	if ((p = kmalloc(sizeof(struct yam_mcs), GFP_KERNEL)) == NULL) {
p                 393 drivers/net/hamradio/yam.c 	memcpy(p->bits, bits, YAM_FPGA_SIZE);
p                 394 drivers/net/hamradio/yam.c 	p->bitrate = bitrate;
p                 395 drivers/net/hamradio/yam.c 	p->next = yam_data;
p                 396 drivers/net/hamradio/yam.c 	yam_data = p;
p                 399 drivers/net/hamradio/yam.c 	return p->bits;
p                 404 drivers/net/hamradio/yam.c 	struct yam_mcs *p;
p                 406 drivers/net/hamradio/yam.c 	p = yam_data;
p                 407 drivers/net/hamradio/yam.c 	while (p) {
p                 408 drivers/net/hamradio/yam.c 		if (p->bitrate == bitrate)
p                 409 drivers/net/hamradio/yam.c 			return p->bits;
p                 410 drivers/net/hamradio/yam.c 		p = p->next;
p                1160 drivers/net/hamradio/yam.c 	struct yam_mcs *p;
p                1173 drivers/net/hamradio/yam.c 		p = yam_data;
p                1175 drivers/net/hamradio/yam.c 		kfree(p);
p                1300 drivers/net/hyperv/netvsc_drv.c static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
p                1305 drivers/net/hyperv/netvsc_drv.c 	struct sockaddr *addr = p;
p                1308 drivers/net/hyperv/netvsc_drv.c 	err = eth_prepare_mac_addr_change(ndev, p);
p                1323 drivers/net/hyperv/netvsc_drv.c 		eth_commit_mac_addr_change(ndev, p);
p                1462 drivers/net/hyperv/netvsc_drv.c 	u8 *p = data;
p                1471 drivers/net/hyperv/netvsc_drv.c 			memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
p                1472 drivers/net/hyperv/netvsc_drv.c 			p += ETH_GSTRING_LEN;
p                1476 drivers/net/hyperv/netvsc_drv.c 			memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
p                1477 drivers/net/hyperv/netvsc_drv.c 			p += ETH_GSTRING_LEN;
p                1481 drivers/net/hyperv/netvsc_drv.c 			sprintf(p, "tx_queue_%u_packets", i);
p                1482 drivers/net/hyperv/netvsc_drv.c 			p += ETH_GSTRING_LEN;
p                1483 drivers/net/hyperv/netvsc_drv.c 			sprintf(p, "tx_queue_%u_bytes", i);
p                1484 drivers/net/hyperv/netvsc_drv.c 			p += ETH_GSTRING_LEN;
p                1485 drivers/net/hyperv/netvsc_drv.c 			sprintf(p, "rx_queue_%u_packets", i);
p                1486 drivers/net/hyperv/netvsc_drv.c 			p += ETH_GSTRING_LEN;
p                1487 drivers/net/hyperv/netvsc_drv.c 			sprintf(p, "rx_queue_%u_bytes", i);
p                1488 drivers/net/hyperv/netvsc_drv.c 			p += ETH_GSTRING_LEN;
p                1493 drivers/net/hyperv/netvsc_drv.c 				sprintf(p, pcpu_stats[i].name, cpu);
p                1494 drivers/net/hyperv/netvsc_drv.c 				p += ETH_GSTRING_LEN;
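
The netvsc_drv.c excerpts above fill the ethtool strings table by writing each statistic name into a fixed-size slot and advancing the cursor by ETH_GSTRING_LEN, never by the string length. A minimal sketch of that fixed-stride fill, with an illustrative slot size and name set:

#include <stdio.h>

#define GSTRING_LEN 32          /* stand-in for ETH_GSTRING_LEN */

static void fill_queue_strings(char *data, unsigned int nqueues)
{
	char *p = data;
	unsigned int i;

	for (i = 0; i < nqueues; i++) {
		snprintf(p, GSTRING_LEN, "tx_queue_%u_packets", i);
		p += GSTRING_LEN;               /* advance by the slot size */
		snprintf(p, GSTRING_LEN, "tx_queue_%u_bytes", i);
		p += GSTRING_LEN;
	}
}
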
p                2892 drivers/net/macsec.c static int macsec_set_mac_address(struct net_device *dev, void *p)
p                2896 drivers/net/macsec.c 	struct sockaddr *addr = p;
p                 733 drivers/net/macvlan.c static int macvlan_set_mac_address(struct net_device *dev, void *p)
p                 736 drivers/net/macvlan.c 	struct sockaddr *addr = p;
p                 920 drivers/net/macvlan.c 		struct vlan_pcpu_stats *p;
p                 927 drivers/net/macvlan.c 			p = per_cpu_ptr(vlan->pcpu_stats, i);
p                 929 drivers/net/macvlan.c 				start = u64_stats_fetch_begin_irq(&p->syncp);
p                 930 drivers/net/macvlan.c 				rx_packets	= p->rx_packets;
p                 931 drivers/net/macvlan.c 				rx_bytes	= p->rx_bytes;
p                 932 drivers/net/macvlan.c 				rx_multicast	= p->rx_multicast;
p                 933 drivers/net/macvlan.c 				tx_packets	= p->tx_packets;
p                 934 drivers/net/macvlan.c 				tx_bytes	= p->tx_bytes;
p                 935 drivers/net/macvlan.c 			} while (u64_stats_fetch_retry_irq(&p->syncp, start));
p                 945 drivers/net/macvlan.c 			rx_errors	+= p->rx_errors;
p                 946 drivers/net/macvlan.c 			tx_dropped	+= p->tx_dropped;
p                  19 drivers/net/netdevsim/ipsec.c 	char *buf, *p;
p                  31 drivers/net/netdevsim/ipsec.c 	p = buf;
p                  32 drivers/net/netdevsim/ipsec.c 	p += snprintf(p, bufsize - (p - buf),
p                  42 drivers/net/netdevsim/ipsec.c 		p += snprintf(p, bufsize - (p - buf),
p                  46 drivers/net/netdevsim/ipsec.c 		p += snprintf(p, bufsize - (p - buf),
p                  50 drivers/net/netdevsim/ipsec.c 		p += snprintf(p, bufsize - (p - buf),
p                  56 drivers/net/netdevsim/ipsec.c 	len = simple_read_from_buffer(buffer, count, ppos, buf, p - buf);
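
The netdevsim ipsec.c excerpts above build a debugfs dump by repeatedly appending with p += snprintf(p, bufsize - (p - buf), ...). The sketch below mirrors that cursor-advancing pattern in plain C; note that snprintf() returns the length it wanted to write, so on truncation the cursor can overshoot, which is why such code normally needs a guard (the kernel provides scnprintf() for exactly this reason).

#include <stdio.h>

static int dump_counters(char *buf, size_t bufsize, unsigned int count, unsigned int ok)
{
	char *p = buf;

	p += snprintf(p, bufsize - (p - buf), "SA table size: %u\n", count);
	p += snprintf(p, bufsize - (p - buf), "SA table ok:   %u\n", ok);
	return p - buf;                  /* characters produced */
}
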
p                 263 drivers/net/phy/dp83640.c static void phy2rxts(struct phy_rxts *p, struct rxts *rxts)
p                 267 drivers/net/phy/dp83640.c 	sec = p->sec_lo;
p                 268 drivers/net/phy/dp83640.c 	sec |= p->sec_hi << 16;
p                 270 drivers/net/phy/dp83640.c 	rxts->ns = p->ns_lo;
p                 271 drivers/net/phy/dp83640.c 	rxts->ns |= (p->ns_hi & 0x3fff) << 16;
p                 273 drivers/net/phy/dp83640.c 	rxts->seqid = p->seqid;
p                 274 drivers/net/phy/dp83640.c 	rxts->msgtype = (p->msgtype >> 12) & 0xf;
p                 275 drivers/net/phy/dp83640.c 	rxts->hash = p->msgtype & 0x0fff;
p                 279 drivers/net/phy/dp83640.c static u64 phy2txts(struct phy_txts *p)
p                 284 drivers/net/phy/dp83640.c 	sec = p->sec_lo;
p                 285 drivers/net/phy/dp83640.c 	sec |= p->sec_hi << 16;
p                 287 drivers/net/phy/dp83640.c 	ns = p->ns_lo;
p                 288 drivers/net/phy/dp83640.c 	ns |= (p->ns_hi & 0x3fff) << 16;
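
The dp83640.c excerpts above rebuild PTP timestamps from 16-bit register halves, where only the low 14 bits of the nanosecond high half are valid. A standalone sketch of that reassembly, using the same shifts and mask as the excerpts (the struct layout is illustrative):

#include <stdint.h>

struct ts_halves {
	uint16_t ns_lo, ns_hi;          /* ns_hi: low 14 bits valid */
	uint16_t sec_lo, sec_hi;
};

static void assemble_ts(const struct ts_halves *h, uint64_t *sec, uint32_t *ns)
{
	*sec = h->sec_lo;
	*sec |= (uint64_t)h->sec_hi << 16;

	*ns = h->ns_lo;
	*ns |= (uint32_t)(h->ns_hi & 0x3fff) << 16;
}
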
p                  13 drivers/net/phy/mdio-cavium.c static void cavium_mdiobus_set_mode(struct cavium_mdiobus *p,
p                  18 drivers/net/phy/mdio-cavium.c 	if (m == p->mode)
p                  21 drivers/net/phy/mdio-cavium.c 	smi_clk.u64 = oct_mdio_readq(p->register_base + SMI_CLK);
p                  24 drivers/net/phy/mdio-cavium.c 	oct_mdio_writeq(smi_clk.u64, p->register_base + SMI_CLK);
p                  25 drivers/net/phy/mdio-cavium.c 	p->mode = m;
p                  28 drivers/net/phy/mdio-cavium.c static int cavium_mdiobus_c45_addr(struct cavium_mdiobus *p,
p                  35 drivers/net/phy/mdio-cavium.c 	cavium_mdiobus_set_mode(p, C45);
p                  39 drivers/net/phy/mdio-cavium.c 	oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
p                  47 drivers/net/phy/mdio-cavium.c 	oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
p                  54 drivers/net/phy/mdio-cavium.c 		smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
p                  64 drivers/net/phy/mdio-cavium.c 	struct cavium_mdiobus *p = bus->priv;
p                  71 drivers/net/phy/mdio-cavium.c 		int r = cavium_mdiobus_c45_addr(p, phy_id, regnum);
p                  79 drivers/net/phy/mdio-cavium.c 		cavium_mdiobus_set_mode(p, C22);
p                  86 drivers/net/phy/mdio-cavium.c 	oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
p                  93 drivers/net/phy/mdio-cavium.c 		smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT);
p                 105 drivers/net/phy/mdio-cavium.c 	struct cavium_mdiobus *p = bus->priv;
p                 112 drivers/net/phy/mdio-cavium.c 		int r = cavium_mdiobus_c45_addr(p, phy_id, regnum);
p                 120 drivers/net/phy/mdio-cavium.c 		cavium_mdiobus_set_mode(p, C22);
p                 125 drivers/net/phy/mdio-cavium.c 	oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
p                 131 drivers/net/phy/mdio-cavium.c 	oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
p                 138 drivers/net/phy/mdio-cavium.c 		smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
p                  48 drivers/net/phy/mdio-mux-mmioreg.c 		void __iomem *p = ioremap(s->phys, s->iosize);
p                  49 drivers/net/phy/mdio-mux-mmioreg.c 		if (!p)
p                  56 drivers/net/phy/mdio-mux-mmioreg.c 			x = ioread8(p);
p                  59 drivers/net/phy/mdio-mux-mmioreg.c 				iowrite8((x & ~s->mask) | desired_child, p);
p                  68 drivers/net/phy/mdio-mux-mmioreg.c 			x = ioread16(p);
p                  71 drivers/net/phy/mdio-mux-mmioreg.c 				iowrite16((x & ~s->mask) | desired_child, p);
p                  80 drivers/net/phy/mdio-mux-mmioreg.c 			x = ioread32(p);
p                  83 drivers/net/phy/mdio-mux-mmioreg.c 				iowrite32((x & ~s->mask) | desired_child, p);
p                  91 drivers/net/phy/mdio-mux-mmioreg.c 		iounmap(p);
p                 166 drivers/net/phy/phy-core.c 	const struct phy_setting *p, *match = NULL, *last = NULL;
p                 169 drivers/net/phy/phy-core.c 	for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) {
p                 170 drivers/net/phy/phy-core.c 		if (p->bit < __ETHTOOL_LINK_MODE_MASK_NBITS &&
p                 171 drivers/net/phy/phy-core.c 		    test_bit(p->bit, mask)) {
p                 172 drivers/net/phy/phy-core.c 			last = p;
p                 173 drivers/net/phy/phy-core.c 			if (p->speed == speed && p->duplex == duplex) {
p                 175 drivers/net/phy/phy-core.c 				match = p;
p                 178 drivers/net/phy/phy-core.c 				if (!match && p->speed <= speed)
p                 180 drivers/net/phy/phy-core.c 					match = p;
p                 182 drivers/net/phy/phy-core.c 				if (p->speed < speed)
p                 212 drivers/net/phy/phy-core.c 	const struct phy_setting *p;
p                 215 drivers/net/phy/phy-core.c 	for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) {
p                 216 drivers/net/phy/phy-core.c 		if (p->speed > max_speed)
p                 217 drivers/net/phy/phy-core.c 			linkmode_clear_bit(p->bit, addr);
p                 401 drivers/net/phy/sfp.c 	u8 *p, check;
p                 403 drivers/net/phy/sfp.c 	for (p = buf, check = 0; len; p++, len--)
p                 404 drivers/net/phy/sfp.c 		check += *p;
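
The sfp.c excerpt above computes a one-byte additive checksum over a buffer (check += *p for every byte). The equivalent standalone helper:

static unsigned char sum8(const unsigned char *buf, unsigned int len)
{
	unsigned char check = 0;

	while (len--)
		check += *buf++;
	return check;
}
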
p                  43 drivers/net/phy/spi_ks8995.c #define KS8995_REG_PC(p, r)	((0x10 * p) + r)	 /* Port Control */
p                  44 drivers/net/phy/spi_ks8995.c #define KS8995_REG_PS(p, r)	((0x10 * p) + r + 0xe)  /* Port Status */
p                 523 drivers/net/ppp/bsd_comp.c #define dict_ptrx(p,idx) &(p->dict[idx])
p                 524 drivers/net/ppp/bsd_comp.c #define lens_ptrx(p,idx) &(p->lens[idx])
p                 842 drivers/net/ppp/bsd_comp.c     unsigned char *p;
p                 995 drivers/net/ppp/bsd_comp.c 	p     = wptr;
p                1025 drivers/net/ppp/bsd_comp.c 	    *--p    = dictp->f.hs.suffix;
p                1028 drivers/net/ppp/bsd_comp.c 	*--p = finchar;
p                 289 drivers/net/ppp/ppp_async.c 	int __user *p = (int __user *)arg;
p                 297 drivers/net/ppp/ppp_async.c 		if (put_user(ppp_channel_index(&ap->chan), p))
p                 304 drivers/net/ppp/ppp_async.c 		if (put_user(ppp_unit_number(&ap->chan), p))
p                 318 drivers/net/ppp/ppp_async.c 		if (put_user(val, p))
p                 407 drivers/net/ppp/ppp_async.c 	int __user *p = argp;
p                 415 drivers/net/ppp/ppp_async.c 		if (put_user(val, p))
p                 420 drivers/net/ppp/ppp_async.c 		if (get_user(val, p))
p                 466 drivers/net/ppp/ppp_async.c 		if (put_user(ap->mru, p))
p                 471 drivers/net/ppp/ppp_async.c 		if (get_user(val, p))
p                 768 drivers/net/ppp/ppp_async.c 	unsigned char *p;
p                 779 drivers/net/ppp/ppp_async.c 	p = skb->data;
p                 785 drivers/net/ppp/ppp_async.c 		fcs = PPP_FCS(fcs, *p++);
p                 791 drivers/net/ppp/ppp_async.c 	p = skb->data;
p                 792 drivers/net/ppp/ppp_async.c 	if (p[0] == PPP_ALLSTATIONS) {
p                 794 drivers/net/ppp/ppp_async.c 		if (p[1] != PPP_UI || skb->len < 3)
p                 796 drivers/net/ppp/ppp_async.c 		p = skb_pull(skb, 2);
p                 800 drivers/net/ppp/ppp_async.c 	if (!(p[0] & 0x01)) {
p                 805 drivers/net/ppp/ppp_async.c 		proto = (p[0] << 8) + p[1];
p                 807 drivers/net/ppp/ppp_async.c 			async_lcp_peek(ap, p, skb->len, 1);
p                 286 drivers/net/ppp/ppp_generic.c static int unit_get(struct idr *p, void *ptr);
p                 287 drivers/net/ppp/ppp_generic.c static int unit_set(struct idr *p, void *ptr, int n);
p                 288 drivers/net/ppp/ppp_generic.c static void unit_put(struct idr *p, int n);
p                 289 drivers/net/ppp/ppp_generic.c static void *unit_find(struct idr *p, int n);
p                 557 drivers/net/ppp/ppp_generic.c static int get_filter(void __user *arg, struct sock_filter **p)
p                 567 drivers/net/ppp/ppp_generic.c 		*p = NULL;
p                 576 drivers/net/ppp/ppp_generic.c 	*p = code;
p                 591 drivers/net/ppp/ppp_generic.c 	int __user *p = argp;
p                 622 drivers/net/ppp/ppp_generic.c 			if (get_user(unit, p))
p                 652 drivers/net/ppp/ppp_generic.c 		if (get_user(val, p))
p                 659 drivers/net/ppp/ppp_generic.c 		if (get_user(val, p))
p                 676 drivers/net/ppp/ppp_generic.c 		if (put_user(val, p))
p                 686 drivers/net/ppp/ppp_generic.c 		if (put_user(ppp->file.index, p))
p                 692 drivers/net/ppp/ppp_generic.c 		if (get_user(val, p))
p                 699 drivers/net/ppp/ppp_generic.c 		if (put_user(ppp->debug, p))
p                 713 drivers/net/ppp/ppp_generic.c 		if (get_user(val, p))
p                 811 drivers/net/ppp/ppp_generic.c 		if (get_user(val, p))
p                 837 drivers/net/ppp/ppp_generic.c 	int __user *p = (int __user *)arg;
p                 842 drivers/net/ppp/ppp_generic.c 		if (get_user(unit, p))
p                 849 drivers/net/ppp/ppp_generic.c 		if (put_user(unit, p))
p                 856 drivers/net/ppp/ppp_generic.c 		if (get_user(unit, p))
p                 871 drivers/net/ppp/ppp_generic.c 		if (get_user(unit, p))
p                1685 drivers/net/ppp/ppp_generic.c 	unsigned char *p, *q;
p                1735 drivers/net/ppp/ppp_generic.c 	p = skb->data;
p                1737 drivers/net/ppp/ppp_generic.c 	if (*p == 0 && mp_protocol_compress) {
p                1738 drivers/net/ppp/ppp_generic.c 		++p;
p                1873 drivers/net/ppp/ppp_generic.c 		memcpy(q + hdrlen, p, flen);
p                1881 drivers/net/ppp/ppp_generic.c 		p += flen;
p                2415 drivers/net/ppp/ppp_generic.c 	struct sk_buff *p;
p                2421 drivers/net/ppp/ppp_generic.c 	skb_queue_walk(list, p) {
p                2422 drivers/net/ppp/ppp_generic.c 		if (seq_before(seq, PPP_MP_CB(p)->sequence))
p                2425 drivers/net/ppp/ppp_generic.c 	__skb_queue_before(list, p, skb);
p                2440 drivers/net/ppp/ppp_generic.c 	struct sk_buff *p, *tmp;
p                2449 drivers/net/ppp/ppp_generic.c 	skb_queue_walk_safe(list, p, tmp) {
p                2451 drivers/net/ppp/ppp_generic.c 		if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
p                2455 drivers/net/ppp/ppp_generic.c 				   PPP_MP_CB(p)->sequence, seq);
p                2456 drivers/net/ppp/ppp_generic.c 			__skb_unlink(p, list);
p                2457 drivers/net/ppp/ppp_generic.c 			kfree_skb(p);
p                2460 drivers/net/ppp/ppp_generic.c 		if (PPP_MP_CB(p)->sequence != seq) {
p                2469 drivers/net/ppp/ppp_generic.c 			seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
p                2470 drivers/net/ppp/ppp_generic.c 				minseq + 1: PPP_MP_CB(p)->sequence;
p                2489 drivers/net/ppp/ppp_generic.c 		if (PPP_MP_CB(p)->BEbits & B) {
p                2490 drivers/net/ppp/ppp_generic.c 			head = p;
p                2495 drivers/net/ppp/ppp_generic.c 		len += p->len;
p                2498 drivers/net/ppp/ppp_generic.c 		if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) &&
p                2506 drivers/net/ppp/ppp_generic.c 				tail = p;
p                2517 drivers/net/ppp/ppp_generic.c 		if (PPP_MP_CB(p)->BEbits & E) {
p                2520 drivers/net/ppp/ppp_generic.c 			skb_queue_reverse_walk_from_safe(list, p, tmp2) {
p                2524 drivers/net/ppp/ppp_generic.c 						      PPP_MP_CB(p)->sequence);
p                2525 drivers/net/ppp/ppp_generic.c 				__skb_unlink(p, list);
p                2526 drivers/net/ppp/ppp_generic.c 				kfree_skb(p);
p                2540 drivers/net/ppp/ppp_generic.c 			skb_queue_walk_safe(list, p, tmp) {
p                2541 drivers/net/ppp/ppp_generic.c 				if (p == head)
p                2546 drivers/net/ppp/ppp_generic.c 						      PPP_MP_CB(p)->sequence);
p                2547 drivers/net/ppp/ppp_generic.c 				__skb_unlink(p, list);
p                2548 drivers/net/ppp/ppp_generic.c 				kfree_skb(p);
p                2563 drivers/net/ppp/ppp_generic.c 			p = skb_queue_next(list, head);
p                2565 drivers/net/ppp/ppp_generic.c 			skb_queue_walk_from_safe(list, p, tmp) {
p                2566 drivers/net/ppp/ppp_generic.c 				__skb_unlink(p, list);
p                2567 drivers/net/ppp/ppp_generic.c 				*fragpp = p;
p                2568 drivers/net/ppp/ppp_generic.c 				p->next = NULL;
p                2569 drivers/net/ppp/ppp_generic.c 				fragpp = &p->next;
p                2571 drivers/net/ppp/ppp_generic.c 				skb->len += p->len;
p                2572 drivers/net/ppp/ppp_generic.c 				skb->data_len += p->len;
p                2573 drivers/net/ppp/ppp_generic.c 				skb->truesize += p->truesize;
p                2575 drivers/net/ppp/ppp_generic.c 				if (p == tail)
p                3008 drivers/net/ppp/ppp_generic.c 	st->p.ppp_ipackets = ppp->stats64.rx_packets;
p                3009 drivers/net/ppp/ppp_generic.c 	st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
p                3010 drivers/net/ppp/ppp_generic.c 	st->p.ppp_ibytes = ppp->stats64.rx_bytes;
p                3011 drivers/net/ppp/ppp_generic.c 	st->p.ppp_opackets = ppp->stats64.tx_packets;
p                3012 drivers/net/ppp/ppp_generic.c 	st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
p                3013 drivers/net/ppp/ppp_generic.c 	st->p.ppp_obytes = ppp->stats64.tx_bytes;
p                3286 drivers/net/ppp/ppp_generic.c static int unit_set(struct idr *p, void *ptr, int n)
p                3290 drivers/net/ppp/ppp_generic.c 	unit = idr_alloc(p, ptr, n, n + 1, GFP_KERNEL);
p                3297 drivers/net/ppp/ppp_generic.c static int unit_get(struct idr *p, void *ptr)
p                3299 drivers/net/ppp/ppp_generic.c 	return idr_alloc(p, ptr, 0, 0, GFP_KERNEL);
p                3303 drivers/net/ppp/ppp_generic.c static void unit_put(struct idr *p, int n)
p                3305 drivers/net/ppp/ppp_generic.c 	idr_remove(p, n);
p                3309 drivers/net/ppp/ppp_generic.c static void *unit_find(struct idr *p, int n)
p                3311 drivers/net/ppp/ppp_generic.c 	return idr_find(p, n);
p                 120 drivers/net/ppp/ppp_mppe.c #define MPPE_BITS(p) ((p)[4] & 0xf0)
p                 121 drivers/net/ppp/ppp_mppe.c #define MPPE_CCOUNT(p) ((((p)[4] & 0x0f) << 8) + (p)[5])
p                 281 drivers/net/ppp/ppp_synctty.c 	int __user *p = (int __user *)arg;
p                 290 drivers/net/ppp/ppp_synctty.c 		if (put_user(ppp_channel_index(&ap->chan), p))
p                 297 drivers/net/ppp/ppp_synctty.c 		if (put_user(ppp_unit_number(&ap->chan), p))
p                 311 drivers/net/ppp/ppp_synctty.c 		if (put_user(val, p))
p                 402 drivers/net/ppp/ppp_synctty.c 	u32 __user *p = argp;
p                 423 drivers/net/ppp/ppp_synctty.c 		if (put_user(ap->xaccm[0], p))
p                 428 drivers/net/ppp/ppp_synctty.c 		if (get_user(ap->xaccm[0], p))
p                 434 drivers/net/ppp/ppp_synctty.c 		if (put_user(ap->raccm, p))
p                 439 drivers/net/ppp/ppp_synctty.c 		if (get_user(ap->raccm, p))
p                 671 drivers/net/ppp/ppp_synctty.c 	unsigned char *p;
p                 700 drivers/net/ppp/ppp_synctty.c 	p = skb->data;
p                 701 drivers/net/ppp/ppp_synctty.c 	if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
p                 705 drivers/net/ppp/ppp_synctty.c 		p = skb_pull(skb, 2);
p                 711 drivers/net/ppp/ppp_synctty.c 	if (!(p[0] & 0x01) && skb->len < 2)
p                 574 drivers/net/ppp/pptp.c 	int __user *p = argp;
p                 581 drivers/net/ppp/pptp.c 		if (put_user(val, p))
p                 586 drivers/net/ppp/pptp.c 		if (get_user(val, p))
p                 661 drivers/net/sb1000.c 	short p;
p                 672 drivers/net/sb1000.c 	p = PID[0];
p                 673 drivers/net/sb1000.c 	Command0[3] = p & 0xff;
p                 674 drivers/net/sb1000.c 	p >>= 8;
p                 675 drivers/net/sb1000.c 	Command0[2] = p & 0xff;
p                 679 drivers/net/sb1000.c 	p = PID[1];
p                 680 drivers/net/sb1000.c 	Command1[3] = p & 0xff;
p                 681 drivers/net/sb1000.c 	p >>= 8;
p                 682 drivers/net/sb1000.c 	Command1[2] = p & 0xff;
p                 686 drivers/net/sb1000.c 	p = PID[2];
p                 687 drivers/net/sb1000.c 	Command2[3] = p & 0xff;
p                 688 drivers/net/sb1000.c 	p >>= 8;
p                 689 drivers/net/sb1000.c 	Command2[2] = p & 0xff;
p                 693 drivers/net/sb1000.c 	p = PID[3];
p                 694 drivers/net/sb1000.c 	Command3[3] = p & 0xff;
p                 695 drivers/net/sb1000.c 	p >>= 8;
p                 696 drivers/net/sb1000.c 	Command3[2] = p & 0xff;
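
The sb1000.c excerpts above split each 16-bit PID into two command bytes, storing the low byte at offset 3 and the high byte at offset 2. A minimal sketch of that packing, with an illustrative command layout:

static void pack_pid(unsigned char cmd[6], unsigned short pid)
{
	cmd[3] = pid & 0xff;            /* low byte */
	pid >>= 8;
	cmd[2] = pid & 0xff;            /* high byte */
}
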
p                 102 drivers/net/slip/slip.c static int slip_esc(unsigned char *p, unsigned char *d, int len);
p                 105 drivers/net/slip/slip.c static int slip_esc6(unsigned char *p, unsigned char *d, int len);
p                 377 drivers/net/slip/slip.c 	unsigned char *p;
p                 387 drivers/net/slip/slip.c 	p = icp;
p                 390 drivers/net/slip/slip.c 		len = slhc_compress(sl->slcomp, p, len, sl->cbuff, &p, 1);
p                 394 drivers/net/slip/slip.c 		count = slip_esc6(p, sl->xbuff, len);
p                 397 drivers/net/slip/slip.c 		count = slip_esc(p, sl->xbuff, len);
p                1083 drivers/net/slip/slip.c 	int __user *p = (int __user *)arg;
p                1097 drivers/net/slip/slip.c 		if (put_user(sl->mode, p))
p                1102 drivers/net/slip/slip.c 		if (get_user(tmp, p))
p                1127 drivers/net/slip/slip.c 		if (get_user(tmp, p))
p                1148 drivers/net/slip/slip.c 		if (put_user(sl->keepalive, p))
p                1153 drivers/net/slip/slip.c 		if (get_user(tmp, p))
p                1173 drivers/net/slip/slip.c 		if (put_user(sl->outfill, p))
p                1192 drivers/net/slip/slip.c 	unsigned long *p = (unsigned long *)&rq->ifr_ifru;
p                1207 drivers/net/slip/slip.c 		if ((unsigned)*p > 255) {
p                1211 drivers/net/slip/slip.c 		sl->keepalive = (u8)*p;
p                1223 drivers/net/slip/slip.c 		*p = sl->keepalive;
p                1227 drivers/net/slip/slip.c 		if ((unsigned)*p > 255) { /* max for unchar */
p                1231 drivers/net/slip/slip.c 		sl->outfill = (u8)*p;
p                1241 drivers/net/slip/slip.c 		*p = sl->outfill;
p                1254 drivers/net/slip/slip.c 		if (*p)
p                1259 drivers/net/slip/slip.c 		*p = sl->leased;
p                1784 drivers/net/team/team.c static int team_set_mac_address(struct net_device *dev, void *p)
p                1786 drivers/net/team/team.c 	struct sockaddr *addr = p;
p                1841 drivers/net/team/team.c 	struct team_pcpu_stats *p;
p                1848 drivers/net/team/team.c 		p = per_cpu_ptr(team->pcpu_stats, i);
p                1850 drivers/net/team/team.c 			start = u64_stats_fetch_begin_irq(&p->syncp);
p                1851 drivers/net/team/team.c 			rx_packets	= p->rx_packets;
p                1852 drivers/net/team/team.c 			rx_bytes	= p->rx_bytes;
p                1853 drivers/net/team/team.c 			rx_multicast	= p->rx_multicast;
p                1854 drivers/net/team/team.c 			tx_packets	= p->tx_packets;
p                1855 drivers/net/team/team.c 			tx_bytes	= p->tx_bytes;
p                1856 drivers/net/team/team.c 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
p                1867 drivers/net/team/team.c 		rx_dropped	+= p->rx_dropped;
p                1868 drivers/net/team/team.c 		tx_dropped	+= p->tx_dropped;
p                1869 drivers/net/team/team.c 		rx_nohandler	+= p->rx_nohandler;
p                1160 drivers/net/tun.c 	struct tun_pcpu_stats *p;
p                1167 drivers/net/tun.c 		p = per_cpu_ptr(tun->pcpu_stats, i);
p                1169 drivers/net/tun.c 			start = u64_stats_fetch_begin(&p->syncp);
p                1170 drivers/net/tun.c 			rxpackets	= p->rx_packets;
p                1171 drivers/net/tun.c 			rxbytes		= p->rx_bytes;
p                1172 drivers/net/tun.c 			txpackets	= p->tx_packets;
p                1173 drivers/net/tun.c 			txbytes		= p->tx_bytes;
p                1174 drivers/net/tun.c 		} while (u64_stats_fetch_retry(&p->syncp, start));
p                1182 drivers/net/tun.c 		rx_dropped	+= p->rx_dropped;
p                1183 drivers/net/tun.c 		rx_frame_errors	+= p->rx_frame_errors;
p                1184 drivers/net/tun.c 		tx_dropped	+= p->tx_dropped;
p                 463 drivers/net/usb/aqc111.c static int aqc111_set_mac_addr(struct net_device *net, void *p)
p                 468 drivers/net/usb/aqc111.c 	ret = eth_mac_addr(net, p);
p                 241 drivers/net/usb/asix.h int asix_set_mac_address(struct net_device *net, void *p);
p                 741 drivers/net/usb/asix_common.c int asix_set_mac_address(struct net_device *net, void *p)
p                 745 drivers/net/usb/asix_common.c 	struct sockaddr *addr = p;
p                 929 drivers/net/usb/ax88179_178a.c static int ax88179_set_mac_addr(struct net_device *net, void *p)
p                 932 drivers/net/usb/ax88179_178a.c 	struct sockaddr *addr = p;
p                 111 drivers/net/usb/cdc_ncm.c 	char *p = NULL;
p                 114 drivers/net/usb/cdc_ncm.c 		p = (char *)ctx + cdc_ncm_gstrings_stats[i].stat_offset;
p                 115 drivers/net/usb/cdc_ncm.c 		data[i] = (cdc_ncm_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
p                 121 drivers/net/usb/cdc_ncm.c 	u8 *p = data;
p                 127 drivers/net/usb/cdc_ncm.c 			memcpy(p, cdc_ncm_gstrings_stats[i].stat_string, ETH_GSTRING_LEN);
p                 128 drivers/net/usb/cdc_ncm.c 			p += ETH_GSTRING_LEN;
p                 323 drivers/net/usb/dm9601.c static int dm9601_set_mac_address(struct net_device *net, void *p)
p                 325 drivers/net/usb/dm9601.c 	struct sockaddr *addr = p;
p                 584 drivers/net/usb/lan78xx.c 	u32 *p, *count, *max;
p                 592 drivers/net/usb/lan78xx.c 	p = (u32 *)&lan78xx_stats;
p                 603 drivers/net/usb/lan78xx.c 		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
p                2306 drivers/net/usb/lan78xx.c static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
p                2309 drivers/net/usb/lan78xx.c 	struct sockaddr *addr = p;
p                2602 drivers/net/usb/lan78xx.c 	u32 *p;
p                2608 drivers/net/usb/lan78xx.c 	p = (u32 *)&dev->stats.rollover_max;
p                2610 drivers/net/usb/lan78xx.c 		p[i] = 0xFFFFF;
p                 144 drivers/net/usb/mcs7830.c static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
p                 148 drivers/net/usb/mcs7830.c 	struct sockaddr *addr = p;
p                 219 drivers/net/usb/pegasus.c static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd)
p                 230 drivers/net/usb/pegasus.c 	set_register(p, PhyCtrl, 0);
p                 231 drivers/net/usb/pegasus.c 	set_registers(p, PhyAddr, sizeof(data), data);
p                 232 drivers/net/usb/pegasus.c 	set_register(p, PhyCtrl, (indx | cmd));
p                 234 drivers/net/usb/pegasus.c 		ret = get_registers(p, PhyCtrl, 1, data);
p                 243 drivers/net/usb/pegasus.c 		ret = get_registers(p, PhyData, 2, &regdi);
p                 249 drivers/net/usb/pegasus.c 	netif_dbg(p, drv, p->net, "%s failed\n", __func__);
p                 603 drivers/net/usb/qmi_wwan.c static int qmi_wwan_mac_addr(struct net_device *dev, void *p)
p                 606 drivers/net/usb/qmi_wwan.c 	struct sockaddr *addr = p;
p                 608 drivers/net/usb/qmi_wwan.c 	ret = eth_prepare_mac_addr_change(dev, p);
p                 613 drivers/net/usb/qmi_wwan.c 	eth_commit_mac_addr_change(dev, p);
p                1199 drivers/net/usb/r8152.c static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
p                1202 drivers/net/usb/r8152.c 	struct sockaddr *addr = p;
p                 285 drivers/net/usb/rtl8150.c static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
p                 287 drivers/net/usb/rtl8150.c 	struct sockaddr *addr = p;
p                 233 drivers/net/usb/sierra_net.c static const u8 *save16bit(struct param *p, const u8 *datap)
p                 235 drivers/net/usb/sierra_net.c 	p->is_present = 1;
p                 236 drivers/net/usb/sierra_net.c 	p->word = get_unaligned_be16(datap);
p                 237 drivers/net/usb/sierra_net.c 	return datap + sizeof(p->word);
p                 240 drivers/net/usb/sierra_net.c static const u8 *save8bit(struct param *p, const u8 *datap)
p                 242 drivers/net/usb/sierra_net.c 	p->is_present = 1;
p                 243 drivers/net/usb/sierra_net.c 	p->byte = *datap;
p                 244 drivers/net/usb/sierra_net.c 	return datap + sizeof(p->byte);
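
The sierra_net.c excerpts above pull fields out of a big-endian byte stream and advance the cursor by the field size. The sketch below mirrors save16bit()/save8bit(), open-coding the unaligned big-endian read so it builds outside the kernel.

#include <stdint.h>

struct param {
	int is_present;
	uint16_t word;
	uint8_t byte;
};

static const uint8_t *save16bit(struct param *p, const uint8_t *datap)
{
	p->is_present = 1;
	p->word = (uint16_t)((datap[0] << 8) | datap[1]);   /* big-endian, unaligned-safe */
	return datap + sizeof(p->word);
}

static const uint8_t *save8bit(struct param *p, const uint8_t *datap)
{
	p->is_present = 1;
	p->byte = *datap;
	return datap + sizeof(p->byte);
}
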
p                 288 drivers/net/usb/sr9700.c static int sr9700_set_mac_address(struct net_device *netdev, void *p)
p                 291 drivers/net/usb/sr9700.c 	struct sockaddr *addr = p;
p                 495 drivers/net/usb/sr9800.c static int sr_set_mac_address(struct net_device *net, void *p)
p                 499 drivers/net/usb/sr9800.c 	struct sockaddr *addr = p;
p                 117 drivers/net/veth.c 	char *p = (char *)buf;
p                 122 drivers/net/veth.c 		memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
p                 123 drivers/net/veth.c 		p += sizeof(ethtool_stats_keys);
p                 126 drivers/net/veth.c 				snprintf(p, ETH_GSTRING_LEN,
p                 129 drivers/net/veth.c 				p += ETH_GSTRING_LEN;
p                 303 drivers/net/virtio_net.c 	struct page *p = rq->pages;
p                 305 drivers/net/virtio_net.c 	if (p) {
p                 306 drivers/net/virtio_net.c 		rq->pages = (struct page *)p->private;
p                 308 drivers/net/virtio_net.c 		p->private = 0;
p                 310 drivers/net/virtio_net.c 		p = alloc_page(gfp_mask);
p                 311 drivers/net/virtio_net.c 	return p;
p                 379 drivers/net/virtio_net.c 	char *p;
p                 381 drivers/net/virtio_net.c 	p = page_address(page) + offset;
p                 397 drivers/net/virtio_net.c 		memcpy(hdr, p, hdr_len);
p                 401 drivers/net/virtio_net.c 	p += hdr_padded_len;
p                 406 drivers/net/virtio_net.c 	skb_put_data(skb, p, copy);
p                 580 drivers/net/virtio_net.c 				       struct page *p,
p                 590 drivers/net/virtio_net.c 	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
p                 603 drivers/net/virtio_net.c 		p = virt_to_head_page(buf);
p                 604 drivers/net/virtio_net.c 		off = buf - page_address(p);
p                 610 drivers/net/virtio_net.c 			put_page(p);
p                 615 drivers/net/virtio_net.c 		       page_address(p) + off, buflen);
p                 617 drivers/net/virtio_net.c 		put_page(p);
p                1104 drivers/net/virtio_net.c 	char *p;
p                1129 drivers/net/virtio_net.c 	p = page_address(first);
p                1133 drivers/net/virtio_net.c 	sg_set_buf(&rq->sg[0], p, vi->hdr_len);
p                1137 drivers/net/virtio_net.c 	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
p                1671 drivers/net/virtio_net.c static int virtnet_set_mac_address(struct net_device *dev, void *p)
p                1682 drivers/net/virtio_net.c 	addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
p                1710 drivers/net/virtio_net.c 	eth_commit_mac_addr_change(dev, p);
p                2084 drivers/net/virtio_net.c 	char *p = (char *)data;
p                2091 drivers/net/virtio_net.c 				snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s",
p                2093 drivers/net/virtio_net.c 				p += ETH_GSTRING_LEN;
p                2099 drivers/net/virtio_net.c 				snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%s",
p                2101 drivers/net/virtio_net.c 				p += ETH_GSTRING_LEN;
p                2697 drivers/net/vmxnet3/vmxnet3_drv.c vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
p                2699 drivers/net/vmxnet3/vmxnet3_drv.c 	struct sockaddr *addr = p;
p                 362 drivers/net/vmxnet3/vmxnet3_ethtool.c vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
p                 365 drivers/net/vmxnet3/vmxnet3_ethtool.c 	u32 *buf = p;
p                 368 drivers/net/vmxnet3/vmxnet3_ethtool.c 	memset(p, 0, vmxnet3_get_regs_len(netdev));
p                 695 drivers/net/vmxnet3/vmxnet3_ethtool.c vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc)
p                 703 drivers/net/vmxnet3/vmxnet3_ethtool.c 	if (!p)
p                 706 drivers/net/vmxnet3/vmxnet3_ethtool.c 		p[n] = rssConf->indTable[n];
p                 712 drivers/net/vmxnet3/vmxnet3_ethtool.c vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key,
p                 724 drivers/net/vmxnet3/vmxnet3_ethtool.c 	if (!p)
p                 727 drivers/net/vmxnet3/vmxnet3_ethtool.c 		rssConf->indTable[i] = p[i];
p                 732 drivers/net/vxlan.c 	struct sk_buff *p;
p                 767 drivers/net/vxlan.c 	list_for_each_entry(p, head, list) {
p                 768 drivers/net/vxlan.c 		if (!NAPI_GRO_CB(p)->same_flow)
p                 771 drivers/net/vxlan.c 		vh2 = (struct vxlanhdr *)(p->data + off_vx);
p                 774 drivers/net/vxlan.c 			NAPI_GRO_CB(p)->same_flow = 0;
p                2729 drivers/net/vxlan.c 		struct hlist_node *p, *n;
p                2732 drivers/net/vxlan.c 		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
p                2734 drivers/net/vxlan.c 				= container_of(p, struct vxlan_fdb, hlist);
p                2857 drivers/net/vxlan.c 		struct hlist_node *p, *n;
p                2860 drivers/net/vxlan.c 		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
p                2862 drivers/net/vxlan.c 				= container_of(p, struct vxlan_fdb, hlist);
p                3158 drivers/net/vxlan.c 		const struct ifla_vxlan_port_range *p
p                3161 drivers/net/vxlan.c 		if (ntohs(p->high) < ntohs(p->low)) {
p                3845 drivers/net/vxlan.c 			const struct ifla_vxlan_port_range *p
p                3847 drivers/net/vxlan.c 			conf->port_min = ntohs(p->low);
p                3848 drivers/net/vxlan.c 			conf->port_max = ntohs(p->high);
p                  47 drivers/net/wan/hdlc.c 		    struct packet_type *p, struct net_device *orig_dev)
p                 329 drivers/net/wan/hdlc.c 	struct hdlc_proto **p;
p                 332 drivers/net/wan/hdlc.c 	p = &first_proto;
p                 333 drivers/net/wan/hdlc.c 	while (*p != proto) {
p                 334 drivers/net/wan/hdlc.c 		BUG_ON(!*p);
p                 335 drivers/net/wan/hdlc.c 		p = &((*p)->next);
p                 337 drivers/net/wan/hdlc.c 	*p = proto->next;
p                 299 drivers/net/wan/pc300too.c 	u32 __iomem *p;
p                 376 drivers/net/wan/pc300too.c 	p = &card->plxbase->init_ctrl;
p                 377 drivers/net/wan/pc300too.c 	writel(card->init_ctrl_value | 0x40000000, p);
p                 378 drivers/net/wan/pc300too.c 	readl(p);		/* Flush the write - do not use sca_flush */
p                 381 drivers/net/wan/pc300too.c 	writel(card->init_ctrl_value, p);
p                 382 drivers/net/wan/pc300too.c 	readl(p);		/* Flush the write - do not use sca_flush */
p                 386 drivers/net/wan/pc300too.c 	writel(card->init_ctrl_value | 0x20000000, p);
p                 387 drivers/net/wan/pc300too.c 	readl(p);		/* Flush the write - do not use sca_flush */
p                 390 drivers/net/wan/pc300too.c 	writel(card->init_ctrl_value, p);
p                 391 drivers/net/wan/pc300too.c 	readl(p);		/* Flush the write - do not use sca_flush */
p                 277 drivers/net/wan/pci200syn.c 	u32 __iomem *p;
p                 335 drivers/net/wan/pci200syn.c 	p = &card->plxbase->init_ctrl;
p                 336 drivers/net/wan/pci200syn.c 	writel(readl(p) | 0x40000000, p);
p                 337 drivers/net/wan/pci200syn.c 	readl(p);		/* Flush the write - do not use sca_flush */
p                 340 drivers/net/wan/pci200syn.c 	writel(readl(p) & ~0x40000000, p);
p                 341 drivers/net/wan/pci200syn.c 	readl(p);		/* Flush the write - do not use sca_flush */
p                 366 drivers/net/wan/pci200syn.c 	p = &card->plxbase->intr_ctrl_stat;
p                 367 drivers/net/wan/pci200syn.c 	writew(readw(p) | 0x0040, p);
p                 444 drivers/net/wan/sbni.c 	struct net_device  *p;
p                 449 drivers/net/wan/sbni.c 	for( p = dev;  p; ) {
p                 450 drivers/net/wan/sbni.c 		struct net_local  *nl = netdev_priv(p);
p                 453 drivers/net/wan/sbni.c 			p = nl->link;
p                 457 drivers/net/wan/sbni.c 			prepare_to_send( skb, p );
p                 813 drivers/net/wan/sbni.c 	u8  *p;
p                 821 drivers/net/wan/sbni.c 	p = nl->rx_buf_p->data + nl->inppos;
p                 822 drivers/net/wan/sbni.c 	insb( dev->base_addr + DAT, p, framelen );
p                 823 drivers/net/wan/sbni.c 	if( calc_crc32( crc, p, framelen ) != CRC32_REMAINDER )
p                1162 drivers/net/wan/sbni.c 		struct net_device  **p = sbni_cards;
p                1163 drivers/net/wan/sbni.c 		for( ;  *p  &&  p < sbni_cards + SBNI_MAX_NUM_CARDS;  ++p )
p                1164 drivers/net/wan/sbni.c 			if( (*p)->irq == dev->irq &&
p                1165 drivers/net/wan/sbni.c 			    ((*p)->base_addr == dev->base_addr + 4 ||
p                1166 drivers/net/wan/sbni.c 			     (*p)->base_addr == dev->base_addr - 4) &&
p                1167 drivers/net/wan/sbni.c 			    (*p)->flags & IFF_UP ) {
p                1169 drivers/net/wan/sbni.c 				((struct net_local *) (netdev_priv(*p)))
p                1172 drivers/net/wan/sbni.c 					      (*p)->name);
p                1418 drivers/net/wan/sbni.c 	struct net_device  *p   = snl->master;
p                1419 drivers/net/wan/sbni.c 	struct net_local   *nl  = netdev_priv(p);
p                1430 drivers/net/wan/sbni.c 		struct net_local  *t = netdev_priv(p);
p                1435 drivers/net/wan/sbni.c 		p = t->link;
p                1518 drivers/net/wan/sbni.c sbni_setup( char  *p )
p                1522 drivers/net/wan/sbni.c 	if( *p++ != '(' )
p                1525 drivers/net/wan/sbni.c 	for( n = 0, parm = 0;  *p  &&  n < 8; ) {
p                1526 drivers/net/wan/sbni.c 		(*dest[ parm ])[ n ] = simple_strtol( p, &p, 0 );
p                1527 drivers/net/wan/sbni.c 		if( !*p  ||  *p == ')' )
p                1529 drivers/net/wan/sbni.c 		if( *p == ';' )
p                1530 drivers/net/wan/sbni.c 			++p, ++n, parm = 0;
p                1531 drivers/net/wan/sbni.c 		else if( *p++ != ',' )
p                1549 drivers/net/wan/sbni.c calc_crc32( u32  crc,  u8  *p,  u32  len )
p                1552 drivers/net/wan/sbni.c 		crc = CRC32( *p++, crc );
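
The sbni.c excerpt above accumulates a CRC byte by byte (crc = CRC32(*p++, crc)). As an illustration of that calling pattern only, the sketch below uses the common reflected CRC-32 polynomial 0xEDB88320; the driver keeps its own lookup table and expected remainder, so the exact checksum may differ.

#include <stdint.h>

static uint32_t crc32_update(uint32_t crc, const uint8_t *p, uint32_t len)
{
	while (len--) {
		int i;

		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;
}
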
p                  47 drivers/net/wan/x25_asy.c static int x25_asy_esc(unsigned char *p, unsigned char *d, int len);
p                 220 drivers/net/wan/x25_asy.c 	unsigned char *p;
p                 233 drivers/net/wan/x25_asy.c 	p = icp;
p                 234 drivers/net/wan/x25_asy.c 	count = x25_asy_esc(p, sl->xbuff, len);
p                  75 drivers/net/wan/z85230.c static inline int z8530_read_port(unsigned long p)
p                  77 drivers/net/wan/z85230.c 	u8 r=inb(Z8530_PORT_OF(p));
p                  78 drivers/net/wan/z85230.c 	if(p&Z8530_PORT_SLEEP)	/* gcc should figure this out efficiently ! */
p                  99 drivers/net/wan/z85230.c static inline void z8530_write_port(unsigned long p, u8 d)
p                 101 drivers/net/wan/z85230.c 	outb(d,Z8530_PORT_OF(p));
p                 102 drivers/net/wan/z85230.c 	if(p&Z8530_PORT_SLEEP)
p                1785 drivers/net/wireless/ath/ath10k/debug.c #define ATH10K_DFS_STAT(s, p) (\
p                1787 drivers/net/wireless/ath/ath10k/debug.c 			 ar->debug.dfs_stats.p))
p                1789 drivers/net/wireless/ath/ath10k/debug.c #define ATH10K_DFS_POOL_STAT(s, p) (\
p                1791 drivers/net/wireless/ath/ath10k/debug.c 			 ar->debug.dfs_pool_stats.p))
p                6822 drivers/net/wireless/ath/ath10k/mac.c 	struct wmi_wmm_params_arg *p = NULL;
p                6829 drivers/net/wireless/ath/ath10k/mac.c 		p = &arvif->wmm_params.ac_vo;
p                6832 drivers/net/wireless/ath/ath10k/mac.c 		p = &arvif->wmm_params.ac_vi;
p                6835 drivers/net/wireless/ath/ath10k/mac.c 		p = &arvif->wmm_params.ac_be;
p                6838 drivers/net/wireless/ath/ath10k/mac.c 		p = &arvif->wmm_params.ac_bk;
p                6842 drivers/net/wireless/ath/ath10k/mac.c 	if (WARN_ON(!p)) {
p                6847 drivers/net/wireless/ath/ath10k/mac.c 	p->cwmin = params->cw_min;
p                6848 drivers/net/wireless/ath/ath10k/mac.c 	p->cwmax = params->cw_max;
p                6849 drivers/net/wireless/ath/ath10k/mac.c 	p->aifs = params->aifs;
p                6856 drivers/net/wireless/ath/ath10k/mac.c 	p->txop = params->txop * 32;
p                 144 drivers/net/wireless/ath/ath5k/debug.c static void reg_stop(struct seq_file *seq, void *p)
p                 149 drivers/net/wireless/ath/ath5k/debug.c static void *reg_next(struct seq_file *seq, void *p, loff_t *pos)
p                 155 drivers/net/wireless/ath/ath5k/debug.c static int reg_show(struct seq_file *seq, void *p)
p                 158 drivers/net/wireless/ath/ath5k/debug.c 	struct reg *r = p;
p                2769 drivers/net/wireless/ath/ath6kl/cfg80211.c 	struct wmi_connect_cmd p;
p                2822 drivers/net/wireless/ath/ath6kl/cfg80211.c 	memset(&p, 0, sizeof(p));
p                2828 drivers/net/wireless/ath/ath6kl/cfg80211.c 				p.auth_mode |= WPA_AUTH;
p                2830 drivers/net/wireless/ath/ath6kl/cfg80211.c 				p.auth_mode |= WPA2_AUTH;
p                2834 drivers/net/wireless/ath/ath6kl/cfg80211.c 				p.auth_mode |= WPA_PSK_AUTH;
p                2836 drivers/net/wireless/ath/ath6kl/cfg80211.c 				p.auth_mode |= WPA2_PSK_AUTH;
p                2840 drivers/net/wireless/ath/ath6kl/cfg80211.c 	if (p.auth_mode == 0)
p                2841 drivers/net/wireless/ath/ath6kl/cfg80211.c 		p.auth_mode = NONE_AUTH;
p                2842 drivers/net/wireless/ath/ath6kl/cfg80211.c 	vif->auth_mode = p.auth_mode;
p                2848 drivers/net/wireless/ath/ath6kl/cfg80211.c 			p.prwise_crypto_type |= WEP_CRYPT;
p                2851 drivers/net/wireless/ath/ath6kl/cfg80211.c 			p.prwise_crypto_type |= TKIP_CRYPT;
p                2854 drivers/net/wireless/ath/ath6kl/cfg80211.c 			p.prwise_crypto_type |= AES_CRYPT;
p                2857 drivers/net/wireless/ath/ath6kl/cfg80211.c 			p.prwise_crypto_type |= WAPI_CRYPT;
p                2861 drivers/net/wireless/ath/ath6kl/cfg80211.c 	if (p.prwise_crypto_type == 0) {
p                2862 drivers/net/wireless/ath/ath6kl/cfg80211.c 		p.prwise_crypto_type = NONE_CRYPT;
p                2871 drivers/net/wireless/ath/ath6kl/cfg80211.c 		p.grp_crypto_type = WEP_CRYPT;
p                2874 drivers/net/wireless/ath/ath6kl/cfg80211.c 		p.grp_crypto_type = TKIP_CRYPT;
p                2877 drivers/net/wireless/ath/ath6kl/cfg80211.c 		p.grp_crypto_type = AES_CRYPT;
p                2880 drivers/net/wireless/ath/ath6kl/cfg80211.c 		p.grp_crypto_type = WAPI_CRYPT;
p                2883 drivers/net/wireless/ath/ath6kl/cfg80211.c 		p.grp_crypto_type = NONE_CRYPT;
p                2888 drivers/net/wireless/ath/ath6kl/cfg80211.c 	p.nw_type = AP_NETWORK;
p                2891 drivers/net/wireless/ath/ath6kl/cfg80211.c 	p.ssid_len = vif->ssid_len;
p                2892 drivers/net/wireless/ath/ath6kl/cfg80211.c 	memcpy(p.ssid, vif->ssid, vif->ssid_len);
p                2893 drivers/net/wireless/ath/ath6kl/cfg80211.c 	p.dot11_auth_mode = vif->dot11_auth_mode;
p                2894 drivers/net/wireless/ath/ath6kl/cfg80211.c 	p.ch = cpu_to_le16(info->chandef.chan->center_freq);
p                2902 drivers/net/wireless/ath/ath6kl/cfg80211.c 		p.nw_subtype = SUBTYPE_P2PGO;
p                2908 drivers/net/wireless/ath/ath6kl/cfg80211.c 		p.nw_subtype = SUBTYPE_NONE;
p                2948 drivers/net/wireless/ath/ath6kl/cfg80211.c 	memcpy(&vif->profile, &p, sizeof(p));
p                2949 drivers/net/wireless/ath/ath6kl/cfg80211.c 	res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p);
p                 821 drivers/net/wireless/ath/ath6kl/main.c 	struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr;
p                 828 drivers/net/wireless/ath/ath6kl/main.c 		if (len < sizeof(*p))
p                 833 drivers/net/wireless/ath/ath6kl/main.c 			st_p = &p->sta[ac];
p                3389 drivers/net/wireless/ath/ath6kl/wmi.c 				 struct wmi_connect_cmd *p)
p                3400 drivers/net/wireless/ath/ath6kl/wmi.c 	memcpy(cm, p, sizeof(*cm));
p                3406 drivers/net/wireless/ath/ath6kl/wmi.c 		   __func__, p->nw_type, p->auth_mode, le16_to_cpu(p->ch),
p                3407 drivers/net/wireless/ath/ath6kl/wmi.c 		   le32_to_cpu(p->ctrl_flags), res);
p                3559 drivers/net/wireless/ath/ath6kl/wmi.c 	struct wmi_set_appie_cmd *p;
p                3561 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*p) + ie_len);
p                3568 drivers/net/wireless/ath/ath6kl/wmi.c 	p = (struct wmi_set_appie_cmd *) skb->data;
p                3569 drivers/net/wireless/ath/ath6kl/wmi.c 	p->mgmt_frm_type = mgmt_frm_type;
p                3570 drivers/net/wireless/ath/ath6kl/wmi.c 	p->ie_len = ie_len;
p                3573 drivers/net/wireless/ath/ath6kl/wmi.c 		memcpy(p->ie_info, ie, ie_len);
p                3583 drivers/net/wireless/ath/ath6kl/wmi.c 	struct wmi_set_ie_cmd *p;
p                3585 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*p) + ie_len);
p                3591 drivers/net/wireless/ath/ath6kl/wmi.c 	p = (struct wmi_set_ie_cmd *) skb->data;
p                3592 drivers/net/wireless/ath/ath6kl/wmi.c 	p->ie_id = ie_id;
p                3593 drivers/net/wireless/ath/ath6kl/wmi.c 	p->ie_field = ie_field;
p                3594 drivers/net/wireless/ath/ath6kl/wmi.c 	p->ie_len = ie_len;
p                3596 drivers/net/wireless/ath/ath6kl/wmi.c 		memcpy(p->ie_info, ie_info, ie_len);
p                3623 drivers/net/wireless/ath/ath6kl/wmi.c 	struct wmi_remain_on_chnl_cmd *p;
p                3625 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*p));
p                3631 drivers/net/wireless/ath/ath6kl/wmi.c 	p = (struct wmi_remain_on_chnl_cmd *) skb->data;
p                3632 drivers/net/wireless/ath/ath6kl/wmi.c 	p->freq = cpu_to_le32(freq);
p                3633 drivers/net/wireless/ath/ath6kl/wmi.c 	p->duration = cpu_to_le32(dur);
p                3647 drivers/net/wireless/ath/ath6kl/wmi.c 	struct wmi_send_action_cmd *p;
p                3657 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*p) + data_len);
p                3671 drivers/net/wireless/ath/ath6kl/wmi.c 	p = (struct wmi_send_action_cmd *) skb->data;
p                3672 drivers/net/wireless/ath/ath6kl/wmi.c 	p->id = cpu_to_le32(id);
p                3673 drivers/net/wireless/ath/ath6kl/wmi.c 	p->freq = cpu_to_le32(freq);
p                3674 drivers/net/wireless/ath/ath6kl/wmi.c 	p->wait = cpu_to_le32(wait);
p                3675 drivers/net/wireless/ath/ath6kl/wmi.c 	p->len = cpu_to_le16(data_len);
p                3676 drivers/net/wireless/ath/ath6kl/wmi.c 	memcpy(p->data, data, data_len);
p                3686 drivers/net/wireless/ath/ath6kl/wmi.c 	struct wmi_send_mgmt_cmd *p;
p                3696 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*p) + data_len);
p                3710 drivers/net/wireless/ath/ath6kl/wmi.c 	p = (struct wmi_send_mgmt_cmd *) skb->data;
p                3711 drivers/net/wireless/ath/ath6kl/wmi.c 	p->id = cpu_to_le32(id);
p                3712 drivers/net/wireless/ath/ath6kl/wmi.c 	p->freq = cpu_to_le32(freq);
p                3713 drivers/net/wireless/ath/ath6kl/wmi.c 	p->wait = cpu_to_le32(wait);
p                3714 drivers/net/wireless/ath/ath6kl/wmi.c 	p->no_cck = cpu_to_le32(no_cck);
p                3715 drivers/net/wireless/ath/ath6kl/wmi.c 	p->len = cpu_to_le16(data_len);
p                3716 drivers/net/wireless/ath/ath6kl/wmi.c 	memcpy(p->data, data, data_len);
p                3752 drivers/net/wireless/ath/ath6kl/wmi.c 	struct wmi_p2p_probe_response_cmd *p;
p                3753 drivers/net/wireless/ath/ath6kl/wmi.c 	size_t cmd_len = sizeof(*p) + data_len;
p                3765 drivers/net/wireless/ath/ath6kl/wmi.c 	p = (struct wmi_p2p_probe_response_cmd *) skb->data;
p                3766 drivers/net/wireless/ath/ath6kl/wmi.c 	p->freq = cpu_to_le32(freq);
p                3767 drivers/net/wireless/ath/ath6kl/wmi.c 	memcpy(p->destination_addr, dst, ETH_ALEN);
p                3768 drivers/net/wireless/ath/ath6kl/wmi.c 	p->len = cpu_to_le16(data_len);
p                3769 drivers/net/wireless/ath/ath6kl/wmi.c 	memcpy(p->data, data, data_len);
p                3778 drivers/net/wireless/ath/ath6kl/wmi.c 	struct wmi_probe_req_report_cmd *p;
p                3780 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*p));
p                3786 drivers/net/wireless/ath/ath6kl/wmi.c 	p = (struct wmi_probe_req_report_cmd *) skb->data;
p                3787 drivers/net/wireless/ath/ath6kl/wmi.c 	p->enable = enable ? 1 : 0;
p                3795 drivers/net/wireless/ath/ath6kl/wmi.c 	struct wmi_get_p2p_info *p;
p                3797 drivers/net/wireless/ath/ath6kl/wmi.c 	skb = ath6kl_wmi_get_new_buf(sizeof(*p));
p                3803 drivers/net/wireless/ath/ath6kl/wmi.c 	p = (struct wmi_get_p2p_info *) skb->data;
p                3804 drivers/net/wireless/ath/ath6kl/wmi.c 	p->info_req_flags = cpu_to_le32(info_req_flags);
p                2680 drivers/net/wireless/ath/ath6kl/wmi.h 				 struct wmi_connect_cmd *p);
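
The ath6kl wmi.c excerpts above follow one pattern for every command: allocate a buffer sized for the fixed command header plus the variable payload, fill the header fields, then memcpy the payload behind it. A simplified userspace sketch of that header-plus-payload construction, with an illustrative structure modeled on the wmi_set_appie_cmd fields visible above:

#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct set_appie_cmd {
	uint8_t mgmt_frm_type;
	uint8_t ie_len;
	uint8_t ie_info[];              /* variable-length payload */
};

static struct set_appie_cmd *build_set_appie(uint8_t type, const uint8_t *ie, uint8_t ie_len)
{
	struct set_appie_cmd *p = malloc(sizeof(*p) + ie_len);

	if (!p)
		return NULL;
	p->mgmt_frm_type = type;
	p->ie_len = ie_len;
	if (ie_len)
		memcpy(p->ie_info, ie, ie_len);
	return p;                       /* caller frees after sending */
}
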
p                 190 drivers/net/wireless/ath/ath9k/common-debug.c #define PHY_ERR(s, p) \
p                 192 drivers/net/wireless/ath/ath9k/common-debug.c 			 rxstats->phy_err_stats[p]);
p                  27 drivers/net/wireless/ath/ath9k/dfs_debug.c #define ATH9K_DFS_STAT(s, p) \
p                  29 drivers/net/wireless/ath/ath9k/dfs_debug.c 			 sc->debug.stats.dfs_stats.p);
p                  30 drivers/net/wireless/ath/ath9k/dfs_debug.c #define ATH9K_DFS_POOL_STAT(s, p) \
p                  32 drivers/net/wireless/ath/ath9k/dfs_debug.c 			 dfs_pool_stats.p);
p                 103 drivers/net/wireless/ath/dfs_pri_detector.c 		struct pulse_elem *p, *p0;
p                 105 drivers/net/wireless/ath/dfs_pri_detector.c 		list_for_each_entry_safe(p, p0, &pulse_pool, head) {
p                 106 drivers/net/wireless/ath/dfs_pri_detector.c 			list_del(&p->head);
p                 108 drivers/net/wireless/ath/dfs_pri_detector.c 			kfree(p);
p                 171 drivers/net/wireless/ath/dfs_pri_detector.c 	struct pulse_elem *p = pulse_queue_get_tail(pde);
p                 172 drivers/net/wireless/ath/dfs_pri_detector.c 	if (p != NULL) {
p                 173 drivers/net/wireless/ath/dfs_pri_detector.c 		list_del_init(&p->head);
p                 176 drivers/net/wireless/ath/dfs_pri_detector.c 		pool_put_pulse_elem(p);
p                 185 drivers/net/wireless/ath/dfs_pri_detector.c 	struct pulse_elem *p;
p                 195 drivers/net/wireless/ath/dfs_pri_detector.c 	while ((p = pulse_queue_get_tail(pde)) != NULL) {
p                 196 drivers/net/wireless/ath/dfs_pri_detector.c 		if (p->ts >= min_valid_ts)
p                 204 drivers/net/wireless/ath/dfs_pri_detector.c 	struct pulse_elem *p = pool_get_pulse_elem();
p                 205 drivers/net/wireless/ath/dfs_pri_detector.c 	if (p == NULL) {
p                 206 drivers/net/wireless/ath/dfs_pri_detector.c 		p = kmalloc(sizeof(*p), GFP_ATOMIC);
p                 207 drivers/net/wireless/ath/dfs_pri_detector.c 		if (p == NULL) {
p                 214 drivers/net/wireless/ath/dfs_pri_detector.c 	INIT_LIST_HEAD(&p->head);
p                 215 drivers/net/wireless/ath/dfs_pri_detector.c 	p->ts = ts;
p                 216 drivers/net/wireless/ath/dfs_pri_detector.c 	list_add(&p->head, &pde->pulses);
p                 228 drivers/net/wireless/ath/dfs_pri_detector.c 	struct pulse_elem *p;
p                 229 drivers/net/wireless/ath/dfs_pri_detector.c 	list_for_each_entry(p, &pde->pulses, head) {
p                 234 drivers/net/wireless/ath/dfs_pri_detector.c 		u32 delta_ts = ts - p->ts;
p                 247 drivers/net/wireless/ath/dfs_pri_detector.c 		ps.first_ts = p->ts;
p                 250 drivers/net/wireless/ath/dfs_pri_detector.c 			pde->rs->pri_max, ts - p->ts);
p                 254 drivers/net/wireless/ath/dfs_pri_detector.c 		p2 = p;
p                 362 drivers/net/wireless/ath/dfs_pri_detector.c 	struct pulse_elem *p, *p0;
p                 367 drivers/net/wireless/ath/dfs_pri_detector.c 	list_for_each_entry_safe(p, p0, &pde->pulses, head) {
p                 368 drivers/net/wireless/ath/dfs_pri_detector.c 		list_del_init(&p->head);
p                 369 drivers/net/wireless/ath/dfs_pri_detector.c 		pool_put_pulse_elem(p);
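
The dfs_pri_detector.c excerpts above recycle pulse elements through a shared pool: released elements go onto a free list and allocations try the pool before falling back to kmalloc. A minimal userspace sketch of that pooling (locking omitted, names illustrative):

#include <stdlib.h>

struct pulse_elem {
	struct pulse_elem *next;
	unsigned long long ts;
};

static struct pulse_elem *pulse_pool;      /* free list of recycled elements */

static struct pulse_elem *pool_get(void)
{
	struct pulse_elem *p = pulse_pool;

	if (p)
		pulse_pool = p->next;          /* reuse a recycled element */
	else
		p = malloc(sizeof(*p));        /* pool empty: fall back to malloc */
	return p;
}

static void pool_put(struct pulse_elem *p)
{
	p->next = pulse_pool;
	pulse_pool = p;
}
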
p                 275 drivers/net/wireless/ath/wil6210/debugfs.c static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
p                 278 drivers/net/wireless/ath/wil6210/debugfs.c 	seq_hex_dump(s, prefix, DUMP_PREFIX_NONE, 16, 1, p, len, false);
p                1059 drivers/net/wireless/ath/wil6210/debugfs.c 	void *p = skb->data;
p                1063 drivers/net/wireless/ath/wil6210/debugfs.c 	wil_seq_hexdump(s, p, len, "      : ");
p                1071 drivers/net/wireless/ath/wil6210/debugfs.c 			p = skb_frag_address_safe(frag);
p                1073 drivers/net/wireless/ath/wil6210/debugfs.c 			wil_seq_hexdump(s, p, len, "      : ");
p                1428 drivers/net/wireless/ath/wil6210/debugfs.c 		struct wil_sta_info *p = &wil->sta[i];
p                1433 drivers/net/wireless/ath/wil6210/debugfs.c 		switch (p->status) {
p                1444 drivers/net/wireless/ath/wil6210/debugfs.c 		mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX;
p                1446 drivers/net/wireless/ath/wil6210/debugfs.c 			   i, mid, p->addr, status);
p                1448 drivers/net/wireless/ath/wil6210/debugfs.c 		if (p->status != wil_sta_connected)
p                1624 drivers/net/wireless/ath/wil6210/debugfs.c __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
p                1630 drivers/net/wireless/ath/wil6210/debugfs.c 		struct wil_sta_info *p = &wil->sta[i];
p                1636 drivers/net/wireless/ath/wil6210/debugfs.c 		switch (p->status) {
p                1645 drivers/net/wireless/ath/wil6210/debugfs.c 			aid = p->aid;
p                1648 drivers/net/wireless/ath/wil6210/debugfs.c 		mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX;
p                1653 drivers/net/wireless/ath/wil6210/debugfs.c 			    p->status == wil_sta_connected)
p                1659 drivers/net/wireless/ath/wil6210/debugfs.c 				   i, p->addr, p->stats.ft_roams, mid, aid);
p                1662 drivers/net/wireless/ath/wil6210/debugfs.c 				   p->addr, status, mid, aid);
p                1664 drivers/net/wireless/ath/wil6210/debugfs.c 		if (p->status == wil_sta_connected) {
p                1665 drivers/net/wireless/ath/wil6210/debugfs.c 			spin_lock_bh(&p->tid_rx_lock);
p                1667 drivers/net/wireless/ath/wil6210/debugfs.c 				struct wil_tid_ampdu_rx *r = p->tid_rx[tid];
p                1669 drivers/net/wireless/ath/wil6210/debugfs.c 						&p->tid_crypto_rx[tid];
p                1679 drivers/net/wireless/ath/wil6210/debugfs.c 					       &p->group_crypto_rx);
p                1680 drivers/net/wireless/ath/wil6210/debugfs.c 			spin_unlock_bh(&p->tid_rx_lock);
p                1683 drivers/net/wireless/ath/wil6210/debugfs.c 				   p->stats.rx_non_data_frame,
p                1684 drivers/net/wireless/ath/wil6210/debugfs.c 				   p->stats.rx_short_frame,
p                1685 drivers/net/wireless/ath/wil6210/debugfs.c 				   p->stats.rx_large_frame,
p                1686 drivers/net/wireless/ath/wil6210/debugfs.c 				   p->stats.rx_replay);
p                1689 drivers/net/wireless/ath/wil6210/debugfs.c 				   p->stats.rx_mic_error,
p                1690 drivers/net/wireless/ath/wil6210/debugfs.c 				   p->stats.rx_key_error,
p                1691 drivers/net/wireless/ath/wil6210/debugfs.c 				   p->stats.rx_amsdu_error,
p                1692 drivers/net/wireless/ath/wil6210/debugfs.c 				   p->stats.rx_csum_err);
p                1695 drivers/net/wireless/ath/wil6210/debugfs.c 			for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs);
p                1698 drivers/net/wireless/ath/wil6210/debugfs.c 					   p->stats.rx_per_mcs[mcs]);
p                1733 drivers/net/wireless/ath/wil6210/debugfs.c __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
p                1739 drivers/net/wireless/ath/wil6210/debugfs.c 		struct wil_sta_info *p = &wil->sta[i];
p                1744 drivers/net/wireless/ath/wil6210/debugfs.c 		if (!p->tx_latency_bins)
p                1747 drivers/net/wireless/ath/wil6210/debugfs.c 		switch (p->status) {
p                1756 drivers/net/wireless/ath/wil6210/debugfs.c 			aid = p->aid;
p                1759 drivers/net/wireless/ath/wil6210/debugfs.c 		mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX;
p                1760 drivers/net/wireless/ath/wil6210/debugfs.c 		seq_printf(s, "[%d] %pM %s MID %d AID %d\n", i, p->addr, status,
p                1763 drivers/net/wireless/ath/wil6210/debugfs.c 		if (p->status == wil_sta_connected) {
p                1765 drivers/net/wireless/ath/wil6210/debugfs.c 			u64 tx_latency_avg = p->stats.tx_latency_total_us;
p                1770 drivers/net/wireless/ath/wil6210/debugfs.c 					   p->tx_latency_bins[bin]);
p                1771 drivers/net/wireless/ath/wil6210/debugfs.c 				num_packets += p->tx_latency_bins[bin];
p                1778 drivers/net/wireless/ath/wil6210/debugfs.c 				   p->stats.tx_latency_min_us,
p                1780 drivers/net/wireless/ath/wil6210/debugfs.c 				   p->stats.tx_latency_max_us);
p                 168 drivers/net/wireless/ath/wil6210/pmc.c 			u32 *p = (u32 *)pmc->descriptors[i].va + j;
p                 169 drivers/net/wireless/ath/wil6210/pmc.c 			*p = PCM_DATA_INVALID_DW_VAL | j;
p                1297 drivers/net/wireless/atmel/atmel.c static int atmel_set_mac_address(struct net_device *dev, void *p)
p                1299 drivers/net/wireless/atmel/atmel.c 	struct sockaddr *addr = p;
p                 731 drivers/net/wireless/broadcom/b43/b43.h 	struct ieee80211_tx_queue_params p;
p                  42 drivers/net/wireless/broadcom/b43/debugfs.c 	void *p;
p                  44 drivers/net/wireless/broadcom/b43/debugfs.c 	p = dev->dfsentry;
p                  45 drivers/net/wireless/broadcom/b43/debugfs.c 	p += dfops->file_struct_offset;
p                  47 drivers/net/wireless/broadcom/b43/debugfs.c 	return p;
p                3645 drivers/net/wireless/broadcom/b43/main.c 				  const struct ieee80211_tx_queue_params *p,
p                3655 drivers/net/wireless/broadcom/b43/main.c 	bslots = b43_read16(dev, B43_MMIO_RNG) & p->cw_min;
p                3659 drivers/net/wireless/broadcom/b43/main.c 	params[B43_QOSPARAM_TXOP] = p->txop * 32;
p                3660 drivers/net/wireless/broadcom/b43/main.c 	params[B43_QOSPARAM_CWMIN] = p->cw_min;
p                3661 drivers/net/wireless/broadcom/b43/main.c 	params[B43_QOSPARAM_CWMAX] = p->cw_max;
p                3662 drivers/net/wireless/broadcom/b43/main.c 	params[B43_QOSPARAM_CWCUR] = p->cw_min;
p                3663 drivers/net/wireless/broadcom/b43/main.c 	params[B43_QOSPARAM_AIFS] = p->aifs;
p                3665 drivers/net/wireless/broadcom/b43/main.c 	params[B43_QOSPARAM_REGGAP] = bslots + p->aifs;
p                3709 drivers/net/wireless/broadcom/b43/main.c 		b43_qos_params_upload(dev, &(params->p),
p                3730 drivers/net/wireless/broadcom/b43/main.c 			params->p.txop = 0;
p                3731 drivers/net/wireless/broadcom/b43/main.c 			params->p.aifs = 2;
p                3732 drivers/net/wireless/broadcom/b43/main.c 			params->p.cw_min = 0x0001;
p                3733 drivers/net/wireless/broadcom/b43/main.c 			params->p.cw_max = 0x0001;
p                3736 drivers/net/wireless/broadcom/b43/main.c 			params->p.txop = 0;
p                3737 drivers/net/wireless/broadcom/b43/main.c 			params->p.aifs = 2;
p                3738 drivers/net/wireless/broadcom/b43/main.c 			params->p.cw_min = 0x0001;
p                3739 drivers/net/wireless/broadcom/b43/main.c 			params->p.cw_max = 0x0001;
p                3742 drivers/net/wireless/broadcom/b43/main.c 			params->p.txop = 0;
p                3743 drivers/net/wireless/broadcom/b43/main.c 			params->p.aifs = 3;
p                3744 drivers/net/wireless/broadcom/b43/main.c 			params->p.cw_min = 0x0001;
p                3745 drivers/net/wireless/broadcom/b43/main.c 			params->p.cw_max = 0x03FF;
p                3748 drivers/net/wireless/broadcom/b43/main.c 			params->p.txop = 0;
p                3749 drivers/net/wireless/broadcom/b43/main.c 			params->p.aifs = 7;
p                3750 drivers/net/wireless/broadcom/b43/main.c 			params->p.cw_min = 0x0001;
p                3751 drivers/net/wireless/broadcom/b43/main.c 			params->p.cw_max = 0x03FF;
p                3806 drivers/net/wireless/broadcom/b43/main.c 	memcpy(&(wl->qos_params[queue].p), params, sizeof(*params));
p                3808 drivers/net/wireless/broadcom/b43/main.c 	b43_qos_params_upload(dev, &(wl->qos_params[queue].p),
p                 127 drivers/net/wireless/broadcom/b43/pio.c 	struct b43_pio_txpacket *p;
p                 149 drivers/net/wireless/broadcom/b43/pio.c 		p = &(q->packets[i]);
p                 150 drivers/net/wireless/broadcom/b43/pio.c 		INIT_LIST_HEAD(&p->list);
p                 151 drivers/net/wireless/broadcom/b43/pio.c 		p->index = i;
p                 152 drivers/net/wireless/broadcom/b43/pio.c 		p->queue = q;
p                 153 drivers/net/wireless/broadcom/b43/pio.c 		list_add(&p->list, &q->packets_list);
p                 572 drivers/net/wireless/broadcom/b43legacy/b43legacy.h 	struct ieee80211_tx_queue_params p;
p                  45 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	void *p;
p                  47 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	p = dev->dfsentry;
p                  48 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	p += dfops->file_struct_offset;
p                  50 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	return p;
p                 296 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c 	s8 *p;
p                 314 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c 	p = buf;
p                 317 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c 	memcpy(p, prefix, prefixlen);
p                 318 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c 	p += prefixlen;
p                 321 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c 	memcpy(p, name, namelen);
p                 322 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c 	p += namelen;
p                 326 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c 	memcpy(p, &bsscfgidx_le, sizeof(bsscfgidx_le));
p                 327 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c 	p += sizeof(bsscfgidx_le);
p                 331 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c 		memcpy(p, data, datalen);
p                1249 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			 struct sk_buff *p)
p                1262 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	entry = brcmf_skbcb(p)->mac;
p                1264 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		bphy_err(drvr, "no mac descriptor found for skb %p\n", p);
p                1268 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	brcmf_dbg(DATA, "enter: fifo %d skb %p\n", fifo, p);
p                1284 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		fr_new = brcmf_skb_htod_tag_get_field(p, FREERUN);
p                1300 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			__skb_queue_tail(queue, p);
p                1309 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				__skb_queue_after(queue, p_tail, p);
p                1312 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				__skb_insert(p, p_tail->prev, p_tail, queue);
p                1320 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	} else if (brcmu_pktq_penq(&entry->psq, prec, p) == NULL) {
p                1330 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	brcmf_skbcb(p)->state = state;
p                1338 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				     brcmf_skb_if_flags_get_field(p, INDEX));
p                1346 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	struct sk_buff *p;
p                1367 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		p = brcmu_pktq_mdeq(&entry->psq, pmsk << (fifo * 2), &prec_out);
p                1368 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		if (p == NULL) {
p                1373 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				p = brcmu_pktq_mdeq(&entry->psq,
p                1377 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		if  (p == NULL)
p                1380 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		brcmf_fws_macdesc_use_req_credit(entry, p);
p                1385 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 					     brcmf_skb_if_flags_get_field(p,
p                1403 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	p = NULL;
p                1405 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	brcmf_dbg(DATA, "exit: fifo %d skb %p\n", fifo, p);
p                1406 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	return p;
p                1973 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				   struct sk_buff *p)
p                1975 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
p                1980 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		brcmf_skb_htod_tag_set_field(p, GENERATION, entry->generation);
p                1982 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	if (brcmf_skb_if_flags_get_field(p, REQUESTED)) {
p                1989 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	brcmf_skb_htod_tag_set_field(p, FLAGS, flags);
p                1990 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	return brcmf_fws_hdrpush(fws, p);
p                2097 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c static int brcmf_fws_assign_htod(struct brcmf_fws_info *fws, struct sk_buff *p,
p                2100 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
p                2106 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	brcmf_skb_htod_tag_set_field(p, HSLOT, hslot);
p                2107 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	brcmf_skb_htod_tag_set_field(p, FREERUN, skcb->mac->seq[fifo]);
p                2108 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	brcmf_skb_htod_tag_set_field(p, FIFO, fifo);
p                2109 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	rc = brcmf_fws_hanger_pushpkt(&fws->hanger, p, hslot);
p                 643 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c static void pkt_align(struct sk_buff *p, int len, int align)
p                 646 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	datalign = (unsigned long)(p->data);
p                 649 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		skb_pull(p, datalign);
p                 650 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	__skb_trim(p, len);
p                1246 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	struct sk_buff *p;
p                1250 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	skb_queue_walk(&bus->glom, p)
p                1251 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		total += p->len;
p                1306 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c #define SDPCM_GLOMDESC(p)		(((u8 *)p)[1] & 0x80)
p                2717 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	struct sk_buff *p;
p                2732 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		p = brcmu_pktq_peek_tail(q, &eprec);
p                2743 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		p = brcmu_pktq_pdeq_tail(q, eprec);
p                2744 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		if (p == NULL)
p                2746 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		brcmu_pkt_buf_free_skb(p);
p                2750 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	p = brcmu_pktq_penq(q, prec, pkt);
p                2751 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	if (p == NULL)
p                2754 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 	return p != NULL;
p                 518 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 			    struct sk_buff *p)
p                 524 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);
p                 526 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 	struct d11txh *txh = (struct d11txh *)p->data;
p                 559 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 		if (p->priority != first->priority)
p                 568 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 	session->dma_len += p->len;
p                 570 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 	tid = (u8)p->priority;
p                 621 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 	skb_queue_tail(&session->skb_list, p);
p                 838 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 			      struct sk_buff *p, struct tx_status *txs,
p                 857 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);
p                 865 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 	tid = (u8) (p->priority);
p                 938 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 	while (p) {
p                 939 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 		tx_info = IEEE80211_SKB_CB(p);
p                 940 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 		txh = (struct d11txh *) p->data;
p                 978 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 				skb_pull(p, D11_PHY_HDR_LEN);
p                 979 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 				skb_pull(p, D11_TXH_LEN);
p                 982 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 							    p);
p                 992 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 				ret = brcms_c_txfifo(wlc, queue, p);
p                1006 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 				skb_pull(p, D11_PHY_HDR_LEN);
p                1007 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 				skb_pull(p, D11_TXH_LEN);
p                1012 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 							    p);
p                1022 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 		p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED);
p                1031 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 		     struct sk_buff *p, struct tx_status *txs)
p                1039 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 	tx_info = IEEE80211_SKB_CB(p);
p                1064 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 		ini = &scb_ampdu->ini[p->priority];
p                1065 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 		brcms_c_ampdu_dotxstatus_complete(ampdu, scb, p, txs, s1, s2);
p                1071 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 		while (p) {
p                1072 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 			tx_info = IEEE80211_SKB_CB(p);
p                1073 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 			txh = (struct d11txh *) p->data;
p                1077 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 			brcmu_pkt_buf_free_skb(p);
p                1082 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 			p = dma_getnexttxp(wlc->hw->di[queue],
p                  43 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.h 			    struct sk_buff *p);
p                  49 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.h 			      struct sk_buff *p, struct tx_status *txs);
p                 929 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	struct sk_buff *p, *next;
p                 937 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	p = _dma_getnextrxp(di, false);
p                 938 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	if (p == NULL)
p                 941 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	len = le16_to_cpu(*(__le16 *) (p->data));
p                 943 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	dma_spin_for_len(len, p);
p                 947 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	__skb_trim(p, pkt_len);
p                 948 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	skb_queue_tail(&dma_frames, p);
p                 953 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 		while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
p                 955 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 			__skb_trim(p, pkt_len);
p                 956 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 			skb_queue_tail(&dma_frames, p);
p                 978 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 			skb_queue_walk_safe(&dma_frames, p, next) {
p                 979 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 				skb_unlink(p, &dma_frames);
p                 980 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 				brcmu_pkt_buf_free_skb(p);
p                1026 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	struct sk_buff *p;
p                1058 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 		p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
p                1060 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 		if (p == NULL) {
p                1073 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 			skb_pull(p, extra_offset);
p                1078 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 		*(u32 *) (p->data) = 0;
p                1080 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 		pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
p                1083 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 			brcmu_pkt_buf_free_skb(p);
p                1088 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 		di->rxp[rxout] = p;
p                1112 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	struct sk_buff *p;
p                1116 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	while ((p = _dma_getnextrxp(di, true)))
p                1117 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 		brcmu_pkt_buf_free_skb(p);
p                1210 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	struct sk_buff *p;
p                1221 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	while ((p = dma_getnexttxp(pub, range))) {
p                1224 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 			brcmu_pkt_buf_free_skb(p);
p                1271 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c static void dma_txenq(struct dma_info *di, struct sk_buff *p)
p                1287 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	data = p->data;
p                1288 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	len = p->len;
p                1294 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 		brcmu_pkt_buf_free_skb(p);
p                1311 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	di->txp[prevtxd(di, txout)] = p;
p                1320 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	struct sk_buff *p;
p                1335 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 		p = skb_dequeue(&session->skb_list);
p                1336 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 		dma_txenq(di, p);
p                1344 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c static void prep_ampdu_frame(struct dma_info *di, struct sk_buff *p)
p                1349 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	ret = brcms_c_ampdu_add_frame(session, p);
p                1356 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 		ret = brcms_c_ampdu_add_frame(session, p);
p                1380 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	       struct sk_buff *p)
p                1388 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	if (p->len == 0)
p                1395 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	tx_info = IEEE80211_SKB_CB(p);
p                1398 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 		prep_ampdu_frame(di, p);
p                1400 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 		dma_txenq(di, p);
p                1424 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	brcmu_pkt_buf_free_skb(p);
p                1656 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c void brcms_ucode_free_buf(void *p)
p                1658 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	kvfree(p);
p                 836 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	struct sk_buff *p = NULL;
p                 876 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED);
p                 877 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	if (p == NULL) {
p                 882 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	txh = (struct d11txh *) (p->data);
p                 893 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	tx_info = IEEE80211_SKB_CB(p);
p                 900 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		brcms_c_ampdu_dotxstatus(wlc->ampdu, scb, p, txs);
p                 992 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	totlen = p->len;
p                 997 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		skb_pull(p, D11_PHY_HDR_LEN);
p                 998 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		skb_pull(p, D11_TXH_LEN);
p                 999 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw, p);
p                1013 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		brcmu_pkt_buf_free_skb(p);
p                2976 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	const u8 *p = (const u8 *)buf;
p                2983 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		v = p[i] | (p[i + 1] << 8);
p                2999 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	u8 *p = (u8 *) buf;
p                3007 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		p[i] = v & 0xFF;
p                3008 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		p[i + 1] = (v >> 8) & 0xFF;
p                6210 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		     struct sk_buff *p, struct scb *scb, uint frag,
p                6246 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	h = (struct ieee80211_hdr *)(p->data);
p                6250 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	len = p->len;
p                6254 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	tx_info = IEEE80211_SKB_CB(p);
p                6257 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	plcp = skb_push(p, D11_PHY_HDR_LEN);
p                6260 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	txh = (struct d11txh *) skb_push(p, D11_TXH_LEN);
p                6274 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 				scb->seqnum[p->priority]++;
p                6278 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 			seq |= (scb->seqnum[p->priority] << SEQNUM_SHIFT);
p                6756 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	ac = skb_get_queue_mapping(p);
p                6921 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p)
p                6927 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	ret = dma_txfast(wlc, dma, p);
p                6936 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	queue = skb_get_queue_mapping(p);
p                7053 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		     struct sk_buff *p,
p                7078 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	plcp = p->data;
p                7166 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		struct sk_buff *p)
p                7173 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	prep_mac80211_status(wlc, rxh, p, &rx_status);
p                7176 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	len_mpdu = p->len - D11_PHY_HDR_LEN - FCS_LEN;
p                7177 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	skb_pull(p, D11_PHY_HDR_LEN);
p                7178 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	__skb_trim(p, len_mpdu);
p                7182 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		hdr = (struct ieee80211_hdr *)p->data;
p                7187 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status));
p                7188 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p);
p                7653 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c static void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p)
p                7661 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	rxh = (struct d11rxhdr *) (p->data);
p                7664 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	skb_pull(p, BRCMS_HWRXOFF);
p                7668 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		if (p->len < 2) {
p                7671 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 				  wlc->pub->unit, p->len);
p                7674 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		skb_pull(p, 2);
p                7677 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	h = (struct ieee80211_hdr *)(p->data + D11_PHY_HDR_LEN);
p                7678 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	len = p->len;
p                7694 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	brcms_c_recvctl(wlc, rxh, p);
p                7698 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	brcmu_pkt_buf_free_skb(p);
p                7709 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	struct sk_buff *p;
p                7733 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	skb_queue_walk_safe(&recv_frames, p, next) {
p                7737 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		skb_unlink(p, &recv_frames);
p                7738 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		rxh_le = (struct d11rxhdr_le *)p->data;
p                7739 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		rxh = (struct d11rxhdr *)p->data;
p                7754 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		brcms_c_recv(wlc_hw->wlc, p);
p                 620 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p);
p                2270 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 	s32 a, b, p;
p                2274 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 	p = ((2 * b) + a) / (2 * a);
p                2276 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 	return p;
p                  48 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 				      struct sk_buff *p)
p                  56 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	skb_queue_tail(q, p);
p                  62 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	return p;
p                  67 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 					   struct sk_buff *p)
p                  75 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	skb_queue_head(q, p);
p                  81 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	return p;
p                  88 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	struct sk_buff *p;
p                  91 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	p = skb_dequeue(q);
p                  92 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	if (p == NULL)
p                  96 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	return p;
p                 111 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	struct sk_buff *p, *next;
p                 114 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	skb_queue_walk_safe(q, p, next) {
p                 115 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 		if (match_fn == NULL || match_fn(p, arg)) {
p                 116 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 			skb_unlink(p, q);
p                 118 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 			return p;
p                 128 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	struct sk_buff *p;
p                 131 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	p = skb_dequeue_tail(q);
p                 132 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	if (p == NULL)
p                 136 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	return p;
p                 145 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	struct sk_buff *p, *next;
p                 148 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	skb_queue_walk_safe(q, p, next) {
p                 149 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 		if (fn == NULL || (*fn) (p, arg)) {
p                 150 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 			skb_unlink(p, q);
p                 151 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 			brcmu_pkt_buf_free_skb(p);
p                 224 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	struct sk_buff *p;
p                 240 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	p = skb_dequeue(q);
p                 241 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	if (p == NULL)
p                 249 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	return p;
p                 300 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	struct sk_buff *p;
p                 305 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 	for (p = p0; p; p = p->next)
p                 306 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, p->data, p->len);
p                 106 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec, struct sk_buff *p);
p                 108 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h 				     struct sk_buff *p);
p                 112 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h 				      bool (*match_fn)(struct sk_buff *p,
p                2324 drivers/net/wireless/cisco/airo.c static int airo_set_mac_address(struct net_device *dev, void *p)
p                2327 drivers/net/wireless/cisco/airo.c 	struct sockaddr *addr = p;
p                5098 drivers/net/wireless/cisco/airo.c 	char *p = data->wbuffer;
p                5099 drivers/net/wireless/cisco/airo.c 	char *end = p + data->writelen;
p                5108 drivers/net/wireless/cisco/airo.c 	for (i = 0; i < 3 && p < end; i++) {
p                5111 drivers/net/wireless/cisco/airo.c 		while (*p != '\n' && j < 32)
p                5112 drivers/net/wireless/cisco/airo.c 			SSID_rid.ssids[i].ssid[j++] = *p++;
p                5117 drivers/net/wireless/cisco/airo.c 		while (*p++ != '\n')
p                3432 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	dma_addr_t p;
p                3444 drivers/net/wireless/intel/ipw2x00/ipw2100.c 					  &p);
p                3456 drivers/net/wireless/intel/ipw2x00/ipw2100.c 		priv->msg_buffers[i].info.c_struct.cmd_phys = p;
p                3534 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	struct ipw2100_priv *p = dev_get_drvdata(d);
p                3535 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	return sprintf(buf, "0x%08x\n", (int)p->config);
p                3543 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	struct ipw2100_priv *p = dev_get_drvdata(d);
p                3544 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	return sprintf(buf, "0x%08x\n", (int)p->status);
p                3552 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	struct ipw2100_priv *p = dev_get_drvdata(d);
p                3553 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	return sprintf(buf, "0x%08x\n", (int)p->capability);
p                3918 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	const char *p = buf;
p                3925 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	if (p[0] == '1' ||
p                3926 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	    (count >= 2 && tolower(p[0]) == 'o' && tolower(p[1]) == 'n')) {
p                3931 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	} else if (p[0] == '0' || (count >= 2 && tolower(p[0]) == 'o' &&
p                3932 drivers/net/wireless/intel/ipw2x00/ipw2100.c 				   tolower(p[1]) == 'f')) {
p                3937 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	} else if (tolower(p[0]) == 'r') {
p                4419 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	dma_addr_t p;
p                4441 drivers/net/wireless/intel/ipw2x00/ipw2100.c 					 &p);
p                4453 drivers/net/wireless/intel/ipw2x00/ipw2100.c 		priv->tx_buffers[i].info.d_struct.data_phys = p;
p                5755 drivers/net/wireless/intel/ipw2x00/ipw2100.c static int ipw2100_set_address(struct net_device *dev, void *p)
p                5758 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	struct sockaddr *addr = p;
p                1192 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	char *p = (char *)buf;
p                1195 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
p                1196 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		p++;
p                1197 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		if (p[0] == 'x' || p[0] == 'X')
p                1198 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			p++;
p                1199 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		val = simple_strtoul(p, &p, 16);
p                1201 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		val = simple_strtoul(p, &p, 10);
p                1202 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (p == buf)
p                1482 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	char *p = buffer;
p                1489 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
p                1490 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		p++;
p                1491 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		if (p[0] == 'x' || p[0] == 'X')
p                1492 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			p++;
p                1493 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		val = simple_strtoul(p, &p, 16);
p                1495 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		val = simple_strtoul(p, &p, 10);
p                1496 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (p == buffer) {
p                1545 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_priv *p = dev_get_drvdata(d);
p                1546 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	return sprintf(buf, "0x%08x\n", (int)p->status);
p                1554 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_priv *p = dev_get_drvdata(d);
p                1555 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	return sprintf(buf, "0x%08x\n", (int)p->config);
p                1573 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_priv *p = dev_get_drvdata(d);
p                1575 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
p                1587 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_priv *p = dev_get_drvdata(d);
p                1589 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
p                1604 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_priv *p = dev_get_drvdata(d);
p                1605 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	int n = p->eeprom_delay;
p                1612 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_priv *p = dev_get_drvdata(d);
p                1613 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	sscanf(buf, "%i", &p->eeprom_delay);
p                1623 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_priv *p = dev_get_drvdata(d);
p                1625 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
p                1633 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_priv *p = dev_get_drvdata(d);
p                1636 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
p                1647 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_priv *p = dev_get_drvdata(d);
p                1649 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	reg = ipw_read_reg32(p, 0x301100);
p                1657 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_priv *p = dev_get_drvdata(d);
p                1660 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	ipw_write_reg32(p, 0x301100, reg);
p                1837 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	const char *p = buf;
p                1840 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	while ((channel = simple_strtol(p, NULL, 0))) {
p                1851 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		p = strchr(p, ' ');
p                1852 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		if (!p)
p                1854 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		while (*p == ' ' || *p == '\t')
p                1855 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			p++;
p                2600 drivers/net/wireless/intel/ipw2x00/ipw2200.c static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
p                2602 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
p                2605 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	udelay(p->eeprom_delay);
p                2626 drivers/net/wireless/intel/ipw2x00/ipw2200.c static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
p                2629 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	eeprom_write_reg(p, EEPROM_BIT_CS | d);
p                2630 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
p                8137 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			struct list_head *p;
p                8142 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			list_for_each(p, &priv->ibss_mac_hash[index]) {
p                8144 drivers/net/wireless/intel/ipw2x00/ipw2200.c 				    list_entry(p, struct ipw_ibss_seq, list);
p                8148 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			if (p == &priv->ibss_mac_hash[index]) {
p                9612 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	char *p = extra;
p                9614 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
p                9618 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
p                9621 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
p                9624 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		p += snprintf(p, MAX_WX_STRING - (p - extra),
p                9631 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
p                9633 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	wrqu->data.length = p - extra + 1;
p                10412 drivers/net/wireless/intel/ipw2x00/ipw2200.c static int ipw_net_set_mac_address(struct net_device *dev, void *p)
p                10415 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct sockaddr *addr = p;
p                10432 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_priv *p = libipw_priv(dev);
p                10441 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
p                10443 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
p                10447 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	strlcpy(info->bus_info, pci_name(p->pci_dev),
p                10465 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_priv *p = libipw_priv(dev);
p                10469 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	mutex_lock(&p->mutex);
p                10470 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
p                10471 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	mutex_unlock(&p->mutex);
p                10478 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct ipw_priv *p = libipw_priv(dev);
p                10483 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	mutex_lock(&p->mutex);
p                10484 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
p                10486 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
p                10487 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	mutex_unlock(&p->mutex);
p                11778 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	struct list_head *p, *q;
p                11832 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
p                11833 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			list_del(p);
p                11834 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			kfree(list_entry(p, struct ipw_ibss_seq, list));
p                1120 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 	char *p;
p                1154 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 			p = rates_str;
p                1161 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 				p += snprintf(p, sizeof(rates_str) -
p                1162 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 					      (p - rates_str), "%02X ",
p                1181 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 			p = rates_str;
p                1188 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 				p += snprintf(p, sizeof(rates_str) -
p                1189 drivers/net/wireless/intel/ipw2x00/libipw_rx.c 					      (p - rates_str), "%02X ",
p                  51 drivers/net/wireless/intel/ipw2x00/libipw_wx.c 	char *p;
p                 188 drivers/net/wireless/intel/ipw2x00/libipw_wx.c 	p = custom;
p                 190 drivers/net/wireless/intel/ipw2x00/libipw_wx.c 	iwe.u.data.length = p - custom;
p                 215 drivers/net/wireless/intel/ipw2x00/libipw_wx.c 	p = custom;
p                 216 drivers/net/wireless/intel/ipw2x00/libipw_wx.c 	p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
p                 219 drivers/net/wireless/intel/ipw2x00/libipw_wx.c 	iwe.u.data.length = p - custom;
p                 225 drivers/net/wireless/intel/ipw2x00/libipw_wx.c 	p = custom;
p                 226 drivers/net/wireless/intel/ipw2x00/libipw_wx.c 	p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Channel flags: ");
p                 231 drivers/net/wireless/intel/ipw2x00/libipw_wx.c 		p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "INVALID ");
p                 237 drivers/net/wireless/intel/ipw2x00/libipw_wx.c 		p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "DFS ");
p                 241 drivers/net/wireless/intel/ipw2x00/libipw_wx.c 		iwe.u.data.length = p - custom;
p                  17 drivers/net/wireless/intel/iwlegacy/3945-debug.c 	int p = 0;
p                  19 drivers/net/wireless/intel/iwlegacy/3945-debug.c 	p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
p                  22 drivers/net/wireless/intel/iwlegacy/3945-debug.c 		p += scnprintf(buf + p, bufsz - p,
p                  24 drivers/net/wireless/intel/iwlegacy/3945-debug.c 	p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
p                  27 drivers/net/wireless/intel/iwlegacy/3945-debug.c 	p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
p                  30 drivers/net/wireless/intel/iwlegacy/3945-debug.c 	return p;
p                2568 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		int i, p = 0;
p                2574 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			scan->direct_scan[p].id = WLAN_EID_SSID;
p                2575 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			scan->direct_scan[p].len =
p                2577 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			memcpy(scan->direct_scan[p].ssid,
p                2581 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			p++;
p                3131 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	char *p = (char *)buf;
p                3134 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	val = simple_strtoul(p, &p, 10);
p                3135 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	if (p == buf)
p                3262 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		char *p = buffer;
p                3264 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		channel = simple_strtoul(p, NULL, 0);
p                3268 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		p = buffer;
p                3269 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		while (*p && *p != ' ')
p                3270 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			p++;
p                3271 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		if (*p)
p                3272 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			type = simple_strtoul(p + 1, NULL, 0);
p                3371 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	char *p = (char *)buf;
p                3373 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	if (p[0] == '1')
p                  21 drivers/net/wireless/intel/iwlegacy/4965-debug.c 	int p = 0;
p                  26 drivers/net/wireless/intel/iwlegacy/4965-debug.c 	p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
p                  28 drivers/net/wireless/intel/iwlegacy/4965-debug.c 		p += scnprintf(buf + p, bufsz - p,
p                  30 drivers/net/wireless/intel/iwlegacy/4965-debug.c 	p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
p                  33 drivers/net/wireless/intel/iwlegacy/4965-debug.c 	p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
p                  37 drivers/net/wireless/intel/iwlegacy/4965-debug.c 	return p;
p                 909 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		int i, p = 0;
p                 915 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			scan->direct_scan[p].id = WLAN_EID_SSID;
p                 916 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			scan->direct_scan[p].len =
p                 918 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			memcpy(scan->direct_scan[p].ssid,
p                 922 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			p++;
p                  44 drivers/net/wireless/intel/iwlegacy/common.c il_set_bit(struct il_priv *p, u32 r, u32 m)
p                  48 drivers/net/wireless/intel/iwlegacy/common.c 	spin_lock_irqsave(&p->reg_lock, reg_flags);
p                  49 drivers/net/wireless/intel/iwlegacy/common.c 	_il_set_bit(p, r, m);
p                  50 drivers/net/wireless/intel/iwlegacy/common.c 	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
p                  55 drivers/net/wireless/intel/iwlegacy/common.c il_clear_bit(struct il_priv *p, u32 r, u32 m)
p                  59 drivers/net/wireless/intel/iwlegacy/common.c 	spin_lock_irqsave(&p->reg_lock, reg_flags);
p                  60 drivers/net/wireless/intel/iwlegacy/common.c 	_il_clear_bit(p, r, m);
p                  61 drivers/net/wireless/intel/iwlegacy/common.c 	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
p                1959 drivers/net/wireless/intel/iwlegacy/common.h void il_set_bit(struct il_priv *p, u32 r, u32 m);
p                1960 drivers/net/wireless/intel/iwlegacy/common.h void il_clear_bit(struct il_priv *p, u32 r, u32 m);
p                2921 drivers/net/wireless/intel/iwlegacy/common.h #define il_print_hex_error(il, p, len)					\
p                2924 drivers/net/wireless/intel/iwlegacy/common.h 		       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);		\
p                2935 drivers/net/wireless/intel/iwlegacy/common.h #define il_print_hex_dump(il, level, p, len)				\
p                2939 drivers/net/wireless/intel/iwlegacy/common.h 			       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);	\
p                2945 drivers/net/wireless/intel/iwlegacy/common.h il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len)
p                 649 drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c 	int p = 0;
p                 656 drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c 	p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
p                 658 drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c 		p += scnprintf(buf + p, bufsz - p,
p                 660 drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c 	p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
p                 663 drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c 	p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
p                 667 drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c 	return p;
p                 706 drivers/net/wireless/intel/iwlwifi/dvm/scan.c 			int i, p = 0;
p                 720 drivers/net/wireless/intel/iwlwifi/dvm/scan.c 				scan->direct_scan[p].id = WLAN_EID_SSID;
p                 721 drivers/net/wireless/intel/iwlwifi/dvm/scan.c 				scan->direct_scan[p].len =
p                 723 drivers/net/wireless/intel/iwlwifi/dvm/scan.c 				memcpy(scan->direct_scan[p].ssid,
p                 727 drivers/net/wireless/intel/iwlwifi/dvm/scan.c 				p++;
p                  76 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define iwl_print_hex_error(m, p, len)					\
p                  79 drivers/net/wireless/intel/iwlwifi/iwl-debug.h 		       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);		\
p                  95 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define iwl_print_hex_dump(m, level, p, len)				\
p                  99 drivers/net/wireless/intel/iwlwifi/iwl-debug.h 			       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);	\
p                 102 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define iwl_print_hex_dump(m, level, p, len)
p                 169 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_INFO(p, f, a...)	IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
p                 170 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_TDLS(p, f, a...)	IWL_DEBUG(p, IWL_DL_TDLS, f, ## a)
p                 171 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_MAC80211(p, f, a...)	IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
p                 172 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_EXTERNAL(p, f, a...)	IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a)
p                 173 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_TEMP(p, f, a...)	IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
p                 174 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_SCAN(p, f, a...)	IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
p                 175 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_RX(p, f, a...)	IWL_DEBUG(p, IWL_DL_RX, f, ## a)
p                 176 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_TX(p, f, a...)	IWL_DEBUG(p, IWL_DL_TX, f, ## a)
p                 177 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_ISR(p, f, a...)	IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
p                 178 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_WEP(p, f, a...)	IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
p                 179 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_HC(p, f, a...)	IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
p                 180 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_QUOTA(p, f, a...)	IWL_DEBUG(p, IWL_DL_QUOTA, f, ## a)
p                 181 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_TE(p, f, a...)	IWL_DEBUG(p, IWL_DL_TE, f, ## a)
p                 183 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_CALIB(p, f, a...)	IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
p                 184 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_FW(p, f, a...)	IWL_DEBUG(p, IWL_DL_FW, f, ## a)
p                 185 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_RF_KILL(p, f, a...)	IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
p                 186 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_DROP(p, f, a...)	IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
p                 187 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_DROP_LIMIT(p, f, a...)	\
p                 188 drivers/net/wireless/intel/iwlwifi/iwl-debug.h 		IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
p                 189 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_COEX(p, f, a...)	IWL_DEBUG(p, IWL_DL_COEX, f, ## a)
p                 190 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_RATE(p, f, a...)	IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
p                 191 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_RATE_LIMIT(p, f, a...)	\
p                 192 drivers/net/wireless/intel/iwlwifi/iwl-debug.h 		IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
p                 193 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_ASSOC(p, f, a...)	\
p                 194 drivers/net/wireless/intel/iwlwifi/iwl-debug.h 		IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
p                 195 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_ASSOC_LIMIT(p, f, a...)	\
p                 196 drivers/net/wireless/intel/iwlwifi/iwl-debug.h 		IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
p                 197 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_HT(p, f, a...)	IWL_DEBUG(p, IWL_DL_HT, f, ## a)
p                 198 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_STATS(p, f, a...)	IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
p                 199 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_STATS_LIMIT(p, f, a...)	\
p                 200 drivers/net/wireless/intel/iwlwifi/iwl-debug.h 		IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
p                 201 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_TX_REPLY(p, f, a...)	IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
p                 202 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_TX_QUEUES(p, f, a...)	IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a)
p                 203 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_RADIO(p, f, a...)	IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
p                 204 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_DEV_RADIO(p, f, a...)	IWL_DEBUG_DEV(p, IWL_DL_RADIO, f, ## a)
p                 205 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_POWER(p, f, a...)	IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
p                 206 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_11H(p, f, a...)	IWL_DEBUG(p, IWL_DL_11H, f, ## a)
p                 207 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_TPT(p, f, a...)	IWL_DEBUG(p, IWL_DL_TPT, f, ## a)
p                 208 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_RPM(p, f, a...)	IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
p                 209 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_LAR(p, f, a...)	IWL_DEBUG(p, IWL_DL_LAR, f, ## a)
p                 210 drivers/net/wireless/intel/iwlwifi/iwl-debug.h #define IWL_DEBUG_FW_INFO(p, f, a...)		\
p                 211 drivers/net/wireless/intel/iwlwifi/iwl-debug.h 		IWL_DEBUG(p, IWL_DL_INFO | IWL_DL_FW, f, ## a)
p                2002 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		struct iwl_tso_hdr_page *p =
p                2005 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		if (p->page)
p                2006 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 			__free_page(p->page);
p                1838 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		struct page *p = rxb_steal_page(rxb);
p                1841 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		meta->source->_rx_page_addr = (unsigned long)page_address(p);
p                2060 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
p                2062 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	if (!p->page)
p                2066 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
p                2067 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 		return p;
p                2070 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	__free_page(p->page);
p                2073 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	p->page = alloc_page(GFP_ATOMIC);
p                2074 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	if (!p->page)
p                2076 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	p->pos = page_address(p->page);
p                2077 drivers/net/wireless/intel/iwlwifi/pcie/tx.c 	return p;
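The iwlwifi pcie/tx.c entries above index a per-CPU "TSO header page" helper: keep one page, hand out space from a moving position, and replace the page only when the remaining room cannot satisfy the request. Below is a minimal userspace sketch of that bump-allocation pattern; the names (hdr_page, hdr_page_get) and the use of malloc() in place of alloc_page()/__free_page() are stand-ins for illustration, not the driver's code.

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096

struct hdr_page {
	unsigned char *page;	/* backing buffer, PAGE_SZ bytes */
	unsigned char *pos;	/* next free byte within the buffer */
};

/* Return a pointer with at least len bytes of room, replacing the
 * backing buffer when the current one cannot satisfy the request. */
static unsigned char *hdr_page_get(struct hdr_page *hp, size_t len)
{
	if (hp->page && hp->pos + len <= hp->page + PAGE_SZ)
		return hp->pos;

	free(hp->page);
	hp->page = malloc(PAGE_SZ);
	if (!hp->page)
		return NULL;
	hp->pos = hp->page;
	return hp->pos;
}

int main(void)
{
	struct hdr_page hp = { 0 };
	unsigned char *h = hdr_page_get(&hp, 64);

	if (h) {
		memcpy(h, "header bytes", 12);
		hp.pos = h + 64;	/* caller advances the cursor */
		printf("used %td bytes\n", hp.pos - hp.page);
	}
	free(hp.page);
	return 0;
}

The point of the pattern is that consecutive small header allocations share one page and only the occasional refill pays the allocation cost.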
p                1501 drivers/net/wireless/intersil/hostap/hostap_ap.c 	char body[12], *p, *lpos;
p                1650 drivers/net/wireless/intersil/hostap/hostap_ap.c 		p = (char *) pos;
p                1651 drivers/net/wireless/intersil/hostap/hostap_ap.c 		*p++ = WLAN_EID_SUPP_RATES;
p                1652 drivers/net/wireless/intersil/hostap/hostap_ap.c 		lpos = p;
p                1653 drivers/net/wireless/intersil/hostap/hostap_ap.c 		*p++ = 0; /* len */
p                1655 drivers/net/wireless/intersil/hostap/hostap_ap.c 			*p++ = local->basic_rates & WLAN_RATE_1M ? 0x82 : 0x02;
p                1659 drivers/net/wireless/intersil/hostap/hostap_ap.c 			*p++ = local->basic_rates & WLAN_RATE_2M ? 0x84 : 0x04;
p                1663 drivers/net/wireless/intersil/hostap/hostap_ap.c 			*p++ = local->basic_rates & WLAN_RATE_5M5 ?
p                1668 drivers/net/wireless/intersil/hostap/hostap_ap.c 			*p++ = local->basic_rates & WLAN_RATE_11M ?
p                1672 drivers/net/wireless/intersil/hostap/hostap_ap.c 		pos = (__le16 *) p;
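The hostap_ap.c entries above build a Supported Rates information element: write the element ID, remember where the length byte goes, emit one byte per rate with bit 0x80 set for rates in the basic rate set, then backfill the length. A standalone sketch of the same element construction follows; the RATE_* bitmap constants are placeholders for this sketch, and unlike the driver it always emits all four 802.11b rates.

#include <stdio.h>
#include <stdint.h>

#define EID_SUPP_RATES 1
/* Hypothetical basic-rate bitmap bits for this sketch. */
#define RATE_1M  0x01
#define RATE_2M  0x02
#define RATE_5M5 0x04
#define RATE_11M 0x08

/* Append a Supported Rates element to buf; rate units are 500 kbit/s,
 * and bit 0x80 marks a rate that belongs to the basic rate set.
 * Returns the number of bytes written. */
static size_t build_supp_rates(uint8_t *buf, unsigned basic_rates)
{
	uint8_t *p = buf, *lpos;

	*p++ = EID_SUPP_RATES;
	lpos = p;
	*p++ = 0;					/* length, patched below */
	*p++ = (basic_rates & RATE_1M)  ? 0x82 : 0x02;	/*  1 Mb/s */
	*p++ = (basic_rates & RATE_2M)  ? 0x84 : 0x04;	/*  2 Mb/s */
	*p++ = (basic_rates & RATE_5M5) ? 0x8b : 0x0b;	/* 5.5 Mb/s */
	*p++ = (basic_rates & RATE_11M) ? 0x96 : 0x16;	/* 11 Mb/s */
	*lpos = (uint8_t)(p - lpos - 1);
	return (size_t)(p - buf);
}

int main(void)
{
	uint8_t ie[8];
	size_t n = build_supp_rates(ie, RATE_1M | RATE_2M);

	for (size_t i = 0; i < n; i++)
		printf("%02x ", ie[i]);
	printf("\n");
	return 0;
}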
p                3036 drivers/net/wireless/intersil/hostap/hostap_ioctl.c static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p)
p                3041 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 	if (p->length < sizeof(struct prism2_download_param) ||
p                3042 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 	    p->length > 1024 || !p->pointer)
p                3045 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 	param = memdup_user(p->pointer, p->length);
p                3050 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 	if (p->length < sizeof(struct prism2_download_param) +
p                3793 drivers/net/wireless/intersil/hostap/hostap_ioctl.c static int prism2_ioctl_priv_hostapd(local_info_t *local, struct iw_point *p)
p                3799 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 	if (p->length < sizeof(struct prism2_hostapd_param) ||
p                3800 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 	    p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
p                3803 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 	param = memdup_user(p->pointer, p->length);
p                3810 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 		ret = prism2_ioctl_set_encryption(local, param, p->length);
p                3813 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 		ret = prism2_ioctl_get_encryption(local, param, p->length);
p                3816 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 		ret = prism2_ioctl_get_rid(local, param, p->length);
p                3819 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 		ret = prism2_ioctl_set_rid(local, param, p->length);
p                3822 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 		ret = prism2_ioctl_set_assoc_ap_addr(local, param, p->length);
p                3826 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 						       p->length);
p                3841 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 		if (copy_to_user(p->pointer, param, p->length)) {
p                 699 drivers/net/wireless/intersil/hostap/hostap_main.c static int prism2_set_mac_address(struct net_device *dev, void *p)
p                 704 drivers/net/wireless/intersil/hostap/hostap_main.c 	struct sockaddr *addr = p;
p                 276 drivers/net/wireless/intersil/hostap/hostap_proc.c 	u8 *p;
p                 297 drivers/net/wireless/intersil/hostap/hostap_proc.c 	p = scanres->sup_rates;
p                 299 drivers/net/wireless/intersil/hostap/hostap_proc.c 		if (p[i] == 0)
p                 301 drivers/net/wireless/intersil/hostap/hostap_proc.c 		seq_printf(m, "<%02x>", p[i]);
p                 305 drivers/net/wireless/intersil/hostap/hostap_proc.c 	p = scanres->ssid;
p                 310 drivers/net/wireless/intersil/hostap/hostap_proc.c 		unsigned char c = p[i];
p                 309 drivers/net/wireless/intersil/orinoco/hermes.h 	struct prism2_scan_apinfo	p;
p                1119 drivers/net/wireless/intersil/orinoco/hw.c 	char *p = (char *)(&essidbuf.val);
p                1158 drivers/net/wireless/intersil/orinoco/hw.c 	memcpy(buf, p, len);
p                1211 drivers/net/wireless/intersil/orinoco/hw.c 	unsigned char *p = (unsigned char *)&list.val;
p                1232 drivers/net/wireless/intersil/orinoco/hw.c 		rates[i] = (p[i] & 0x7f) * 500000; /* convert to bps */
p                  40 drivers/net/wireless/intersil/orinoco/main.h 	u8 *p = data;
p                  41 drivers/net/wireless/intersil/orinoco/main.h 	while ((p + 2 + WPA_SELECTOR_LEN) < (data + len)) {
p                  42 drivers/net/wireless/intersil/orinoco/main.h 		if ((p[0] == WLAN_EID_VENDOR_SPECIFIC) &&
p                  43 drivers/net/wireless/intersil/orinoco/main.h 		    (memcmp(&p[2], WPA_OUI_TYPE, WPA_SELECTOR_LEN) == 0))
p                  44 drivers/net/wireless/intersil/orinoco/main.h 			return p;
p                  45 drivers/net/wireless/intersil/orinoco/main.h 		p += p[1] + 2;
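The orinoco main.h entries above scan an information-element buffer for a vendor-specific element (ID 221) whose payload begins with the WPA OUI+type selector, stepping by p[1] + 2 per element. A self-contained sketch of that walk, with an added length check on the payload that the indexed lines do not show:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EID_VENDOR_SPECIFIC 221
/* 00-50-F2:1 is the Microsoft OUI + WPA type carried by the WPA IE. */
static const uint8_t WPA_SELECTOR[4] = { 0x00, 0x50, 0xf2, 0x01 };

/* Return a pointer to the first WPA vendor IE in [data, data+len),
 * or NULL if none is found.  Each element is <ID><length><payload>. */
static const uint8_t *find_wpa_ie(const uint8_t *data, size_t len)
{
	const uint8_t *p = data, *end = data + len;

	while (p + 2 + sizeof(WPA_SELECTOR) <= end) {
		if (p[0] == EID_VENDOR_SPECIFIC &&
		    p[1] >= sizeof(WPA_SELECTOR) &&
		    memcmp(&p[2], WPA_SELECTOR, sizeof(WPA_SELECTOR)) == 0)
			return p;
		p += p[1] + 2;
	}
	return NULL;
}

int main(void)
{
	const uint8_t ies[] = { 0, 4, 't', 'e', 's', 't',	/* SSID */
				221, 6, 0x00, 0x50, 0xf2, 0x01, 0x01, 0x00 };
	const uint8_t *wpa = find_wpa_ie(ies, sizeof(ies));

	if (wpa)
		printf("WPA IE found at offset %td\n", wpa - ies);
	else
		printf("no WPA IE\n");
	return 0;
}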
p                 105 drivers/net/wireless/intersil/orinoco/scan.c 		ie_len += prism_build_supp_rates(ie, bss->p.rates);
p                 554 drivers/net/wireless/intersil/p54/lmac.h struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *p, const u16 freq);
p                 157 drivers/net/wireless/intersil/prism54/islpci_mgt.c 	void *p;
p                 177 drivers/net/wireless/intersil/prism54/islpci_mgt.c 	p = buf.mem = kmalloc(frag_len, GFP_KERNEL);
p                 184 drivers/net/wireless/intersil/prism54/islpci_mgt.c 	pimfor_encode_header(operation, oid, length, (pimfor_header_t *) p);
p                 185 drivers/net/wireless/intersil/prism54/islpci_mgt.c 	p += PIMFOR_HEADER_SIZE;
p                 188 drivers/net/wireless/intersil/prism54/islpci_mgt.c 		memcpy(p, data, length);
p                 190 drivers/net/wireless/intersil/prism54/islpci_mgt.c 		memset(p, 0, length);
p                 201 drivers/net/wireless/intersil/prism54/islpci_mgt.c 		display_buffer(p, length);
p                 812 drivers/net/wireless/marvell/libertas/debugfs.c 	char *p;
p                 820 drivers/net/wireless/marvell/libertas/debugfs.c 	p = buf;
p                 834 drivers/net/wireless/marvell/libertas/debugfs.c 		pos += sprintf(p + pos, "%s=%d\n", d[i].name, val);
p                 837 drivers/net/wireless/marvell/libertas/debugfs.c 	res = simple_read_from_buffer(userbuf, count, ppos, p, pos);
p                 858 drivers/net/wireless/marvell/libertas/debugfs.c 	char *p;
p                 874 drivers/net/wireless/marvell/libertas/debugfs.c 			p = strstr(p0, d[i].name);
p                 875 drivers/net/wireless/marvell/libertas/debugfs.c 			if (p == NULL)
p                 877 drivers/net/wireless/marvell/libertas/debugfs.c 			p1 = strchr(p, '\n');
p                 881 drivers/net/wireless/marvell/libertas/debugfs.c 			p2 = strchr(p, '=');
p                  83 drivers/net/wireless/marvell/mwifiex/debugfs.c 	char *p = (char *) page, fmt[64];
p                  88 drivers/net/wireless/marvell/mwifiex/debugfs.c 	if (!p)
p                 100 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p, "driver_name = " "\"mwifiex\"\n");
p                 101 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p, "driver_version = %s", fmt);
p                 102 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p, "\nverext = %s", priv->version_str);
p                 103 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name);
p                 106 drivers/net/wireless/marvell/mwifiex/debugfs.c 		p += sprintf(p, "bss_mode=\"%d\"\n", info.bss_mode);
p                 108 drivers/net/wireless/marvell/mwifiex/debugfs.c 		p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]);
p                 110 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p, "media_state=\"%s\"\n",
p                 112 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p, "mac_address=\"%pM\"\n", netdev->dev_addr);
p                 115 drivers/net/wireless/marvell/mwifiex/debugfs.c 		p += sprintf(p, "multicast_count=\"%d\"\n",
p                 117 drivers/net/wireless/marvell/mwifiex/debugfs.c 		p += sprintf(p, "essid=\"%.*s\"\n", info.ssid.ssid_len,
p                 119 drivers/net/wireless/marvell/mwifiex/debugfs.c 		p += sprintf(p, "bssid=\"%pM\"\n", info.bssid);
p                 120 drivers/net/wireless/marvell/mwifiex/debugfs.c 		p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan);
p                 121 drivers/net/wireless/marvell/mwifiex/debugfs.c 		p += sprintf(p, "country_code = \"%s\"\n", info.country_code);
p                 122 drivers/net/wireless/marvell/mwifiex/debugfs.c 		p += sprintf(p, "region_code=\"0x%x\"\n",
p                 126 drivers/net/wireless/marvell/mwifiex/debugfs.c 			p += sprintf(p, "multicast_address[%d]=\"%pM\"\n",
p                 130 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p, "num_tx_bytes = %lu\n", priv->stats.tx_bytes);
p                 131 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p, "num_rx_bytes = %lu\n", priv->stats.rx_bytes);
p                 132 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p, "num_tx_pkts = %lu\n", priv->stats.tx_packets);
p                 133 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p, "num_rx_pkts = %lu\n", priv->stats.rx_packets);
p                 134 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p, "num_tx_pkts_dropped = %lu\n", priv->stats.tx_dropped);
p                 135 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p, "num_rx_pkts_dropped = %lu\n", priv->stats.rx_dropped);
p                 136 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p, "num_tx_pkts_err = %lu\n", priv->stats.tx_errors);
p                 137 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p, "num_rx_pkts_err = %lu\n", priv->stats.rx_errors);
p                 138 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p, "carrier %s\n", ((netif_carrier_ok(priv->netdev))
p                 140 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p, "tx queue");
p                 143 drivers/net/wireless/marvell/mwifiex/debugfs.c 		p += sprintf(p, " %d:%s", i, netif_tx_queue_stopped(txq) ?
p                 146 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p, "\n");
p                 149 drivers/net/wireless/marvell/mwifiex/debugfs.c 				      (unsigned long) p - page);
p                 184 drivers/net/wireless/marvell/mwifiex/debugfs.c 	char *p = (char *) page;
p                 188 drivers/net/wireless/marvell/mwifiex/debugfs.c 	if (!p)
p                 196 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p, "\n"
p                 236 drivers/net/wireless/marvell/mwifiex/debugfs.c 				      (unsigned long) p - page);
p                 263 drivers/net/wireless/marvell/mwifiex/debugfs.c 	char *p = (char *)page;
p                 265 drivers/net/wireless/marvell/mwifiex/debugfs.c 	if (!p)
p                 272 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p, "\n"
p                 276 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += sprintf(p,
p                 281 drivers/net/wireless/marvell/mwifiex/debugfs.c 		p += sprintf(p,
p                 284 drivers/net/wireless/marvell/mwifiex/debugfs.c 		p += sprintf(p, "\n");
p                 290 drivers/net/wireless/marvell/mwifiex/debugfs.c 			p += sprintf(p, "rx_rate[%02d] = %d\n", i, value);
p                 298 drivers/net/wireless/marvell/mwifiex/debugfs.c 				p += sprintf(p, "rx_rate[%02d] = %d\n",
p                 306 drivers/net/wireless/marvell/mwifiex/debugfs.c 			p += sprintf(p, "snr[%02ddB] = %d\n", i, value);
p                 311 drivers/net/wireless/marvell/mwifiex/debugfs.c 			p += sprintf(p, "noise_flr[%02ddBm] = %d\n",
p                 317 drivers/net/wireless/marvell/mwifiex/debugfs.c 			p += sprintf(p, "sig_strength[-%02ddBm] = %d\n",
p                 322 drivers/net/wireless/marvell/mwifiex/debugfs.c 				      (unsigned long)p - page);
p                 394 drivers/net/wireless/marvell/mwifiex/debugfs.c 	char *p = (char *) page;
p                 397 drivers/net/wireless/marvell/mwifiex/debugfs.c 	if (!p)
p                 404 drivers/net/wireless/marvell/mwifiex/debugfs.c 	p += mwifiex_debug_info_to_buffer(priv, p, &info);
p                 407 drivers/net/wireless/marvell/mwifiex/debugfs.c 				      (unsigned long) p - page);
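The mwifiex debugfs.c entries above all use one formatting idiom: a cursor p starts at a page-sized buffer, every field is appended with p += sprintf(p, ...), and the filled length is recovered as p - page before being handed to simple_read_from_buffer(). That last helper is kernel-only, so the sketch below keeps the cursor pattern but simply writes the result to stdout; the stats structure and field names are invented for the example.

#include <stdio.h>
#include <string.h>

#define BUF_SZ 4096

/* Hypothetical stats for the sketch. */
struct stats {
	unsigned long tx_bytes, rx_bytes, tx_packets, rx_packets;
};

/* Format a report into buf and return the number of bytes used,
 * relying on sprintf() returning the length it wrote. */
static size_t format_info(char *buf, const struct stats *s, const char *ifname)
{
	char *p = buf;

	p += sprintf(p, "driver_name = \"mwifiex\"\n");
	p += sprintf(p, "interface_name=\"%s\"\n", ifname);
	p += sprintf(p, "num_tx_bytes = %lu\n", s->tx_bytes);
	p += sprintf(p, "num_rx_bytes = %lu\n", s->rx_bytes);
	p += sprintf(p, "num_tx_pkts = %lu\n", s->tx_packets);
	p += sprintf(p, "num_rx_pkts = %lu\n", s->rx_packets);

	return (size_t)(p - buf);	/* bytes used, as in p - page */
}

int main(void)
{
	char buf[BUF_SZ];
	struct stats s = { 1000, 2000, 10, 20 };
	size_t len = format_info(buf, &s, "mlan0");

	fwrite(buf, 1, len, stdout);
	printf("formatted %zu bytes\n", len);
	return 0;
}

As in the driver, there is no per-call bounds check; the pattern relies on the caller sizing the buffer for the worst-case report.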
p                1093 drivers/net/wireless/marvell/mwifiex/main.c 	char *p;
p                1104 drivers/net/wireless/marvell/mwifiex/main.c 	p = adapter->devdump_data;
p                1105 drivers/net/wireless/marvell/mwifiex/main.c 	strcpy(p, "========Start dump driverinfo========\n");
p                1106 drivers/net/wireless/marvell/mwifiex/main.c 	p += strlen("========Start dump driverinfo========\n");
p                1107 drivers/net/wireless/marvell/mwifiex/main.c 	p += sprintf(p, "driver_name = " "\"mwifiex\"\n");
p                1111 drivers/net/wireless/marvell/mwifiex/main.c 	p += sprintf(p, "driver_version = %s\n", drv_version);
p                1115 drivers/net/wireless/marvell/mwifiex/main.c 		p += sprintf(p, "tx_cmd_urb_pending = %d\n",
p                1117 drivers/net/wireless/marvell/mwifiex/main.c 		p += sprintf(p, "tx_data_urb_pending_port_0 = %d\n",
p                1119 drivers/net/wireless/marvell/mwifiex/main.c 		p += sprintf(p, "tx_data_urb_pending_port_1 = %d\n",
p                1121 drivers/net/wireless/marvell/mwifiex/main.c 		p += sprintf(p, "rx_cmd_urb_pending = %d\n",
p                1123 drivers/net/wireless/marvell/mwifiex/main.c 		p += sprintf(p, "rx_data_urb_pending = %d\n",
p                1127 drivers/net/wireless/marvell/mwifiex/main.c 	p += sprintf(p, "tx_pending = %d\n",
p                1129 drivers/net/wireless/marvell/mwifiex/main.c 	p += sprintf(p, "rx_pending = %d\n",
p                1134 drivers/net/wireless/marvell/mwifiex/main.c 		p += sprintf(p, "\nmp_rd_bitmap=0x%x curr_rd_port=0x%x\n",
p                1136 drivers/net/wireless/marvell/mwifiex/main.c 		p += sprintf(p, "mp_wr_bitmap=0x%x curr_wr_port=0x%x\n",
p                1144 drivers/net/wireless/marvell/mwifiex/main.c 		p += sprintf(p, "\n[interface  : \"%s\"]\n",
p                1146 drivers/net/wireless/marvell/mwifiex/main.c 		p += sprintf(p, "wmm_tx_pending[0] = %d\n",
p                1148 drivers/net/wireless/marvell/mwifiex/main.c 		p += sprintf(p, "wmm_tx_pending[1] = %d\n",
p                1150 drivers/net/wireless/marvell/mwifiex/main.c 		p += sprintf(p, "wmm_tx_pending[2] = %d\n",
p                1152 drivers/net/wireless/marvell/mwifiex/main.c 		p += sprintf(p, "wmm_tx_pending[3] = %d\n",
p                1154 drivers/net/wireless/marvell/mwifiex/main.c 		p += sprintf(p, "media_state=\"%s\"\n", !priv->media_connected ?
p                1156 drivers/net/wireless/marvell/mwifiex/main.c 		p += sprintf(p, "carrier %s\n", (netif_carrier_ok(priv->netdev)
p                1160 drivers/net/wireless/marvell/mwifiex/main.c 			p += sprintf(p, "tx queue %d:%s  ", idx,
p                1164 drivers/net/wireless/marvell/mwifiex/main.c 		p += sprintf(p, "\n%s: num_tx_timeout = %d\n",
p                1170 drivers/net/wireless/marvell/mwifiex/main.c 		p += sprintf(p, "\n=== %s register dump===\n",
p                1174 drivers/net/wireless/marvell/mwifiex/main.c 			p += adapter->if_ops.reg_dump(adapter, p);
p                1176 drivers/net/wireless/marvell/mwifiex/main.c 	p += sprintf(p, "\n=== more debug information\n");
p                1184 drivers/net/wireless/marvell/mwifiex/main.c 			p += mwifiex_debug_info_to_buffer(priv, p, debug_info);
p                1190 drivers/net/wireless/marvell/mwifiex/main.c 	strcpy(p, "\n========End dump========\n");
p                1191 drivers/net/wireless/marvell/mwifiex/main.c 	p += strlen("\n========End dump========\n");
p                1193 drivers/net/wireless/marvell/mwifiex/main.c 	adapter->devdump_len = p - (char *)adapter->devdump_data;
p                 169 drivers/net/wireless/marvell/mwifiex/main.h #define MWIFIEX_ALIGN_ADDR(p, a) (((long)(p) + (a) - 1) & ~((a) - 1))
p                2576 drivers/net/wireless/marvell/mwifiex/pcie.c 	char *p = drv_buf;
p                2586 drivers/net/wireless/marvell/mwifiex/pcie.c 	if (!p)
p                2605 drivers/net/wireless/marvell/mwifiex/pcie.c 	p += sprintf(p, "%s\n", buf);
p                2609 drivers/net/wireless/marvell/mwifiex/pcie.c 	return p - drv_buf;
p                2595 drivers/net/wireless/marvell/mwifiex/sdio.c 	char *p = drv_buf;
p                2602 drivers/net/wireless/marvell/mwifiex/sdio.c 	if (!p)
p                2672 drivers/net/wireless/marvell/mwifiex/sdio.c 		p += sprintf(p, "%s\n", buf);
p                2679 drivers/net/wireless/marvell/mwifiex/sdio.c 	return p - drv_buf;
p                 254 drivers/net/wireless/marvell/mwifiex/util.c 	char *p = buf;
p                 264 drivers/net/wireless/marvell/mwifiex/util.c 		p += sprintf(p, "%s=", d[i].name);
p                 292 drivers/net/wireless/marvell/mwifiex/util.c 			p += sprintf(p, "%#lx ", val);
p                 296 drivers/net/wireless/marvell/mwifiex/util.c 		p += sprintf(p, "\n");
p                 300 drivers/net/wireless/marvell/mwifiex/util.c 		p += sprintf(p, "Tx BA stream table:\n");
p                 302 drivers/net/wireless/marvell/mwifiex/util.c 			p += sprintf(p, "tid = %d, ra = %pM\n",
p                 307 drivers/net/wireless/marvell/mwifiex/util.c 		p += sprintf(p, "Rx reorder table:\n");
p                 309 drivers/net/wireless/marvell/mwifiex/util.c 			p += sprintf(p, "tid = %d, ta = %pM, ",
p                 312 drivers/net/wireless/marvell/mwifiex/util.c 			p += sprintf(p, "start_win = %d, ",
p                 314 drivers/net/wireless/marvell/mwifiex/util.c 			p += sprintf(p, "win_size = %d, buffer: ",
p                 318 drivers/net/wireless/marvell/mwifiex/util.c 				p += sprintf(p, "%c ",
p                 322 drivers/net/wireless/marvell/mwifiex/util.c 			p += sprintf(p, "\n");
p                 327 drivers/net/wireless/marvell/mwifiex/util.c 		p += sprintf(p, "TDLS peer table:\n");
p                 329 drivers/net/wireless/marvell/mwifiex/util.c 			p += sprintf(p, "peer = %pM",
p                 331 drivers/net/wireless/marvell/mwifiex/util.c 			p += sprintf(p, "\n");
p                 335 drivers/net/wireless/marvell/mwifiex/util.c 	return p - buf;
p                 565 drivers/net/wireless/marvell/mwifiex/wmm.c static int mwifiex_free_ack_frame(int id, void *p, void *data)
p                 568 drivers/net/wireless/marvell/mwifiex/wmm.c 	kfree_skb(p);
p                4525 drivers/net/wireless/marvell/mwl8k.c 	struct peer_capability_info *p;
p                4538 drivers/net/wireless/marvell/mwl8k.c 	p = &cmd->peer_info;
p                4539 drivers/net/wireless/marvell/mwl8k.c 	p->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
p                4540 drivers/net/wireless/marvell/mwl8k.c 	p->basic_caps = cpu_to_le16(vif->bss_conf.assoc_capability);
p                4541 drivers/net/wireless/marvell/mwl8k.c 	p->ht_support = sta->ht_cap.ht_supported;
p                4542 drivers/net/wireless/marvell/mwl8k.c 	p->ht_caps = cpu_to_le16(sta->ht_cap.cap);
p                4543 drivers/net/wireless/marvell/mwl8k.c 	p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
p                4549 drivers/net/wireless/marvell/mwl8k.c 	legacy_rate_mask_to_array(p->legacy_rates, rates);
p                4550 drivers/net/wireless/marvell/mwl8k.c 	memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
p                4551 drivers/net/wireless/marvell/mwl8k.c 	p->interop = 1;
p                4552 drivers/net/wireless/marvell/mwl8k.c 	p->amsdu_enabled = 0;
p                4556 drivers/net/wireless/marvell/mwl8k.c 		rc = p->station_id;
p                  63 drivers/net/wireless/mediatek/mt76/mt7615/mcu.h #define MCU_PQ_ID(p, q)		(((p) << 15) | ((q) << 10))
p                  29 drivers/net/wireless/mediatek/mt7601u/dma.c 			void *data, u32 seg_len, u32 truesize, struct page *p)
p                  34 drivers/net/wireless/mediatek/mt7601u/dma.c 	skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
p                  62 drivers/net/wireless/mediatek/mt7601u/dma.c 		skb_add_rx_frag(skb, 0, p, data - page_address(p),
p                  64 drivers/net/wireless/mediatek/mt7601u/dma.c 		get_page(p);
p                  77 drivers/net/wireless/mediatek/mt7601u/dma.c 				   u32 seg_len, struct page *p)
p                 103 drivers/net/wireless/mediatek/mt7601u/dma.c 	skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
p                 131 drivers/net/wireless/mediatek/mt7601u/dma.c 	u8 *data = page_address(e->p);
p                 143 drivers/net/wireless/mediatek/mt7601u/dma.c 		mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);
p                 155 drivers/net/wireless/mediatek/mt7601u/dma.c 		__free_pages(e->p, MT_RX_ORDER);
p                 157 drivers/net/wireless/mediatek/mt7601u/dma.c 		e->p = new_p;
p                 391 drivers/net/wireless/mediatek/mt7601u/dma.c 	u8 *buf = page_address(e->p);
p                 426 drivers/net/wireless/mediatek/mt7601u/dma.c 		__free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
p                 441 drivers/net/wireless/mediatek/mt7601u/dma.c 		dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);
p                 443 drivers/net/wireless/mediatek/mt7601u/dma.c 		if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
p                  72 drivers/net/wireless/mediatek/mt7601u/mt7601u.h 		struct page *p;
p                 807 drivers/net/wireless/mediatek/mt7601u/phy.c 	struct mt7601u_tssi_params p;
p                 813 drivers/net/wireless/mediatek/mt7601u/phy.c 	p.tssi0 = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TSSI);
p                 817 drivers/net/wireless/mediatek/mt7601u/phy.c 	p.trgt_power = mt7601u_current_tx_power(dev);
p                 838 drivers/net/wireless/mediatek/mt7601u/phy.c 		p.trgt_power += rate_table[tx_rate / 2].bw20;
p                 840 drivers/net/wireless/mediatek/mt7601u/phy.c 		p.trgt_power += rate_table[tx_rate / 2].bw40;
p                 842 drivers/net/wireless/mediatek/mt7601u/phy.c 	p.trgt_power <<= 12;
p                 844 drivers/net/wireless/mediatek/mt7601u/phy.c 	dev_dbg(dev->dev, "tx_rate:%02hhx pwr:%08x\n", tx_rate, p.trgt_power);
p                 846 drivers/net/wireless/mediatek/mt7601u/phy.c 	p.trgt_power += mt7601u_phy_rf_pa_mode_val(dev, pkt_type & 0x03,
p                 852 drivers/net/wireless/mediatek/mt7601u/phy.c 			p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 18022 : 9830;
p                 854 drivers/net/wireless/mediatek/mt7601u/phy.c 			p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 819 : 24576;
p                 857 drivers/net/wireless/mediatek/mt7601u/phy.c 	p.trgt_power += static_power[mt7601u_bbp_rr(dev, 1) & 0x03];
p                 859 drivers/net/wireless/mediatek/mt7601u/phy.c 	p.trgt_power += dev->ee->tssi_data.tx0_delta_offset;
p                 863 drivers/net/wireless/mediatek/mt7601u/phy.c 		p.tssi0, p.trgt_power, dev->raw_temp, pkt_type);
p                 865 drivers/net/wireless/mediatek/mt7601u/phy.c 	return p;
p                1388 drivers/net/wireless/ray_cs.c 	struct status __iomem *p = local->sram + STATUS_BASE;
p                1404 drivers/net/wireless/ray_cs.c 		local->wstats.qual.noise = readb(&p->rxnoise);
p                1677 drivers/net/wireless/ray_cs.c 	struct status __iomem *p = local->sram + STATUS_BASE;
p                1682 drivers/net/wireless/ray_cs.c 	if (readb(&p->mrx_overflow_for_host)) {
p                1683 drivers/net/wireless/ray_cs.c 		local->stats.rx_over_errors += swab16(readw(&p->mrx_overflow));
p                1684 drivers/net/wireless/ray_cs.c 		writeb(0, &p->mrx_overflow);
p                1685 drivers/net/wireless/ray_cs.c 		writeb(0, &p->mrx_overflow_for_host);
p                1687 drivers/net/wireless/ray_cs.c 	if (readb(&p->mrx_checksum_error_for_host)) {
p                1689 drivers/net/wireless/ray_cs.c 		    swab16(readw(&p->mrx_checksum_error));
p                1690 drivers/net/wireless/ray_cs.c 		writeb(0, &p->mrx_checksum_error);
p                1691 drivers/net/wireless/ray_cs.c 		writeb(0, &p->mrx_checksum_error_for_host);
p                1693 drivers/net/wireless/ray_cs.c 	if (readb(&p->rx_hec_error_for_host)) {
p                1694 drivers/net/wireless/ray_cs.c 		local->stats.rx_frame_errors += swab16(readw(&p->rx_hec_error));
p                1695 drivers/net/wireless/ray_cs.c 		writeb(0, &p->rx_hec_error);
p                1696 drivers/net/wireless/ray_cs.c 		writeb(0, &p->rx_hec_error_for_host);
p                1742 drivers/net/wireless/ray_cs.c 	void __iomem *p = local->sram + HOST_TO_ECF_BASE;
p                1765 drivers/net/wireless/ray_cs.c 			memcpy_toio(p, ha->addr, ETH_ALEN);
p                1768 drivers/net/wireless/ray_cs.c 			p += ETH_ALEN;
p                2556 drivers/net/wireless/ray_cs.c 	UCHAR *p;
p                2586 drivers/net/wireless/ray_cs.c 	p = local->bss_id;
p                2587 drivers/net/wireless/ray_cs.c 	seq_printf(m, "BSSID                = %pM\n", p);
p                2609 drivers/net/wireless/ray_cs.c 		p = local->last_bcn.elements;
p                2610 drivers/net/wireless/ray_cs.c 		if (p[0] == C_ESSID_ELEMENT_ID)
p                2611 drivers/net/wireless/ray_cs.c 			p += p[1] + 2;
p                2615 drivers/net/wireless/ray_cs.c 				   p[0]);
p                2619 drivers/net/wireless/ray_cs.c 		if (p[0] == C_SUPPORTED_RATES_ELEMENT_ID) {
p                2621 drivers/net/wireless/ray_cs.c 			for (i = 2; i < p[1] + 2; i++)
p                2622 drivers/net/wireless/ray_cs.c 				seq_printf(m, "0x%02x ", p[i]);
p                2624 drivers/net/wireless/ray_cs.c 			p += p[1] + 2;
p                2630 drivers/net/wireless/ray_cs.c 		if (p[0] == C_FH_PARAM_SET_ELEMENT_ID) {
p                2631 drivers/net/wireless/ray_cs.c 			pfh = (struct freq_hop_element *)p;
p                2641 drivers/net/wireless/ray_cs.c 			p += p[1] + 2;
p                2730 drivers/net/wireless/ray_cs.c 	char *p;
p                2740 drivers/net/wireless/ray_cs.c 	p = proc_number;
p                2744 drivers/net/wireless/ray_cs.c 		unsigned int c = *p - '0';
p                2748 drivers/net/wireless/ray_cs.c 		p++;
p                 424 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 	u8 p = 0;
p                 430 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 	for (p = RF90_PATH_A; p <= RF90_PATH_B; ++p) {
p                 431 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		rtldm->swing_idx_ofdm_base[p] = rtldm->default_ofdm_index;
p                 432 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		rtldm->swing_idx_ofdm[p] = rtldm->default_ofdm_index;
p                 433 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		rtldm->ofdm_index[p] = rtldm->default_ofdm_index;
p                 435 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		rtldm->power_index_offset[p] = 0;
p                 436 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		rtldm->delta_power_index[p] = 0;
p                 437 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		rtldm->delta_power_index_last[p] = 0;
p                 439 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		rtldm->absolute_ofdm_swing_idx[p] = 0;
p                 440 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		rtldm->remnant_ofdm_swing_idx[p] = 0;
p                 475 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 	u8 p = 0;
p                 491 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 	for (p = RF90_PATH_A; p < MAX_RF_PATH; ++p) {
p                 492 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		rtldm->swing_idx_ofdm_base[p] =
p                 494 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		rtldm->ofdm_index[p] = rtldm->default_ofdm_index;
p                 495 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		rtldm->delta_power_index[p] = 0;
p                 496 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		rtldm->power_index_offset[p] = 0;
p                 497 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		rtldm->delta_power_index_last[p] = 0;
p                1132 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 	u8 p = 0;
p                1142 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
p                1143 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			rtl8812ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, p, 0);
p                1603 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 	u8 thermal_value = 0, delta, delta_lck, delta_iqk, p = 0, i = 0;
p                1783 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) {
p                1786 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				 (p == RF90_PATH_A ? 'A' : 'B'));
p                1788 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			if (rtldm->delta_power_index[p] ==
p                1789 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				rtldm->delta_power_index_last[p])
p                1792 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				rtldm->power_index_offset[p] = 0;
p                1794 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				rtldm->power_index_offset[p] =
p                1795 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 					rtldm->delta_power_index[p] -
p                1796 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 					rtldm->delta_power_index_last[p];
p                1802 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				 (p == RF90_PATH_A ? 'A' : 'B'),
p                1803 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				 rtldm->power_index_offset[p],
p                1804 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				 rtldm->delta_power_index[p] ,
p                1805 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				 rtldm->delta_power_index_last[p]);
p                1807 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			rtldm->ofdm_index[p] =
p                1808 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 					rtldm->swing_idx_ofdm_base[p] +
p                1809 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 					rtldm->power_index_offset[p];
p                1812 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 					rtldm->power_index_offset[p];
p                1815 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			rtldm->swing_idx_ofdm[p] = rtldm->ofdm_index[p];
p                1823 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				rtldm->power_index_offset[p]);
p                1826 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				 rtldm->swing_idx_ofdm[p],
p                1827 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				 (p == RF90_PATH_A ? 'A' : 'B'),
p                1828 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				 rtldm->swing_idx_ofdm_base[p],
p                1829 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				 rtldm->power_index_offset[p]);
p                1833 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			if (rtldm->ofdm_index[p] > TXSCALE_TABLE_SIZE - 1)
p                1834 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				rtldm->ofdm_index[p] = TXSCALE_TABLE_SIZE - 1;
p                1835 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			else if (rtldm->ofdm_index[p] < ofdm_min_index)
p                1836 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				rtldm->ofdm_index[p] = ofdm_min_index;
p                1851 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
p                1852 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			rtldm->power_index_offset[p] = 0;
p                1858 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 	for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) {
p                1861 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			 rtldm->ofdm_index[p],
p                1862 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			 (p == RF90_PATH_A ? 'A' : 'B'),
p                1863 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			 rtldm->swing_idx_ofdm_base[p]);
p                1916 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
p                1918 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 								 p, 0);
p                1926 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
p                1928 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 								 p, index_for_channel);
p                1932 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
p                1933 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				rtldm->swing_idx_ofdm_base[p] =
p                1934 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 					rtldm->swing_idx_ofdm[p];
p                2185 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 	u8 thermal_value = 0, delta, delta_lck, delta_iqk, p = 0, i = 0;
p                2329 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) {
p                2332 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				 (p == RF90_PATH_A ? 'A' : 'B'));
p                2336 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			if (rtldm->delta_power_index[p] ==
p                2337 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				rtldm->delta_power_index_last[p])
p                2339 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				rtldm->power_index_offset[p] = 0;
p                2341 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				rtldm->power_index_offset[p] =
p                2342 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 					rtldm->delta_power_index[p] -
p                2343 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 					rtldm->delta_power_index_last[p];
p                2348 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				 (p == RF90_PATH_A ? 'A' : 'B'),
p                2349 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				rtldm->power_index_offset[p],
p                2350 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				rtldm->delta_power_index[p] ,
p                2351 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				rtldm->delta_power_index_last[p]);
p                2353 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			rtldm->ofdm_index[p] =
p                2354 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 					rtldm->swing_idx_ofdm_base[p] +
p                2355 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 					rtldm->power_index_offset[p];
p                2358 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 					rtldm->power_index_offset[p];
p                2361 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			rtldm->swing_idx_ofdm[p] = rtldm->ofdm_index[p];
p                2369 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				 rtldm->power_index_offset[p]);
p                2372 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				 rtldm->swing_idx_ofdm[p],
p                2373 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				 (p == RF90_PATH_A ? 'A' : 'B'),
p                2374 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				 rtldm->swing_idx_ofdm_base[p],
p                2375 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				 rtldm->power_index_offset[p]);
p                2379 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			if (rtldm->ofdm_index[p] > TXSCALE_TABLE_SIZE - 1)
p                2380 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				rtldm->ofdm_index[p] = TXSCALE_TABLE_SIZE - 1;
p                2381 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			else if (rtldm->ofdm_index[p] < ofdm_min_index)
p                2382 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 				rtldm->ofdm_index[p] = ofdm_min_index;
p                2397 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
p                2398 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			rtldm->power_index_offset[p] = 0;
p                2404 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 	for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) {
p                2407 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			 rtldm->ofdm_index[p],
p                2408 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			 (p == RF90_PATH_A ? 'A' : 'B'),
p                2409 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			 rtldm->swing_idx_ofdm_base[p]);
p                2448 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
p                2450 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 						MIX_MODE, p, index_for_channel);
p                2458 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
p                2460 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 					MIX_MODE, p, index_for_channel);
p                2464 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 		for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
p                2465 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c 			rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
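The rtl8821ae dm.c entries above implement per-RF-path thermal tracking: the change in power index since the previous run is computed, added to the per-path OFDM swing base, and the result is clamped to the valid range of the TX scale table. The sketch below reproduces that per-path update in isolation; the path count, table size, and minimum index are stand-in values, and the surrounding thermal measurement and register programming are omitted.

#include <stdio.h>

#define NUM_PATHS          2	/* path A and B */
#define TXSCALE_TABLE_SIZE 37	/* stand-in table size for this sketch */
#define OFDM_MIN_INDEX     6	/* stand-in lower clamp */

struct track_state {
	int swing_idx_ofdm_base[NUM_PATHS];
	int delta_power_index[NUM_PATHS];
	int delta_power_index_last[NUM_PATHS];
	int power_index_offset[NUM_PATHS];
	int ofdm_index[NUM_PATHS];
};

static void update_power_index(struct track_state *st)
{
	for (int p = 0; p < NUM_PATHS; p++) {
		/* Only the change since the previous run is applied. */
		st->power_index_offset[p] =
			st->delta_power_index[p] - st->delta_power_index_last[p];

		st->ofdm_index[p] =
			st->swing_idx_ofdm_base[p] + st->power_index_offset[p];

		/* Clamp to the valid range of the TX scale table. */
		if (st->ofdm_index[p] > TXSCALE_TABLE_SIZE - 1)
			st->ofdm_index[p] = TXSCALE_TABLE_SIZE - 1;
		else if (st->ofdm_index[p] < OFDM_MIN_INDEX)
			st->ofdm_index[p] = OFDM_MIN_INDEX;

		st->delta_power_index_last[p] = st->delta_power_index[p];
	}
}

int main(void)
{
	struct track_state st = {
		.swing_idx_ofdm_base = { 20, 20 },
		.delta_power_index   = { 3, -30 },
	};

	update_power_index(&st);
	for (int p = 0; p < NUM_PATHS; p++)
		printf("path %c: ofdm_index=%d\n", 'A' + p, st.ofdm_index[p]);
	return 0;
}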
p                 823 drivers/net/wireless/realtek/rtw88/phy.c 	const union phy_table_tile *p = tbl->data;
p                 824 drivers/net/wireless/realtek/rtw88/phy.c 	const union phy_table_tile *end = p + tbl->size / 2;
p                 830 drivers/net/wireless/realtek/rtw88/phy.c 	for (; p < end; p++) {
p                 831 drivers/net/wireless/realtek/rtw88/phy.c 		if (p->cond.pos) {
p                 832 drivers/net/wireless/realtek/rtw88/phy.c 			switch (p->cond.branch) {
p                 843 drivers/net/wireless/realtek/rtw88/phy.c 				pos_cond = p->cond;
p                 846 drivers/net/wireless/realtek/rtw88/phy.c 		} else if (p->cond.neg) {
p                 859 drivers/net/wireless/realtek/rtw88/phy.c 			(*tbl->do_cfg)(rtwdev, tbl, p->cfg.addr, p->cfg.data);
p                1213 drivers/net/wireless/realtek/rtw88/phy.c 	const struct phy_pg_cfg_pair *p = tbl->data;
p                1214 drivers/net/wireless/realtek/rtw88/phy.c 	const struct phy_pg_cfg_pair *end = p + tbl->size / 6;
p                1218 drivers/net/wireless/realtek/rtw88/phy.c 	for (; p < end; p++) {
p                1219 drivers/net/wireless/realtek/rtw88/phy.c 		if (p->addr == 0xfe || p->addr == 0xffe) {
p                1223 drivers/net/wireless/realtek/rtw88/phy.c 		rtw_phy_store_tx_power_by_rate(rtwdev, p->band, p->rf_path,
p                1224 drivers/net/wireless/realtek/rtw88/phy.c 					       p->tx_num, p->addr, p->bitmask,
p                1225 drivers/net/wireless/realtek/rtw88/phy.c 					       p->data);
p                1362 drivers/net/wireless/realtek/rtw88/phy.c 	const struct rtw_txpwr_lmt_cfg_pair *p = tbl->data;
p                1363 drivers/net/wireless/realtek/rtw88/phy.c 	const struct rtw_txpwr_lmt_cfg_pair *end = p + tbl->size;
p                1365 drivers/net/wireless/realtek/rtw88/phy.c 	for (; p < end; p++) {
p                1366 drivers/net/wireless/realtek/rtw88/phy.c 		rtw_phy_set_tx_power_limit(rtwdev, p->regd, p->band,
p                1367 drivers/net/wireless/realtek/rtw88/phy.c 					   p->bw, p->rs, p->ch, p->txpwr_lmt);
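The rtw88 phy.c entries above all follow one table-driven loop: p starts at tbl->data, end is derived from tbl->size, and each entry is applied in turn (a masked register write, a tx-power-by-rate store, or a power-limit set). A minimal sketch of that loop is below; here tbl->size is simply the entry count, whereas the driver tables count raw u32 words and divide by the entry width, and the register write is replaced by a print.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct cfg_pair {
	uint32_t addr;
	uint32_t bitmask;
	uint32_t data;
};

struct cfg_table {
	const struct cfg_pair *data;
	size_t size;			/* number of entries in this sketch */
};

/* Stand-in for a masked register write. */
static void write_mask(uint32_t addr, uint32_t mask, uint32_t data)
{
	printf("write 0x%04x mask 0x%08x data 0x%08x\n", addr, mask, data);
}

static void load_table(const struct cfg_table *tbl)
{
	const struct cfg_pair *p = tbl->data;
	const struct cfg_pair *end = p + tbl->size;

	for (; p < end; p++)
		write_mask(p->addr, p->bitmask, p->data);
}

int main(void)
{
	static const struct cfg_pair pairs[] = {
		{ 0x0c00, 0xffffffff, 0x00000007 },
		{ 0x0c04, 0x000000ff, 0x00000012 },
	};
	struct cfg_table tbl = { pairs, sizeof(pairs) / sizeof(pairs[0]) };

	load_table(&tbl);
	return 0;
}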
p                 185 drivers/net/wireless/realtek/rtw88/rtw8822c.c 	u32 p, m, t, i;
p                 188 drivers/net/wireless/realtek/rtw88/rtw8822c.c 	p = 0;
p                 193 drivers/net/wireless/realtek/rtw88/rtw8822c.c 			p = vec[i] + p;
p                 196 drivers/net/wireless/realtek/rtw88/rtw8822c.c 	if (p > m) {
p                 197 drivers/net/wireless/realtek/rtw88/rtw8822c.c 		t = p - m;
p                 200 drivers/net/wireless/realtek/rtw88/rtw8822c.c 		t = m - p;
p                2065 drivers/net/wireless/realtek/rtw88/rtw8822c.c 	const struct dpk_cfg_pair *p = tbl->data;
p                2066 drivers/net/wireless/realtek/rtw88/rtw8822c.c 	const struct dpk_cfg_pair *end = p + tbl->size / 3;
p                2070 drivers/net/wireless/realtek/rtw88/rtw8822c.c 	for (; p < end; p++)
p                2071 drivers/net/wireless/realtek/rtw88/rtw8822c.c 		rtw_write32_mask(rtwdev, p->addr, p->bitmask, p->data);
p                1067 drivers/net/wireless/st/cw1200/sta.c 	const u8 *p = priv->sdd->data;
p                1070 drivers/net/wireless/st/cw1200/sta.c 	while (p + 2 <= priv->sdd->data + priv->sdd->size) {
p                1071 drivers/net/wireless/st/cw1200/sta.c 		if (p + p[1] + 2 > priv->sdd->data + priv->sdd->size) {
p                1075 drivers/net/wireless/st/cw1200/sta.c 		switch (p[0]) {
p                1078 drivers/net/wireless/st/cw1200/sta.c 			if (p[1] < 4) {
p                1083 drivers/net/wireless/st/cw1200/sta.c 			v = le16_to_cpu(*((__le16 *)(p + 2)));
p                1087 drivers/net/wireless/st/cw1200/sta.c 			v = le16_to_cpu(*((__le16 *)(p + 4)));
p                1094 drivers/net/wireless/st/cw1200/sta.c 			u16 clk = le16_to_cpu(*((__le16 *)(p + 2)));
p                1103 drivers/net/wireless/st/cw1200/sta.c 		p += p[1] + 2;
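The cw1200 sta.c entries above parse the SDD firmware blob as a type-length-value stream with two bounds checks: the 2-byte element header must fit in the buffer, and header plus payload must not run past the end; the cursor then advances by p[1] + 2. A standalone sketch of that walk, using a made-up element type and treating its payload as a little-endian 16-bit value:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical element type for the sketch. */
#define ELT_REFERENCE_FREQUENCY 0xc8

/* Walk a TLV buffer: each element is <type><length><length bytes>. */
static void parse_tlv(const uint8_t *data, size_t size)
{
	const uint8_t *p = data;
	const uint8_t *end = data + size;

	while (p + 2 <= end) {
		if (p + p[1] + 2 > end) {
			printf("malformed element, stopping\n");
			return;
		}
		if (p[0] == ELT_REFERENCE_FREQUENCY && p[1] >= 2) {
			uint16_t khz = (uint16_t)(p[2] | (p[3] << 8));	/* LE16 */
			printf("reference frequency: %u kHz\n", khz);
		}
		p += p[1] + 2;
	}
}

int main(void)
{
	const uint8_t blob[] = {
		0x01, 0x01, 0xaa,				/* unknown element */
		ELT_REFERENCE_FREQUENCY, 0x02, 0x40, 0x9c	/* 0x9c40 = 40000 kHz */
	};

	parse_tlv(blob, sizeof(blob));
	return 0;
}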
p                 279 drivers/net/wireless/st/cw1200/wsm.c 		const char *p = arg->buf;
p                 280 drivers/net/wireless/st/cw1200/wsm.c 		cw1200_enable_powersave(priv, (p[0] & 0x0F) ? true : false);
p                1102 drivers/net/wireless/st/cw1200/wsm.h 	struct wsm_set_tx_queue_params *p = &(queue_params)->params[queue]; \
p                1103 drivers/net/wireless/st/cw1200/wsm.h 	p->ackPolicy = (ack_policy);				\
p                1104 drivers/net/wireless/st/cw1200/wsm.h 	p->allowedMediumTime = (allowed_time);				\
p                1105 drivers/net/wireless/st/cw1200/wsm.h 	p->maxTransmitLifetime = (max_life_time);			\
p                1143 drivers/net/wireless/st/cw1200/wsm.h 		struct wsm_edca_queue_params *p = &(__edca)->params[__queue]; \
p                1144 drivers/net/wireless/st/cw1200/wsm.h 		p->cwmin = __cw_min;					\
p                1145 drivers/net/wireless/st/cw1200/wsm.h 		p->cwmax = __cw_max;					\
p                1146 drivers/net/wireless/st/cw1200/wsm.h 		p->aifns = __aifs;					\
p                1147 drivers/net/wireless/st/cw1200/wsm.h 		p->txop_limit = ((__txop) * TXOP_UNIT);			\
p                1148 drivers/net/wireless/st/cw1200/wsm.h 		p->max_rx_lifetime = __lifetime;			\
p                1469 drivers/net/wireless/st/cw1200/wsm.h 	u8 *p = skb_push(arg->skb, 4);
p                1470 drivers/net/wireless/st/cw1200/wsm.h 	p[0] = arg->frame_type;
p                1471 drivers/net/wireless/st/cw1200/wsm.h 	p[1] = arg->rate;
p                1472 drivers/net/wireless/st/cw1200/wsm.h 	((__le16 *)p)[1] = __cpu_to_le16(arg->skb->len - 4);
p                1473 drivers/net/wireless/st/cw1200/wsm.h 	ret = wsm_write_mib(priv, WSM_MIB_ID_TEMPLATE_FRAME, p, arg->skb->len);
p                 307 drivers/net/wireless/ti/wl1251/boot.c 	u8 *p, *buf;
p                 359 drivers/net/wireless/ti/wl1251/boot.c 		p = wl->fw + FW_HDR_SIZE + chunk_num * CHUNK_SIZE;
p                 361 drivers/net/wireless/ti/wl1251/boot.c 			     p, addr);
p                 365 drivers/net/wireless/ti/wl1251/boot.c 		memcpy(buf, p, len);
p                 373 drivers/net/wireless/ti/wl1251/boot.c 	p = wl->fw + FW_HDR_SIZE + chunk_num * CHUNK_SIZE;
p                 377 drivers/net/wireless/ti/wl1251/boot.c 	memcpy(buf, p, len);
p                 380 drivers/net/wireless/ti/wl1251/boot.c 		     len, p, addr);
p                 727 drivers/net/wireless/ti/wl18xx/main.c 		     wl18xx_clk_table[clk_freq].p, wl18xx_clk_table[clk_freq].q,
p                 774 drivers/net/wireless/ti/wl18xx/main.c 					   wl18xx_clk_table[clk_freq].p &
p                 781 drivers/net/wireless/ti/wl18xx/main.c 					(wl18xx_clk_table[clk_freq].p >> 16) &
p                 167 drivers/net/wireless/ti/wl18xx/wl18xx.h 	u32 p;
p                 165 drivers/net/wireless/ti/wlcore/boot.c 	u8 *p, *chunk;
p                 211 drivers/net/wireless/ti/wlcore/boot.c 		p = buf + chunk_num * CHUNK_SIZE;
p                 212 drivers/net/wireless/ti/wlcore/boot.c 		memcpy(chunk, p, CHUNK_SIZE);
p                 214 drivers/net/wireless/ti/wlcore/boot.c 			     p, addr);
p                 224 drivers/net/wireless/ti/wlcore/boot.c 	p = buf + chunk_num * CHUNK_SIZE;
p                 225 drivers/net/wireless/ti/wlcore/boot.c 	memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
p                 227 drivers/net/wireless/ti/wlcore/boot.c 		     fw_data_len % CHUNK_SIZE, p, addr);
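The wl1251 and wlcore boot.c entries above upload firmware in fixed-size pieces: full CHUNK_SIZE chunks in a loop, then a final partial chunk of fw_data_len % CHUNK_SIZE bytes. The sketch below reproduces only that chunking arithmetic, with memcpy standing in for the bus write and a stand-in chunk size:

#include <stdio.h>
#include <string.h>

#define CHUNK_SIZE 512	/* stand-in chunk size */

/* Copy fw_data_len bytes from fw to dest in CHUNK_SIZE pieces,
 * mirroring the full-chunks-then-remainder structure. */
static void upload_chunks(unsigned char *dest, const unsigned char *fw,
			  size_t fw_data_len)
{
	size_t chunk_num;

	for (chunk_num = 0; chunk_num < fw_data_len / CHUNK_SIZE; chunk_num++) {
		const unsigned char *p = fw + chunk_num * CHUNK_SIZE;

		memcpy(dest + chunk_num * CHUNK_SIZE, p, CHUNK_SIZE);
		printf("chunk %zu: %d bytes\n", chunk_num, CHUNK_SIZE);
	}

	/* Final partial chunk, if the length is not a multiple of CHUNK_SIZE. */
	if (fw_data_len % CHUNK_SIZE) {
		const unsigned char *p = fw + chunk_num * CHUNK_SIZE;

		memcpy(dest + chunk_num * CHUNK_SIZE, p, fw_data_len % CHUNK_SIZE);
		printf("last chunk: %zu bytes\n", fw_data_len % CHUNK_SIZE);
	}
}

int main(void)
{
	unsigned char fw[1200], out[1200];

	memset(fw, 0xab, sizeof(fw));
	upload_chunks(out, fw, sizeof(fw));
	return memcmp(out, fw, sizeof(fw)) != 0;
}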
p                 124 drivers/net/wireless/ti/wlcore/io.c 			 const struct wlcore_partition_set *p)
p                 129 drivers/net/wireless/ti/wlcore/io.c 	memcpy(&wl->curr_part, p, sizeof(*p));
p                 132 drivers/net/wireless/ti/wlcore/io.c 		     p->mem.start, p->mem.size);
p                 134 drivers/net/wireless/ti/wlcore/io.c 		     p->reg.start, p->reg.size);
p                 136 drivers/net/wireless/ti/wlcore/io.c 		     p->mem2.start, p->mem2.size);
p                 138 drivers/net/wireless/ti/wlcore/io.c 		     p->mem3.start, p->mem3.size);
p                 140 drivers/net/wireless/ti/wlcore/io.c 	ret = wlcore_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
p                 144 drivers/net/wireless/ti/wlcore/io.c 	ret = wlcore_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
p                 148 drivers/net/wireless/ti/wlcore/io.c 	ret = wlcore_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
p                 152 drivers/net/wireless/ti/wlcore/io.c 	ret = wlcore_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
p                 156 drivers/net/wireless/ti/wlcore/io.c 	ret = wlcore_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
p                 160 drivers/net/wireless/ti/wlcore/io.c 	ret = wlcore_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
p                 175 drivers/net/wireless/ti/wlcore/io.c 	ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
p                 179 drivers/net/wireless/ti/wlcore/io.c 	ret = wlcore_raw_write32(wl, HW_PART3_SIZE_ADDR, p->mem3.size);
p                 221 drivers/net/wireless/ti/wlcore/io.h 			 const struct wlcore_partition_set *p);
p                1345 drivers/net/wireless/ti/wlcore/main.c wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
p                1350 drivers/net/wireless/ti/wlcore/main.c 	if (!p->mask) {
p                1364 drivers/net/wireless/ti/wlcore/main.c 	for (i = 0; i < p->pattern_len; i++) {
p                1365 drivers/net/wireless/ti/wlcore/main.c 		if (test_bit(i, (unsigned long *)p->mask)) {
p                1489 drivers/net/wireless/ti/wlcore/main.c wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
p                1505 drivers/net/wireless/ti/wlcore/main.c 	while (i < p->pattern_len) {
p                1506 drivers/net/wireless/ti/wlcore/main.c 		if (!test_bit(i, (unsigned long *)p->mask)) {
p                1511 drivers/net/wireless/ti/wlcore/main.c 		for (j = i; j < p->pattern_len; j++) {
p                1512 drivers/net/wireless/ti/wlcore/main.c 			if (!test_bit(j, (unsigned long *)p->mask))
p                1533 drivers/net/wireless/ti/wlcore/main.c 						   &p->pattern[i], len);
p                1592 drivers/net/wireless/ti/wlcore/main.c 		struct cfg80211_pkt_pattern *p;
p                1595 drivers/net/wireless/ti/wlcore/main.c 		p = &wow->patterns[i];
p                1597 drivers/net/wireless/ti/wlcore/main.c 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
p                 847 drivers/net/wireless/zydas/zd1201.c static int zd1201_set_mac_address(struct net_device *dev, void *p)
p                 849 drivers/net/wireless/zydas/zd1201.c 	struct sockaddr *addr = p;
p                 138 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	u8 *p;
p                 143 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	p = kmalloc(MAX_TRANSFER_SIZE, GFP_KERNEL);
p                 144 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	if (!p) {
p                 156 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 		memcpy(p, data, transfer_size);
p                 160 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 			code_offset, 0, p, transfer_size, 1000 /* ms */);
p                 181 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 			0, 0, p, sizeof(ret), 5000 /* ms */);
p                 190 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 		ret = p[0];
p                 205 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	kfree(p);
p                 211 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	const __le16 *p = data;
p                 212 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 	return le16_to_cpu(p[offset]);
p                 656 drivers/nfc/fdp/fdp.c 	u8 i, *p;
p                 660 drivers/nfc/fdp/fdp.c 		p = rsp->data;
p                 663 drivers/nfc/fdp/fdp.c 			switch (*p++) {
p                 665 drivers/nfc/fdp/fdp.c 				p++;
p                 666 drivers/nfc/fdp/fdp.c 				info->ram_version = le32_to_cpup((__le32 *) p);
p                 667 drivers/nfc/fdp/fdp.c 				p += 4;
p                 670 drivers/nfc/fdp/fdp.c 				p++;
p                 671 drivers/nfc/fdp/fdp.c 				info->otp_version = le32_to_cpup((__le32 *) p);
p                 672 drivers/nfc/fdp/fdp.c 				p += 4;
p                 675 drivers/nfc/fdp/fdp.c 				p++;
p                 676 drivers/nfc/fdp/fdp.c 				info->otp_version = le32_to_cpup((__le32 *) p);
p                 677 drivers/nfc/fdp/fdp.c 				p += 4;
p                 680 drivers/nfc/fdp/fdp.c 				p++;
p                 681 drivers/nfc/fdp/fdp.c 				info->key_index = *p++;
p                 240 drivers/nfc/pn544/pn544.c 	struct hw_config *p = hw_config;
p                 248 drivers/nfc/pn544/pn544.c 		param[1] = p->adr[0];
p                 249 drivers/nfc/pn544/pn544.c 		param[2] = p->adr[1];
p                 250 drivers/nfc/pn544/pn544.c 		param[3] = p->value;
p                 262 drivers/nfc/pn544/pn544.c 		if (res_skb->data[0] != p->value) {
p                 269 drivers/nfc/pn544/pn544.c 		p++;
p                 824 drivers/nfc/pn544/pn544.c 	struct uicc_gatelist *p = uicc_gatelist;
p                 835 drivers/nfc/pn544/pn544.c 					PN544_WRITE, (u8 *)p, 4, &res_skb);
p                 844 drivers/nfc/pn544/pn544.c 			if (res_skb->data[0] != p->value) {
p                 851 drivers/nfc/pn544/pn544.c 			p++;
p                  73 drivers/nfc/st-nci/se.c #define ST_NCI_DM_IS_PIPE_OPEN(p) \
p                  74 drivers/nfc/st-nci/se.c 	((p & 0x0f) == (ST_NCI_DM_PIPE_CREATED | ST_NCI_DM_PIPE_OPEN))
p                  63 drivers/nfc/st21nfca/core.c #define ST21NFCA_DM_IS_PIPE_OPEN(p) \
p                  64 drivers/nfc/st21nfca/core.c 	((p & 0x0f) == (ST21NFCA_DM_PIPE_CREATED | ST21NFCA_DM_PIPE_OPEN))
p                  60 drivers/nubus/nubus.c static inline int not_useful(void *p, int map)
p                  62 drivers/nubus/nubus.c 	unsigned long pv = (unsigned long)p;
p                  74 drivers/nubus/nubus.c 	unsigned char *p = *ptr;
p                  78 drivers/nubus/nubus.c 		while (not_useful(p, map))
p                  79 drivers/nubus/nubus.c 			p++;
p                  80 drivers/nubus/nubus.c 		v |= *p++;
p                  83 drivers/nubus/nubus.c 	*ptr = p;
p                  89 drivers/nubus/nubus.c 	unsigned char *p = *ptr;
p                  93 drivers/nubus/nubus.c 			p--;
p                  94 drivers/nubus/nubus.c 		} while (not_useful(p, map));
p                  97 drivers/nubus/nubus.c 	*ptr = p;
p                 102 drivers/nubus/nubus.c 	unsigned char *p = *ptr;
p                 105 drivers/nubus/nubus.c 		while (not_useful(p, map))
p                 106 drivers/nubus/nubus.c 			p++;
p                 107 drivers/nubus/nubus.c 		p++;
p                 110 drivers/nubus/nubus.c 	*ptr = p;
p                 151 drivers/nubus/nubus.c 	unsigned char *p = nd->base;
p                 155 drivers/nubus/nubus.c 	nubus_move(&p, nubus_expand32(nd->data), nd->mask);
p                 157 drivers/nubus/nubus.c 	return p;
p                 167 drivers/nubus/nubus.c 	unsigned char *p = nubus_dirptr(dirent);
p                 170 drivers/nubus/nubus.c 		*t++ = nubus_get_rom(&p, 1, dirent->mask);
p                 180 drivers/nubus/nubus.c 	unsigned char *p = nubus_dirptr(dirent);
p                 183 drivers/nubus/nubus.c 		unsigned char c = nubus_get_rom(&p, 1, dirent->mask);
p                 202 drivers/nubus/nubus.c 	unsigned char *p = nubus_dirptr(dirent);
p                 209 drivers/nubus/nubus.c 			buf[i] = nubus_get_rom(&p, sizeof(buf[0]),
p                 216 drivers/nubus/nubus.c 		seq_putc(m, nubus_get_rom(&p, 1, dirent->mask));
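The nubus.c entries above read big-endian values out of declaration ROM that is wired to only some byte lanes: a predicate decides whether an address is useful for the given lane map, and the reader skips non-useful addresses while accumulating bytes. The sketch below keeps only that accumulate-and-skip structure; the lane rule is reduced to a caller-supplied predicate because the real address-to-lane mapping is hardware-specific, and the "every fourth byte" rule used here is purely illustrative.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdalign.h>

/* Caller-supplied rule: is this address populated for the given map? */
typedef bool (*useful_fn)(const uint8_t *addr, int map);

/* Read `len` bytes big-endian, skipping addresses the predicate rejects.
 * The cursor is advanced past everything consumed or skipped. */
static uint32_t get_rom(const uint8_t **ptr, int len, int map, useful_fn useful)
{
	const uint8_t *p = *ptr;
	uint32_t v = 0;

	while (len--) {
		while (!useful(p, map))
			p++;
		v = (v << 8) | *p++;
	}
	*ptr = p;
	return v;
}

/* Example rule for the sketch: only every fourth byte is populated. */
static bool every_fourth(const uint8_t *addr, int map)
{
	(void)map;
	return ((uintptr_t)addr & 3) == 0;
}

int main(void)
{
	/* 16-byte window where only offsets 0, 4, 8, 12 carry data. */
	static alignas(4) const uint8_t rom[16] = {
		0x12, 0, 0, 0, 0x34, 0, 0, 0, 0x56, 0, 0, 0, 0x78, 0, 0, 0
	};
	const uint8_t *p = rom;
	uint32_t v = get_rom(&p, 4, 0, every_fourth);

	printf("value = 0x%08x\n", v);	/* 0x12345678 */
	return 0;
}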
p                 985 drivers/nvdimm/bus.c 	void __user *p = (void __user *) arg;
p                1009 drivers/nvdimm/bus.c 		if (copy_from_user(&pkg, p, sizeof(pkg)))
p                1053 drivers/nvdimm/bus.c 		if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) {
p                1090 drivers/nvdimm/bus.c 					p + in_len + out_len, copy)) {
p                1111 drivers/nvdimm/bus.c 	if (copy_from_user(buf, p, buf_len)) {
p                1133 drivers/nvdimm/bus.c 	if (copy_to_user(p, buf, buf_len))
p                 622 drivers/nvme/host/fabrics.c 	char *options, *o, *p;
p                 644 drivers/nvme/host/fabrics.c 	while ((p = strsep(&o, ",\n")) != NULL) {
p                 645 drivers/nvme/host/fabrics.c 		if (!*p)
p                 648 drivers/nvme/host/fabrics.c 		token = match_token(p, opt_tokens, args);
p                 652 drivers/nvme/host/fabrics.c 			p = match_strdup(args);
p                 653 drivers/nvme/host/fabrics.c 			if (!p) {
p                 658 drivers/nvme/host/fabrics.c 			opts->transport = p;
p                 661 drivers/nvme/host/fabrics.c 			p = match_strdup(args);
p                 662 drivers/nvme/host/fabrics.c 			if (!p) {
p                 667 drivers/nvme/host/fabrics.c 			opts->subsysnqn = p;
p                 680 drivers/nvme/host/fabrics.c 			p = match_strdup(args);
p                 681 drivers/nvme/host/fabrics.c 			if (!p) {
p                 686 drivers/nvme/host/fabrics.c 			opts->traddr = p;
p                 689 drivers/nvme/host/fabrics.c 			p = match_strdup(args);
p                 690 drivers/nvme/host/fabrics.c 			if (!p) {
p                 695 drivers/nvme/host/fabrics.c 			opts->trsvcid = p;
p                 761 drivers/nvme/host/fabrics.c 			p = match_strdup(args);
p                 762 drivers/nvme/host/fabrics.c 			if (!p) {
p                 766 drivers/nvme/host/fabrics.c 			nqnlen = strlen(p);
p                 769 drivers/nvme/host/fabrics.c 					p, NVMF_NQN_SIZE);
p                 770 drivers/nvme/host/fabrics.c 				kfree(p);
p                 775 drivers/nvme/host/fabrics.c 			opts->host = nvmf_host_add(p);
p                 776 drivers/nvme/host/fabrics.c 			kfree(p);
p                 795 drivers/nvme/host/fabrics.c 			p = match_strdup(args);
p                 796 drivers/nvme/host/fabrics.c 			if (!p) {
p                 801 drivers/nvme/host/fabrics.c 			opts->host_traddr = p;
p                 804 drivers/nvme/host/fabrics.c 			p = match_strdup(args);
p                 805 drivers/nvme/host/fabrics.c 			if (!p) {
p                 809 drivers/nvme/host/fabrics.c 			ret = uuid_parse(p, &hostid);
p                 811 drivers/nvme/host/fabrics.c 				pr_err("Invalid hostid %s\n", p);
p                 813 drivers/nvme/host/fabrics.c 				kfree(p);
p                 816 drivers/nvme/host/fabrics.c 			kfree(p);
p                 872 drivers/nvme/host/fabrics.c 				p);
p                 146 drivers/nvme/host/rdma.c static inline void put_unaligned_le24(u32 val, u8 *p)
p                 148 drivers/nvme/host/rdma.c 	*p++ = val;
p                 149 drivers/nvme/host/rdma.c 	*p++ = val >> 8;
p                 150 drivers/nvme/host/rdma.c 	*p++ = val >> 16;
p                  10 drivers/nvme/host/trace.c static const char *nvme_trace_delete_sq(struct trace_seq *p, u8 *cdw10)
p                  12 drivers/nvme/host/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  15 drivers/nvme/host/trace.c 	trace_seq_printf(p, "sqid=%u", sqid);
p                  16 drivers/nvme/host/trace.c 	trace_seq_putc(p, 0);
p                  21 drivers/nvme/host/trace.c static const char *nvme_trace_create_sq(struct trace_seq *p, u8 *cdw10)
p                  23 drivers/nvme/host/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  30 drivers/nvme/host/trace.c 	trace_seq_printf(p, "sqid=%u, qsize=%u, sq_flags=0x%x, cqid=%u",
p                  32 drivers/nvme/host/trace.c 	trace_seq_putc(p, 0);
p                  37 drivers/nvme/host/trace.c static const char *nvme_trace_delete_cq(struct trace_seq *p, u8 *cdw10)
p                  39 drivers/nvme/host/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  42 drivers/nvme/host/trace.c 	trace_seq_printf(p, "cqid=%u", cqid);
p                  43 drivers/nvme/host/trace.c 	trace_seq_putc(p, 0);
p                  48 drivers/nvme/host/trace.c static const char *nvme_trace_create_cq(struct trace_seq *p, u8 *cdw10)
p                  50 drivers/nvme/host/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  56 drivers/nvme/host/trace.c 	trace_seq_printf(p, "cqid=%u, qsize=%u, cq_flags=0x%x, irq_vector=%u",
p                  58 drivers/nvme/host/trace.c 	trace_seq_putc(p, 0);
p                  63 drivers/nvme/host/trace.c static const char *nvme_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
p                  65 drivers/nvme/host/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  69 drivers/nvme/host/trace.c 	trace_seq_printf(p, "cns=%u, ctrlid=%u", cns, ctrlid);
p                  70 drivers/nvme/host/trace.c 	trace_seq_putc(p, 0);
p                  75 drivers/nvme/host/trace.c static const char *nvme_trace_admin_get_features(struct trace_seq *p,
p                  78 drivers/nvme/host/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  83 drivers/nvme/host/trace.c 	trace_seq_printf(p, "fid=0x%x sel=0x%x cdw11=0x%x", fid, sel, cdw11);
p                  84 drivers/nvme/host/trace.c 	trace_seq_putc(p, 0);
p                  89 drivers/nvme/host/trace.c static const char *nvme_trace_get_lba_status(struct trace_seq *p,
p                  92 drivers/nvme/host/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  98 drivers/nvme/host/trace.c 	trace_seq_printf(p, "slba=0x%llx, mndw=0x%x, rl=0x%x, atype=%u",
p                 100 drivers/nvme/host/trace.c 	trace_seq_putc(p, 0);
p                 105 drivers/nvme/host/trace.c static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
p                 107 drivers/nvme/host/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 114 drivers/nvme/host/trace.c 	trace_seq_printf(p,
p                 117 drivers/nvme/host/trace.c 	trace_seq_putc(p, 0);
p                 122 drivers/nvme/host/trace.c static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
p                 124 drivers/nvme/host/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 126 drivers/nvme/host/trace.c 	trace_seq_printf(p, "nr=%u, attributes=%u",
p                 129 drivers/nvme/host/trace.c 	trace_seq_putc(p, 0);
p                 134 drivers/nvme/host/trace.c static const char *nvme_trace_common(struct trace_seq *p, u8 *cdw10)
p                 136 drivers/nvme/host/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 138 drivers/nvme/host/trace.c 	trace_seq_printf(p, "cdw10=%*ph", 24, cdw10);
p                 139 drivers/nvme/host/trace.c 	trace_seq_putc(p, 0);
p                 144 drivers/nvme/host/trace.c const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
p                 149 drivers/nvme/host/trace.c 		return nvme_trace_delete_sq(p, cdw10);
p                 151 drivers/nvme/host/trace.c 		return nvme_trace_create_sq(p, cdw10);
p                 153 drivers/nvme/host/trace.c 		return nvme_trace_delete_cq(p, cdw10);
p                 155 drivers/nvme/host/trace.c 		return nvme_trace_create_cq(p, cdw10);
p                 157 drivers/nvme/host/trace.c 		return nvme_trace_admin_identify(p, cdw10);
p                 159 drivers/nvme/host/trace.c 		return nvme_trace_admin_get_features(p, cdw10);
p                 161 drivers/nvme/host/trace.c 		return nvme_trace_get_lba_status(p, cdw10);
p                 163 drivers/nvme/host/trace.c 		return nvme_trace_common(p, cdw10);
p                 167 drivers/nvme/host/trace.c const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
p                 174 drivers/nvme/host/trace.c 		return nvme_trace_read_write(p, cdw10);
p                 176 drivers/nvme/host/trace.c 		return nvme_trace_dsm(p, cdw10);
p                 178 drivers/nvme/host/trace.c 		return nvme_trace_common(p, cdw10);
p                 182 drivers/nvme/host/trace.c static const char *nvme_trace_fabrics_property_set(struct trace_seq *p, u8 *spc)
p                 184 drivers/nvme/host/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 189 drivers/nvme/host/trace.c 	trace_seq_printf(p, "attrib=%u, ofst=0x%x, value=0x%llx",
p                 191 drivers/nvme/host/trace.c 	trace_seq_putc(p, 0);
p                 195 drivers/nvme/host/trace.c static const char *nvme_trace_fabrics_connect(struct trace_seq *p, u8 *spc)
p                 197 drivers/nvme/host/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 204 drivers/nvme/host/trace.c 	trace_seq_printf(p, "recfmt=%u, qid=%u, sqsize=%u, cattr=%u, kato=%u",
p                 206 drivers/nvme/host/trace.c 	trace_seq_putc(p, 0);
p                 210 drivers/nvme/host/trace.c static const char *nvme_trace_fabrics_property_get(struct trace_seq *p, u8 *spc)
p                 212 drivers/nvme/host/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 216 drivers/nvme/host/trace.c 	trace_seq_printf(p, "attrib=%u, ofst=0x%x", attrib, ofst);
p                 217 drivers/nvme/host/trace.c 	trace_seq_putc(p, 0);
p                 221 drivers/nvme/host/trace.c static const char *nvme_trace_fabrics_common(struct trace_seq *p, u8 *spc)
p                 223 drivers/nvme/host/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 225 drivers/nvme/host/trace.c 	trace_seq_printf(p, "specific=%*ph", 24, spc);
p                 226 drivers/nvme/host/trace.c 	trace_seq_putc(p, 0);
p                 230 drivers/nvme/host/trace.c const char *nvme_trace_parse_fabrics_cmd(struct trace_seq *p,
p                 235 drivers/nvme/host/trace.c 		return nvme_trace_fabrics_property_set(p, spc);
p                 237 drivers/nvme/host/trace.c 		return nvme_trace_fabrics_connect(p, spc);
p                 239 drivers/nvme/host/trace.c 		return nvme_trace_fabrics_property_get(p, spc);
p                 241 drivers/nvme/host/trace.c 		return nvme_trace_fabrics_common(p, spc);
p                 245 drivers/nvme/host/trace.c const char *nvme_trace_disk_name(struct trace_seq *p, char *name)
p                 247 drivers/nvme/host/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 250 drivers/nvme/host/trace.c 		trace_seq_printf(p, "disk=%s, ", name);
p                 251 drivers/nvme/host/trace.c 	trace_seq_putc(p, 0);
p                  19 drivers/nvme/host/trace.h const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
p                  21 drivers/nvme/host/trace.h const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
p                  23 drivers/nvme/host/trace.h const char *nvme_trace_parse_fabrics_cmd(struct trace_seq *p, u8 fctype,
p                  28 drivers/nvme/host/trace.h 	 nvme_trace_parse_fabrics_cmd(p, fctype, cdw10) :		\
p                  30 drivers/nvme/host/trace.h 	 nvme_trace_parse_nvm_cmd(p, opcode, cdw10) :			\
p                  31 drivers/nvme/host/trace.h 	 nvme_trace_parse_admin_cmd(p, opcode, cdw10)))
p                  33 drivers/nvme/host/trace.h const char *nvme_trace_disk_name(struct trace_seq *p, char *name);
p                  35 drivers/nvme/host/trace.h 	nvme_trace_disk_name(p, name)
p                 427 drivers/nvme/target/configfs.c 	const char *p = page;
p                 438 drivers/nvme/target/configfs.c 		if (p + 2 > page + count) {
p                 442 drivers/nvme/target/configfs.c 		if (!isxdigit(p[0]) || !isxdigit(p[1])) {
p                 447 drivers/nvme/target/configfs.c 		nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
p                 448 drivers/nvme/target/configfs.c 		p += 2;
p                 450 drivers/nvme/target/configfs.c 		if (*p == '-' || *p == ':')
p                 451 drivers/nvme/target/configfs.c 			p++;
p                 623 drivers/nvme/target/configfs.c 	struct nvmet_subsys_link *link, *p;
p                 638 drivers/nvme/target/configfs.c 	list_for_each_entry(p, &port->subsystems, entry) {
p                 639 drivers/nvme/target/configfs.c 		if (p->subsys == subsys)
p                 666 drivers/nvme/target/configfs.c 	struct nvmet_subsys_link *p;
p                 669 drivers/nvme/target/configfs.c 	list_for_each_entry(p, &port->subsystems, entry) {
p                 670 drivers/nvme/target/configfs.c 		if (p->subsys == subsys)
p                 677 drivers/nvme/target/configfs.c 	list_del(&p->entry);
p                 684 drivers/nvme/target/configfs.c 	kfree(p);
p                 702 drivers/nvme/target/configfs.c 	struct nvmet_host_link *link, *p;
p                 724 drivers/nvme/target/configfs.c 	list_for_each_entry(p, &subsys->hosts, entry) {
p                 725 drivers/nvme/target/configfs.c 		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
p                 744 drivers/nvme/target/configfs.c 	struct nvmet_host_link *p;
p                 747 drivers/nvme/target/configfs.c 	list_for_each_entry(p, &subsys->hosts, entry) {
p                 748 drivers/nvme/target/configfs.c 		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
p                 755 drivers/nvme/target/configfs.c 	list_del(&p->entry);
p                 759 drivers/nvme/target/configfs.c 	kfree(p);
p                 255 drivers/nvme/target/core.c 	struct nvmet_subsys_link *p;
p                 258 drivers/nvme/target/core.c 	list_for_each_entry(p, &port->subsystems, entry)
p                 259 drivers/nvme/target/core.c 		nvmet_send_ana_event(p->subsys, port);
p                1147 drivers/nvme/target/core.c 	struct nvmet_host_link *p;
p                1157 drivers/nvme/target/core.c 	list_for_each_entry(p, &subsys->hosts, entry) {
p                1158 drivers/nvme/target/core.c 		if (!strcmp(nvmet_host_name(p->host), hostnqn))
p                1366 drivers/nvme/target/core.c 	struct nvmet_subsys_link *p;
p                1378 drivers/nvme/target/core.c 	list_for_each_entry(p, &port->subsystems, entry) {
p                1379 drivers/nvme/target/core.c 		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
p                1381 drivers/nvme/target/core.c 			if (!kref_get_unless_zero(&p->subsys->ref))
p                1384 drivers/nvme/target/core.c 			return p->subsys;
p                 146 drivers/nvme/target/discovery.c 	struct nvmet_subsys_link *p;
p                 150 drivers/nvme/target/discovery.c 	list_for_each_entry(p, &req->port->subsystems, entry) {
p                 151 drivers/nvme/target/discovery.c 		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
p                 168 drivers/nvme/target/discovery.c 	struct nvmet_subsys_link *p;
p                 195 drivers/nvme/target/discovery.c 	list_for_each_entry(p, &req->port->subsystems, entry) {
p                 198 drivers/nvme/target/discovery.c 		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
p                 203 drivers/nvme/target/discovery.c 				p->subsys->subsysnqn, traddr,
p                  51 drivers/nvme/target/fcloop.c 	char *options, *o, *p;
p                  59 drivers/nvme/target/fcloop.c 	while ((p = strsep(&o, ",\n")) != NULL) {
p                  60 drivers/nvme/target/fcloop.c 		if (!*p)
p                  63 drivers/nvme/target/fcloop.c 		token = match_token(p, opt_tokens, args);
p                 109 drivers/nvme/target/fcloop.c 			pr_warn("unknown parameter or missing value '%s'\n", p);
p                 126 drivers/nvme/target/fcloop.c 	char *options, *o, *p;
p                 137 drivers/nvme/target/fcloop.c 	while ((p = strsep(&o, ",\n")) != NULL) {
p                 138 drivers/nvme/target/fcloop.c 		if (!*p)
p                 141 drivers/nvme/target/fcloop.c 		token = match_token(p, opt_tokens, args);
p                 158 drivers/nvme/target/fcloop.c 			pr_warn("unknown parameter or missing value '%s'\n", p);
p                 553 drivers/nvme/target/loop.c 	struct nvmet_port *p, *found = NULL;
p                 556 drivers/nvme/target/loop.c 	list_for_each_entry(p, &nvme_loop_ports, entry) {
p                 559 drivers/nvme/target/loop.c 		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
p                 561 drivers/nvme/target/loop.c 		found = p;
p                 147 drivers/nvme/target/rdma.c static inline u32 get_unaligned_le24(const u8 *p)
p                 149 drivers/nvme/target/rdma.c 	return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
p                1245 drivers/nvme/target/rdma.c 		struct rdma_conn_param *p)
p                1253 drivers/nvme/target/rdma.c 	param.initiator_depth = min_t(u8, p->initiator_depth,
p                  10 drivers/nvme/target/trace.c static const char *nvmet_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
p                  12 drivers/nvme/target/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  16 drivers/nvme/target/trace.c 	trace_seq_printf(p, "cns=%u, ctrlid=%u", cns, ctrlid);
p                  17 drivers/nvme/target/trace.c 	trace_seq_putc(p, 0);
p                  22 drivers/nvme/target/trace.c static const char *nvmet_trace_admin_get_features(struct trace_seq *p,
p                  25 drivers/nvme/target/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  30 drivers/nvme/target/trace.c 	trace_seq_printf(p, "fid=0x%x sel=0x%x cdw11=0x%x", fid, sel, cdw11);
p                  31 drivers/nvme/target/trace.c 	trace_seq_putc(p, 0);
p                  36 drivers/nvme/target/trace.c static const char *nvmet_trace_get_lba_status(struct trace_seq *p,
p                  39 drivers/nvme/target/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  45 drivers/nvme/target/trace.c 	trace_seq_printf(p, "slba=0x%llx, mndw=0x%x, rl=0x%x, atype=%u",
p                  47 drivers/nvme/target/trace.c 	trace_seq_putc(p, 0);
p                  52 drivers/nvme/target/trace.c static const char *nvmet_trace_read_write(struct trace_seq *p, u8 *cdw10)
p                  54 drivers/nvme/target/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  61 drivers/nvme/target/trace.c 	trace_seq_printf(p,
p                  64 drivers/nvme/target/trace.c 	trace_seq_putc(p, 0);
p                  69 drivers/nvme/target/trace.c static const char *nvmet_trace_dsm(struct trace_seq *p, u8 *cdw10)
p                  71 drivers/nvme/target/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  73 drivers/nvme/target/trace.c 	trace_seq_printf(p, "nr=%u, attributes=%u",
p                  76 drivers/nvme/target/trace.c 	trace_seq_putc(p, 0);
p                  81 drivers/nvme/target/trace.c static const char *nvmet_trace_common(struct trace_seq *p, u8 *cdw10)
p                  83 drivers/nvme/target/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  85 drivers/nvme/target/trace.c 	trace_seq_printf(p, "cdw10=%*ph", 24, cdw10);
p                  86 drivers/nvme/target/trace.c 	trace_seq_putc(p, 0);
p                  91 drivers/nvme/target/trace.c const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p,
p                  96 drivers/nvme/target/trace.c 		return nvmet_trace_admin_identify(p, cdw10);
p                  98 drivers/nvme/target/trace.c 		return nvmet_trace_admin_get_features(p, cdw10);
p                 100 drivers/nvme/target/trace.c 		return nvmet_trace_get_lba_status(p, cdw10);
p                 102 drivers/nvme/target/trace.c 		return nvmet_trace_common(p, cdw10);
p                 106 drivers/nvme/target/trace.c const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
p                 113 drivers/nvme/target/trace.c 		return nvmet_trace_read_write(p, cdw10);
p                 115 drivers/nvme/target/trace.c 		return nvmet_trace_dsm(p, cdw10);
p                 117 drivers/nvme/target/trace.c 		return nvmet_trace_common(p, cdw10);
p                 121 drivers/nvme/target/trace.c static const char *nvmet_trace_fabrics_property_set(struct trace_seq *p,
p                 124 drivers/nvme/target/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 129 drivers/nvme/target/trace.c 	trace_seq_printf(p, "attrib=%u, ofst=0x%x, value=0x%llx",
p                 131 drivers/nvme/target/trace.c 	trace_seq_putc(p, 0);
p                 135 drivers/nvme/target/trace.c static const char *nvmet_trace_fabrics_connect(struct trace_seq *p,
p                 138 drivers/nvme/target/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 145 drivers/nvme/target/trace.c 	trace_seq_printf(p, "recfmt=%u, qid=%u, sqsize=%u, cattr=%u, kato=%u",
p                 147 drivers/nvme/target/trace.c 	trace_seq_putc(p, 0);
p                 151 drivers/nvme/target/trace.c static const char *nvmet_trace_fabrics_property_get(struct trace_seq *p,
p                 154 drivers/nvme/target/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 158 drivers/nvme/target/trace.c 	trace_seq_printf(p, "attrib=%u, ofst=0x%x", attrib, ofst);
p                 159 drivers/nvme/target/trace.c 	trace_seq_putc(p, 0);
p                 163 drivers/nvme/target/trace.c static const char *nvmet_trace_fabrics_common(struct trace_seq *p, u8 *spc)
p                 165 drivers/nvme/target/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 167 drivers/nvme/target/trace.c 	trace_seq_printf(p, "specific=%*ph", 24, spc);
p                 168 drivers/nvme/target/trace.c 	trace_seq_putc(p, 0);
p                 172 drivers/nvme/target/trace.c const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p,
p                 177 drivers/nvme/target/trace.c 		return nvmet_trace_fabrics_property_set(p, spc);
p                 179 drivers/nvme/target/trace.c 		return nvmet_trace_fabrics_connect(p, spc);
p                 181 drivers/nvme/target/trace.c 		return nvmet_trace_fabrics_property_get(p, spc);
p                 183 drivers/nvme/target/trace.c 		return nvmet_trace_fabrics_common(p, spc);
p                 187 drivers/nvme/target/trace.c const char *nvmet_trace_disk_name(struct trace_seq *p, char *name)
p                 189 drivers/nvme/target/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 192 drivers/nvme/target/trace.c 		trace_seq_printf(p, "disk=%s, ", name);
p                 193 drivers/nvme/target/trace.c 	trace_seq_putc(p, 0);
p                 198 drivers/nvme/target/trace.c const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
p                 200 drivers/nvme/target/trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 212 drivers/nvme/target/trace.c 		trace_seq_printf(p, "%d", ctrl->cntlid);
p                 214 drivers/nvme/target/trace.c 		trace_seq_printf(p, "_");
p                 215 drivers/nvme/target/trace.c 	trace_seq_putc(p, 0);
p                  21 drivers/nvme/target/trace.h const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
p                  23 drivers/nvme/target/trace.h const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
p                  25 drivers/nvme/target/trace.h const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p, u8 fctype,
p                  30 drivers/nvme/target/trace.h 	 nvmet_trace_parse_fabrics_cmd(p, fctype, cdw10) :		\
p                  32 drivers/nvme/target/trace.h 	 nvmet_trace_parse_nvm_cmd(p, opcode, cdw10) :			\
p                  33 drivers/nvme/target/trace.h 	 nvmet_trace_parse_admin_cmd(p, opcode, cdw10)))
p                  35 drivers/nvme/target/trace.h const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl);
p                  37 drivers/nvme/target/trace.h 	nvmet_trace_ctrl_name(p, ctrl)
p                  39 drivers/nvme/target/trace.h const char *nvmet_trace_disk_name(struct trace_seq *p, char *name);
p                  41 drivers/nvme/target/trace.h 	nvmet_trace_disk_name(p, name)
p                 119 drivers/nvmem/core.c 	struct nvmem_cell *cell, *p;
p                 121 drivers/nvmem/core.c 	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
p                 909 drivers/nvmem/core.c 	u8 *p, *b;
p                 912 drivers/nvmem/core.c 	p = b = buf;
p                 920 drivers/nvmem/core.c 			*p |= *b << (BITS_PER_BYTE - bit_offset);
p                 922 drivers/nvmem/core.c 			p = b;
p                 927 drivers/nvmem/core.c 		p += cell->bytes - 1;
p                 933 drivers/nvmem/core.c 		*p-- = 0;
p                 936 drivers/nvmem/core.c 	*p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
p                 998 drivers/nvmem/core.c 	u8 v, *p, *buf, *b, pbyte, pbits;
p                1006 drivers/nvmem/core.c 	p = b = buf;
p                1023 drivers/nvmem/core.c 			p = b;
p                1036 drivers/nvmem/core.c 		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
p                  77 drivers/nvmem/imx-ocotp-scu.c 	void *p;
p                  87 drivers/nvmem/imx-ocotp-scu.c 	p = kzalloc(num_bytes, GFP_KERNEL);
p                  88 drivers/nvmem/imx-ocotp-scu.c 	if (!p)
p                  91 drivers/nvmem/imx-ocotp-scu.c 	buf = p;
p                 103 drivers/nvmem/imx-ocotp-scu.c 			kfree(p);
p                 109 drivers/nvmem/imx-ocotp-scu.c 	memcpy(val, (u8 *)p + offset % 4, bytes);
p                 111 drivers/nvmem/imx-ocotp-scu.c 	kfree(p);
p                 955 drivers/of/base.c 		const char *p = separator;
p                 957 drivers/of/base.c 		if (!p)
p                 958 drivers/of/base.c 			p = strchrnul(path, '/');
p                 959 drivers/of/base.c 		len = p - path;
p                 973 drivers/of/base.c 		path = p;
p                1203 drivers/of/base.c 	const char *compatible, *p;
p                1209 drivers/of/base.c 	p = strchr(compatible, ',');
p                1210 drivers/of/base.c 	strlcpy(modalias, p ? p + 1 : compatible, len);
p                 204 drivers/of/device.c 	struct property *p;
p                 220 drivers/of/device.c 	of_property_for_each_string(dev->of_node, "compatible", p, compat) {
p                 286 drivers/of/device.c 	struct property *p;
p                 301 drivers/of/device.c 	of_property_for_each_string(dev->of_node, "compatible", p, compat) {
p                  76 drivers/of/dynamic.c int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p)
p                  80 drivers/of/dynamic.c 	struct of_reconfig_data *pr = p;
p                  97 drivers/of/dynamic.c 	rc = blocking_notifier_call_chain(&of_reconfig_chain, action, p);
p                 175 drivers/of/fdt.c 		const char *p = nodename, *ps = p, *pa = NULL;
p                 178 drivers/of/fdt.c 		while (*p) {
p                 179 drivers/of/fdt.c 			if ((*p) == '@')
p                 180 drivers/of/fdt.c 				pa = p;
p                 181 drivers/of/fdt.c 			else if ((*p) == '/')
p                 182 drivers/of/fdt.c 				ps = p + 1;
p                 183 drivers/of/fdt.c 			p++;
p                 187 drivers/of/fdt.c 			pa = p;
p                 911 drivers/of/fdt.c 	const char *p, *q, *options = NULL;
p                 922 drivers/of/fdt.c 	p = fdt_getprop(fdt, offset, "stdout-path", &l);
p                 923 drivers/of/fdt.c 	if (!p)
p                 924 drivers/of/fdt.c 		p = fdt_getprop(fdt, offset, "linux,stdout-path", &l);
p                 925 drivers/of/fdt.c 	if (!p || !l)
p                 928 drivers/of/fdt.c 	q = strchrnul(p, ':');
p                 931 drivers/of/fdt.c 	l = q - p;
p                 934 drivers/of/fdt.c 	offset = fdt_path_offset_namelen(fdt, p, l);
p                 936 drivers/of/fdt.c 		pr_warn("earlycon: stdout-path %.*s not found\n", l, p);
p                 987 drivers/of/fdt.c 	const __be32 *p = *cellp;
p                 989 drivers/of/fdt.c 	*cellp = p + s;
p                 990 drivers/of/fdt.c 	return of_read_number(p, s);
p                1047 drivers/of/fdt.c 	const char *p;
p                1059 drivers/of/fdt.c 	p = of_get_flat_dt_prop(node, "bootargs", &l);
p                1060 drivers/of/fdt.c 	if (p != NULL && l > 0)
p                1061 drivers/of/fdt.c 		strlcpy(data, p, min(l, COMMAND_LINE_SIZE));
p                  56 drivers/of/irq.c 	struct device_node *p;
p                  64 drivers/of/irq.c 			p = of_get_parent(child);
p                  67 drivers/of/irq.c 				p = of_node_get(of_irq_dflt_pic);
p                  69 drivers/of/irq.c 				p = of_find_node_by_phandle(parent);
p                  72 drivers/of/irq.c 		child = p;
p                  73 drivers/of/irq.c 	} while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL);
p                  75 drivers/of/irq.c 	return p;
p                 288 drivers/of/irq.c 	struct device_node *p;
p                 309 drivers/of/irq.c 	p = of_irq_find_parent(device);
p                 310 drivers/of/irq.c 	if (p == NULL)
p                 314 drivers/of/irq.c 	if (of_property_read_u32(p, "#interrupt-cells", &intsize)) {
p                 319 drivers/of/irq.c 	pr_debug(" parent=%pOF, intsize=%d\n", p, intsize);
p                 322 drivers/of/irq.c 	out_irq->np = p;
p                 338 drivers/of/irq.c 	of_node_put(p);
p                  27 drivers/of/pdt.c #define of_pdt_incr_unique_id(p) do { \
p                  28 drivers/of/pdt.c 	(p)->unique_id = of_pdt_unique_id++; \
p                  38 drivers/of/pdt.c static inline void of_pdt_incr_unique_id(void *p) { }
p                  71 drivers/of/pdt.c 	struct property *p;
p                  75 drivers/of/pdt.c 		p = tmp;
p                  76 drivers/of/pdt.c 		memset(p, 0, sizeof(*p) + 32);
p                  79 drivers/of/pdt.c 		p = prom_early_alloc(sizeof(struct property) + 32);
p                  80 drivers/of/pdt.c 		of_pdt_incr_unique_id(p);
p                  83 drivers/of/pdt.c 	p->name = (char *) (p + 1);
p                  85 drivers/of/pdt.c 		strcpy(p->name, special_name);
p                  86 drivers/of/pdt.c 		p->length = special_len;
p                  87 drivers/of/pdt.c 		p->value = prom_early_alloc(special_len);
p                  88 drivers/of/pdt.c 		memcpy(p->value, special_val, special_len);
p                  90 drivers/of/pdt.c 		err = of_pdt_prom_ops->nextprop(node, prev, p->name);
p                  92 drivers/of/pdt.c 			tmp = p;
p                  95 drivers/of/pdt.c 		p->length = of_pdt_prom_ops->getproplen(node, p->name);
p                  96 drivers/of/pdt.c 		if (p->length <= 0) {
p                  97 drivers/of/pdt.c 			p->length = 0;
p                 101 drivers/of/pdt.c 			p->value = prom_early_alloc(p->length + 1);
p                 102 drivers/of/pdt.c 			len = of_pdt_prom_ops->getproperty(node, p->name,
p                 103 drivers/of/pdt.c 					p->value, p->length);
p                 105 drivers/of/pdt.c 				p->length = 0;
p                 106 drivers/of/pdt.c 			((unsigned char *)p->value)[p->length] = '\0';
p                 109 drivers/of/pdt.c 	return p;
p                 423 drivers/of/property.c 	const char *p, *end;
p                 430 drivers/of/property.c 	p = prop->value;
p                 431 drivers/of/property.c 	end = p + prop->length;
p                 433 drivers/of/property.c 	for (i = 0; p < end; i++, p += l) {
p                 434 drivers/of/property.c 		l = strnlen(p, end - p) + 1;
p                 435 drivers/of/property.c 		if (p + l > end)
p                 437 drivers/of/property.c 		pr_debug("comparing %s with %s\n", string, p);
p                 438 drivers/of/property.c 		if (strcmp(string, p) == 0)
p                 462 drivers/of/property.c 	const char *p, *end;
p                 468 drivers/of/property.c 	p = prop->value;
p                 469 drivers/of/property.c 	end = p + prop->length;
p                 471 drivers/of/property.c 	for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) {
p                 472 drivers/of/property.c 		l = strnlen(p, end - p) + 1;
p                 473 drivers/of/property.c 		if (p + l > end)
p                 476 drivers/of/property.c 			*out_strs++ = p;
p                1031 drivers/parisc/ccio-dma.c static int ccio_proc_info(struct seq_file *m, void *p)
p                1097 drivers/parisc/ccio-dma.c static int ccio_proc_bitmap_info(struct seq_file *m, void *p)
p                 335 drivers/parisc/iosapic.c 	struct irt_entry *p = table;
p                 344 drivers/parisc/iosapic.c 	for (i = 0 ; i < num_entries ; i++, p++) {
p                 346 drivers/parisc/iosapic.c 		p->entry_type, p->entry_length, p->interrupt_type,
p                 347 drivers/parisc/iosapic.c 		p->polarity_trigger, p->src_bus_irq_devno, p->src_bus_id,
p                 348 drivers/parisc/iosapic.c 		p->src_seg_id, p->dest_iosapic_intin,
p                 349 drivers/parisc/iosapic.c 		((u32 *) p)[2],
p                 350 drivers/parisc/iosapic.c 		((u32 *) p)[3]
p                 493 drivers/parisc/iosapic.c 		struct pci_bus *p = pcidev->bus;
p                 513 drivers/parisc/iosapic.c 		while (p->parent->parent)
p                 514 drivers/parisc/iosapic.c 			p = p->parent;
p                 516 drivers/parisc/iosapic.c 		intr_slot = PCI_SLOT(p->self->devfn);
p                 563 drivers/parisc/iosapic.c 	struct irt_entry *p = vi->irte;
p                 565 drivers/parisc/iosapic.c 	if ((p->polarity_trigger & IRT_PO_MASK) == IRT_ACTIVE_LO)
p                 568 drivers/parisc/iosapic.c 	if (((p->polarity_trigger >> IRT_EL_SHIFT) & IRT_EL_MASK) == IRT_LEVEL_TRIG)
p                1064 drivers/parisc/lba_pci.c 		} *p, *io;
p                1067 drivers/parisc/lba_pci.c 		p = (void *) &(pa_pdc_cell->mod[2+i*3]);
p                1071 drivers/parisc/lba_pci.c 		switch(p->type & 0xff) {
p                1073 drivers/parisc/lba_pci.c 			lba_dev->hba.bus_num.start = p->start;
p                1074 drivers/parisc/lba_pci.c 			lba_dev->hba.bus_num.end   = p->end;
p                1085 drivers/parisc/lba_pci.c 				if ((p->end - p->start) != lba_len)
p                1086 drivers/parisc/lba_pci.c 					p->end = extend_lmmio_len(p->start,
p                1087 drivers/parisc/lba_pci.c 						p->end, lba_len);
p                1092 drivers/parisc/lba_pci.c 				lba_dev->hba.lmmio_space_offset = p->start -
p                1108 drivers/parisc/lba_pci.c 			r->start  = p->start;
p                1109 drivers/parisc/lba_pci.c 			r->end    = p->end;
p                1120 drivers/parisc/lba_pci.c 			r->start  = p->start;
p                1121 drivers/parisc/lba_pci.c 			r->end    = p->end;
p                1129 drivers/parisc/lba_pci.c 				i, p->start);
p                1137 drivers/parisc/lba_pci.c 			lba_dev->iop_base = ioremap_nocache(p->start, 64 * 1024 * 1024);
p                1152 drivers/parisc/lba_pci.c 				i, p->type & 0xff);
p                1774 drivers/parisc/sba_iommu.c static int sba_proc_info(struct seq_file *m, void *p)
p                1845 drivers/parisc/sba_iommu.c sba_proc_bitmap_info(struct seq_file *m, void *p)
p                  58 drivers/parport/daisy.c 	struct daisydev *newdev, **p;
p                  65 drivers/parport/daisy.c 		for (p = &topology; *p && (*p)->devnum<devnum; p = &(*p)->next)
p                  67 drivers/parport/daisy.c 		newdev->next = *p;
p                  68 drivers/parport/daisy.c 		*p = newdev;
p                 177 drivers/parport/daisy.c 	struct daisydev **p;
p                 180 drivers/parport/daisy.c 	p = &topology;
p                 181 drivers/parport/daisy.c 	while (*p) {
p                 182 drivers/parport/daisy.c 		struct daisydev *dev = *p;
p                 184 drivers/parport/daisy.c 			p = &dev->next;
p                 187 drivers/parport/daisy.c 		*p = dev->next;
p                 215 drivers/parport/daisy.c 	struct daisydev *p = topology;
p                 221 drivers/parport/daisy.c 	while (p && p->devnum != devnum)
p                 222 drivers/parport/daisy.c 		p = p->next;
p                 224 drivers/parport/daisy.c 	if (!p) {
p                 229 drivers/parport/daisy.c 	daisy = p->daisy;
p                 230 drivers/parport/daisy.c 	port = parport_get_port(p->port);
p                  38 drivers/parport/parport_amiga.c static void amiga_write_data(struct parport *p, unsigned char data)
p                  46 drivers/parport/parport_amiga.c static unsigned char amiga_read_data(struct parport *p)
p                  60 drivers/parport/parport_amiga.c static void amiga_write_control(struct parport *p, unsigned char control)
p                  66 drivers/parport/parport_amiga.c static unsigned char amiga_read_control( struct parport *p)
p                  72 drivers/parport/parport_amiga.c static unsigned char amiga_frob_control( struct parport *p, unsigned char mask, unsigned char val)
p                  77 drivers/parport/parport_amiga.c 	old = amiga_read_control(p);
p                  78 drivers/parport/parport_amiga.c 	amiga_write_control(p, (old & ~mask) ^ val);
p                  97 drivers/parport/parport_amiga.c static unsigned char amiga_read_status(struct parport *p)
p                 106 drivers/parport/parport_amiga.c static void amiga_enable_irq(struct parport *p)
p                 111 drivers/parport/parport_amiga.c static void amiga_disable_irq(struct parport *p)
p                 116 drivers/parport/parport_amiga.c static void amiga_data_forward(struct parport *p)
p                 123 drivers/parport/parport_amiga.c static void amiga_data_reverse(struct parport *p)
p                 138 drivers/parport/parport_amiga.c static void amiga_save_state(struct parport *p, struct parport_state *s)
p                 148 drivers/parport/parport_amiga.c static void amiga_restore_state(struct parport *p, struct parport_state *s)
p                 198 drivers/parport/parport_amiga.c 	struct parport *p;
p                 205 drivers/parport/parport_amiga.c 	p = parport_register_port((unsigned long)&ciaa.prb, IRQ_AMIGA_CIAA_FLG,
p                 207 drivers/parport/parport_amiga.c 	if (!p)
p                 210 drivers/parport/parport_amiga.c 	err = request_irq(IRQ_AMIGA_CIAA_FLG, parport_irq_handler, 0, p->name,
p                 211 drivers/parport/parport_amiga.c 			  p);
p                 215 drivers/parport/parport_amiga.c 	printk(KERN_INFO "%s: Amiga built-in port using irq\n", p->name);
p                 217 drivers/parport/parport_amiga.c 	parport_announce_port(p);
p                 219 drivers/parport/parport_amiga.c 	platform_set_drvdata(pdev, p);
p                 224 drivers/parport/parport_amiga.c 	parport_put_port(p);
p                  25 drivers/parport/parport_atari.c parport_atari_read_data(struct parport *p)
p                  38 drivers/parport/parport_atari.c parport_atari_write_data(struct parport *p, unsigned char data)
p                  49 drivers/parport/parport_atari.c parport_atari_read_control(struct parport *p)
p                  63 drivers/parport/parport_atari.c parport_atari_write_control(struct parport *p, unsigned char control)
p                  77 drivers/parport/parport_atari.c parport_atari_frob_control(struct parport *p, unsigned char mask,
p                  80 drivers/parport/parport_atari.c 	unsigned char old = parport_atari_read_control(p);
p                  81 drivers/parport/parport_atari.c 	parport_atari_write_control(p, (old & ~mask) ^ val);
p                  86 drivers/parport/parport_atari.c parport_atari_read_status(struct parport *p)
p                  98 drivers/parport/parport_atari.c parport_atari_save_state(struct parport *p, struct parport_state *s)
p                 103 drivers/parport/parport_atari.c parport_atari_restore_state(struct parport *p, struct parport_state *s)
p                 108 drivers/parport/parport_atari.c parport_atari_enable_irq(struct parport *p)
p                 114 drivers/parport/parport_atari.c parport_atari_disable_irq(struct parport *p)
p                 120 drivers/parport/parport_atari.c parport_atari_data_forward(struct parport *p)
p                 132 drivers/parport/parport_atari.c parport_atari_data_reverse(struct parport *p)
p                 175 drivers/parport/parport_atari.c 	struct parport *p;
p                 191 drivers/parport/parport_atari.c 		p = parport_register_port((unsigned long)&sound_ym.wd_data,
p                 194 drivers/parport/parport_atari.c 		if (!p)
p                 196 drivers/parport/parport_atari.c 		if (request_irq(IRQ_MFP_BUSY, parport_irq_handler, 0, p->name,
p                 197 drivers/parport/parport_atari.c 				p)) {
p                 198 drivers/parport/parport_atari.c 			parport_put_port (p);
p                 202 drivers/parport/parport_atari.c 		this_port = p;
p                 203 drivers/parport/parport_atari.c 		printk(KERN_INFO "%s: Atari built-in port using irq\n", p->name);
p                 204 drivers/parport/parport_atari.c 		parport_announce_port (p);
p                  46 drivers/parport/parport_ax88796.c static inline struct ax_drvdata *pp_to_drv(struct parport *p)
p                  48 drivers/parport/parport_ax88796.c 	return p->private_data;
p                  52 drivers/parport/parport_ax88796.c parport_ax88796_read_data(struct parport *p)
p                  54 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
p                  60 drivers/parport/parport_ax88796.c parport_ax88796_write_data(struct parport *p, unsigned char data)
p                  62 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
p                  68 drivers/parport/parport_ax88796.c parport_ax88796_read_control(struct parport *p)
p                  70 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
p                  90 drivers/parport/parport_ax88796.c parport_ax88796_write_control(struct parport *p, unsigned char control)
p                  92 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
p                 112 drivers/parport/parport_ax88796.c 	if (parport_ax88796_read_control(p) != control) {
p                 114 drivers/parport/parport_ax88796.c 			parport_ax88796_read_control(p), control);
p                 119 drivers/parport/parport_ax88796.c parport_ax88796_read_status(struct parport *p)
p                 121 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
p                 144 drivers/parport/parport_ax88796.c parport_ax88796_frob_control(struct parport *p, unsigned char mask,
p                 147 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
p                 148 drivers/parport/parport_ax88796.c 	unsigned char old = parport_ax88796_read_control(p);
p                 153 drivers/parport/parport_ax88796.c 	parport_ax88796_write_control(p, (old & ~mask) | val);
p                 158 drivers/parport/parport_ax88796.c parport_ax88796_enable_irq(struct parport *p)
p                 160 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
p                 165 drivers/parport/parport_ax88796.c 		enable_irq(p->irq);
p                 172 drivers/parport/parport_ax88796.c parport_ax88796_disable_irq(struct parport *p)
p                 174 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
p                 179 drivers/parport/parport_ax88796.c 		disable_irq(p->irq);
p                 186 drivers/parport/parport_ax88796.c parport_ax88796_data_forward(struct parport *p)
p                 188 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
p                 195 drivers/parport/parport_ax88796.c parport_ax88796_data_reverse(struct parport *p)
p                 197 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
p                 215 drivers/parport/parport_ax88796.c parport_ax88796_save_state(struct parport *p, struct parport_state *s)
p                 217 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
p                 219 drivers/parport/parport_ax88796.c 	dev_dbg(dd->dev, "save_state: %p: state=%p\n", p, s);
p                 224 drivers/parport/parport_ax88796.c parport_ax88796_restore_state(struct parport *p, struct parport_state *s)
p                 226 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
p                 228 drivers/parport/parport_ax88796.c 	dev_dbg(dd->dev, "restore_state: %p: state=%p\n", p, s);
p                 363 drivers/parport/parport_ax88796.c 	struct parport *p = platform_get_drvdata(pdev);
p                 364 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
p                 366 drivers/parport/parport_ax88796.c 	free_irq(p->irq, p);
p                 367 drivers/parport/parport_ax88796.c 	parport_remove_port(p);
p                 380 drivers/parport/parport_ax88796.c 	struct parport *p = platform_get_drvdata(dev);
p                 381 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
p                 383 drivers/parport/parport_ax88796.c 	parport_ax88796_save_state(p, &dd->suspend);
p                 390 drivers/parport/parport_ax88796.c 	struct parport *p = platform_get_drvdata(dev);
p                 391 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
p                 393 drivers/parport/parport_ax88796.c 	parport_ax88796_restore_state(p, &dd->suspend);
p                 122 drivers/parport/parport_cs.c     struct parport *p;
p                 140 drivers/parport/parport_cs.c     p = parport_pc_probe_port(link->resource[0]->start,
p                 144 drivers/parport/parport_cs.c     if (p == NULL) {
p                 152 drivers/parport/parport_cs.c     p->modes |= PARPORT_MODE_PCSPP;
p                 154 drivers/parport/parport_cs.c 	p->modes |= PARPORT_MODE_TRISTATE | PARPORT_MODE_EPP;
p                 156 drivers/parport/parport_cs.c     info->port = p;
p                 173 drivers/parport/parport_cs.c 		struct parport *p = info->port;
p                 174 drivers/parport/parport_cs.c 		parport_pc_unregister_port(p);
p                  83 drivers/parport/parport_gsc.c void parport_gsc_save_state(struct parport *p, struct parport_state *s)
p                  85 drivers/parport/parport_gsc.c 	s->u.pc.ctr = parport_readb (CONTROL (p));
p                  88 drivers/parport/parport_gsc.c void parport_gsc_restore_state(struct parport *p, struct parport_state *s)
p                  90 drivers/parport/parport_gsc.c 	parport_writeb (s->u.pc.ctr, CONTROL (p));
p                 237 drivers/parport/parport_gsc.c 	struct parport *p = &tmp;
p                 256 drivers/parport/parport_gsc.c 	p->base = base;
p                 257 drivers/parport/parport_gsc.c 	p->base_hi = base_hi;
p                 258 drivers/parport/parport_gsc.c 	p->irq = irq;
p                 259 drivers/parport/parport_gsc.c 	p->dma = dma;
p                 260 drivers/parport/parport_gsc.c 	p->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT;
p                 261 drivers/parport/parport_gsc.c 	p->ops = ops;
p                 262 drivers/parport/parport_gsc.c 	p->private_data = priv;
p                 263 drivers/parport/parport_gsc.c 	p->physport = p;
p                 264 drivers/parport/parport_gsc.c 	if (!parport_SPP_supported (p)) {
p                 270 drivers/parport/parport_gsc.c 	parport_PS2_supported (p);
p                 272 drivers/parport/parport_gsc.c 	if (!(p = parport_register_port(base, PARPORT_IRQ_NONE,
p                 279 drivers/parport/parport_gsc.c 	p->dev = &padev->dev;
p                 280 drivers/parport/parport_gsc.c 	p->base_hi = base_hi;
p                 281 drivers/parport/parport_gsc.c 	p->modes = tmp.modes;
p                 282 drivers/parport/parport_gsc.c 	p->size = (p->modes & PARPORT_MODE_EPP)?8:3;
p                 283 drivers/parport/parport_gsc.c 	p->private_data = priv;
p                 285 drivers/parport/parport_gsc.c 	printk(KERN_INFO "%s: PC-style at 0x%lx", p->name, p->base);
p                 286 drivers/parport/parport_gsc.c 	p->irq = irq;
p                 287 drivers/parport/parport_gsc.c 	if (p->irq == PARPORT_IRQ_AUTO) {
p                 288 drivers/parport/parport_gsc.c 		p->irq = PARPORT_IRQ_NONE;
p                 290 drivers/parport/parport_gsc.c 	if (p->irq != PARPORT_IRQ_NONE) {
p                 291 drivers/parport/parport_gsc.c 		pr_cont(", irq %d", p->irq);
p                 293 drivers/parport/parport_gsc.c 		if (p->dma == PARPORT_DMA_AUTO) {
p                 294 drivers/parport/parport_gsc.c 			p->dma = PARPORT_DMA_NONE;
p                 297 drivers/parport/parport_gsc.c 	if (p->dma == PARPORT_DMA_AUTO) /* To use DMA, giving the irq
p                 299 drivers/parport/parport_gsc.c 		p->dma = PARPORT_DMA_NONE;
p                 302 drivers/parport/parport_gsc.c #define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}}
p                 315 drivers/parport/parport_gsc.c 	if (p->irq != PARPORT_IRQ_NONE) {
p                 316 drivers/parport/parport_gsc.c 		if (request_irq (p->irq, parport_irq_handler,
p                 317 drivers/parport/parport_gsc.c 				 0, p->name, p)) {
p                 320 drivers/parport/parport_gsc.c 				p->name, p->irq);
p                 321 drivers/parport/parport_gsc.c 			p->irq = PARPORT_IRQ_NONE;
p                 322 drivers/parport/parport_gsc.c 			p->dma = PARPORT_DMA_NONE;
p                 328 drivers/parport/parport_gsc.c 	parport_gsc_write_data(p, 0);
p                 329 drivers/parport/parport_gsc.c 	parport_gsc_data_forward (p);
p                 334 drivers/parport/parport_gsc.c 	parport_announce_port (p);
p                 336 drivers/parport/parport_gsc.c 	return p;
p                 346 drivers/parport/parport_gsc.c 	struct parport *p;
p                 370 drivers/parport/parport_gsc.c 	p = parport_gsc_probe_port(port, 0, dev->irq,
p                 372 drivers/parport/parport_gsc.c 	if (p)
p                 374 drivers/parport/parport_gsc.c 	dev_set_drvdata(&dev->dev, p);
p                 381 drivers/parport/parport_gsc.c 	struct parport *p = dev_get_drvdata(&dev->dev);
p                 382 drivers/parport/parport_gsc.c 	if (p) {
p                 383 drivers/parport/parport_gsc.c 		struct parport_gsc_private *priv = p->private_data;
p                 384 drivers/parport/parport_gsc.c 		struct parport_operations *ops = p->ops;
p                 385 drivers/parport/parport_gsc.c 		parport_remove_port(p);
p                 386 drivers/parport/parport_gsc.c 		if (p->dma != PARPORT_DMA_NONE)
p                 387 drivers/parport/parport_gsc.c 			free_dma(p->dma);
p                 388 drivers/parport/parport_gsc.c 		if (p->irq != PARPORT_IRQ_NONE)
p                 389 drivers/parport/parport_gsc.c 			free_irq(p->irq, p);
p                 394 drivers/parport/parport_gsc.c 		kfree (p->private_data);
p                 395 drivers/parport/parport_gsc.c 		parport_put_port(p);
p                  45 drivers/parport/parport_gsc.h #define EPPDATA(p)  ((p)->base    + 0x4)
p                  46 drivers/parport/parport_gsc.h #define EPPADDR(p)  ((p)->base    + 0x3)
p                  47 drivers/parport/parport_gsc.h #define CONTROL(p)  ((p)->base    + 0x2)
p                  48 drivers/parport/parport_gsc.h #define STATUS(p)   ((p)->base    + 0x1)
p                  49 drivers/parport/parport_gsc.h #define DATA(p)     ((p)->base    + 0x0)
p                  71 drivers/parport/parport_gsc.h static inline void parport_gsc_write_data(struct parport *p, unsigned char d)
p                  74 drivers/parport/parport_gsc.h 	printk (KERN_DEBUG "parport_gsc_write_data(%p,0x%02x)\n", p, d);
p                  76 drivers/parport/parport_gsc.h 	parport_writeb(d, DATA(p));
p                  79 drivers/parport/parport_gsc.h static inline unsigned char parport_gsc_read_data(struct parport *p)
p                  81 drivers/parport/parport_gsc.h 	unsigned char val = parport_readb (DATA (p));
p                  84 drivers/parport/parport_gsc.h 		p, val);
p                  91 drivers/parport/parport_gsc.h static inline unsigned char __parport_gsc_frob_control(struct parport *p,
p                  95 drivers/parport/parport_gsc.h 	struct parport_gsc_private *priv = p->physport->private_data;
p                 104 drivers/parport/parport_gsc.h 	parport_writeb (ctr, CONTROL (p));
p                 109 drivers/parport/parport_gsc.h static inline void parport_gsc_data_reverse(struct parport *p)
p                 111 drivers/parport/parport_gsc.h 	__parport_gsc_frob_control (p, 0x20, 0x20);
p                 114 drivers/parport/parport_gsc.h static inline void parport_gsc_data_forward(struct parport *p)
p                 116 drivers/parport/parport_gsc.h 	__parport_gsc_frob_control (p, 0x20, 0x00);
p                 119 drivers/parport/parport_gsc.h static inline void parport_gsc_write_control(struct parport *p,
p                 130 drivers/parport/parport_gsc.h 			p->name, p->cad->name);
p                 131 drivers/parport/parport_gsc.h 		parport_gsc_data_reverse (p);
p                 134 drivers/parport/parport_gsc.h 	__parport_gsc_frob_control (p, wm, d & wm);
p                 137 drivers/parport/parport_gsc.h static inline unsigned char parport_gsc_read_control(struct parport *p)
p                 143 drivers/parport/parport_gsc.h 	const struct parport_gsc_private *priv = p->physport->private_data;
p                 147 drivers/parport/parport_gsc.h static inline unsigned char parport_gsc_frob_control(struct parport *p,
p                 159 drivers/parport/parport_gsc.h 			p->name, p->cad->name,
p                 162 drivers/parport/parport_gsc.h 			parport_gsc_data_reverse (p);
p                 164 drivers/parport/parport_gsc.h 			parport_gsc_data_forward (p);
p                 171 drivers/parport/parport_gsc.h 	return __parport_gsc_frob_control (p, mask, val);
p                 174 drivers/parport/parport_gsc.h static inline unsigned char parport_gsc_read_status(struct parport *p)
p                 176 drivers/parport/parport_gsc.h 	return parport_readb (STATUS(p));
p                 179 drivers/parport/parport_gsc.h static inline void parport_gsc_disable_irq(struct parport *p)
p                 181 drivers/parport/parport_gsc.h 	__parport_gsc_frob_control (p, 0x10, 0x00);
p                 184 drivers/parport/parport_gsc.h static inline void parport_gsc_enable_irq(struct parport *p)
p                 186 drivers/parport/parport_gsc.h 	__parport_gsc_frob_control (p, 0x10, 0x10);
p                 189 drivers/parport/parport_gsc.h extern void parport_gsc_release_resources(struct parport *p);
p                 191 drivers/parport/parport_gsc.h extern int parport_gsc_claim_resources(struct parport *p);
p                 195 drivers/parport/parport_gsc.h extern void parport_gsc_save_state(struct parport *p, struct parport_state *s);
p                 197 drivers/parport/parport_gsc.h extern void parport_gsc_restore_state(struct parport *p, struct parport_state *s);
p                 285 drivers/parport/parport_ip32.c #define __pr_trace(pr, p, fmt, ...)					\
p                 287 drivers/parport/parport_ip32.c 	   ({ const struct parport *__p = (p);				\
p                 290 drivers/parport/parport_ip32.c #define pr_trace(p, fmt, ...)	__pr_trace(pr_debug, p, fmt , ##__VA_ARGS__)
p                 291 drivers/parport/parport_ip32.c #define pr_trace1(p, fmt, ...)	__pr_trace(pr_debug1, p, fmt , ##__VA_ARGS__)
p                 303 drivers/parport/parport_ip32.c #define pr_probe(p, fmt, ...)						\
p                 304 drivers/parport/parport_ip32.c 	__pr_probe(KERN_INFO PPIP32 "0x%lx: " fmt, (p)->base , ##__VA_ARGS__)
p                 318 drivers/parport/parport_ip32.c static void parport_ip32_dump_state(struct parport *p, char *str,
p                 321 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                 324 drivers/parport/parport_ip32.c 	printk(KERN_DEBUG PPIP32 "%s: state (%s):\n", p->name, str);
p                 440 drivers/parport/parport_ip32.c #define CHECK_EXTRA_BITS(p, b, m)					\
p                 446 drivers/parport/parport_ip32.c 				  (p)->name, __func__, #b, __b, __m);	\
p                 566 drivers/parport/parport_ip32.c static int parport_ip32_dma_start(struct parport *p,
p                 592 drivers/parport/parport_ip32.c 	parport_ip32_dma.buf = dma_map_single(&p->bus_dev, addr, count, dir);
p                 621 drivers/parport/parport_ip32.c static void parport_ip32_dma_stop(struct parport *p)
p                 677 drivers/parport/parport_ip32.c 	dma_unmap_single(&p->bus_dev, parport_ip32_dma.buf,
p                 752 drivers/parport/parport_ip32.c static inline void parport_ip32_wakeup(struct parport *p)
p                 754 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                 768 drivers/parport/parport_ip32.c 	struct parport * const p = dev_id;
p                 769 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                 777 drivers/parport/parport_ip32.c 		parport_ip32_wakeup(p);
p                 790 drivers/parport/parport_ip32.c static inline unsigned int parport_ip32_read_econtrol(struct parport *p)
p                 792 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                 801 drivers/parport/parport_ip32.c static inline void parport_ip32_write_econtrol(struct parport *p,
p                 804 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                 817 drivers/parport/parport_ip32.c static inline void parport_ip32_frob_econtrol(struct parport *p,
p                 822 drivers/parport/parport_ip32.c 	c = (parport_ip32_read_econtrol(p) & ~mask) ^ val;
p                 823 drivers/parport/parport_ip32.c 	parport_ip32_write_econtrol(p, c);
p                 834 drivers/parport/parport_ip32.c static void parport_ip32_set_mode(struct parport *p, unsigned int mode)
p                 839 drivers/parport/parport_ip32.c 	omode = parport_ip32_read_econtrol(p) & ECR_MODE_MASK;
p                 845 drivers/parport/parport_ip32.c 		parport_ip32_write_econtrol(p, ecr);
p                 847 drivers/parport/parport_ip32.c 	parport_ip32_write_econtrol(p, mode | ECR_nERRINTR | ECR_SERVINTR);
p                 856 drivers/parport/parport_ip32.c static inline unsigned char parport_ip32_read_data(struct parport *p)
p                 858 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                 867 drivers/parport/parport_ip32.c static inline void parport_ip32_write_data(struct parport *p, unsigned char d)
p                 869 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                 877 drivers/parport/parport_ip32.c static inline unsigned char parport_ip32_read_status(struct parport *p)
p                 879 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                 887 drivers/parport/parport_ip32.c static inline unsigned int __parport_ip32_read_control(struct parport *p)
p                 889 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                 898 drivers/parport/parport_ip32.c static inline void __parport_ip32_write_control(struct parport *p,
p                 901 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                 902 drivers/parport/parport_ip32.c 	CHECK_EXTRA_BITS(p, c, priv->dcr_writable);
p                 918 drivers/parport/parport_ip32.c static inline void __parport_ip32_frob_control(struct parport *p,
p                 923 drivers/parport/parport_ip32.c 	c = (__parport_ip32_read_control(p) & ~mask) ^ val;
p                 924 drivers/parport/parport_ip32.c 	__parport_ip32_write_control(p, c);
p                 934 drivers/parport/parport_ip32.c static inline unsigned char parport_ip32_read_control(struct parport *p)
p                 938 drivers/parport/parport_ip32.c 	return __parport_ip32_read_control(p) & rm;
p                 949 drivers/parport/parport_ip32.c static inline void parport_ip32_write_control(struct parport *p,
p                 954 drivers/parport/parport_ip32.c 	CHECK_EXTRA_BITS(p, c, wm);
p                 955 drivers/parport/parport_ip32.c 	__parport_ip32_frob_control(p, wm, c & wm);
p                 967 drivers/parport/parport_ip32.c static inline unsigned char parport_ip32_frob_control(struct parport *p,
p                 973 drivers/parport/parport_ip32.c 	CHECK_EXTRA_BITS(p, mask, wm);
p                 974 drivers/parport/parport_ip32.c 	CHECK_EXTRA_BITS(p, val, wm);
p                 975 drivers/parport/parport_ip32.c 	__parport_ip32_frob_control(p, mask & wm, val & wm);
p                 976 drivers/parport/parport_ip32.c 	return parport_ip32_read_control(p);
p                 983 drivers/parport/parport_ip32.c static inline void parport_ip32_disable_irq(struct parport *p)
p                 985 drivers/parport/parport_ip32.c 	__parport_ip32_frob_control(p, DCR_IRQ, 0);
p                 992 drivers/parport/parport_ip32.c static inline void parport_ip32_enable_irq(struct parport *p)
p                 994 drivers/parport/parport_ip32.c 	__parport_ip32_frob_control(p, DCR_IRQ, DCR_IRQ);
p                1003 drivers/parport/parport_ip32.c static inline void parport_ip32_data_forward(struct parport *p)
p                1005 drivers/parport/parport_ip32.c 	__parport_ip32_frob_control(p, DCR_DIR, 0);
p                1015 drivers/parport/parport_ip32.c static inline void parport_ip32_data_reverse(struct parport *p)
p                1017 drivers/parport/parport_ip32.c 	__parport_ip32_frob_control(p, DCR_DIR, DCR_DIR);
p                1037 drivers/parport/parport_ip32.c static void parport_ip32_save_state(struct parport *p,
p                1040 drivers/parport/parport_ip32.c 	s->u.ip32.dcr = __parport_ip32_read_control(p);
p                1041 drivers/parport/parport_ip32.c 	s->u.ip32.ecr = parport_ip32_read_econtrol(p);
p                1049 drivers/parport/parport_ip32.c static void parport_ip32_restore_state(struct parport *p,
p                1052 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, s->u.ip32.ecr & ECR_MODE_MASK);
p                1053 drivers/parport/parport_ip32.c 	parport_ip32_write_econtrol(p, s->u.ip32.ecr);
p                1054 drivers/parport/parport_ip32.c 	__parport_ip32_write_control(p, s->u.ip32.dcr);
p                1065 drivers/parport/parport_ip32.c static unsigned int parport_ip32_clear_epp_timeout(struct parport *p)
p                1067 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                1070 drivers/parport/parport_ip32.c 	if (!(parport_ip32_read_status(p) & DSR_TIMEOUT))
p                1075 drivers/parport/parport_ip32.c 		parport_ip32_read_status(p);
p                1076 drivers/parport/parport_ip32.c 		r = parport_ip32_read_status(p);
p                1082 drivers/parport/parport_ip32.c 		r = parport_ip32_read_status(p);
p                1086 drivers/parport/parport_ip32.c 	pr_trace(p, "(): %s", cleared ? "cleared" : "failed");
p                1099 drivers/parport/parport_ip32.c 				    struct parport *p, void *buf,
p                1102 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                1104 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_EPP);
p                1105 drivers/parport/parport_ip32.c 	parport_ip32_data_reverse(p);
p                1106 drivers/parport/parport_ip32.c 	parport_ip32_write_control(p, DCR_nINIT);
p                1110 drivers/parport/parport_ip32.c 			parport_ip32_clear_epp_timeout(p);
p                1119 drivers/parport/parport_ip32.c 				parport_ip32_clear_epp_timeout(p);
p                1124 drivers/parport/parport_ip32.c 	parport_ip32_data_forward(p);
p                1125 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_PS2);
p                1138 drivers/parport/parport_ip32.c 				     struct parport *p, const void *buf,
p                1141 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                1143 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_EPP);
p                1144 drivers/parport/parport_ip32.c 	parport_ip32_data_forward(p);
p                1145 drivers/parport/parport_ip32.c 	parport_ip32_write_control(p, DCR_nINIT);
p                1149 drivers/parport/parport_ip32.c 			parport_ip32_clear_epp_timeout(p);
p                1158 drivers/parport/parport_ip32.c 				parport_ip32_clear_epp_timeout(p);
p                1163 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_PS2);
p                1174 drivers/parport/parport_ip32.c static size_t parport_ip32_epp_read_data(struct parport *p, void *buf,
p                1177 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                1178 drivers/parport/parport_ip32.c 	return parport_ip32_epp_read(priv->regs.eppData0, p, buf, len, flags);
p                1188 drivers/parport/parport_ip32.c static size_t parport_ip32_epp_write_data(struct parport *p, const void *buf,
p                1191 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                1192 drivers/parport/parport_ip32.c 	return parport_ip32_epp_write(priv->regs.eppData0, p, buf, len, flags);
p                1202 drivers/parport/parport_ip32.c static size_t parport_ip32_epp_read_addr(struct parport *p, void *buf,
p                1205 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                1206 drivers/parport/parport_ip32.c 	return parport_ip32_epp_read(priv->regs.eppAddr, p, buf, len, flags);
p                1216 drivers/parport/parport_ip32.c static size_t parport_ip32_epp_write_addr(struct parport *p, const void *buf,
p                1219 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                1220 drivers/parport/parport_ip32.c 	return parport_ip32_epp_write(priv->regs.eppAddr, p, buf, len, flags);
p                1237 drivers/parport/parport_ip32.c static unsigned int parport_ip32_fifo_wait_break(struct parport *p,
p                1242 drivers/parport/parport_ip32.c 		pr_debug1(PPIP32 "%s: FIFO write timed out\n", p->name);
p                1246 drivers/parport/parport_ip32.c 		pr_debug1(PPIP32 "%s: Signal pending\n", p->name);
p                1249 drivers/parport/parport_ip32.c 	if (!(parport_ip32_read_status(p) & DSR_nFAULT)) {
p                1250 drivers/parport/parport_ip32.c 		pr_debug1(PPIP32 "%s: nFault asserted low\n", p->name);
p                1264 drivers/parport/parport_ip32.c static unsigned int parport_ip32_fwp_wait_polling(struct parport *p)
p                1266 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                1267 drivers/parport/parport_ip32.c 	struct parport * const physport = p->physport;
p                1275 drivers/parport/parport_ip32.c 		if (parport_ip32_fifo_wait_break(p, expire))
p                1282 drivers/parport/parport_ip32.c 		ecr = parport_ip32_read_econtrol(p);
p                1304 drivers/parport/parport_ip32.c static unsigned int parport_ip32_fwp_wait_interrupt(struct parport *p)
p                1307 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                1308 drivers/parport/parport_ip32.c 	struct parport * const physport = p->physport;
p                1319 drivers/parport/parport_ip32.c 		if (parport_ip32_fifo_wait_break(p, expire))
p                1326 drivers/parport/parport_ip32.c 		parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
p                1331 drivers/parport/parport_ip32.c 		ecr = parport_ip32_read_econtrol(p);
p                1337 drivers/parport/parport_ip32.c 			ecr = parport_ip32_read_econtrol(p);
p                1342 drivers/parport/parport_ip32.c 				       p->name, __func__);
p                1348 drivers/parport/parport_ip32.c 		parport_ip32_frob_econtrol(p, ECR_SERVINTR, ECR_SERVINTR);
p                1381 drivers/parport/parport_ip32.c static size_t parport_ip32_fifo_write_block_pio(struct parport *p,
p                1384 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                1393 drivers/parport/parport_ip32.c 		count = (p->irq == PARPORT_IRQ_NONE) ?
p                1394 drivers/parport/parport_ip32.c 			parport_ip32_fwp_wait_polling(p) :
p                1395 drivers/parport/parport_ip32.c 			parport_ip32_fwp_wait_interrupt(p);
p                1425 drivers/parport/parport_ip32.c static size_t parport_ip32_fifo_write_block_dma(struct parport *p,
p                1428 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                1429 drivers/parport/parport_ip32.c 	struct parport * const physport = p->physport;
p                1437 drivers/parport/parport_ip32.c 	parport_ip32_dma_start(p, DMA_TO_DEVICE, (void *)buf, len);
p                1439 drivers/parport/parport_ip32.c 	parport_ip32_frob_econtrol(p, ECR_DMAEN | ECR_SERVINTR, ECR_DMAEN);
p                1445 drivers/parport/parport_ip32.c 		if (parport_ip32_fifo_wait_break(p, expire))
p                1449 drivers/parport/parport_ip32.c 		ecr = parport_ip32_read_econtrol(p);
p                1453 drivers/parport/parport_ip32.c 	parport_ip32_dma_stop(p);
p                1470 drivers/parport/parport_ip32.c static size_t parport_ip32_fifo_write_block(struct parport *p,
p                1477 drivers/parport/parport_ip32.c 		written = (p->modes & PARPORT_MODE_DMA) ?
p                1478 drivers/parport/parport_ip32.c 			parport_ip32_fifo_write_block_dma(p, buf, len) :
p                1479 drivers/parport/parport_ip32.c 			parport_ip32_fifo_write_block_pio(p, buf, len);
p                1491 drivers/parport/parport_ip32.c static unsigned int parport_ip32_drain_fifo(struct parport *p,
p                1500 drivers/parport/parport_ip32.c 		if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
p                1511 drivers/parport/parport_ip32.c 	while (!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY)) {
p                1521 drivers/parport/parport_ip32.c 	return !!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY);
p                1531 drivers/parport/parport_ip32.c static unsigned int parport_ip32_get_fifo_residue(struct parport *p,
p                1534 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                1543 drivers/parport/parport_ip32.c 	if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
p                1546 drivers/parport/parport_ip32.c 		pr_debug1(PPIP32 "%s: FIFO is stuck\n", p->name);
p                1560 drivers/parport/parport_ip32.c 		parport_ip32_frob_control(p, DCR_STROBE, 0);
p                1564 drivers/parport/parport_ip32.c 			if (parport_ip32_read_econtrol(p) & ECR_F_FULL)
p                1571 drivers/parport/parport_ip32.c 			  p->name, residue,
p                1575 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_PS2);
p                1579 drivers/parport/parport_ip32.c 		parport_ip32_data_reverse(p);
p                1580 drivers/parport/parport_ip32.c 		parport_ip32_frob_control(p, DCR_nINIT, 0);
p                1581 drivers/parport/parport_ip32.c 		if (parport_wait_peripheral(p, DSR_PERROR, 0))
p                1583 drivers/parport/parport_ip32.c 				  p->name, __func__);
p                1584 drivers/parport/parport_ip32.c 		parport_ip32_frob_control(p, DCR_STROBE, DCR_STROBE);
p                1585 drivers/parport/parport_ip32.c 		parport_ip32_frob_control(p, DCR_nINIT, DCR_nINIT);
p                1586 drivers/parport/parport_ip32.c 		if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR))
p                1588 drivers/parport/parport_ip32.c 				  p->name, __func__);
p                1592 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_CFG);
p                1596 drivers/parport/parport_ip32.c 			  p->name, cnfga);
p                1598 drivers/parport/parport_ip32.c 			  p->name);
p                1606 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_PS2);
p                1607 drivers/parport/parport_ip32.c 	parport_ip32_data_forward(p);
p                1619 drivers/parport/parport_ip32.c static size_t parport_ip32_compat_write_data(struct parport *p,
p                1624 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                1625 drivers/parport/parport_ip32.c 	struct parport * const physport = p->physport;
p                1631 drivers/parport/parport_ip32.c 		return parport_ieee1284_write_compat(p, buf, len, flags);
p                1634 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_PS2);
p                1635 drivers/parport/parport_ip32.c 	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
p                1636 drivers/parport/parport_ip32.c 	parport_ip32_data_forward(p);
p                1637 drivers/parport/parport_ip32.c 	parport_ip32_disable_irq(p);
p                1638 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_PPF);
p                1642 drivers/parport/parport_ip32.c 	if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
p                1647 drivers/parport/parport_ip32.c 			       p->name, __func__);
p                1653 drivers/parport/parport_ip32.c 	written = parport_ip32_fifo_write_block(p, buf, len);
p                1656 drivers/parport/parport_ip32.c 	parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);
p                1659 drivers/parport/parport_ip32.c 	written -= parport_ip32_get_fifo_residue(p, ECR_MODE_PPF);
p                1662 drivers/parport/parport_ip32.c 	if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
p                1664 drivers/parport/parport_ip32.c 		       p->name, __func__);
p                1668 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_PS2);
p                1685 drivers/parport/parport_ip32.c static size_t parport_ip32_ecp_write_data(struct parport *p,
p                1690 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                1691 drivers/parport/parport_ip32.c 	struct parport * const physport = p->physport;
p                1697 drivers/parport/parport_ip32.c 		return parport_ieee1284_ecp_write_data(p, buf, len, flags);
p                1702 drivers/parport/parport_ip32.c 		parport_ip32_frob_control(p, DCR_nINIT | DCR_AUTOFD,
p                1706 drivers/parport/parport_ip32.c 		if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR)) {
p                1708 drivers/parport/parport_ip32.c 			       p->name, __func__);
p                1715 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_PS2);
p                1716 drivers/parport/parport_ip32.c 	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
p                1717 drivers/parport/parport_ip32.c 	parport_ip32_data_forward(p);
p                1718 drivers/parport/parport_ip32.c 	parport_ip32_disable_irq(p);
p                1719 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_ECP);
p                1723 drivers/parport/parport_ip32.c 	if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
p                1728 drivers/parport/parport_ip32.c 			       p->name, __func__);
p                1734 drivers/parport/parport_ip32.c 	written = parport_ip32_fifo_write_block(p, buf, len);
p                1737 drivers/parport/parport_ip32.c 	parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);
p                1740 drivers/parport/parport_ip32.c 	written -= parport_ip32_get_fifo_residue(p, ECR_MODE_ECP);
p                1743 drivers/parport/parport_ip32.c 	if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
p                1745 drivers/parport/parport_ip32.c 		       p->name, __func__);
p                1749 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_PS2);
p                1807 drivers/parport/parport_ip32.c static __init unsigned int parport_ip32_ecp_supported(struct parport *p)
p                1809 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                1817 drivers/parport/parport_ip32.c 	pr_probe(p, "Found working ECR register\n");
p                1818 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_SPP);
p                1819 drivers/parport/parport_ip32.c 	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
p                1823 drivers/parport/parport_ip32.c 	pr_probe(p, "ECR register not found\n");
p                1835 drivers/parport/parport_ip32.c static __init unsigned int parport_ip32_fifo_supported(struct parport *p)
p                1837 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                1843 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_CFG);
p                1859 drivers/parport/parport_ip32.c 		pr_probe(p, "Unknown implementation ID: 0x%0x\n",
p                1865 drivers/parport/parport_ip32.c 		pr_probe(p, "Unsupported PWord size: %u\n", pword);
p                1869 drivers/parport/parport_ip32.c 	pr_probe(p, "PWord is %u bits\n", 8 * priv->pword);
p                1874 drivers/parport/parport_ip32.c 		pr_probe(p, "Hardware compression detected (unsupported)\n");
p                1878 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_TST);
p                1882 drivers/parport/parport_ip32.c 		pr_probe(p, "FIFO not reset\n");
p                1897 drivers/parport/parport_ip32.c 		pr_probe(p, "Can't fill FIFO\n");
p                1901 drivers/parport/parport_ip32.c 		pr_probe(p, "Can't get FIFO depth\n");
p                1904 drivers/parport/parport_ip32.c 	pr_probe(p, "FIFO is %u PWords deep\n", priv->fifo_depth);
p                1907 drivers/parport/parport_ip32.c 	parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
p                1914 drivers/parport/parport_ip32.c 			pr_probe(p, "Invalid data in FIFO\n");
p                1924 drivers/parport/parport_ip32.c 			pr_probe(p, "Data lost in FIFO\n");
p                1929 drivers/parport/parport_ip32.c 		pr_probe(p, "Can't get writeIntrThreshold\n");
p                1932 drivers/parport/parport_ip32.c 	pr_probe(p, "writeIntrThreshold is %u\n", priv->writeIntrThreshold);
p                1936 drivers/parport/parport_ip32.c 		pr_probe(p, "Can't empty FIFO\n");
p                1941 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_PS2);
p                1943 drivers/parport/parport_ip32.c 	parport_ip32_data_reverse(p);
p                1945 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_TST);
p                1947 drivers/parport/parport_ip32.c 	parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
p                1961 drivers/parport/parport_ip32.c 		pr_probe(p, "Can't get readIntrThreshold\n");
p                1964 drivers/parport/parport_ip32.c 	pr_probe(p, "readIntrThreshold is %u\n", priv->readIntrThreshold);
p                1967 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_PS2);
p                1968 drivers/parport/parport_ip32.c 	parport_ip32_data_forward(p);
p                1969 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_SPP);
p                1974 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_SPP);
p                2028 drivers/parport/parport_ip32.c 	struct parport *p = NULL;
p                2036 drivers/parport/parport_ip32.c 	p = parport_register_port(0, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, ops);
p                2037 drivers/parport/parport_ip32.c 	if (ops == NULL || priv == NULL || p == NULL) {
p                2041 drivers/parport/parport_ip32.c 	p->base = MACE_BASE + offsetof(struct sgi_mace, isa.parallel);
p                2042 drivers/parport/parport_ip32.c 	p->base_hi = MACE_BASE + offsetof(struct sgi_mace, isa.ecp1284);
p                2043 drivers/parport/parport_ip32.c 	p->private_data = priv;
p                2055 drivers/parport/parport_ip32.c 	if (!parport_ip32_ecp_supported(p)) {
p                2059 drivers/parport/parport_ip32.c 	parport_ip32_dump_state(p, "begin init", 0);
p                2063 drivers/parport/parport_ip32.c 	p->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT;
p                2064 drivers/parport/parport_ip32.c 	p->modes |= PARPORT_MODE_TRISTATE;
p                2066 drivers/parport/parport_ip32.c 	if (!parport_ip32_fifo_supported(p)) {
p                2068 drivers/parport/parport_ip32.c 		       "%s: error: FIFO disabled\n", p->name);
p                2079 drivers/parport/parport_ip32.c 		if (request_irq(irq, parport_ip32_interrupt, 0, p->name, p)) {
p                2081 drivers/parport/parport_ip32.c 			       "%s: error: IRQ disabled\n", p->name);
p                2085 drivers/parport/parport_ip32.c 			pr_probe(p, "Interrupt support enabled\n");
p                2086 drivers/parport/parport_ip32.c 			p->irq = irq;
p                2095 drivers/parport/parport_ip32.c 			       "%s: error: DMA disabled\n", p->name);
p                2097 drivers/parport/parport_ip32.c 			pr_probe(p, "DMA support enabled\n");
p                2098 drivers/parport/parport_ip32.c 			p->dma = 0; /* arbitrary value != PARPORT_DMA_NONE */
p                2099 drivers/parport/parport_ip32.c 			p->modes |= PARPORT_MODE_DMA;
p                2105 drivers/parport/parport_ip32.c 		p->ops->compat_write_data = parport_ip32_compat_write_data;
p                2106 drivers/parport/parport_ip32.c 		p->modes |= PARPORT_MODE_COMPAT;
p                2107 drivers/parport/parport_ip32.c 		pr_probe(p, "Hardware support for SPP mode enabled\n");
p                2111 drivers/parport/parport_ip32.c 		p->ops->epp_read_data = parport_ip32_epp_read_data;
p                2112 drivers/parport/parport_ip32.c 		p->ops->epp_write_data = parport_ip32_epp_write_data;
p                2113 drivers/parport/parport_ip32.c 		p->ops->epp_read_addr = parport_ip32_epp_read_addr;
p                2114 drivers/parport/parport_ip32.c 		p->ops->epp_write_addr = parport_ip32_epp_write_addr;
p                2115 drivers/parport/parport_ip32.c 		p->modes |= PARPORT_MODE_EPP;
p                2116 drivers/parport/parport_ip32.c 		pr_probe(p, "Hardware support for EPP mode enabled\n");
p                2120 drivers/parport/parport_ip32.c 		p->ops->ecp_write_data = parport_ip32_ecp_write_data;
p                2124 drivers/parport/parport_ip32.c 		p->modes |= PARPORT_MODE_ECP;
p                2125 drivers/parport/parport_ip32.c 		pr_probe(p, "Hardware support for ECP mode enabled\n");
p                2129 drivers/parport/parport_ip32.c 	parport_ip32_set_mode(p, ECR_MODE_PS2);
p                2130 drivers/parport/parport_ip32.c 	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
p                2131 drivers/parport/parport_ip32.c 	parport_ip32_data_forward(p);
p                2132 drivers/parport/parport_ip32.c 	parport_ip32_disable_irq(p);
p                2133 drivers/parport/parport_ip32.c 	parport_ip32_write_data(p, 0x00);
p                2134 drivers/parport/parport_ip32.c 	parport_ip32_dump_state(p, "end init", 0);
p                2138 drivers/parport/parport_ip32.c 	       p->name, p->base, p->base_hi);
p                2139 drivers/parport/parport_ip32.c 	if (p->irq != PARPORT_IRQ_NONE)
p                2140 drivers/parport/parport_ip32.c 		printk(", irq %d", p->irq);
p                2142 drivers/parport/parport_ip32.c #define printmode(x)	if (p->modes & PARPORT_MODE_##x)		\
p                2156 drivers/parport/parport_ip32.c 	parport_announce_port(p);
p                2157 drivers/parport/parport_ip32.c 	return p;
p                2160 drivers/parport/parport_ip32.c 	if (p)
p                2161 drivers/parport/parport_ip32.c 		parport_put_port(p);
p                2174 drivers/parport/parport_ip32.c static __exit void parport_ip32_unregister_port(struct parport *p)
p                2176 drivers/parport/parport_ip32.c 	struct parport_ip32_private * const priv = p->physport->private_data;
p                2177 drivers/parport/parport_ip32.c 	struct parport_operations *ops = p->ops;
p                2179 drivers/parport/parport_ip32.c 	parport_remove_port(p);
p                2180 drivers/parport/parport_ip32.c 	if (p->modes & PARPORT_MODE_DMA)
p                2182 drivers/parport/parport_ip32.c 	if (p->irq != PARPORT_IRQ_NONE)
p                2183 drivers/parport/parport_ip32.c 		free_irq(p->irq, p);
p                2184 drivers/parport/parport_ip32.c 	parport_put_port(p);
p                  85 drivers/parport/parport_mfc3.c static void mfc3_write_data(struct parport *p, unsigned char data)
p                  89 drivers/parport/parport_mfc3.c 	dummy = pia(p)->pprb; /* clears irq bit */
p                  91 drivers/parport/parport_mfc3.c 	pia(p)->pprb = data;
p                  94 drivers/parport/parport_mfc3.c static unsigned char mfc3_read_data(struct parport *p)
p                  97 drivers/parport/parport_mfc3.c 	return pia(p)->pprb;
p                 129 drivers/parport/parport_mfc3.c static void mfc3_write_control(struct parport *p, unsigned char control)
p                 132 drivers/parport/parport_mfc3.c 	pia(p)->ppra = (pia(p)->ppra & 0x1f) | control_pc_to_mfc3(control);
p                 135 drivers/parport/parport_mfc3.c static unsigned char mfc3_read_control( struct parport *p)
p                 138 drivers/parport/parport_mfc3.c 	return control_mfc3_to_pc(pia(p)->ppra & 0xe0);
p                 141 drivers/parport/parport_mfc3.c static unsigned char mfc3_frob_control( struct parport *p, unsigned char mask, unsigned char val)
p                 146 drivers/parport/parport_mfc3.c 	old = mfc3_read_control(p);
p                 147 drivers/parport/parport_mfc3.c 	mfc3_write_control(p, (old & ~mask) ^ val);
p                 169 drivers/parport/parport_mfc3.c static unsigned char mfc3_read_status(struct parport *p)
p                 173 drivers/parport/parport_mfc3.c 	status = status_mfc3_to_pc(pia(p)->ppra & 0x1f);
p                 193 drivers/parport/parport_mfc3.c static void mfc3_enable_irq(struct parport *p)
p                 195 drivers/parport/parport_mfc3.c 	pia(p)->crb |= PIA_C1_ENABLE_IRQ;
p                 198 drivers/parport/parport_mfc3.c static void mfc3_disable_irq(struct parport *p)
p                 200 drivers/parport/parport_mfc3.c 	pia(p)->crb &= ~PIA_C1_ENABLE_IRQ;
p                 203 drivers/parport/parport_mfc3.c static void mfc3_data_forward(struct parport *p)
p                 206 drivers/parport/parport_mfc3.c 	pia(p)->crb &= ~PIA_DDR; /* make data direction register visible */
p                 207 drivers/parport/parport_mfc3.c 	pia(p)->pddrb = 255; /* all pins output */
p                 208 drivers/parport/parport_mfc3.c 	pia(p)->crb |= PIA_DDR; /* make data register visible - default */
p                 211 drivers/parport/parport_mfc3.c static void mfc3_data_reverse(struct parport *p)
p                 214 drivers/parport/parport_mfc3.c 	pia(p)->crb &= ~PIA_DDR; /* make data direction register visible */
p                 215 drivers/parport/parport_mfc3.c 	pia(p)->pddrb = 0; /* all pins input */
p                 216 drivers/parport/parport_mfc3.c 	pia(p)->crb |= PIA_DDR; /* make data register visible - default */
p                 227 drivers/parport/parport_mfc3.c static void mfc3_save_state(struct parport *p, struct parport_state *s)
p                 229 drivers/parport/parport_mfc3.c 	s->u.amiga.data = pia(p)->pprb;
p                 230 drivers/parport/parport_mfc3.c 	pia(p)->crb &= ~PIA_DDR;
p                 231 drivers/parport/parport_mfc3.c 	s->u.amiga.datadir = pia(p)->pddrb;
p                 232 drivers/parport/parport_mfc3.c 	pia(p)->crb |= PIA_DDR;
p                 233 drivers/parport/parport_mfc3.c 	s->u.amiga.status = pia(p)->ppra;
p                 234 drivers/parport/parport_mfc3.c 	pia(p)->cra &= ~PIA_DDR;
p                 235 drivers/parport/parport_mfc3.c 	s->u.amiga.statusdir = pia(p)->pddrb;
p                 236 drivers/parport/parport_mfc3.c 	pia(p)->cra |= PIA_DDR;
p                 239 drivers/parport/parport_mfc3.c static void mfc3_restore_state(struct parport *p, struct parport_state *s)
p                 241 drivers/parport/parport_mfc3.c 	pia(p)->pprb = s->u.amiga.data;
p                 242 drivers/parport/parport_mfc3.c 	pia(p)->crb &= ~PIA_DDR;
p                 243 drivers/parport/parport_mfc3.c 	pia(p)->pddrb = s->u.amiga.datadir;
p                 244 drivers/parport/parport_mfc3.c 	pia(p)->crb |= PIA_DDR;
p                 245 drivers/parport/parport_mfc3.c 	pia(p)->ppra = s->u.amiga.status;
p                 246 drivers/parport/parport_mfc3.c 	pia(p)->cra &= ~PIA_DDR;
p                 247 drivers/parport/parport_mfc3.c 	pia(p)->pddrb = s->u.amiga.statusdir;
p                 248 drivers/parport/parport_mfc3.c 	pia(p)->cra |= PIA_DDR;
p                 291 drivers/parport/parport_mfc3.c 	struct parport *p;
p                 315 drivers/parport/parport_mfc3.c 		p = parport_register_port((unsigned long)pp, IRQ_AMIGA_PORTS,
p                 317 drivers/parport/parport_mfc3.c 		if (!p)
p                 320 drivers/parport/parport_mfc3.c 		if (p->irq != PARPORT_IRQ_NONE) {
p                 322 drivers/parport/parport_mfc3.c 				if (request_irq(IRQ_AMIGA_PORTS, mfc3_interrupt, IRQF_SHARED, p->name, &pp_mfc3_ops))
p                 325 drivers/parport/parport_mfc3.c 		p->dev = &z->dev;
p                 327 drivers/parport/parport_mfc3.c 		this_port[pias++] = p;
p                 328 drivers/parport/parport_mfc3.c 		printk(KERN_INFO "%s: Multiface III port using irq\n", p->name);
p                 331 drivers/parport/parport_mfc3.c 		p->private_data = (void *)piabase;
p                 332 drivers/parport/parport_mfc3.c 		parport_announce_port (p);
p                 339 drivers/parport/parport_mfc3.c 		parport_put_port(p);
p                  86 drivers/parport/parport_pc.c #define ECR_WRITE(p, v) frob_econtrol((p), 0xff, (v))
p                 127 drivers/parport/parport_pc.c static inline void frob_set_mode(struct parport *p, int mode)
p                 129 drivers/parport/parport_pc.c 	frob_econtrol(p, ECR_MODE_MASK, mode << 5);
p                 139 drivers/parport/parport_pc.c static int change_mode(struct parport *p, int m)
p                 141 drivers/parport/parport_pc.c 	const struct parport_pc_private *priv = p->physport->private_data;
p                 153 drivers/parport/parport_pc.c 	oecr = inb(ECONTROL(p));
p                 161 drivers/parport/parport_pc.c 		unsigned long expire = jiffies + p->physport->cad->timeout;
p                 168 drivers/parport/parport_pc.c 				if (inb(ECONTROL(p)) & 0x01)
p                 176 drivers/parport/parport_pc.c 			while (!(inb(ECONTROL(p)) & 0x01)) {
p                 192 drivers/parport/parport_pc.c 		ECR_WRITE(p, oecr);
p                 198 drivers/parport/parport_pc.c 	ECR_WRITE(p, oecr);
p                 246 drivers/parport/parport_pc.c static void parport_pc_save_state(struct parport *p, struct parport_state *s)
p                 248 drivers/parport/parport_pc.c 	const struct parport_pc_private *priv = p->physport->private_data;
p                 251 drivers/parport/parport_pc.c 		s->u.pc.ecr = inb(ECONTROL(p));
p                 254 drivers/parport/parport_pc.c static void parport_pc_restore_state(struct parport *p,
p                 257 drivers/parport/parport_pc.c 	struct parport_pc_private *priv = p->physport->private_data;
p                 259 drivers/parport/parport_pc.c 	outb(c, CONTROL(p));
p                 262 drivers/parport/parport_pc.c 		ECR_WRITE(p, s->u.pc.ecr);
p                1377 drivers/parport/parport_pc.c static struct superio_struct *find_superio(struct parport *p)
p                1381 drivers/parport/parport_pc.c 		if (superios[i].io == p->base)
p                1386 drivers/parport/parport_pc.c static int get_superio_dma(struct parport *p)
p                1388 drivers/parport/parport_pc.c 	struct superio_struct *s = find_superio(p);
p                1394 drivers/parport/parport_pc.c static int get_superio_irq(struct parport *p)
p                1396 drivers/parport/parport_pc.c 	struct superio_struct *s = find_superio(p);
p                1987 drivers/parport/parport_pc.c static int programmable_dma_support(struct parport *p)
p                1989 drivers/parport/parport_pc.c 	unsigned char oecr = inb(ECONTROL(p));
p                1992 drivers/parport/parport_pc.c 	frob_set_mode(p, ECR_CNF);
p                1994 drivers/parport/parport_pc.c 	dma = inb(CONFIGB(p)) & 0x07;
p                2000 drivers/parport/parport_pc.c 	ECR_WRITE(p, oecr);
p                2004 drivers/parport/parport_pc.c static int parport_dma_probe(struct parport *p)
p                2006 drivers/parport/parport_pc.c 	const struct parport_pc_private *priv = p->private_data;
p                2008 drivers/parport/parport_pc.c 		p->dma = programmable_dma_support(p);
p                2009 drivers/parport/parport_pc.c 	if (p->dma == PARPORT_DMA_NONE) {
p                2013 drivers/parport/parport_pc.c 		p->dma = get_superio_dma(p);
p                2016 drivers/parport/parport_pc.c 	return p->dma;
p                2032 drivers/parport/parport_pc.c 	struct parport *p;
p                2065 drivers/parport/parport_pc.c 	p = parport_register_port(base, irq, dma, ops);
p                2066 drivers/parport/parport_pc.c 	if (!p)
p                2069 drivers/parport/parport_pc.c 	base_res = request_region(base, 3, p->name);
p                2081 drivers/parport/parport_pc.c 	priv->port = p;
p                2083 drivers/parport/parport_pc.c 	p->dev = dev;
p                2084 drivers/parport/parport_pc.c 	p->base_hi = base_hi;
p                2085 drivers/parport/parport_pc.c 	p->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT;
p                2086 drivers/parport/parport_pc.c 	p->private_data = priv;
p                2089 drivers/parport/parport_pc.c 		ECR_res = request_region(base_hi, 3, p->name);
p                2091 drivers/parport/parport_pc.c 			parport_ECR_present(p);
p                2095 drivers/parport/parport_pc.c 		EPP_res = request_region(base+0x3, 5, p->name);
p                2097 drivers/parport/parport_pc.c 			if (!parport_EPP_supported(p))
p                2098 drivers/parport/parport_pc.c 				parport_ECPEPP_supported(p);
p                2100 drivers/parport/parport_pc.c 	if (!parport_SPP_supported(p))
p                2104 drivers/parport/parport_pc.c 		parport_ECPPS2_supported(p);
p                2106 drivers/parport/parport_pc.c 		parport_PS2_supported(p);
p                2108 drivers/parport/parport_pc.c 	p->size = (p->modes & PARPORT_MODE_EPP) ? 8 : 3;
p                2110 drivers/parport/parport_pc.c 	printk(KERN_INFO "%s: PC-style at 0x%lx", p->name, p->base);
p                2111 drivers/parport/parport_pc.c 	if (p->base_hi && priv->ecr)
p                2112 drivers/parport/parport_pc.c 		printk(KERN_CONT " (0x%lx)", p->base_hi);
p                2113 drivers/parport/parport_pc.c 	if (p->irq == PARPORT_IRQ_AUTO) {
p                2114 drivers/parport/parport_pc.c 		p->irq = PARPORT_IRQ_NONE;
p                2115 drivers/parport/parport_pc.c 		parport_irq_probe(p);
p                2116 drivers/parport/parport_pc.c 	} else if (p->irq == PARPORT_IRQ_PROBEONLY) {
p                2117 drivers/parport/parport_pc.c 		p->irq = PARPORT_IRQ_NONE;
p                2118 drivers/parport/parport_pc.c 		parport_irq_probe(p);
p                2119 drivers/parport/parport_pc.c 		probedirq = p->irq;
p                2120 drivers/parport/parport_pc.c 		p->irq = PARPORT_IRQ_NONE;
p                2122 drivers/parport/parport_pc.c 	if (p->irq != PARPORT_IRQ_NONE) {
p                2123 drivers/parport/parport_pc.c 		printk(KERN_CONT ", irq %d", p->irq);
p                2126 drivers/parport/parport_pc.c 		if (p->dma == PARPORT_DMA_AUTO) {
p                2127 drivers/parport/parport_pc.c 			p->dma = PARPORT_DMA_NONE;
p                2128 drivers/parport/parport_pc.c 			parport_dma_probe(p);
p                2131 drivers/parport/parport_pc.c 	if (p->dma == PARPORT_DMA_AUTO) /* To use DMA, giving the irq
p                2133 drivers/parport/parport_pc.c 		p->dma = PARPORT_DMA_NONE;
p                2136 drivers/parport/parport_pc.c 	if (parport_ECP_supported(p) &&
p                2137 drivers/parport/parport_pc.c 	    p->dma != PARPORT_DMA_NOFIFO &&
p                2138 drivers/parport/parport_pc.c 	    priv->fifo_depth > 0 && p->irq != PARPORT_IRQ_NONE) {
p                2139 drivers/parport/parport_pc.c 		p->modes |= PARPORT_MODE_ECP | PARPORT_MODE_COMPAT;
p                2140 drivers/parport/parport_pc.c 		p->ops->compat_write_data = parport_pc_compat_write_block_pio;
p                2142 drivers/parport/parport_pc.c 		p->ops->ecp_write_data = parport_pc_ecp_write_block_pio;
p                2146 drivers/parport/parport_pc.c 		if (p->dma != PARPORT_DMA_NONE) {
p                2147 drivers/parport/parport_pc.c 			printk(KERN_CONT ", dma %d", p->dma);
p                2148 drivers/parport/parport_pc.c 			p->modes |= PARPORT_MODE_DMA;
p                2153 drivers/parport/parport_pc.c 		p->dma = PARPORT_DMA_NONE;
p                2160 drivers/parport/parport_pc.c 		if (p->modes & PARPORT_MODE_##x) {\
p                2181 drivers/parport/parport_pc.c 		printk(KERN_INFO "%s: irq %d detected\n", p->name, probedirq);
p                2184 drivers/parport/parport_pc.c 	if (ECR_res && (p->modes & PARPORT_MODE_ECP) == 0) {
p                2189 drivers/parport/parport_pc.c 	if (EPP_res && (p->modes & PARPORT_MODE_EPP) == 0) {
p                2193 drivers/parport/parport_pc.c 	if (p->irq != PARPORT_IRQ_NONE) {
p                2194 drivers/parport/parport_pc.c 		if (request_irq(p->irq, parport_irq_handler,
p                2195 drivers/parport/parport_pc.c 				 irqflags, p->name, p)) {
p                2198 drivers/parport/parport_pc.c 				p->name, p->irq);
p                2199 drivers/parport/parport_pc.c 			p->irq = PARPORT_IRQ_NONE;
p                2200 drivers/parport/parport_pc.c 			p->dma = PARPORT_DMA_NONE;
p                2205 drivers/parport/parport_pc.c 		if (p->dma != PARPORT_DMA_NONE) {
p                2206 drivers/parport/parport_pc.c 			if (request_dma(p->dma, p->name)) {
p                2209 drivers/parport/parport_pc.c 					p->name, p->dma);
p                2210 drivers/parport/parport_pc.c 				p->dma = PARPORT_DMA_NONE;
p                2221 drivers/parport/parport_pc.c 						p->name);
p                2222 drivers/parport/parport_pc.c 					free_dma(p->dma);
p                2223 drivers/parport/parport_pc.c 					p->dma = PARPORT_DMA_NONE;
p                2237 drivers/parport/parport_pc.c 		ECR_WRITE(p, 0x34);
p                2239 drivers/parport/parport_pc.c 	parport_pc_write_data(p, 0);
p                2240 drivers/parport/parport_pc.c 	parport_pc_data_forward(p);
p                2248 drivers/parport/parport_pc.c 	parport_announce_port(p);
p                2250 drivers/parport/parport_pc.c 	return p;
p                2259 drivers/parport/parport_pc.c 	parport_del_port(p);
p                2271 drivers/parport/parport_pc.c void parport_pc_unregister_port(struct parport *p)
p                2273 drivers/parport/parport_pc.c 	struct parport_pc_private *priv = p->private_data;
p                2274 drivers/parport/parport_pc.c 	struct parport_operations *ops = p->ops;
p                2276 drivers/parport/parport_pc.c 	parport_remove_port(p);
p                2281 drivers/parport/parport_pc.c 	if (p->dma != PARPORT_DMA_NONE)
p                2282 drivers/parport/parport_pc.c 		free_dma(p->dma);
p                2284 drivers/parport/parport_pc.c 	if (p->irq != PARPORT_IRQ_NONE)
p                2285 drivers/parport/parport_pc.c 		free_irq(p->irq, p);
p                2286 drivers/parport/parport_pc.c 	release_region(p->base, 3);
p                2287 drivers/parport/parport_pc.c 	if (p->size > 3)
p                2288 drivers/parport/parport_pc.c 		release_region(p->base + 3, p->size - 3);
p                2289 drivers/parport/parport_pc.c 	if (p->modes & PARPORT_MODE_ECP)
p                2290 drivers/parport/parport_pc.c 		release_region(p->base_hi, 3);
p                2293 drivers/parport/parport_pc.c 		dma_free_coherent(p->physport->dev, PAGE_SIZE,
p                2297 drivers/parport/parport_pc.c 	kfree(p->private_data);
p                2298 drivers/parport/parport_pc.c 	parport_del_port(p);
p                  51 drivers/parport/parport_sunbpp.c static void parport_sunbpp_disable_irq(struct parport *p)
p                  53 drivers/parport/parport_sunbpp.c 	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
p                  61 drivers/parport/parport_sunbpp.c static void parport_sunbpp_enable_irq(struct parport *p)
p                  63 drivers/parport/parport_sunbpp.c 	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
p                  71 drivers/parport/parport_sunbpp.c static void parport_sunbpp_write_data(struct parport *p, unsigned char d)
p                  73 drivers/parport/parport_sunbpp.c 	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
p                  79 drivers/parport/parport_sunbpp.c static unsigned char parport_sunbpp_read_data(struct parport *p)
p                  81 drivers/parport/parport_sunbpp.c 	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
p                  86 drivers/parport/parport_sunbpp.c static unsigned char status_sunbpp_to_pc(struct parport *p)
p                  88 drivers/parport/parport_sunbpp.c 	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
p                 109 drivers/parport/parport_sunbpp.c static unsigned char control_sunbpp_to_pc(struct parport *p)
p                 111 drivers/parport/parport_sunbpp.c 	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
p                 130 drivers/parport/parport_sunbpp.c static unsigned char parport_sunbpp_read_control(struct parport *p)
p                 132 drivers/parport/parport_sunbpp.c 	return control_sunbpp_to_pc(p);
p                 135 drivers/parport/parport_sunbpp.c static unsigned char parport_sunbpp_frob_control(struct parport *p,
p                 139 drivers/parport/parport_sunbpp.c 	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
p                 178 drivers/parport/parport_sunbpp.c 	return parport_sunbpp_read_control(p);
p                 181 drivers/parport/parport_sunbpp.c static void parport_sunbpp_write_control(struct parport *p, unsigned char d)
p                 188 drivers/parport/parport_sunbpp.c 	parport_sunbpp_frob_control (p, wm, d & wm);
p                 191 drivers/parport/parport_sunbpp.c static unsigned char parport_sunbpp_read_status(struct parport *p)
p                 193 drivers/parport/parport_sunbpp.c 	return status_sunbpp_to_pc(p);
p                 196 drivers/parport/parport_sunbpp.c static void parport_sunbpp_data_forward (struct parport *p)
p                 198 drivers/parport/parport_sunbpp.c 	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
p                 206 drivers/parport/parport_sunbpp.c static void parport_sunbpp_data_reverse (struct parport *p)
p                 208 drivers/parport/parport_sunbpp.c 	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
p                 222 drivers/parport/parport_sunbpp.c static void parport_sunbpp_save_state(struct parport *p, struct parport_state *s)
p                 224 drivers/parport/parport_sunbpp.c 	s->u.pc.ctr = parport_sunbpp_read_control(p);
p                 227 drivers/parport/parport_sunbpp.c static void parport_sunbpp_restore_state(struct parport *p, struct parport_state *s)
p                 229 drivers/parport/parport_sunbpp.c 	parport_sunbpp_write_control(p, s->u.pc.ctr);
p                 276 drivers/parport/parport_sunbpp.c 	struct parport *p;
p                 296 drivers/parport/parport_sunbpp.c 	if (!(p = parport_register_port((unsigned long)base, irq, dma, ops))) {
p                 301 drivers/parport/parport_sunbpp.c 	p->size = size;
p                 302 drivers/parport/parport_sunbpp.c 	p->dev = &op->dev;
p                 304 drivers/parport/parport_sunbpp.c 	if ((err = request_irq(p->irq, parport_irq_handler,
p                 305 drivers/parport/parport_sunbpp.c 			       IRQF_SHARED, p->name, p)) != 0) {
p                 309 drivers/parport/parport_sunbpp.c 	parport_sunbpp_enable_irq(p);
p                 311 drivers/parport/parport_sunbpp.c 	regs = (struct bpp_regs __iomem *)p->base;
p                 317 drivers/parport/parport_sunbpp.c 	printk(KERN_INFO "%s: sunbpp at 0x%lx\n", p->name, p->base);
p                 319 drivers/parport/parport_sunbpp.c 	dev_set_drvdata(&op->dev, p);
p                 321 drivers/parport/parport_sunbpp.c 	parport_announce_port(p);
p                 326 drivers/parport/parport_sunbpp.c 	parport_put_port(p);
p                 339 drivers/parport/parport_sunbpp.c 	struct parport *p = dev_get_drvdata(&op->dev);
p                 340 drivers/parport/parport_sunbpp.c 	struct parport_operations *ops = p->ops;
p                 342 drivers/parport/parport_sunbpp.c 	parport_remove_port(p);
p                 344 drivers/parport/parport_sunbpp.c 	if (p->irq != PARPORT_IRQ_NONE) {
p                 345 drivers/parport/parport_sunbpp.c 		parport_sunbpp_disable_irq(p);
p                 346 drivers/parport/parport_sunbpp.c 		free_irq(p->irq, p);
p                 349 drivers/parport/parport_sunbpp.c 	of_iounmap(&op->resource[0], (void __iomem *) p->base, p->size);
p                 350 drivers/parport/parport_sunbpp.c 	parport_put_port(p);
p                  56 drivers/parport/probe.c 	char *p = txt, *q;
p                  65 drivers/parport/probe.c 	while (p) {
p                  67 drivers/parport/probe.c 		q = strchr(p, ';');
p                  69 drivers/parport/probe.c 		sep = strchr(p, ':');
p                  75 drivers/parport/probe.c 			while (u >= p && *u == ' ')
p                  77 drivers/parport/probe.c 			u = p;
p                  82 drivers/parport/probe.c 			if (!strcmp(p, "MFG") || !strcmp(p, "MANUFACTURER")) {
p                  85 drivers/parport/probe.c 			} else if (!strcmp(p, "MDL") || !strcmp(p, "MODEL")) {
p                  88 drivers/parport/probe.c 			} else if (!strcmp(p, "CLS") || !strcmp(p, "CLASS")) {
p                 103 drivers/parport/probe.c 			} else if (!strcmp(p, "CMD") ||
p                 104 drivers/parport/probe.c 				   !strcmp(p, "COMMAND SET")) {
p                 111 drivers/parport/probe.c 			} else if (!strcmp(p, "DES") || !strcmp(p, "DESCRIPTION")) {
p                 118 drivers/parport/probe.c 			p = q + 1;
p                 120 drivers/parport/probe.c 			p = NULL;
p                  57 drivers/parport/share.c static void dead_write_lines(struct parport *p, unsigned char b){}
p                  58 drivers/parport/share.c static unsigned char dead_read_lines(struct parport *p) { return 0; }
p                  59 drivers/parport/share.c static unsigned char dead_frob_lines(struct parport *p, unsigned char b,
p                  61 drivers/parport/share.c static void dead_onearg(struct parport *p){}
p                  63 drivers/parport/share.c static void dead_state(struct parport *p, struct parport_state *s) { }
p                  64 drivers/parport/share.c static size_t dead_write(struct parport *p, const void *b, size_t l, int f)
p                  66 drivers/parport/share.c static size_t dead_read(struct parport *p, void *b, size_t l, int f)
p                 502 drivers/parport/share.c 		struct parport *p = list_entry(l, struct parport, full_list);
p                 503 drivers/parport/share.c 		if (p->number != num)
p                 189 drivers/pci/controller/pci-ftpci100.c static int faraday_raw_pci_read_config(struct faraday_pci *p, int bus_number,
p                 198 drivers/pci/controller/pci-ftpci100.c 			p->base + PCI_CONFIG);
p                 200 drivers/pci/controller/pci-ftpci100.c 	*value = readl(p->base + PCI_DATA);
p                 213 drivers/pci/controller/pci-ftpci100.c 	struct faraday_pci *p = bus->sysdata;
p                 219 drivers/pci/controller/pci-ftpci100.c 	return faraday_raw_pci_read_config(p, bus->number, fn, config, size, value);
p                 222 drivers/pci/controller/pci-ftpci100.c static int faraday_raw_pci_write_config(struct faraday_pci *p, int bus_number,
p                 233 drivers/pci/controller/pci-ftpci100.c 			p->base + PCI_CONFIG);
p                 237 drivers/pci/controller/pci-ftpci100.c 		writel(value, p->base + PCI_DATA);
p                 240 drivers/pci/controller/pci-ftpci100.c 		writew(value, p->base + PCI_DATA + (config & 3));
p                 243 drivers/pci/controller/pci-ftpci100.c 		writeb(value, p->base + PCI_DATA + (config & 3));
p                 255 drivers/pci/controller/pci-ftpci100.c 	struct faraday_pci *p = bus->sysdata;
p                 261 drivers/pci/controller/pci-ftpci100.c 	return faraday_raw_pci_write_config(p, bus->number, fn, config, size,
p                 272 drivers/pci/controller/pci-ftpci100.c 	struct faraday_pci *p = irq_data_get_irq_chip_data(d);
p                 275 drivers/pci/controller/pci-ftpci100.c 	faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
p                 278 drivers/pci/controller/pci-ftpci100.c 	faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg);
p                 283 drivers/pci/controller/pci-ftpci100.c 	struct faraday_pci *p = irq_data_get_irq_chip_data(d);
p                 286 drivers/pci/controller/pci-ftpci100.c 	faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
p                 289 drivers/pci/controller/pci-ftpci100.c 	faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg);
p                 294 drivers/pci/controller/pci-ftpci100.c 	struct faraday_pci *p = irq_data_get_irq_chip_data(d);
p                 297 drivers/pci/controller/pci-ftpci100.c 	faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
p                 300 drivers/pci/controller/pci-ftpci100.c 	faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg);
p                 305 drivers/pci/controller/pci-ftpci100.c 	struct faraday_pci *p = irq_desc_get_handler_data(desc);
p                 309 drivers/pci/controller/pci-ftpci100.c 	faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
p                 317 drivers/pci/controller/pci-ftpci100.c 		generic_handle_irq(irq_find_mapping(p->irqdomain, i));
p                 343 drivers/pci/controller/pci-ftpci100.c static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
p                 345 drivers/pci/controller/pci-ftpci100.c 	struct device_node *intc = of_get_next_child(p->dev->of_node, NULL);
p                 350 drivers/pci/controller/pci-ftpci100.c 		dev_err(p->dev, "missing child interrupt-controller node\n");
p                 357 drivers/pci/controller/pci-ftpci100.c 		dev_err(p->dev, "failed to get parent IRQ\n");
p                 362 drivers/pci/controller/pci-ftpci100.c 	p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX,
p                 363 drivers/pci/controller/pci-ftpci100.c 					     &faraday_pci_irqdomain_ops, p);
p                 365 drivers/pci/controller/pci-ftpci100.c 	if (!p->irqdomain) {
p                 366 drivers/pci/controller/pci-ftpci100.c 		dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
p                 370 drivers/pci/controller/pci-ftpci100.c 	irq_set_chained_handler_and_data(irq, faraday_pci_irq_handler, p);
p                 373 drivers/pci/controller/pci-ftpci100.c 		irq_create_mapping(p->irqdomain, i);
p                 378 drivers/pci/controller/pci-ftpci100.c static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
p                 383 drivers/pci/controller/pci-ftpci100.c 	struct device *dev = p->dev;
p                 414 drivers/pci/controller/pci-ftpci100.c 			faraday_raw_pci_write_config(p, 0, 0, confreg[i],
p                 435 drivers/pci/controller/pci-ftpci100.c 	struct faraday_pci *p;
p                 446 drivers/pci/controller/pci-ftpci100.c 	host = devm_pci_alloc_host_bridge(dev, sizeof(*p));
p                 456 drivers/pci/controller/pci-ftpci100.c 	p = pci_host_bridge_priv(host);
p                 457 drivers/pci/controller/pci-ftpci100.c 	host->sysdata = p;
p                 458 drivers/pci/controller/pci-ftpci100.c 	p->dev = dev;
p                 469 drivers/pci/controller/pci-ftpci100.c 	p->bus_clk = devm_clk_get(dev, "PCICLK");
p                 470 drivers/pci/controller/pci-ftpci100.c 	if (IS_ERR(p->bus_clk))
p                 471 drivers/pci/controller/pci-ftpci100.c 		return PTR_ERR(p->bus_clk);
p                 472 drivers/pci/controller/pci-ftpci100.c 	ret = clk_prepare_enable(p->bus_clk);
p                 479 drivers/pci/controller/pci-ftpci100.c 	p->base = devm_ioremap_resource(dev, regs);
p                 480 drivers/pci/controller/pci-ftpci100.c 	if (IS_ERR(p->base))
p                 481 drivers/pci/controller/pci-ftpci100.c 		return PTR_ERR(p->base);
p                 501 drivers/pci/controller/pci-ftpci100.c 				writel(val, p->base + PCI_IOSIZE);
p                 525 drivers/pci/controller/pci-ftpci100.c 	val = readl(p->base + PCI_CTRL);
p                 529 drivers/pci/controller/pci-ftpci100.c 	writel(val, p->base + PCI_CTRL);
p                 531 drivers/pci/controller/pci-ftpci100.c 	faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000);
p                 533 drivers/pci/controller/pci-ftpci100.c 		ret = faraday_pci_setup_cascaded_irq(p);
p                 541 drivers/pci/controller/pci-ftpci100.c 	if (!IS_ERR(p->bus_clk)) {
p                 545 drivers/pci/controller/pci-ftpci100.c 		faraday_raw_pci_read_config(p, 0, 0,
p                 547 drivers/pci/controller/pci-ftpci100.c 		rate = clk_get_rate(p->bus_clk);
p                 552 drivers/pci/controller/pci-ftpci100.c 			ret = clk_set_rate(p->bus_clk, 66000000);
p                 561 drivers/pci/controller/pci-ftpci100.c 		rate = clk_get_rate(p->bus_clk);
p                 568 drivers/pci/controller/pci-ftpci100.c 	ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node);
p                 578 drivers/pci/controller/pci-ftpci100.c 	p->bus = host->bus;
p                 579 drivers/pci/controller/pci-ftpci100.c 	p->bus->max_bus_speed = max_bus_speed;
p                 580 drivers/pci/controller/pci-ftpci100.c 	p->bus->cur_bus_speed = cur_bus_speed;
p                 582 drivers/pci/controller/pci-ftpci100.c 	pci_bus_assign_resources(p->bus);
p                 583 drivers/pci/controller/pci-ftpci100.c 	pci_bus_add_devices(p->bus);
p                  63 drivers/pci/hotplug/cpcihp_generic.c 	char *p;
p                  74 drivers/pci/hotplug/cpcihp_generic.c 	tmp = simple_strtoul(str, &p, 16);
p                  75 drivers/pci/hotplug/cpcihp_generic.c 	if (p == str || tmp > 0xff) {
p                  81 drivers/pci/hotplug/cpcihp_generic.c 	if (*p != ':') {
p                  85 drivers/pci/hotplug/cpcihp_generic.c 	str = p + 1;
p                  86 drivers/pci/hotplug/cpcihp_generic.c 	tmp = simple_strtoul(str, &p, 16);
p                  87 drivers/pci/hotplug/cpcihp_generic.c 	if (p == str || tmp > 0x1f) {
p                  43 drivers/pci/pci-stub.c 	char *p, *id;
p                  55 drivers/pci/pci-stub.c 	p = ids;
p                  56 drivers/pci/pci-stub.c 	while ((id = strsep(&p, ","))) {
p                 222 drivers/pci/pci.c 	char *wpath, *p;
p                 232 drivers/pci/pci.c 		p = strrchr(wpath, '/');
p                 233 drivers/pci/pci.c 		if (!p)
p                 235 drivers/pci/pci.c 		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
p                 258 drivers/pci/pci.c 		*p = 0;
p                 311 drivers/pci/pci.c static int pci_dev_str_match(struct pci_dev *dev, const char *p,
p                 318 drivers/pci/pci.c 	if (strncmp(p, "pci:", 4) == 0) {
p                 320 drivers/pci/pci.c 		p += 4;
p                 321 drivers/pci/pci.c 		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
p                 324 drivers/pci/pci.c 			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
p                 332 drivers/pci/pci.c 		p += count;
p                 346 drivers/pci/pci.c 		ret = pci_dev_str_match_path(dev, p, &p);
p                 353 drivers/pci/pci.c 	*endptr = p;
p                 357 drivers/pci/pci.c 	*endptr = p;
p                3184 drivers/pci/pci.c 	const char *p;
p                3191 drivers/pci/pci.c 	p = disable_acs_redir_param;
p                3192 drivers/pci/pci.c 	while (*p) {
p                3193 drivers/pci/pci.c 		ret = pci_dev_str_match(dev, p, &p);
p                3204 drivers/pci/pci.c 		if (*p != ';' && *p != ',') {
p                3208 drivers/pci/pci.c 		p++;
p                6114 drivers/pci/pci.c 	const char *p;
p                6118 drivers/pci/pci.c 	p = resource_alignment_param;
p                6119 drivers/pci/pci.c 	if (!p || !*p)
p                6127 drivers/pci/pci.c 	while (*p) {
p                6129 drivers/pci/pci.c 		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
p                6130 drivers/pci/pci.c 							p[count] == '@') {
p                6131 drivers/pci/pci.c 			p += count + 1;
p                6136 drivers/pci/pci.c 		ret = pci_dev_str_match(dev, p, &p);
p                6146 drivers/pci/pci.c 			       p);
p                6150 drivers/pci/pci.c 		if (*p != ';' && *p != ',') {
p                6154 drivers/pci/pci.c 		p++;
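Both the disable_acs_redir and resource_alignment walkers in pci.c above lean on sscanf()'s %n conversion to learn how many characters were consumed, so the cursor can be advanced past an optional "<order>@" prefix before the device specifier, then stepped over ';' or ',' separators. A small standalone illustration of the %n idiom; the input string and variable names here are invented:

#include <stdio.h>

int main(void)
{
	const char *p = "12@0000:00:1f.0";
	int align_order = 0;
	int count = 0;

	/* %n stores how many characters the earlier conversions consumed. */
	if (sscanf(p, "%d%n", &align_order, &count) == 1 && p[count] == '@') {
		p += count + 1;		/* skip the "<order>@" prefix */
		printf("alignment order %d, specifier \"%s\"\n", align_order, p);
	} else {
		printf("no alignment prefix, specifier \"%s\"\n", p);
	}
	return 0;
}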
p                 218 drivers/pci/pcie/aer.c static inline int hest_match_pci(struct acpi_hest_aer_common *p,
p                 221 drivers/pci/pcie/aer.c 	return   ACPI_HEST_SEGMENT(p->bus) == pci_domain_nr(pci->bus) &&
p                 222 drivers/pci/pcie/aer.c 		 ACPI_HEST_BUS(p->bus)     == pci->bus->number &&
p                 223 drivers/pci/pcie/aer.c 		 p->device                 == PCI_SLOT(pci->devfn) &&
p                 224 drivers/pci/pcie/aer.c 		 p->function               == PCI_FUNC(pci->devfn);
p                 260 drivers/pci/pcie/aer.c 	struct acpi_hest_aer_common *p;
p                 266 drivers/pci/pcie/aer.c 	p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
p                 267 drivers/pci/pcie/aer.c 	ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
p                 279 drivers/pci/pcie/aer.c 	if (p->flags & ACPI_HEST_GLOBAL) {
p                 283 drivers/pci/pcie/aer.c 		if (hest_match_pci(p, info->pci_dev))
p                 340 drivers/pci/quirks.c 	struct pci_dev *p;
p                 347 drivers/pci/quirks.c 	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
p                 348 drivers/pci/quirks.c 	if (p != NULL) {
p                 355 drivers/pci/quirks.c 		if (p->revision < 0x40 || p->revision > 0x42)
p                 358 drivers/pci/quirks.c 		p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
p                 359 drivers/pci/quirks.c 		if (p == NULL)	/* No problem parts */
p                 363 drivers/pci/quirks.c 		if (p->revision < 0x10 || p->revision > 0x12)
p                 390 drivers/pci/quirks.c 	pci_dev_put(p);
p                2415 drivers/pci/quirks.c 	struct pci_dev *p = pci_get_device(PCI_VENDOR_ID_VIA,
p                2423 drivers/pci/quirks.c 	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p);
p                2424 drivers/pci/quirks.c 	if (!p)
p                2426 drivers/pci/quirks.c 	pci_dev_put(p);
p                2957 drivers/pci/quirks.c 	struct pci_dev *p;
p                2964 drivers/pci/quirks.c 	p = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
p                2966 drivers/pci/quirks.c 	if (!p)
p                2969 drivers/pci/quirks.c 	if ((p->revision < 0x3B) && (p->revision >= 0x30))
p                2971 drivers/pci/quirks.c 	pci_dev_put(p);
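The quirks.c excerpts above search for a companion device with pci_get_device() and always balance the reference with pci_dev_put(), including on the revision-check bail-out paths. A hedged sketch of that lookup-and-release shape; the device ID and the revision window are placeholders, not a statement about any real quirk:

#include <linux/pci.h>

static bool companion_needs_quirk(void)
{
	struct pci_dev *p;
	bool ret = false;

	/* pci_get_device() returns a referenced device, or NULL. */
	p = pci_get_device(PCI_VENDOR_ID_VIA, 0x0686 /* placeholder ID */, NULL);
	if (!p)
		return false;

	if (p->revision >= 0x40 && p->revision <= 0x42)
		ret = true;

	pci_dev_put(p);		/* drop the reference taken by pci_get_device() */
	return ret;
}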
p                5010 drivers/pci/quirks.c 	const struct pci_dev_acs_ops *p;
p                5014 drivers/pci/quirks.c 		p = &pci_dev_acs_ops[i];
p                5015 drivers/pci/quirks.c 		if ((p->vendor == dev->vendor ||
p                5016 drivers/pci/quirks.c 		     p->vendor == (u16)PCI_ANY_ID) &&
p                5017 drivers/pci/quirks.c 		    (p->device == dev->device ||
p                5018 drivers/pci/quirks.c 		     p->device == (u16)PCI_ANY_ID) &&
p                5019 drivers/pci/quirks.c 		    p->enable_acs) {
p                5020 drivers/pci/quirks.c 			ret = p->enable_acs(dev);
p                5031 drivers/pci/quirks.c 	const struct pci_dev_acs_ops *p;
p                5035 drivers/pci/quirks.c 		p = &pci_dev_acs_ops[i];
p                5036 drivers/pci/quirks.c 		if ((p->vendor == dev->vendor ||
p                5037 drivers/pci/quirks.c 		     p->vendor == (u16)PCI_ANY_ID) &&
p                5038 drivers/pci/quirks.c 		    (p->device == dev->device ||
p                5039 drivers/pci/quirks.c 		     p->device == (u16)PCI_ANY_ID) &&
p                5040 drivers/pci/quirks.c 		    p->disable_acs_redir) {
p                5041 drivers/pci/quirks.c 			ret = p->disable_acs_redir(dev);
p                 896 drivers/pci/switch/switchtec.c 	struct switchtec_ioctl_pff_port p;
p                 898 drivers/pci/switch/switchtec.c 	if (copy_from_user(&p, up, sizeof(p)))
p                 901 drivers/pci/switch/switchtec.c 	p.port = -1;
p                 904 drivers/pci/switch/switchtec.c 		p.partition = part;
p                 907 drivers/pci/switch/switchtec.c 		if (reg == p.pff) {
p                 908 drivers/pci/switch/switchtec.c 			p.port = 0;
p                 913 drivers/pci/switch/switchtec.c 		if (reg == p.pff) {
p                 914 drivers/pci/switch/switchtec.c 			p.port = SWITCHTEC_IOCTL_PFF_VEP;
p                 920 drivers/pci/switch/switchtec.c 			if (reg != p.pff)
p                 923 drivers/pci/switch/switchtec.c 			p.port = i + 1;
p                 927 drivers/pci/switch/switchtec.c 		if (p.port != -1)
p                 931 drivers/pci/switch/switchtec.c 	if (copy_to_user(up, &p, sizeof(p)))
p                 940 drivers/pci/switch/switchtec.c 	struct switchtec_ioctl_pff_port p;
p                 943 drivers/pci/switch/switchtec.c 	if (copy_from_user(&p, up, sizeof(p)))
p                 946 drivers/pci/switch/switchtec.c 	if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
p                 948 drivers/pci/switch/switchtec.c 	else if (p.partition < stdev->partition_count)
p                 949 drivers/pci/switch/switchtec.c 		pcfg = &stdev->mmio_part_cfg_all[p.partition];
p                 953 drivers/pci/switch/switchtec.c 	switch (p.port) {
p                 955 drivers/pci/switch/switchtec.c 		p.pff = ioread32(&pcfg->usp_pff_inst_id);
p                 958 drivers/pci/switch/switchtec.c 		p.pff = ioread32(&pcfg->vep_pff_inst_id);
p                 961 drivers/pci/switch/switchtec.c 		if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
p                 963 drivers/pci/switch/switchtec.c 		p.port = array_index_nospec(p.port,
p                 965 drivers/pci/switch/switchtec.c 		p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
p                 969 drivers/pci/switch/switchtec.c 	if (copy_to_user(up, &p, sizeof(p)))
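The switchtec ioctl above bounds-checks the user-supplied p.port and then passes it through array_index_nospec() before using it as an array index, so the value is also clamped under speculative execution (the usual Spectre-v1 hardening for user-controlled indices). A reduced sketch of that shape with a hypothetical handler and register table; only the check/clamp/use ordering is the point:

#include <linux/nospec.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#define NREGS	48			/* hypothetical table size */

static u32 regs[NREGS];			/* hypothetical table indexed from user input */

static long my_ioctl_read_reg(unsigned int idx, u32 __user *out)
{
	u32 val;

	if (idx >= NREGS)
		return -EINVAL;
	/*
	 * Clamp idx for speculation as well: the branch above rejects
	 * out-of-range values architecturally, but the CPU may still
	 * speculate past it.
	 */
	idx = array_index_nospec(idx, NREGS);
	val = regs[idx];

	if (copy_to_user(out, &val, sizeof(val)))
		return -EFAULT;
	return 0;
}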
p                 662 drivers/pcmcia/cistpl.c 	u_char *p, *q;
p                 664 drivers/pcmcia/cistpl.c 	p = (u_char *)tuple->TupleData;
p                 665 drivers/pcmcia/cistpl.c 	q = p + tuple->TupleDataLen;
p                 670 drivers/pcmcia/cistpl.c 		if (*p == 0xff)
p                 672 drivers/pcmcia/cistpl.c 		device->dev[i].type = (*p >> 4);
p                 673 drivers/pcmcia/cistpl.c 		device->dev[i].wp = (*p & 0x08) ? 1 : 0;
p                 674 drivers/pcmcia/cistpl.c 		switch (*p & 0x07) {
p                 691 drivers/pcmcia/cistpl.c 			if (++p == q)
p                 693 drivers/pcmcia/cistpl.c 			device->dev[i].speed = SPEED_CVT(*p);
p                 694 drivers/pcmcia/cistpl.c 			while (*p & 0x80)
p                 695 drivers/pcmcia/cistpl.c 				if (++p == q)
p                 702 drivers/pcmcia/cistpl.c 		if (++p == q)
p                 704 drivers/pcmcia/cistpl.c 		if (*p == 0xff)
p                 706 drivers/pcmcia/cistpl.c 		scale = *p & 7;
p                 709 drivers/pcmcia/cistpl.c 		device->dev[i].size = ((*p >> 3) + 1) * (512 << (scale*2));
p                 711 drivers/pcmcia/cistpl.c 		if (++p == q)
p                 721 drivers/pcmcia/cistpl.c 	u_char *p;
p                 724 drivers/pcmcia/cistpl.c 	p = (u_char *) tuple->TupleData;
p                 725 drivers/pcmcia/cistpl.c 	csum->addr = tuple->CISOffset + get_unaligned_le16(p) - 2;
p                 726 drivers/pcmcia/cistpl.c 	csum->len = get_unaligned_le16(p + 2);
p                 727 drivers/pcmcia/cistpl.c 	csum->sum = *(p + 4);
p                 743 drivers/pcmcia/cistpl.c 	u_char *p;
p                 746 drivers/pcmcia/cistpl.c 	p = (u_char *)tuple->TupleData;
p                 748 drivers/pcmcia/cistpl.c 	link->nfn = *p; p++;
p                 752 drivers/pcmcia/cistpl.c 		link->fn[i].space = *p; p++;
p                 753 drivers/pcmcia/cistpl.c 		link->fn[i].addr = get_unaligned_le32(p);
p                 754 drivers/pcmcia/cistpl.c 		p += 4;
p                 760 drivers/pcmcia/cistpl.c static int parse_strings(u_char *p, u_char *q, int max,
p                 765 drivers/pcmcia/cistpl.c 	if (p == q)
p                 769 drivers/pcmcia/cistpl.c 		if (*p == 0xff)
p                 774 drivers/pcmcia/cistpl.c 			s[j++] = (*p == 0xff) ? '\0' : *p;
p                 775 drivers/pcmcia/cistpl.c 			if ((*p == '\0') || (*p == 0xff))
p                 777 drivers/pcmcia/cistpl.c 			if (++p == q)
p                 780 drivers/pcmcia/cistpl.c 		if ((*p == 0xff) || (++p == q))
p                 794 drivers/pcmcia/cistpl.c 	u_char *p, *q;
p                 796 drivers/pcmcia/cistpl.c 	p = (u_char *)tuple->TupleData;
p                 797 drivers/pcmcia/cistpl.c 	q = p + tuple->TupleDataLen;
p                 799 drivers/pcmcia/cistpl.c 	vers_1->major = *p; p++;
p                 800 drivers/pcmcia/cistpl.c 	vers_1->minor = *p; p++;
p                 801 drivers/pcmcia/cistpl.c 	if (p >= q)
p                 804 drivers/pcmcia/cistpl.c 	return parse_strings(p, q, CISTPL_VERS_1_MAX_PROD_STRINGS,
p                 811 drivers/pcmcia/cistpl.c 	u_char *p, *q;
p                 813 drivers/pcmcia/cistpl.c 	p = (u_char *)tuple->TupleData;
p                 814 drivers/pcmcia/cistpl.c 	q = p + tuple->TupleDataLen;
p                 816 drivers/pcmcia/cistpl.c 	return parse_strings(p, q, CISTPL_MAX_ALTSTR_STRINGS,
p                 823 drivers/pcmcia/cistpl.c 	u_char *p, *q;
p                 826 drivers/pcmcia/cistpl.c 	p = (u_char *)tuple->TupleData;
p                 827 drivers/pcmcia/cistpl.c 	q = p + tuple->TupleDataLen;
p                 830 drivers/pcmcia/cistpl.c 		if (p > q-2)
p                 832 drivers/pcmcia/cistpl.c 		jedec->id[nid].mfr = p[0];
p                 833 drivers/pcmcia/cistpl.c 		jedec->id[nid].info = p[1];
p                 834 drivers/pcmcia/cistpl.c 		p += 2;
p                 853 drivers/pcmcia/cistpl.c 	u_char *p;
p                 856 drivers/pcmcia/cistpl.c 	p = (u_char *)tuple->TupleData;
p                 857 drivers/pcmcia/cistpl.c 	f->func = p[0];
p                 858 drivers/pcmcia/cistpl.c 	f->sysinit = p[1];
p                 865 drivers/pcmcia/cistpl.c 	u_char *p;
p                 869 drivers/pcmcia/cistpl.c 	p = (u_char *)tuple->TupleData;
p                 870 drivers/pcmcia/cistpl.c 	f->type = p[0];
p                 872 drivers/pcmcia/cistpl.c 		f->data[i-1] = p[i];
p                 880 drivers/pcmcia/cistpl.c 	u_char *p;
p                 882 drivers/pcmcia/cistpl.c 	p = (u_char *)tuple->TupleData;
p                 883 drivers/pcmcia/cistpl.c 	rasz = *p & 0x03;
p                 884 drivers/pcmcia/cistpl.c 	rmsz = (*p & 0x3c) >> 2;
p                 887 drivers/pcmcia/cistpl.c 	config->last_idx = *(++p);
p                 888 drivers/pcmcia/cistpl.c 	p++;
p                 891 drivers/pcmcia/cistpl.c 		config->base += p[i] << (8*i);
p                 892 drivers/pcmcia/cistpl.c 	p += rasz+1;
p                 896 drivers/pcmcia/cistpl.c 		config->rmask[i>>2] += p[i] << (8*(i%4));
p                 905 drivers/pcmcia/cistpl.c static u_char *parse_power(u_char *p, u_char *q, cistpl_power_t *pwr)
p                 910 drivers/pcmcia/cistpl.c 	if (p == q)
p                 912 drivers/pcmcia/cistpl.c 	pwr->present = *p;
p                 914 drivers/pcmcia/cistpl.c 	p++;
p                 917 drivers/pcmcia/cistpl.c 			if (p == q)
p                 919 drivers/pcmcia/cistpl.c 			pwr->param[i] = POWER_CVT(*p);
p                 920 drivers/pcmcia/cistpl.c 			scale = POWER_SCALE(*p);
p                 921 drivers/pcmcia/cistpl.c 			while (*p & 0x80) {
p                 922 drivers/pcmcia/cistpl.c 				if (++p == q)
p                 924 drivers/pcmcia/cistpl.c 				if ((*p & 0x7f) < 100)
p                 926 drivers/pcmcia/cistpl.c 						(*p & 0x7f) * scale / 100;
p                 927 drivers/pcmcia/cistpl.c 				else if (*p == 0x7d)
p                 929 drivers/pcmcia/cistpl.c 				else if (*p == 0x7e)
p                 931 drivers/pcmcia/cistpl.c 				else if (*p == 0x7f)
p                 936 drivers/pcmcia/cistpl.c 			p++;
p                 938 drivers/pcmcia/cistpl.c 	return p;
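parse_power() above walks a CIS byte stream in which bit 7 of a value byte (0x80) means "another extension byte follows", and every advance is bounds-checked against the end pointer q, returning NULL on a truncated tuple. A minimal standalone helper with the same skip-and-check shape; the name and signature are illustrative, not part of the pcmcia API:

#include <stddef.h>

/*
 * Skip one value byte plus any extension bytes (top bit set means one
 * more byte follows).  Returns the next unread position, or NULL if the
 * data ends in the middle of the value.
 */
static const unsigned char *skip_extended(const unsigned char *p,
					  const unsigned char *q)
{
	if (p == q)
		return NULL;
	while (*p & 0x80) {
		if (++p == q)
			return NULL;
	}
	return p + 1;
}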
p                 942 drivers/pcmcia/cistpl.c static u_char *parse_timing(u_char *p, u_char *q, cistpl_timing_t *timing)
p                 946 drivers/pcmcia/cistpl.c 	if (p == q)
p                 948 drivers/pcmcia/cistpl.c 	scale = *p;
p                 950 drivers/pcmcia/cistpl.c 		if (++p == q)
p                 952 drivers/pcmcia/cistpl.c 		timing->wait = SPEED_CVT(*p);
p                 958 drivers/pcmcia/cistpl.c 		if (++p == q)
p                 960 drivers/pcmcia/cistpl.c 		timing->ready = SPEED_CVT(*p);
p                 966 drivers/pcmcia/cistpl.c 		if (++p == q)
p                 968 drivers/pcmcia/cistpl.c 		timing->reserved = SPEED_CVT(*p);
p                 972 drivers/pcmcia/cistpl.c 	p++;
p                 973 drivers/pcmcia/cistpl.c 	return p;
p                 977 drivers/pcmcia/cistpl.c static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io)
p                 981 drivers/pcmcia/cistpl.c 	if (p == q)
p                 983 drivers/pcmcia/cistpl.c 	io->flags = *p;
p                 985 drivers/pcmcia/cistpl.c 	if (!(*p & 0x80)) {
p                 989 drivers/pcmcia/cistpl.c 		return p+1;
p                 992 drivers/pcmcia/cistpl.c 	if (++p == q)
p                 994 drivers/pcmcia/cistpl.c 	io->nwin = (*p & 0x0f) + 1;
p                 995 drivers/pcmcia/cistpl.c 	bsz = (*p & 0x30) >> 4;
p                 998 drivers/pcmcia/cistpl.c 	lsz = (*p & 0xc0) >> 6;
p                1001 drivers/pcmcia/cistpl.c 	p++;
p                1006 drivers/pcmcia/cistpl.c 		for (j = 0; j < bsz; j++, p++) {
p                1007 drivers/pcmcia/cistpl.c 			if (p == q)
p                1009 drivers/pcmcia/cistpl.c 			io->win[i].base += *p << (j*8);
p                1011 drivers/pcmcia/cistpl.c 		for (j = 0; j < lsz; j++, p++) {
p                1012 drivers/pcmcia/cistpl.c 			if (p == q)
p                1014 drivers/pcmcia/cistpl.c 			io->win[i].len += *p << (j*8);
p                1017 drivers/pcmcia/cistpl.c 	return p;
p                1021 drivers/pcmcia/cistpl.c static u_char *parse_mem(u_char *p, u_char *q, cistpl_mem_t *mem)
p                1026 drivers/pcmcia/cistpl.c 	if (p == q)
p                1029 drivers/pcmcia/cistpl.c 	mem->nwin = (*p & 0x07) + 1;
p                1030 drivers/pcmcia/cistpl.c 	lsz = (*p & 0x18) >> 3;
p                1031 drivers/pcmcia/cistpl.c 	asz = (*p & 0x60) >> 5;
p                1032 drivers/pcmcia/cistpl.c 	has_ha = (*p & 0x80);
p                1033 drivers/pcmcia/cistpl.c 	if (++p == q)
p                1038 drivers/pcmcia/cistpl.c 		for (j = 0; j < lsz; j++, p++) {
p                1039 drivers/pcmcia/cistpl.c 			if (p == q)
p                1041 drivers/pcmcia/cistpl.c 			len += *p << (j*8);
p                1043 drivers/pcmcia/cistpl.c 		for (j = 0; j < asz; j++, p++) {
p                1044 drivers/pcmcia/cistpl.c 			if (p == q)
p                1046 drivers/pcmcia/cistpl.c 			ca += *p << (j*8);
p                1049 drivers/pcmcia/cistpl.c 			for (j = 0; j < asz; j++, p++) {
p                1050 drivers/pcmcia/cistpl.c 				if (p == q)
p                1052 drivers/pcmcia/cistpl.c 				ha += *p << (j*8);
p                1058 drivers/pcmcia/cistpl.c 	return p;
p                1062 drivers/pcmcia/cistpl.c static u_char *parse_irq(u_char *p, u_char *q, cistpl_irq_t *irq)
p                1064 drivers/pcmcia/cistpl.c 	if (p == q)
p                1066 drivers/pcmcia/cistpl.c 	irq->IRQInfo1 = *p; p++;
p                1068 drivers/pcmcia/cistpl.c 		if (p+2 > q)
p                1070 drivers/pcmcia/cistpl.c 		irq->IRQInfo2 = (p[1]<<8) + p[0];
p                1071 drivers/pcmcia/cistpl.c 		p += 2;
p                1073 drivers/pcmcia/cistpl.c 	return p;
p                1080 drivers/pcmcia/cistpl.c 	u_char *p, *q, features;
p                1082 drivers/pcmcia/cistpl.c 	p = tuple->TupleData;
p                1083 drivers/pcmcia/cistpl.c 	q = p + tuple->TupleDataLen;
p                1084 drivers/pcmcia/cistpl.c 	entry->index = *p & 0x3f;
p                1086 drivers/pcmcia/cistpl.c 	if (*p & 0x40)
p                1088 drivers/pcmcia/cistpl.c 	if (*p & 0x80) {
p                1089 drivers/pcmcia/cistpl.c 		if (++p == q)
p                1091 drivers/pcmcia/cistpl.c 		if (*p & 0x10)
p                1093 drivers/pcmcia/cistpl.c 		if (*p & 0x20)
p                1095 drivers/pcmcia/cistpl.c 		if (*p & 0x40)
p                1097 drivers/pcmcia/cistpl.c 		if (*p & 0x80)
p                1099 drivers/pcmcia/cistpl.c 		entry->interface = *p & 0x0f;
p                1104 drivers/pcmcia/cistpl.c 	if (++p == q)
p                1106 drivers/pcmcia/cistpl.c 	features = *p; p++;
p                1110 drivers/pcmcia/cistpl.c 		p = parse_power(p, q, &entry->vcc);
p                1111 drivers/pcmcia/cistpl.c 		if (p == NULL)
p                1116 drivers/pcmcia/cistpl.c 		p = parse_power(p, q, &entry->vpp1);
p                1117 drivers/pcmcia/cistpl.c 		if (p == NULL)
p                1122 drivers/pcmcia/cistpl.c 		p = parse_power(p, q, &entry->vpp2);
p                1123 drivers/pcmcia/cistpl.c 		if (p == NULL)
p                1130 drivers/pcmcia/cistpl.c 		p = parse_timing(p, q, &entry->timing);
p                1131 drivers/pcmcia/cistpl.c 		if (p == NULL)
p                1141 drivers/pcmcia/cistpl.c 		p = parse_io(p, q, &entry->io);
p                1142 drivers/pcmcia/cistpl.c 		if (p == NULL)
p                1149 drivers/pcmcia/cistpl.c 		p = parse_irq(p, q, &entry->irq);
p                1150 drivers/pcmcia/cistpl.c 		if (p == NULL)
p                1161 drivers/pcmcia/cistpl.c 		entry->mem.win[0].len = get_unaligned_le16(p) << 8;
p                1164 drivers/pcmcia/cistpl.c 		p += 2;
p                1165 drivers/pcmcia/cistpl.c 		if (p > q)
p                1170 drivers/pcmcia/cistpl.c 		entry->mem.win[0].len = get_unaligned_le16(p) << 8;
p                1171 drivers/pcmcia/cistpl.c 		entry->mem.win[0].card_addr = get_unaligned_le16(p + 2) << 8;
p                1173 drivers/pcmcia/cistpl.c 		p += 4;
p                1174 drivers/pcmcia/cistpl.c 		if (p > q)
p                1178 drivers/pcmcia/cistpl.c 		p = parse_mem(p, q, &entry->mem);
p                1179 drivers/pcmcia/cistpl.c 		if (p == NULL)
p                1186 drivers/pcmcia/cistpl.c 		if (p == q)
p                1188 drivers/pcmcia/cistpl.c 		entry->flags |= (*p << 8);
p                1189 drivers/pcmcia/cistpl.c 		while (*p & 0x80)
p                1190 drivers/pcmcia/cistpl.c 			if (++p == q)
p                1192 drivers/pcmcia/cistpl.c 		p++;
p                1195 drivers/pcmcia/cistpl.c 	entry->subtuples = q-p;
p                1203 drivers/pcmcia/cistpl.c 	u_char *p, *q;
p                1206 drivers/pcmcia/cistpl.c 	p = (u_char *)tuple->TupleData;
p                1207 drivers/pcmcia/cistpl.c 	q = p + tuple->TupleDataLen;
p                1210 drivers/pcmcia/cistpl.c 		if (p > q-6)
p                1212 drivers/pcmcia/cistpl.c 		geo->geo[n].buswidth = p[0];
p                1213 drivers/pcmcia/cistpl.c 		geo->geo[n].erase_block = 1 << (p[1]-1);
p                1214 drivers/pcmcia/cistpl.c 		geo->geo[n].read_block  = 1 << (p[2]-1);
p                1215 drivers/pcmcia/cistpl.c 		geo->geo[n].write_block = 1 << (p[3]-1);
p                1216 drivers/pcmcia/cistpl.c 		geo->geo[n].partition   = 1 << (p[4]-1);
p                1217 drivers/pcmcia/cistpl.c 		geo->geo[n].interleave  = 1 << (p[5]-1);
p                1218 drivers/pcmcia/cistpl.c 		p += 6;
p                1227 drivers/pcmcia/cistpl.c 	u_char *p, *q;
p                1232 drivers/pcmcia/cistpl.c 	p = tuple->TupleData;
p                1233 drivers/pcmcia/cistpl.c 	q = p + tuple->TupleDataLen;
p                1235 drivers/pcmcia/cistpl.c 	v2->vers = p[0];
p                1236 drivers/pcmcia/cistpl.c 	v2->comply = p[1];
p                1237 drivers/pcmcia/cistpl.c 	v2->dindex = get_unaligned_le16(p + 2);
p                1238 drivers/pcmcia/cistpl.c 	v2->vspec8 = p[6];
p                1239 drivers/pcmcia/cistpl.c 	v2->vspec9 = p[7];
p                1240 drivers/pcmcia/cistpl.c 	v2->nhdr = p[8];
p                1241 drivers/pcmcia/cistpl.c 	p += 9;
p                1242 drivers/pcmcia/cistpl.c 	return parse_strings(p, q, 2, v2->str, &v2->vendor, NULL);
p                1248 drivers/pcmcia/cistpl.c 	u_char *p, *q;
p                1251 drivers/pcmcia/cistpl.c 	p = tuple->TupleData;
p                1252 drivers/pcmcia/cistpl.c 	q = p + tuple->TupleDataLen;
p                1253 drivers/pcmcia/cistpl.c 	if (p == q)
p                1255 drivers/pcmcia/cistpl.c 	org->data_org = *p;
p                1256 drivers/pcmcia/cistpl.c 	if (++p == q)
p                1259 drivers/pcmcia/cistpl.c 		org->desc[i] = *p;
p                1260 drivers/pcmcia/cistpl.c 		if (*p == '\0')
p                1262 drivers/pcmcia/cistpl.c 		if (++p == q)
p                1271 drivers/pcmcia/cistpl.c 	u_char *p;
p                1276 drivers/pcmcia/cistpl.c 	p = tuple->TupleData;
p                1278 drivers/pcmcia/cistpl.c 	fmt->type = p[0];
p                1279 drivers/pcmcia/cistpl.c 	fmt->edc = p[1];
p                1280 drivers/pcmcia/cistpl.c 	fmt->offset = get_unaligned_le32(p + 2);
p                1281 drivers/pcmcia/cistpl.c 	fmt->length = get_unaligned_le32(p + 6);
p                1378 drivers/pcmcia/cistpl.c 	cisparse_t *p;
p                1400 drivers/pcmcia/cistpl.c 	p = kmalloc(sizeof(*p), GFP_KERNEL);
p                1401 drivers/pcmcia/cistpl.c 	if (p == NULL) {
p                1417 drivers/pcmcia/cistpl.c 	    (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_CFTABLE_ENTRY, p)) ||
p                1418 drivers/pcmcia/cistpl.c 	    (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_CFTABLE_ENTRY_CB, p)))
p                1424 drivers/pcmcia/cistpl.c 	if ((pccard_read_tuple(s, BIND_FN_ALL, CISTPL_MANFID, p) == 0) ||
p                1425 drivers/pcmcia/cistpl.c 	    (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_VERS_1, p) == 0) ||
p                1426 drivers/pcmcia/cistpl.c 	    (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_VERS_2, p) != -ENOSPC))
p                1465 drivers/pcmcia/cistpl.c 	kfree(p);
p                 248 drivers/pcmcia/db1xxx_ss.c 	int v, p, ret;
p                 253 drivers/pcmcia/db1xxx_ss.c 	v = p = ret = 0;
p                 271 drivers/pcmcia/db1xxx_ss.c 		++p;
p                 275 drivers/pcmcia/db1xxx_ss.c 		++p;
p                 289 drivers/pcmcia/db1xxx_ss.c 		v = p = 0;
p                 295 drivers/pcmcia/db1xxx_ss.c 		cr_set |= ((v << 2) | p) << (sock->nr * 8);
p                 297 drivers/pcmcia/i82365.c     struct cirrus_state *p = &socket[s].state.cirrus;
p                 298 drivers/pcmcia/i82365.c     p->misc1 = i365_get(s, PD67_MISC_CTL_1);
p                 299 drivers/pcmcia/i82365.c     p->misc1 &= (PD67_MC1_MEDIA_ENA | PD67_MC1_INPACK_ENA);
p                 300 drivers/pcmcia/i82365.c     p->misc2 = i365_get(s, PD67_MISC_CTL_2);
p                 302 drivers/pcmcia/i82365.c 	p->timer[i] = i365_get(s, PD67_TIME_SETUP(0)+i);
p                 309 drivers/pcmcia/i82365.c     struct cirrus_state *p = &socket[s].state.cirrus;
p                 312 drivers/pcmcia/i82365.c     i365_set(s, PD67_MISC_CTL_2, p->misc2);
p                 316 drivers/pcmcia/i82365.c     i365_set(s, PD67_MISC_CTL_1, misc | p->misc1);
p                 318 drivers/pcmcia/i82365.c 	i365_set(s, PD67_TIME_SETUP(0)+i, p->timer[i]);
p                 324 drivers/pcmcia/i82365.c     struct cirrus_state *p = &socket[s].state.cirrus;
p                 328 drivers/pcmcia/i82365.c     flip(p->misc2, PD67_MC2_IRQ15_RI, has_ring);
p                 329 drivers/pcmcia/i82365.c     flip(p->misc2, PD67_MC2_DYNAMIC_MODE, dynamic_mode);
p                 330 drivers/pcmcia/i82365.c     flip(p->misc2, PD67_MC2_FREQ_BYPASS, freq_bypass);
p                 331 drivers/pcmcia/i82365.c     if (p->misc2 & PD67_MC2_IRQ15_RI)
p                 333 drivers/pcmcia/i82365.c     if (p->misc2 & PD67_MC2_DYNAMIC_MODE)
p                 335 drivers/pcmcia/i82365.c     if (p->misc2 & PD67_MC2_FREQ_BYPASS)
p                 337 drivers/pcmcia/i82365.c     if (p->misc1 & PD67_MC1_INPACK_ENA)
p                 339 drivers/pcmcia/i82365.c     if (p->misc2 & PD67_MC2_IRQ15_RI)
p                 351 drivers/pcmcia/i82365.c 	    p->timer[0] = p->timer[3] = setup_time;
p                 353 drivers/pcmcia/i82365.c 	    p->timer[1] = cmd_time;
p                 354 drivers/pcmcia/i82365.c 	    p->timer[4] = cmd_time*2+4;
p                 356 drivers/pcmcia/i82365.c 	if (p->timer[1] == 0) {
p                 357 drivers/pcmcia/i82365.c 	    p->timer[1] = 6; p->timer[4] = 16;
p                 358 drivers/pcmcia/i82365.c 	    if (p->timer[0] == 0)
p                 359 drivers/pcmcia/i82365.c 		p->timer[0] = p->timer[3] = 1;
p                 362 drivers/pcmcia/i82365.c 	    p->timer[2] = p->timer[5] = recov_time;
p                 364 drivers/pcmcia/i82365.c 	sprintf(buf, " [%d/%d/%d] [%d/%d/%d]", p->timer[0], p->timer[1],
p                 365 drivers/pcmcia/i82365.c 		p->timer[2], p->timer[3], p->timer[4], p->timer[5]);
p                 380 drivers/pcmcia/i82365.c     struct vg46x_state *p = &socket[s].state.vg46x;
p                 381 drivers/pcmcia/i82365.c     p->ctl = i365_get(s, VG468_CTL);
p                 383 drivers/pcmcia/i82365.c 	p->ema = i365_get(s, VG469_EXT_MODE);
p                 388 drivers/pcmcia/i82365.c     struct vg46x_state *p = &socket[s].state.vg46x;
p                 389 drivers/pcmcia/i82365.c     i365_set(s, VG468_CTL, p->ctl);
p                 391 drivers/pcmcia/i82365.c 	i365_set(s, VG469_EXT_MODE, p->ema);
p                 396 drivers/pcmcia/i82365.c     struct vg46x_state *p = &socket[s].state.vg46x;
p                 398 drivers/pcmcia/i82365.c     flip(p->ctl, VG468_CTL_ASYNC, async_clock);
p                 399 drivers/pcmcia/i82365.c     flip(p->ema, VG469_MODE_CABLE, cable_mode);
p                 400 drivers/pcmcia/i82365.c     if (p->ctl & VG468_CTL_ASYNC)
p                 402 drivers/pcmcia/i82365.c     if (p->ctl & VG468_CTL_INPACK)
p                 411 drivers/pcmcia/i82365.c 	if (p->ema & VG469_MODE_CABLE)
p                 413 drivers/pcmcia/i82365.c 	if (p->ema & VG469_MODE_COMPAT)
p                 110 drivers/pcmcia/rsrc_nonstatic.c 	struct resource_map *p, *q;
p                 112 drivers/pcmcia/rsrc_nonstatic.c 	for (p = map; ; p = p->next) {
p                 113 drivers/pcmcia/rsrc_nonstatic.c 		if ((p != map) && (p->base+p->num >= base)) {
p                 114 drivers/pcmcia/rsrc_nonstatic.c 			p->num = max(num + base - p->base, p->num);
p                 117 drivers/pcmcia/rsrc_nonstatic.c 		if ((p->next == map) || (p->next->base > base+num-1))
p                 126 drivers/pcmcia/rsrc_nonstatic.c 	q->next = p->next; p->next = q;
p                 134 drivers/pcmcia/rsrc_nonstatic.c 	struct resource_map *p, *q;
p                 136 drivers/pcmcia/rsrc_nonstatic.c 	for (p = map; ; p = q) {
p                 137 drivers/pcmcia/rsrc_nonstatic.c 		q = p->next;
p                 144 drivers/pcmcia/rsrc_nonstatic.c 					p->next = q->next;
p                 147 drivers/pcmcia/rsrc_nonstatic.c 					q = p;
p                 158 drivers/pcmcia/rsrc_nonstatic.c 				p = kmalloc(sizeof(struct resource_map),
p                 160 drivers/pcmcia/rsrc_nonstatic.c 				if (!p) {
p                 164 drivers/pcmcia/rsrc_nonstatic.c 				p->base = base+num;
p                 165 drivers/pcmcia/rsrc_nonstatic.c 				p->num = q->base+q->num - p->base;
p                 167 drivers/pcmcia/rsrc_nonstatic.c 				p->next = q->next ; q->next = p;
p                1036 drivers/pcmcia/rsrc_nonstatic.c 	struct resource_map *p, *q;
p                1038 drivers/pcmcia/rsrc_nonstatic.c 	for (p = data->mem_db_valid.next; p != &data->mem_db_valid; p = q) {
p                1039 drivers/pcmcia/rsrc_nonstatic.c 		q = p->next;
p                1040 drivers/pcmcia/rsrc_nonstatic.c 		kfree(p);
p                1042 drivers/pcmcia/rsrc_nonstatic.c 	for (p = data->mem_db.next; p != &data->mem_db; p = q) {
p                1043 drivers/pcmcia/rsrc_nonstatic.c 		q = p->next;
p                1044 drivers/pcmcia/rsrc_nonstatic.c 		kfree(p);
p                1046 drivers/pcmcia/rsrc_nonstatic.c 	for (p = data->io_db.next; p != &data->io_db; p = q) {
p                1047 drivers/pcmcia/rsrc_nonstatic.c 		q = p->next;
p                1048 drivers/pcmcia/rsrc_nonstatic.c 		kfree(p);
p                1070 drivers/pcmcia/rsrc_nonstatic.c 	struct resource_map *p;
p                1076 drivers/pcmcia/rsrc_nonstatic.c 	for (p = data->io_db.next; p != &data->io_db; p = p->next) {
p                1081 drivers/pcmcia/rsrc_nonstatic.c 				((unsigned long) p->base),
p                1082 drivers/pcmcia/rsrc_nonstatic.c 				((unsigned long) p->base + p->num - 1));
p                1126 drivers/pcmcia/rsrc_nonstatic.c 	struct resource_map *p;
p                1132 drivers/pcmcia/rsrc_nonstatic.c 	for (p = data->mem_db_valid.next; p != &data->mem_db_valid;
p                1133 drivers/pcmcia/rsrc_nonstatic.c 	     p = p->next) {
p                1138 drivers/pcmcia/rsrc_nonstatic.c 				((unsigned long) p->base),
p                1139 drivers/pcmcia/rsrc_nonstatic.c 				((unsigned long) p->base + p->num - 1));
p                1142 drivers/pcmcia/rsrc_nonstatic.c 	for (p = data->mem_db.next; p != &data->mem_db; p = p->next) {
p                1147 drivers/pcmcia/rsrc_nonstatic.c 				((unsigned long) p->base),
p                1148 drivers/pcmcia/rsrc_nonstatic.c 				((unsigned long) p->base + p->num - 1));
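The rsrc_nonstatic.c entries above operate on a circular singly linked list whose head acts as a sentinel: the add path scans for the node after which the new range belongs (merging with an overlapping predecessor), splices in a new node, and the teardown loops free every node by saving ->next before kfree(). A compact userspace sketch of the sorted splice on a circular list with a sentinel head; the structure and names are illustrative and the overlap-merging step is omitted:

#include <stdio.h>
#include <stdlib.h>

struct range {				/* illustrative stand-in for resource_map */
	unsigned long base, num;
	struct range *next;
};

/* Insert a new range after the last node whose base is <= the new base.
 * The head node is a sentinel: it carries no range and closes the circle. */
static int add_range(struct range *head, unsigned long base, unsigned long num)
{
	struct range *p = head, *q;

	while (p->next != head && p->next->base <= base)
		p = p->next;

	q = malloc(sizeof(*q));
	if (!q)
		return -1;
	q->base = base;
	q->num = num;
	q->next = p->next;		/* splice between p and p->next */
	p->next = q;
	return 0;
}

int main(void)
{
	struct range head = { .next = &head };	/* empty circular list */
	struct range *p, *q;

	add_range(&head, 0x300, 0x20);
	add_range(&head, 0x100, 0x10);

	for (p = head.next; p != &head; p = q) {	/* walk, then free */
		q = p->next;
		printf("0x%lx-0x%lx\n", p->base, p->base + p->num - 1);
		free(p);
	}
	return 0;
}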
p                 149 drivers/pcmcia/sa11xx_base.c 	char *p = buf;
p                 153 drivers/pcmcia/sa11xx_base.c 	p+=sprintf(p, "I/O      : %uns (%uns)\n", timing.io,
p                 156 drivers/pcmcia/sa11xx_base.c 	p+=sprintf(p, "attribute: %uns (%uns)\n", timing.attr,
p                 159 drivers/pcmcia/sa11xx_base.c 	p+=sprintf(p, "common   : %uns (%uns)\n", timing.mem,
p                 162 drivers/pcmcia/sa11xx_base.c 	return p - buf;
p                 687 drivers/pcmcia/soc_common.c static void dump_bits(char **p, const char *prefix,
p                 690 drivers/pcmcia/soc_common.c 	char *b = *p;
p                 698 drivers/pcmcia/soc_common.c 	*p = b;
p                 711 drivers/pcmcia/soc_common.c 	char *p = buf;
p                 713 drivers/pcmcia/soc_common.c 	p += sprintf(p, "slot     : %d\n", skt->nr);
p                 715 drivers/pcmcia/soc_common.c 	dump_bits(&p, "status", skt->status,
p                 717 drivers/pcmcia/soc_common.c 	dump_bits(&p, "csc_mask", skt->cs_state.csc_mask,
p                 719 drivers/pcmcia/soc_common.c 	dump_bits(&p, "cs_flags", skt->cs_state.flags,
p                 722 drivers/pcmcia/soc_common.c 	p += sprintf(p, "Vcc      : %d\n", skt->cs_state.Vcc);
p                 723 drivers/pcmcia/soc_common.c 	p += sprintf(p, "Vpp      : %d\n", skt->cs_state.Vpp);
p                 724 drivers/pcmcia/soc_common.c 	p += sprintf(p, "IRQ      : %d (%d)\n", skt->cs_state.io_irq,
p                 727 drivers/pcmcia/soc_common.c 		p += skt->ops->show_timing(skt, p);
p                 729 drivers/pcmcia/soc_common.c 	return p-buf;
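Both sa11xx_base.c and soc_common.c above build their status text by repeatedly doing p += sprintf(p, ...), so each call appends at the current end of the buffer and the final p - buf is the total length. A tiny standalone sketch of that accumulation pattern; the buffer size and fields are invented, and a real show routine would also have to respect the caller's buffer limit:

#include <stdio.h>

int main(void)
{
	char buf[256];
	char *p = buf;
	int vcc = 33, vpp = 0;

	/* sprintf returns the number of characters written, so advancing
	 * p keeps appending to the same buffer. */
	p += sprintf(p, "Vcc      : %d\n", vcc);
	p += sprintf(p, "Vpp      : %d\n", vpp);

	printf("%td bytes:\n%s", p - buf, buf);
	return 0;
}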
p                 118 drivers/perf/arm_smmuv3_pmu.c #define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))
p                  75 drivers/perf/arm_spe_pmu.c #define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu))
p                  40 drivers/perf/fsl_imx8_ddr_perf.c #define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)
p                 474 drivers/perf/fsl_imx8_ddr_perf.c static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
p                 477 drivers/perf/fsl_imx8_ddr_perf.c 	struct ddr_pmu *pmu = (struct ddr_pmu *) p;
p                  24 drivers/perf/hisilicon/hisi_uncore_pmu.h #define to_hisi_pmu(p)	(container_of(p, struct hisi_pmu, pmu))
p                 192 drivers/perf/qcom_l2_pmu.c #define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu))
p                 163 drivers/perf/qcom_l3_pmu.c #define to_l3cache_pmu(p) (container_of(p, struct l3cache_pmu, pmu))
p                  80 drivers/perf/xgene_pmu.c #define to_pmu_dev(p)     container_of(p, struct xgene_pmu_dev, pmu)
p                 103 drivers/phy/broadcom/phy-bcm-cygnus-pcie.c static int cygnus_pcie_phy_power_on(struct phy *p)
p                 105 drivers/phy/broadcom/phy-bcm-cygnus-pcie.c 	struct cygnus_pcie_phy *phy = phy_get_drvdata(p);
p                 110 drivers/phy/broadcom/phy-bcm-cygnus-pcie.c static int cygnus_pcie_phy_power_off(struct phy *p)
p                 112 drivers/phy/broadcom/phy-bcm-cygnus-pcie.c 	struct cygnus_pcie_phy *phy = phy_get_drvdata(p);
p                 153 drivers/phy/broadcom/phy-bcm-cygnus-pcie.c 		struct cygnus_pcie_phy *p;
p                 174 drivers/phy/broadcom/phy-bcm-cygnus-pcie.c 		p = &core->phys[id];
p                 175 drivers/phy/broadcom/phy-bcm-cygnus-pcie.c 		p->phy = devm_phy_create(dev, child, &cygnus_pcie_phy_ops);
p                 176 drivers/phy/broadcom/phy-bcm-cygnus-pcie.c 		if (IS_ERR(p->phy)) {
p                 178 drivers/phy/broadcom/phy-bcm-cygnus-pcie.c 			ret = PTR_ERR(p->phy);
p                 182 drivers/phy/broadcom/phy-bcm-cygnus-pcie.c 		p->core = core;
p                 183 drivers/phy/broadcom/phy-bcm-cygnus-pcie.c 		p->id = id;
p                 184 drivers/phy/broadcom/phy-bcm-cygnus-pcie.c 		phy_set_drvdata(p->phy, p);
p                  26 drivers/phy/broadcom/phy-bcm-ns2-pcie.c static int ns2_pci_phy_init(struct phy *p)
p                  28 drivers/phy/broadcom/phy-bcm-ns2-pcie.c 	struct mdio_device *mdiodev = phy_get_drvdata(p);
p                 153 drivers/phy/broadcom/phy-bcm-sr-pcie.c static int sr_pcie_phy_init(struct phy *p)
p                 155 drivers/phy/broadcom/phy-bcm-sr-pcie.c 	struct sr_pcie_phy *phy = phy_get_drvdata(p);
p                 168 drivers/phy/broadcom/phy-bcm-sr-pcie.c static int sr_paxc_phy_init(struct phy *p)
p                 170 drivers/phy/broadcom/phy-bcm-sr-pcie.c 	struct sr_pcie_phy *phy = phy_get_drvdata(p);
p                 256 drivers/phy/broadcom/phy-bcm-sr-pcie.c 		struct sr_pcie_phy *p = &core->phys[phy_idx];
p                 264 drivers/phy/broadcom/phy-bcm-sr-pcie.c 		p->phy = devm_phy_create(dev, NULL, ops);
p                 265 drivers/phy/broadcom/phy-bcm-sr-pcie.c 		if (IS_ERR(p->phy)) {
p                 267 drivers/phy/broadcom/phy-bcm-sr-pcie.c 			return PTR_ERR(p->phy);
p                 270 drivers/phy/broadcom/phy-bcm-sr-pcie.c 		p->core = core;
p                 271 drivers/phy/broadcom/phy-bcm-sr-pcie.c 		p->index = phy_idx;
p                 272 drivers/phy/broadcom/phy-bcm-sr-pcie.c 		phy_set_drvdata(p->phy, p);
p                 354 drivers/phy/mscc/phy-ocelot-serdes.c #define SERDES_MUX_SGMII(i, p, m, c) \
p                 355 drivers/phy/mscc/phy-ocelot-serdes.c 	SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_SGMII, m, c)
p                 356 drivers/phy/mscc/phy-ocelot-serdes.c #define SERDES_MUX_QSGMII(i, p, m, c) \
p                 357 drivers/phy/mscc/phy-ocelot-serdes.c 	SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_QSGMII, m, c)
p                 118 drivers/phy/phy-core.c 	struct phy_lookup *p, *pl = NULL;
p                 121 drivers/phy/phy-core.c 	list_for_each_entry(p, &phys, node)
p                 122 drivers/phy/phy-core.c 		if (!strcmp(p->dev_id, dev_id) && !strcmp(p->con_id, con_id)) {
p                 123 drivers/phy/phy-core.c 			pl = p;
p                 204 drivers/phy/qualcomm/phy-qcom-usb-hs.c 	struct phy_provider *p;
p                 269 drivers/phy/qualcomm/phy-qcom-usb-hs.c 	p = devm_of_phy_provider_register(&ulpi->dev, of_phy_simple_xlate);
p                 270 drivers/phy/qualcomm/phy-qcom-usb-hs.c 	return PTR_ERR_OR_ZERO(p);
p                 105 drivers/phy/qualcomm/phy-qcom-usb-hsic.c 	struct phy_provider *p;
p                 136 drivers/phy/qualcomm/phy-qcom-usb-hsic.c 	p = devm_of_phy_provider_register(&ulpi->dev, of_phy_simple_xlate);
p                 137 drivers/phy/qualcomm/phy-qcom-usb-hsic.c 	return PTR_ERR_OR_ZERO(p);
p                  77 drivers/phy/renesas/phy-rcar-gen2.c static int rcar_gen2_phy_init(struct phy *p)
p                  79 drivers/phy/renesas/phy-rcar-gen2.c 	struct rcar_gen2_phy *phy = phy_get_drvdata(p);
p                 105 drivers/phy/renesas/phy-rcar-gen2.c static int rcar_gen2_phy_exit(struct phy *p)
p                 107 drivers/phy/renesas/phy-rcar-gen2.c 	struct rcar_gen2_phy *phy = phy_get_drvdata(p);
p                 117 drivers/phy/renesas/phy-rcar-gen2.c static int rcar_gen2_phy_power_on(struct phy *p)
p                 119 drivers/phy/renesas/phy-rcar-gen2.c 	struct rcar_gen2_phy *phy = phy_get_drvdata(p);
p                 161 drivers/phy/renesas/phy-rcar-gen2.c static int rcar_gen2_phy_power_off(struct phy *p)
p                 163 drivers/phy/renesas/phy-rcar-gen2.c 	struct rcar_gen2_phy *phy = phy_get_drvdata(p);
p                 193 drivers/phy/renesas/phy-rcar-gen2.c static int rz_g1c_phy_power_on(struct phy *p)
p                 195 drivers/phy/renesas/phy-rcar-gen2.c 	struct rcar_gen2_phy *phy = phy_get_drvdata(p);
p                 222 drivers/phy/renesas/phy-rcar-gen2.c static int rz_g1c_phy_power_off(struct phy *p)
p                 224 drivers/phy/renesas/phy-rcar-gen2.c 	struct rcar_gen2_phy *phy = phy_get_drvdata(p);
p                  28 drivers/phy/renesas/phy-rcar-gen3-pcie.c static void rcar_gen3_phy_pcie_modify_reg(struct phy *p, unsigned int reg,
p                  31 drivers/phy/renesas/phy-rcar-gen3-pcie.c 	struct rcar_gen3_phy *phy = phy_get_drvdata(p);
p                  46 drivers/phy/renesas/phy-rcar-gen3-pcie.c static int r8a77980_phy_pcie_power_on(struct phy *p)
p                  49 drivers/phy/renesas/phy-rcar-gen3-pcie.c 	rcar_gen3_phy_pcie_modify_reg(p, PHY_CTRL, PHY_CTRL_PHY_PWDN, 0);
p                  54 drivers/phy/renesas/phy-rcar-gen3-pcie.c static int r8a77980_phy_pcie_power_off(struct phy *p)
p                  57 drivers/phy/renesas/phy-rcar-gen3-pcie.c 	rcar_gen3_phy_pcie_modify_reg(p, PHY_CTRL, 0, PHY_CTRL_PHY_PWDN);
p                 392 drivers/phy/renesas/phy-rcar-gen3-usb2.c static int rcar_gen3_phy_usb2_init(struct phy *p)
p                 394 drivers/phy/renesas/phy-rcar-gen3-usb2.c 	struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
p                 418 drivers/phy/renesas/phy-rcar-gen3-usb2.c static int rcar_gen3_phy_usb2_exit(struct phy *p)
p                 420 drivers/phy/renesas/phy-rcar-gen3-usb2.c 	struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
p                 439 drivers/phy/renesas/phy-rcar-gen3-usb2.c static int rcar_gen3_phy_usb2_power_on(struct phy *p)
p                 441 drivers/phy/renesas/phy-rcar-gen3-usb2.c 	struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
p                 471 drivers/phy/renesas/phy-rcar-gen3-usb2.c static int rcar_gen3_phy_usb2_power_off(struct phy *p)
p                 473 drivers/phy/renesas/phy-rcar-gen3-usb2.c 	struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
p                 104 drivers/phy/renesas/phy-rcar-gen3-usb3.c static int rcar_gen3_phy_usb3_init(struct phy *p)
p                 106 drivers/phy/renesas/phy-rcar-gen3-usb3.c 	struct rcar_gen3_usb3 *r = phy_get_drvdata(p);
p                 222 drivers/phy/samsung/phy-samsung-usb2.c 		struct samsung_usb2_phy_instance *p = &drv->instances[i];
p                 225 drivers/phy/samsung/phy-samsung-usb2.c 		p->phy = devm_phy_create(dev, NULL, &samsung_usb2_phy_ops);
p                 226 drivers/phy/samsung/phy-samsung-usb2.c 		if (IS_ERR(p->phy)) {
p                 229 drivers/phy/samsung/phy-samsung-usb2.c 			return PTR_ERR(p->phy);
p                 232 drivers/phy/samsung/phy-samsung-usb2.c 		p->cfg = &drv->cfg->phys[i];
p                 233 drivers/phy/samsung/phy-samsung-usb2.c 		p->drv = drv;
p                 234 drivers/phy/samsung/phy-samsung-usb2.c 		phy_set_bus_width(p->phy, 8);
p                 235 drivers/phy/samsung/phy-samsung-usb2.c 		phy_set_drvdata(p->phy, p);
p                  64 drivers/phy/socionext/phy-uniphier-usb3hs.c #define trim_param_is_valid(p)	((p)->rterm || (p)->sel_t || (p)->hs_i)
p                 173 drivers/phy/socionext/phy-uniphier-usb3hs.c 				       const struct uniphier_u3hsphy_param *p)
p                 176 drivers/phy/socionext/phy-uniphier-usb3hs.c 	u32 field_mask = GENMASK(p->field.msb, p->field.lsb);
p                 181 drivers/phy/socionext/phy-uniphier-usb3hs.c 	val |= FIELD_PREP(HSPHY_CFG1_ADR_MASK, p->field.reg_no)
p                 191 drivers/phy/socionext/phy-uniphier-usb3hs.c 	data = field_mask & (p->value << p->field.lsb);
p                  77 drivers/phy/socionext/phy-uniphier-usb3ss.c 				       const struct uniphier_u3ssphy_param *p)
p                  80 drivers/phy/socionext/phy-uniphier-usb3ss.c 	u8 field_mask = GENMASK(p->field.msb, p->field.lsb);
p                  85 drivers/phy/socionext/phy-uniphier-usb3ss.c 	val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no);
p                  91 drivers/phy/socionext/phy-uniphier-usb3ss.c 	data = field_mask & (p->value << p->field.lsb);
p                  93 drivers/phy/socionext/phy-uniphier-usb3ss.c 	val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no);
p                 100 drivers/phy/socionext/phy-uniphier-usb3ss.c 	val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no);
p                 342 drivers/phy/ti/phy-am654-serdes.c 	const int *p;
p                 360 drivers/phy/ti/phy-am654-serdes.c 		p = serdes_am654_mux_table[val];
p                 363 drivers/phy/ti/phy-am654-serdes.c 			if (parents[i] != p[i]) {
p                  62 drivers/pinctrl/bcm/pinctrl-bcm2835.c #define FSEL_REG(p)		(GPFSEL0 + (((p) / 10) * 4))
p                  63 drivers/pinctrl/bcm/pinctrl-bcm2835.c #define FSEL_SHIFT(p)		(((p) % 10) * 3)
p                  64 drivers/pinctrl/bcm/pinctrl-bcm2835.c #define GPIO_REG_OFFSET(p)	((p) / 32)
p                  65 drivers/pinctrl/bcm/pinctrl-bcm2835.c #define GPIO_REG_SHIFT(p)	((p) % 32)
p                  68 drivers/pinctrl/bcm/pinctrl-bcm2835.c #define PUD_2711_REG_OFFSET(p)	((p) / 16)
p                  69 drivers/pinctrl/bcm/pinctrl-bcm2835.c #define PUD_2711_REG_SHIFT(p)	(((p) % 16) * 2)
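The pinctrl-bcm2835.c macros above pack ten 3-bit function-select fields into each 32-bit GPFSELn register, so a pin number maps to a register offset and a bit shift by plain division and modulo. Worked through for one pin below; GPFSEL0 is taken as offset 0 here purely for the arithmetic, and the snippet does nothing but print the mapping:

#include <stdio.h>

#define GPFSEL0			0x0
#define FSEL_REG(p)		(GPFSEL0 + (((p) / 10) * 4))	/* 10 pins per register */
#define FSEL_SHIFT(p)		(((p) % 10) * 3)		/* 3 bits per pin */

int main(void)
{
	unsigned int pin = 17;

	/* pin 17 -> second register (offset 0x4), bits 23:21 */
	printf("pin %u: reg offset 0x%x, shift %u\n",
	       pin, FSEL_REG(pin), FSEL_SHIFT(pin));
	return 0;
}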
p                 150 drivers/pinctrl/bcm/pinctrl-cygnus-mux.c #define CYGNUS_PIN_DESC(p, n, i, o, s)	\
p                 152 drivers/pinctrl/bcm/pinctrl-cygnus-mux.c 	.pin = p,			\
p                 170 drivers/pinctrl/bcm/pinctrl-ns2-mux.c #define NS2_PIN_DESC(p, n, b, o, s, i, pu, d)	\
p                 172 drivers/pinctrl/bcm/pinctrl-ns2-mux.c 	.pin = p,				\
p                 141 drivers/pinctrl/bcm/pinctrl-nsp-mux.c #define NSP_PIN_DESC(p, n, g)		\
p                 143 drivers/pinctrl/bcm/pinctrl-nsp-mux.c 	.pin = p,			\
p                 912 drivers/pinctrl/core.c static struct pinctrl_state *find_state(struct pinctrl *p,
p                 917 drivers/pinctrl/core.c 	list_for_each_entry(state, &p->states, node)
p                 924 drivers/pinctrl/core.c static struct pinctrl_state *create_state(struct pinctrl *p,
p                 936 drivers/pinctrl/core.c 	list_add_tail(&state->node, &p->states);
p                 941 drivers/pinctrl/core.c static int add_setting(struct pinctrl *p, struct pinctrl_dev *pctldev,
p                 948 drivers/pinctrl/core.c 	state = find_state(p, map->name);
p                 950 drivers/pinctrl/core.c 		state = create_state(p, map->name);
p                 977 drivers/pinctrl/core.c 		dev_info(p->dev, "unknown pinctrl device %s in map entry, deferring probe",
p                1008 drivers/pinctrl/core.c 	struct pinctrl *p;
p                1011 drivers/pinctrl/core.c 	list_for_each_entry(p, &pinctrl_list, node)
p                1012 drivers/pinctrl/core.c 		if (p->dev == dev) {
p                1014 drivers/pinctrl/core.c 			return p;
p                1021 drivers/pinctrl/core.c static void pinctrl_free(struct pinctrl *p, bool inlist);
p                1026 drivers/pinctrl/core.c 	struct pinctrl *p;
p                1038 drivers/pinctrl/core.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                1039 drivers/pinctrl/core.c 	if (!p)
p                1041 drivers/pinctrl/core.c 	p->dev = dev;
p                1042 drivers/pinctrl/core.c 	INIT_LIST_HEAD(&p->states);
p                1043 drivers/pinctrl/core.c 	INIT_LIST_HEAD(&p->dt_maps);
p                1045 drivers/pinctrl/core.c 	ret = pinctrl_dt_to_map(p, pctldev);
p                1047 drivers/pinctrl/core.c 		kfree(p);
p                1070 drivers/pinctrl/core.c 		ret = add_setting(p, pctldev, map);
p                1085 drivers/pinctrl/core.c 			pinctrl_free(p, false);
p                1094 drivers/pinctrl/core.c 		pinctrl_free(p, false);
p                1098 drivers/pinctrl/core.c 	kref_init(&p->users);
p                1102 drivers/pinctrl/core.c 	list_add_tail(&p->node, &pinctrl_list);
p                1105 drivers/pinctrl/core.c 	return p;
p                1114 drivers/pinctrl/core.c 	struct pinctrl *p;
p                1124 drivers/pinctrl/core.c 	p = find_pinctrl(dev);
p                1125 drivers/pinctrl/core.c 	if (p) {
p                1127 drivers/pinctrl/core.c 		kref_get(&p->users);
p                1128 drivers/pinctrl/core.c 		return p;
p                1153 drivers/pinctrl/core.c static void pinctrl_free(struct pinctrl *p, bool inlist)
p                1159 drivers/pinctrl/core.c 	list_for_each_entry_safe(state, n1, &p->states, node) {
p                1161 drivers/pinctrl/core.c 			pinctrl_free_setting(state == p->state, setting);
p                1169 drivers/pinctrl/core.c 	pinctrl_dt_free_maps(p);
p                1172 drivers/pinctrl/core.c 		list_del(&p->node);
p                1173 drivers/pinctrl/core.c 	kfree(p);
p                1183 drivers/pinctrl/core.c 	struct pinctrl *p = container_of(kref, struct pinctrl, users);
p                1185 drivers/pinctrl/core.c 	pinctrl_free(p, true);
p                1192 drivers/pinctrl/core.c void pinctrl_put(struct pinctrl *p)
p                1194 drivers/pinctrl/core.c 	kref_put(&p->users, pinctrl_release);
p                1203 drivers/pinctrl/core.c struct pinctrl_state *pinctrl_lookup_state(struct pinctrl *p,
p                1208 drivers/pinctrl/core.c 	state = find_state(p, name);
p                1212 drivers/pinctrl/core.c 			dev_dbg(p->dev, "using pinctrl dummy state (%s)\n",
p                1214 drivers/pinctrl/core.c 			state = create_state(p, name);
p                1237 drivers/pinctrl/core.c static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
p                1240 drivers/pinctrl/core.c 	struct pinctrl_state *old_state = p->state;
p                1243 drivers/pinctrl/core.c 	if (p->state) {
p                1250 drivers/pinctrl/core.c 		list_for_each_entry(setting, &p->state->settings, node) {
p                1257 drivers/pinctrl/core.c 	p->state = NULL;
p                1279 drivers/pinctrl/core.c 		if (p != setting->pctldev->p)
p                1280 drivers/pinctrl/core.c 			pinctrl_link_add(setting->pctldev, p->dev);
p                1283 drivers/pinctrl/core.c 	p->state = state;
p                1288 drivers/pinctrl/core.c 	dev_err(p->dev, "Error applying setting, reverse things back\n");
p                1306 drivers/pinctrl/core.c 		pinctrl_select_state(p, old_state);
p                1316 drivers/pinctrl/core.c int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state)
p                1318 drivers/pinctrl/core.c 	if (p->state == state)
p                1321 drivers/pinctrl/core.c 	return pinctrl_commit_state(p, state);
p                1339 drivers/pinctrl/core.c 	struct pinctrl **ptr, *p;
p                1345 drivers/pinctrl/core.c 	p = pinctrl_get(dev);
p                1346 drivers/pinctrl/core.c 	if (!IS_ERR(p)) {
p                1347 drivers/pinctrl/core.c 		*ptr = p;
p                1353 drivers/pinctrl/core.c 	return p;
p                1359 drivers/pinctrl/core.c 	struct pinctrl **p = res;
p                1361 drivers/pinctrl/core.c 	return *p == data;
p                1372 drivers/pinctrl/core.c void devm_pinctrl_put(struct pinctrl *p)
p                1374 drivers/pinctrl/core.c 	WARN_ON(devres_release(p->dev, devm_pinctrl_release,
p                1375 drivers/pinctrl/core.c 			       devm_pinctrl_match, p));
p                1488 drivers/pinctrl/core.c 	if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_sleep))
p                1489 drivers/pinctrl/core.c 		return pinctrl_commit_state(pctldev->p, pctldev->hog_sleep);
p                1500 drivers/pinctrl/core.c 	if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_default))
p                1501 drivers/pinctrl/core.c 		return pinctrl_commit_state(pctldev->p, pctldev->hog_default);
p                1525 drivers/pinctrl/core.c 	if (pins->p->state != pins->init_state)
p                1531 drivers/pinctrl/core.c 	ret = pinctrl_select_state(pins->p, pins->default_state);
p                1553 drivers/pinctrl/core.c 	ret = pinctrl_select_state(pins->p, state);
p                1800 drivers/pinctrl/core.c 	struct pinctrl *p;
p                1808 drivers/pinctrl/core.c 	list_for_each_entry(p, &pinctrl_list, node) {
p                1810 drivers/pinctrl/core.c 			   dev_name(p->dev),
p                1811 drivers/pinctrl/core.c 			   p->state ? p->state->name : "none");
p                1813 drivers/pinctrl/core.c 		list_for_each_entry(state, &p->states, node) {
p                2014 drivers/pinctrl/core.c 	pctldev->p = create_pinctrl(pctldev->dev, pctldev);
p                2015 drivers/pinctrl/core.c 	if (PTR_ERR(pctldev->p) == -ENODEV) {
p                2021 drivers/pinctrl/core.c 	if (IS_ERR(pctldev->p)) {
p                2023 drivers/pinctrl/core.c 			PTR_ERR(pctldev->p));
p                2025 drivers/pinctrl/core.c 		return PTR_ERR(pctldev->p);
p                2029 drivers/pinctrl/core.c 		pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
p                2034 drivers/pinctrl/core.c 		if (pinctrl_select_state(pctldev->p,
p                2041 drivers/pinctrl/core.c 		pinctrl_lookup_state(pctldev->p,
p                2118 drivers/pinctrl/core.c 	struct pinctrl_dev *p;
p                2120 drivers/pinctrl/core.c 	p = pinctrl_init_controller(pctldesc, dev, driver_data);
p                2121 drivers/pinctrl/core.c 	if (IS_ERR(p))
p                2122 drivers/pinctrl/core.c 		return PTR_ERR(p);
p                2130 drivers/pinctrl/core.c 	*pctldev = p;
p                2153 drivers/pinctrl/core.c 	if (!IS_ERR_OR_NULL(pctldev->p))
p                2154 drivers/pinctrl/core.c 		pinctrl_put(pctldev->p);
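The pinctrl/core.c entries above cover the consumer side of the pinctrl API: create_pinctrl() builds the struct pinctrl and its list of states, pinctrl_lookup_state() finds a state by name, and pinctrl_select_state() commits it (returning early when that state is already active). From a client driver's point of view the usual sequence looks like the hedged sketch below; the function name is hypothetical and error handling is trimmed to the essentials:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

static int my_driver_apply_pins(struct device *dev)
{
	struct pinctrl *p;
	struct pinctrl_state *def;

	/* Resource-managed handle; released automatically on unbind. */
	p = devm_pinctrl_get(dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	/* Look up the "default" state provided by DT or board mapping tables. */
	def = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT);
	if (IS_ERR(def))
		return PTR_ERR(def);

	/* A no-op if this state is already selected. */
	return pinctrl_select_state(p, def);
}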
p                  58 drivers/pinctrl/core.h 	struct pinctrl *p;
p                  49 drivers/pinctrl/devicetree.c void pinctrl_dt_free_maps(struct pinctrl *p)
p                  53 drivers/pinctrl/devicetree.c 	list_for_each_entry_safe(dt_map, n1, &p->dt_maps, node) {
p                  61 drivers/pinctrl/devicetree.c 	of_node_put(p->dev->of_node);
p                  64 drivers/pinctrl/devicetree.c static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
p                  75 drivers/pinctrl/devicetree.c 		devname = kstrdup_const(dev_name(p->dev), GFP_KERNEL);
p                  93 drivers/pinctrl/devicetree.c 	list_add_tail(&dt_map->node, &p->dt_maps);
p                 107 drivers/pinctrl/devicetree.c static int dt_to_map_one_config(struct pinctrl *p,
p                 132 drivers/pinctrl/devicetree.c 				return driver_deferred_probe_check_state_continue(p->dev);
p                 134 drivers/pinctrl/devicetree.c 			return driver_deferred_probe_check_state(p->dev);
p                 137 drivers/pinctrl/devicetree.c 		if (hog_pctldev && (np_pctldev == p->dev->of_node)) {
p                 145 drivers/pinctrl/devicetree.c 		if (np_pctldev == p->dev->of_node) {
p                 158 drivers/pinctrl/devicetree.c 		dev_err(p->dev, "pctldev %s doesn't support DT\n",
p                 167 drivers/pinctrl/devicetree.c 	return dt_remember_or_free_map(p, statename, pctldev, map, num_maps);
p                 170 drivers/pinctrl/devicetree.c static int dt_remember_dummy_state(struct pinctrl *p, const char *statename)
p                 181 drivers/pinctrl/devicetree.c 	return dt_remember_or_free_map(p, statename, NULL, map, 1);
p                 199 drivers/pinctrl/devicetree.c int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
p                 201 drivers/pinctrl/devicetree.c 	struct device_node *np = p->dev->of_node;
p                 214 drivers/pinctrl/devicetree.c 			dev_dbg(p->dev,
p                 256 drivers/pinctrl/devicetree.c 				dev_err(p->dev,
p                 264 drivers/pinctrl/devicetree.c 			ret = dt_to_map_one_config(p, pctldev, statename,
p                 273 drivers/pinctrl/devicetree.c 			ret = dt_remember_dummy_state(p, statename);
p                 282 drivers/pinctrl/devicetree.c 	pinctrl_dt_free_maps(p);
p                  14 drivers/pinctrl/devicetree.h void pinctrl_dt_free_maps(struct pinctrl *p);
p                  15 drivers/pinctrl/devicetree.h int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev);
p                  31 drivers/pinctrl/devicetree.h static inline int pinctrl_dt_to_map(struct pinctrl *p,
p                  37 drivers/pinctrl/devicetree.h static inline void pinctrl_dt_free_maps(struct pinctrl *p)
p                 118 drivers/pinctrl/freescale/pinctrl-imx.h #define IMX_CFG_PARAMS_DECODE(p, m, o) \
p                 119 drivers/pinctrl/freescale/pinctrl-imx.h 	{ .param = p, .mask = m, .shift = o, .invert = false, }
p                 121 drivers/pinctrl/freescale/pinctrl-imx.h #define IMX_CFG_PARAMS_DECODE_INVERT(p, m, o) \
p                 122 drivers/pinctrl/freescale/pinctrl-imx.h 	{ .param = p, .mask = m, .shift = o, .invert = true, }
p                  29 drivers/pinctrl/freescale/pinctrl-mxs.h #define PINID_TO_BANK(p)	((p) >> 5)
p                  30 drivers/pinctrl/freescale/pinctrl-mxs.h #define PINID_TO_PIN(p)		((p) % 32)
p                 101 drivers/pinctrl/intel/pinctrl-baytrail.c #define COMMUNITY(p, n, map)		\
p                 103 drivers/pinctrl/intel/pinctrl-baytrail.c 		.pin_base	= (p),	\
p                 174 drivers/pinctrl/intel/pinctrl-cherryview.c #define ALTERNATE_FUNCTION(p, m, i)		\
p                 176 drivers/pinctrl/intel/pinctrl-cherryview.c 		.pin = (p),			\
p                 181 drivers/pinctrl/intel/pinctrl-cherryview.c #define PIN_GROUP_WITH_ALT(n, p, m, i)		\
p                 184 drivers/pinctrl/intel/pinctrl-cherryview.c 		.pins = (p),			\
p                 185 drivers/pinctrl/intel/pinctrl-cherryview.c 		.npins = ARRAY_SIZE((p)),	\
p                 190 drivers/pinctrl/intel/pinctrl-cherryview.c #define PIN_GROUP_WITH_OVERRIDE(n, p, m, i, o)	\
p                 193 drivers/pinctrl/intel/pinctrl-cherryview.c 		.pins = (p),			\
p                 194 drivers/pinctrl/intel/pinctrl-cherryview.c 		.npins = ARRAY_SIZE((p)),	\
p                  35 drivers/pinctrl/intel/pinctrl-intel.c #define PADOWN_SHIFT(p)			((p) % 8 * PADOWN_BITS)
p                  36 drivers/pinctrl/intel/pinctrl-intel.c #define PADOWN_MASK(p)			(GENMASK(3, 0) << PADOWN_SHIFT(p))
p                  37 drivers/pinctrl/intel/pinctrl-intel.c #define PADOWN_GPP(p)			((p) / 8)
p                 121 drivers/pinctrl/intel/pinctrl-intel.c #define pin_to_padno(c, p)	((p) - (c)->pin_base)
p                 122 drivers/pinctrl/intel/pinctrl-intel.c #define padgroup_offset(g, p)	((p) - (g)->base)
p                 132 drivers/pinctrl/intel/pinctrl-intel.h #define PIN_GROUP(n, p, m)					\
p                 135 drivers/pinctrl/intel/pinctrl-intel.h 		.pins = (p),					\
p                 136 drivers/pinctrl/intel/pinctrl-intel.h 		.npins = ARRAY_SIZE((p)),			\
p                 436 drivers/pinctrl/intel/pinctrl-merrifield.c #define pin_to_bufno(f, p)		((p) - (f)->pin_base)
p                 619 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c 	u32 p, l, ret;
p                 625 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c 	p = readl(info->base + IRQ_POL + 4 * reg_idx);
p                 626 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c 	if ((p ^ l) & (1 << bit_num)) {
p                 637 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c 		p ^= 1 << bit_num;
p                 638 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c 		writel(p, info->base + IRQ_POL + 4 * reg_idx);
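
The armada-37xx lines above read the IRQ_POL word, XOR it against the sampled line level, and flip the polarity bit when the two disagree, a common way of following both edges with a single polarity bit. A hedged userspace sketch of the same toggle, with a plain variable standing in for the MMIO register:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t fake_irq_pol;	/* stands in for info->base + IRQ_POL */

	/* Flip the polarity bit for bit_num when it no longer matches the
	 * sampled level l, mirroring the (p ^ l) & BIT(bit_num) test above. */
	static void update_polarity(uint32_t l, unsigned int bit_num)
	{
		uint32_t p = fake_irq_pol;

		if ((p ^ l) & (1u << bit_num)) {
			p ^= 1u << bit_num;
			fake_irq_pol = p;	/* writel() in the real driver */
		}
	}

	int main(void)
	{
		update_polarity(1u << 3, 3);	/* line 3 high, polarity bit was 0 */
		printf("IRQ_POL is now 0x%08x\n", fake_irq_pol);
		return 0;
	}
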
p                 576 drivers/pinctrl/mvebu/pinctrl-mvebu.c 	void *p;
p                 638 drivers/pinctrl/mvebu/pinctrl-mvebu.c 	p = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
p                 639 drivers/pinctrl/mvebu/pinctrl-mvebu.c 	if (!p)
p                 642 drivers/pinctrl/mvebu/pinctrl-mvebu.c 	pctl->groups = p;
p                 643 drivers/pinctrl/mvebu/pinctrl-mvebu.c 	noname_buf = p + pctl->num_groups * sizeof(*pctl->groups);
p                 600 drivers/pinctrl/nomadik/pinctrl-abx500.c 	const struct abx500_pinrange *p;
p                 610 drivers/pinctrl/nomadik/pinctrl-abx500.c 		p = &pct->soc->gpio_ranges[i];
p                 611 drivers/pinctrl/nomadik/pinctrl-abx500.c 		if ((offset >= p->offset) &&
p                 612 drivers/pinctrl/nomadik/pinctrl-abx500.c 		    (offset < (p->offset + p->npins)))
p                 622 drivers/pinctrl/nomadik/pinctrl-abx500.c 		p->altfunc, offset);
p                 625 drivers/pinctrl/nomadik/pinctrl-abx500.c 			      offset, p->altfunc);
p                 955 drivers/pinctrl/nomadik/pinctrl-abx500.c 		const struct abx500_pinrange *p;
p                 957 drivers/pinctrl/nomadik/pinctrl-abx500.c 		p = &soc->gpio_ranges[i];
p                 958 drivers/pinctrl/nomadik/pinctrl-abx500.c 		gstart = p->offset;
p                 959 drivers/pinctrl/nomadik/pinctrl-abx500.c 		gend = p->offset + p->npins - 1;
p                1057 drivers/pinctrl/nomadik/pinctrl-abx500.c 		const struct abx500_pinrange *p = &pct->soc->gpio_ranges[i];
p                1061 drivers/pinctrl/nomadik/pinctrl-abx500.c 					p->offset - 1, p->offset, p->npins);
p                 719 drivers/pinctrl/pinctrl-coh901.c 		struct coh901_pinpair *p = &coh901_pintable[i];
p                 722 drivers/pinctrl/pinctrl-coh901.c 					     p->offset, p->pin_base, 1);
p                  39 drivers/pinctrl/pinctrl-falcon.c #define pad_r32(p, reg)		ltq_r32(p + reg)
p                  40 drivers/pinctrl/pinctrl-falcon.c #define pad_w32(p, val, reg)	ltq_w32(val, p + reg)
p                  44 drivers/pinctrl/pinctrl-falcon.c #define pad_getbit(m, r, p)	(!!(ltq_r32(m + r) & (1 << p)))
p                  63 drivers/pinctrl/pinctrl-falcon.c #define GRP_MUX(a, m, p)	\
p                  67 drivers/pinctrl/pinctrl-falcon.c 	.pins = p,		\
p                  68 drivers/pinctrl/pinctrl-falcon.c 	.npins = ARRAY_SIZE(p),	\
p                1275 drivers/pinctrl/pinctrl-lpc18xx.c 	struct lpc18xx_pin_caps *p = lpc18xx_pins[pin].drv_data;
p                1278 drivers/pinctrl/pinctrl-lpc18xx.c 	if (function == FUNC_DAC && p->analog == DAC)
p                1281 drivers/pinctrl/pinctrl-lpc18xx.c 	if (function == FUNC_ADC && p->analog)
p                1284 drivers/pinctrl/pinctrl-lpc18xx.c 	if (function == FUNC_I2C0 && p->type == TYPE_I2C0)
p                1287 drivers/pinctrl/pinctrl-lpc18xx.c 	if (function == FUNC_USB1 && p->type == TYPE_USB1)
p                1291 drivers/pinctrl/pinctrl-lpc18xx.c 		if (function == p->functions[i])
p                 612 drivers/pinctrl/pinctrl-max77620.c 	int pin, p;
p                 617 drivers/pinctrl/pinctrl-max77620.c 		for (p = 0; p < 3; ++p)
p                 619 drivers/pinctrl/pinctrl-max77620.c 				mpci, pin, max77620_suspend_fps_param[p]);
p                 628 drivers/pinctrl/pinctrl-max77620.c 	int pin, p;
p                 633 drivers/pinctrl/pinctrl-max77620.c 		for (p = 0; p < 3; ++p)
p                 635 drivers/pinctrl/pinctrl-max77620.c 				mpci, pin, max77620_active_fps_param[p]);
p                 153 drivers/pinctrl/pinctrl-ocelot.c #define OCELOT_P(p, f0, f1, f2)						\
p                 154 drivers/pinctrl/pinctrl-ocelot.c static struct ocelot_pin_caps ocelot_pin_##p = {			\
p                 155 drivers/pinctrl/pinctrl-ocelot.c 	.pin = p,							\
p                 215 drivers/pinctrl/pinctrl-ocelot.c #define JAGUAR2_P(p, f0, f1)						\
p                 216 drivers/pinctrl/pinctrl-ocelot.c static struct ocelot_pin_caps jaguar2_pin_##p = {			\
p                 217 drivers/pinctrl/pinctrl-ocelot.c 	.pin = p,							\
p                 388 drivers/pinctrl/pinctrl-ocelot.c 	struct ocelot_pin_caps *p = info->desc->pins[pin].drv_data;
p                 392 drivers/pinctrl/pinctrl-ocelot.c 		if (function == p->functions[i])
p                 399 drivers/pinctrl/pinctrl-ocelot.c #define REG_ALT(msb, info, p) (OCELOT_GPIO_ALT0 * (info)->stride + 4 * ((msb) + ((info)->stride * ((p) / 32))))
p                 406 drivers/pinctrl/pinctrl-ocelot.c 	unsigned int p = pin->pin % 32;
p                 421 drivers/pinctrl/pinctrl-ocelot.c 			   BIT(p), f << p);
p                 423 drivers/pinctrl/pinctrl-ocelot.c 			   BIT(p), f << (p - 1));
p                 428 drivers/pinctrl/pinctrl-ocelot.c #define REG(r, info, p) ((r) * (info)->stride + (4 * ((p) / 32)))
p                 435 drivers/pinctrl/pinctrl-ocelot.c 	unsigned int p = pin % 32;
p                 437 drivers/pinctrl/pinctrl-ocelot.c 	regmap_update_bits(info->map, REG(OCELOT_GPIO_OE, info, pin), BIT(p),
p                 438 drivers/pinctrl/pinctrl-ocelot.c 			   input ? 0 : BIT(p));
p                 448 drivers/pinctrl/pinctrl-ocelot.c 	unsigned int p = offset % 32;
p                 451 drivers/pinctrl/pinctrl-ocelot.c 			   BIT(p), 0);
p                 453 drivers/pinctrl/pinctrl-ocelot.c 			   BIT(p), 0);
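
The pinctrl-ocelot.c entries above split a pin number into a 32-bit bank (pin / 32) and a bit within it (pin % 32), and scale register offsets by a per-SoC stride, apparently to cover parts with more than 32 GPIOs. A small sketch of that offset arithmetic; the stride and register index below are hypothetical values chosen only for illustration:

	#include <stdio.h>

	/* reg * stride spaces out the logical registers; 4 * (pin / 32)
	 * selects the 32-bit bank that holds the pin. */
	static unsigned int reg_offset(unsigned int reg, unsigned int stride,
				       unsigned int pin)
	{
		return reg * stride + 4 * (pin / 32);
	}

	int main(void)
	{
		unsigned int stride = 2;	/* hypothetical: 64-GPIO variant */
		unsigned int pin = 37;		/* arbitrary example pin */

		printf("pin %u -> offset 0x%x, bit %u\n",
		       pin, reg_offset(0x8, stride, pin), pin % 32);
		return 0;
	}
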
p                 116 drivers/pinctrl/pinctrl-pistachio.c #define PISTACHIO_PIN_MFIO(p)		(p)
p                 127 drivers/pinctrl/pinctrl-pistachio.c #define MFIO_PIN_DESC(p)	PINCTRL_PIN(PISTACHIO_PIN_MFIO(p), "mfio" #p)
p                1716 drivers/pinctrl/pinctrl-single.c 	struct property *p;
p                1728 drivers/pinctrl/pinctrl-single.c 	p = devm_kzalloc(pcs->dev, sizeof(*p), GFP_KERNEL);
p                1729 drivers/pinctrl/pinctrl-single.c 	if (!p)
p                1732 drivers/pinctrl/pinctrl-single.c 	p->length = sizeof(__be32);
p                1733 drivers/pinctrl/pinctrl-single.c 	p->value = devm_kzalloc(pcs->dev, sizeof(__be32), GFP_KERNEL);
p                1734 drivers/pinctrl/pinctrl-single.c 	if (!p->value)
p                1736 drivers/pinctrl/pinctrl-single.c 	*(__be32 *)p->value = cpu_to_be32(cells);
p                1738 drivers/pinctrl/pinctrl-single.c 	p->name = devm_kstrdup(pcs->dev, name, GFP_KERNEL);
p                1739 drivers/pinctrl/pinctrl-single.c 	if (!p->name)
p                1742 drivers/pinctrl/pinctrl-single.c 	pcs->missing_nr_pinctrl_cells = p;
p                  42 drivers/pinctrl/pinctrl-xway.c #define GPIO_BASE(p)		(REG_OFF * PORT(p))
p                  43 drivers/pinctrl/pinctrl-xway.c #define GPIO_OUT(p)		GPIO_BASE(p)
p                  44 drivers/pinctrl/pinctrl-xway.c #define GPIO_IN(p)		(GPIO_BASE(p) + 0x04)
p                  45 drivers/pinctrl/pinctrl-xway.c #define GPIO_DIR(p)		(GPIO_BASE(p) + 0x08)
p                  46 drivers/pinctrl/pinctrl-xway.c #define GPIO_ALT0(p)		(GPIO_BASE(p) + 0x0C)
p                  47 drivers/pinctrl/pinctrl-xway.c #define GPIO_ALT1(p)		(GPIO_BASE(p) + 0x10)
p                  48 drivers/pinctrl/pinctrl-xway.c #define GPIO_OD(p)		(GPIO_BASE(p) + 0x14)
p                  49 drivers/pinctrl/pinctrl-xway.c #define GPIO_PUDSEL(p)		(GPIO_BASE(p) + 0x1c)
p                  50 drivers/pinctrl/pinctrl-xway.c #define GPIO_PUDEN(p)		(GPIO_BASE(p) + 0x20)
p                  59 drivers/pinctrl/pinctrl-xway.c #define gpio_getbit(m, r, p)	(!!(ltq_r32(m + r) & BIT(p)))
p                  60 drivers/pinctrl/pinctrl-xway.c #define gpio_setbit(m, r, p)	ltq_w32_mask(0, BIT(p), m + r)
p                  61 drivers/pinctrl/pinctrl-xway.c #define gpio_clearbit(m, r, p)	ltq_w32_mask(BIT(p), 0, m + r)
p                  75 drivers/pinctrl/pinctrl-xway.c #define GRP_MUX(a, m, p)		\
p                  76 drivers/pinctrl/pinctrl-xway.c 	{ .name = a, .mux = XWAY_MUX_##m, .pins = p, .npins = ARRAY_SIZE(p), }
p                  80 drivers/pinctrl/samsung/pinctrl-samsung.h #define PIN_GROUP(n, p, f)				\
p                  83 drivers/pinctrl/samsung/pinctrl-samsung.h 		.pins		= p,			\
p                  84 drivers/pinctrl/samsung/pinctrl-samsung.h 		.num_pins	= ARRAY_SIZE(p),	\
p                 276 drivers/pinctrl/sirf/pinctrl-atlas7.c #define GROUP(n, p)  \
p                 279 drivers/pinctrl/sirf/pinctrl-atlas7.c 		.pins = p,	\
p                 280 drivers/pinctrl/sirf/pinctrl-atlas7.c 		.num_pins = ARRAY_SIZE(p),	\
p                 751 drivers/pinctrl/sirf/pinctrl-sirf.c 	const unsigned long *p = (const unsigned long *)pullups;
p                 754 drivers/pinctrl/sirf/pinctrl-sirf.c 		for_each_set_bit(n, p + i, BITS_PER_LONG) {
p                 768 drivers/pinctrl/sirf/pinctrl-sirf.c 	const unsigned long *p = (const unsigned long *)pulldowns;
p                 771 drivers/pinctrl/sirf/pinctrl-sirf.c 		for_each_set_bit(n, p + i, BITS_PER_LONG) {
p                  82 drivers/pinctrl/sirf/pinctrl-sirf.h #define SIRFSOC_PIN_GROUP(n, p)  \
p                  85 drivers/pinctrl/sirf/pinctrl-sirf.h 		.pins = p,	\
p                  86 drivers/pinctrl/sirf/pinctrl-sirf.h 		.num_pins = ARRAY_SIZE(p),	\
p                1330 drivers/pinctrl/stm32/pinctrl-stm32.c 	const struct stm32_desc_pin *p;
p                1334 drivers/pinctrl/stm32/pinctrl-stm32.c 		p = pctl->match_data->pins + i;
p                1335 drivers/pinctrl/stm32/pinctrl-stm32.c 		if (pctl->pkg && !(pctl->pkg & p->pkg))
p                1337 drivers/pinctrl/stm32/pinctrl-stm32.c 		pins->pin = p->pin;
p                1338 drivers/pinctrl/stm32/pinctrl-stm32.c 		pins->functions = p->functions;
p                 199 drivers/platform/chrome/cros_ec_debugfs.c 	char read_buf[EC_USB_PD_MAX_PORTS * 40], *p = read_buf;
p                 236 drivers/platform/chrome/cros_ec_debugfs.c 		p += scnprintf(p, sizeof(read_buf) + read_buf - p,
p                 243 drivers/platform/chrome/cros_ec_debugfs.c 				       read_buf, p - read_buf);
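
The cros_ec_debugfs.c lines above append into a fixed buffer with p += scnprintf(p, sizeof(read_buf) + read_buf - p, ...), so the remaining space shrinks as p advances. A userspace sketch of the same idiom follows; note that the kernel's scnprintf returns the number of characters actually stored, which is what keeps p inside the buffer on truncation, whereas C snprintf returns the would-be length, so the sketch keeps the output short enough that the two behave identically:

	#include <stdio.h>

	int main(void)
	{
		char buf[64], *p = buf;
		int i;

		/* Remaining space is always sizeof(buf) + buf - p. */
		for (i = 0; i < 4; i++)
			p += snprintf(p, sizeof(buf) + buf - p, "port%d ", i);

		printf("\"%s\" (%td bytes used)\n", buf, p - buf);
		return 0;
	}
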
p                 256 drivers/platform/olpc/olpc-xo175-ec.c 	const struct ec_cmd_t *p;
p                 258 drivers/platform/olpc/olpc-xo175-ec.c 	for (p = olpc_xo175_ec_cmds; p->cmd; p++) {
p                 259 drivers/platform/olpc/olpc-xo175-ec.c 		if (p->cmd == cmd)
p                 260 drivers/platform/olpc/olpc-xo175-ec.c 			return p->bytes_returned;
p                  55 drivers/platform/x86/asus-wireless.c 	struct acpi_object_list p;
p                  63 drivers/platform/x86/asus-wireless.c 	p.count = 1;
p                  64 drivers/platform/x86/asus-wireless.c 	p.pointer = &obj;
p                  66 drivers/platform/x86/asus-wireless.c 	s = acpi_evaluate_integer(handle, (acpi_string) method, &p, ret);
p                4357 drivers/platform/x86/sony-laptop.c 			struct acpi_resource_irq *p = &resource->data.irq;
p                4359 drivers/platform/x86/sony-laptop.c 			if (!p || !p->interrupt_count) {
p                4367 drivers/platform/x86/sony-laptop.c 			for (i = 0; i < p->interrupt_count; i++) {
p                4368 drivers/platform/x86/sony-laptop.c 				if (!p->interrupts[i]) {
p                4370 drivers/platform/x86/sony-laptop.c 						p->interrupts[i]);
p                4379 drivers/platform/x86/sony-laptop.c 				interrupt->irq.triggering = p->triggering;
p                4380 drivers/platform/x86/sony-laptop.c 				interrupt->irq.polarity = p->polarity;
p                4381 drivers/platform/x86/sony-laptop.c 				interrupt->irq.shareable = p->shareable;
p                4383 drivers/platform/x86/sony-laptop.c 				interrupt->irq.interrupts[0] = p->interrupts[i];
p                 646 drivers/platform/x86/thinkpad_acpi.c static int acpi_ec_read(int i, u8 *p)
p                 653 drivers/platform/x86/thinkpad_acpi.c 		*p = v;
p                 655 drivers/platform/x86/thinkpad_acpi.c 		if (ec_read(i, p) < 0)
p                 293 drivers/pnp/pnpacpi/rsparser.c 					    struct acpi_resource_dma *p)
p                 298 drivers/pnp/pnpacpi/rsparser.c 	for (i = 0; i < p->channel_count; i++)
p                 299 drivers/pnp/pnpacpi/rsparser.c 		map |= 1 << p->channels[i];
p                 301 drivers/pnp/pnpacpi/rsparser.c 	flags = dma_flags(dev, p->type, p->bus_master, p->transfer);
p                 307 drivers/pnp/pnpacpi/rsparser.c 					    struct acpi_resource_irq *p)
p                 314 drivers/pnp/pnpacpi/rsparser.c 	for (i = 0; i < p->interrupt_count; i++)
p                 315 drivers/pnp/pnpacpi/rsparser.c 		if (p->interrupts[i])
p                 316 drivers/pnp/pnpacpi/rsparser.c 			__set_bit(p->interrupts[i], map.bits);
p                 318 drivers/pnp/pnpacpi/rsparser.c 	flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable);
p                 324 drivers/pnp/pnpacpi/rsparser.c 					struct acpi_resource_extended_irq *p)
p                 331 drivers/pnp/pnpacpi/rsparser.c 	for (i = 0; i < p->interrupt_count; i++) {
p                 332 drivers/pnp/pnpacpi/rsparser.c 		if (p->interrupts[i]) {
p                 333 drivers/pnp/pnpacpi/rsparser.c 			if (p->interrupts[i] < PNP_IRQ_NR)
p                 334 drivers/pnp/pnpacpi/rsparser.c 				__set_bit(p->interrupts[i], map.bits);
p                 338 drivers/pnp/pnpacpi/rsparser.c 					p->interrupts[i], PNP_IRQ_NR);
p                 342 drivers/pnp/pnpacpi/rsparser.c 	flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable);
p                 368 drivers/pnp/pnpacpi/rsparser.c 					      struct acpi_resource_memory24 *p)
p                 372 drivers/pnp/pnpacpi/rsparser.c 	if (p->write_protect == ACPI_READ_WRITE_MEMORY)
p                 374 drivers/pnp/pnpacpi/rsparser.c 	pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum,
p                 375 drivers/pnp/pnpacpi/rsparser.c 				  p->alignment, p->address_length, flags);
p                 380 drivers/pnp/pnpacpi/rsparser.c 					      struct acpi_resource_memory32 *p)
p                 384 drivers/pnp/pnpacpi/rsparser.c 	if (p->write_protect == ACPI_READ_WRITE_MEMORY)
p                 386 drivers/pnp/pnpacpi/rsparser.c 	pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum,
p                 387 drivers/pnp/pnpacpi/rsparser.c 				  p->alignment, p->address_length, flags);
p                 392 drivers/pnp/pnpacpi/rsparser.c 					struct acpi_resource_fixed_memory32 *p)
p                 396 drivers/pnp/pnpacpi/rsparser.c 	if (p->write_protect == ACPI_READ_WRITE_MEMORY)
p                 398 drivers/pnp/pnpacpi/rsparser.c 	pnp_register_mem_resource(dev, option_flags, p->address, p->address,
p                 399 drivers/pnp/pnpacpi/rsparser.c 				  0, p->address_length, flags);
p                 406 drivers/pnp/pnpacpi/rsparser.c 	struct acpi_resource_address64 addr, *p = &addr;
p                 410 drivers/pnp/pnpacpi/rsparser.c 	status = acpi_resource_to_address64(r, p);
p                 417 drivers/pnp/pnpacpi/rsparser.c 	if (p->resource_type == ACPI_MEMORY_RANGE) {
p                 418 drivers/pnp/pnpacpi/rsparser.c 		if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
p                 420 drivers/pnp/pnpacpi/rsparser.c 		pnp_register_mem_resource(dev, option_flags, p->address.minimum,
p                 421 drivers/pnp/pnpacpi/rsparser.c 					  p->address.minimum, 0, p->address.address_length,
p                 423 drivers/pnp/pnpacpi/rsparser.c 	} else if (p->resource_type == ACPI_IO_RANGE)
p                 424 drivers/pnp/pnpacpi/rsparser.c 		pnp_register_port_resource(dev, option_flags, p->address.minimum,
p                 425 drivers/pnp/pnpacpi/rsparser.c 					   p->address.minimum, 0, p->address.address_length,
p                 433 drivers/pnp/pnpacpi/rsparser.c 	struct acpi_resource_extended_address64 *p = &r->data.ext_address64;
p                 436 drivers/pnp/pnpacpi/rsparser.c 	if (p->resource_type == ACPI_MEMORY_RANGE) {
p                 437 drivers/pnp/pnpacpi/rsparser.c 		if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
p                 439 drivers/pnp/pnpacpi/rsparser.c 		pnp_register_mem_resource(dev, option_flags, p->address.minimum,
p                 440 drivers/pnp/pnpacpi/rsparser.c 					  p->address.minimum, 0, p->address.address_length,
p                 442 drivers/pnp/pnpacpi/rsparser.c 	} else if (p->resource_type == ACPI_IO_RANGE)
p                 443 drivers/pnp/pnpacpi/rsparser.c 		pnp_register_port_resource(dev, option_flags, p->address.minimum,
p                 444 drivers/pnp/pnpacpi/rsparser.c 					   p->address.minimum, 0, p->address.address_length,
p                 660 drivers/pnp/pnpacpi/rsparser.c 			       struct resource *p)
p                 665 drivers/pnp/pnpacpi/rsparser.c 	if (!pnp_resource_enabled(p)) {
p                 668 drivers/pnp/pnpacpi/rsparser.c 			p ? "disabled" : "missing");
p                 672 drivers/pnp/pnpacpi/rsparser.c 	decode_irq_flags(dev, p->flags, &triggering, &polarity, &shareable);
p                 677 drivers/pnp/pnpacpi/rsparser.c 	irq->interrupts[0] = p->start;
p                 680 drivers/pnp/pnpacpi/rsparser.c 		(int) p->start,
p                 689 drivers/pnp/pnpacpi/rsparser.c 				   struct resource *p)
p                 694 drivers/pnp/pnpacpi/rsparser.c 	if (!pnp_resource_enabled(p)) {
p                 697 drivers/pnp/pnpacpi/rsparser.c 			p ? "disabled" : "missing");
p                 701 drivers/pnp/pnpacpi/rsparser.c 	decode_irq_flags(dev, p->flags, &triggering, &polarity, &shareable);
p                 707 drivers/pnp/pnpacpi/rsparser.c 	extended_irq->interrupts[0] = p->start;
p                 709 drivers/pnp/pnpacpi/rsparser.c 	pnp_dbg(&dev->dev, "  encode irq %d %s %s %s\n", (int) p->start,
p                 717 drivers/pnp/pnpacpi/rsparser.c 			       struct resource *p)
p                 721 drivers/pnp/pnpacpi/rsparser.c 	if (!pnp_resource_enabled(p)) {
p                 724 drivers/pnp/pnpacpi/rsparser.c 			p ? "disabled" : "missing");
p                 729 drivers/pnp/pnpacpi/rsparser.c 	switch (p->flags & IORESOURCE_DMA_SPEED_MASK) {
p                 743 drivers/pnp/pnpacpi/rsparser.c 	switch (p->flags & IORESOURCE_DMA_TYPE_MASK) {
p                 754 drivers/pnp/pnpacpi/rsparser.c 	dma->bus_master = !!(p->flags & IORESOURCE_DMA_MASTER);
p                 756 drivers/pnp/pnpacpi/rsparser.c 	dma->channels[0] = p->start;
p                 760 drivers/pnp/pnpacpi/rsparser.c 		(int) p->start, dma->type, dma->transfer, dma->bus_master);
p                 765 drivers/pnp/pnpacpi/rsparser.c 			      struct resource *p)
p                 769 drivers/pnp/pnpacpi/rsparser.c 	if (pnp_resource_enabled(p)) {
p                 771 drivers/pnp/pnpacpi/rsparser.c 		io->io_decode = (p->flags & IORESOURCE_IO_16BIT_ADDR) ?
p                 773 drivers/pnp/pnpacpi/rsparser.c 		io->minimum = p->start;
p                 774 drivers/pnp/pnpacpi/rsparser.c 		io->maximum = p->end;
p                 776 drivers/pnp/pnpacpi/rsparser.c 		io->address_length = resource_size(p);
p                 788 drivers/pnp/pnpacpi/rsparser.c 				    struct resource *p)
p                 792 drivers/pnp/pnpacpi/rsparser.c 	if (pnp_resource_enabled(p)) {
p                 793 drivers/pnp/pnpacpi/rsparser.c 		fixed_io->address = p->start;
p                 794 drivers/pnp/pnpacpi/rsparser.c 		fixed_io->address_length = resource_size(p);
p                 806 drivers/pnp/pnpacpi/rsparser.c 				 struct resource *p)
p                 810 drivers/pnp/pnpacpi/rsparser.c 	if (pnp_resource_enabled(p)) {
p                 812 drivers/pnp/pnpacpi/rsparser.c 		memory24->write_protect = p->flags & IORESOURCE_MEM_WRITEABLE ?
p                 814 drivers/pnp/pnpacpi/rsparser.c 		memory24->minimum = p->start;
p                 815 drivers/pnp/pnpacpi/rsparser.c 		memory24->maximum = p->end;
p                 817 drivers/pnp/pnpacpi/rsparser.c 		memory24->address_length = resource_size(p);
p                 831 drivers/pnp/pnpacpi/rsparser.c 				 struct resource *p)
p                 835 drivers/pnp/pnpacpi/rsparser.c 	if (pnp_resource_enabled(p)) {
p                 836 drivers/pnp/pnpacpi/rsparser.c 		memory32->write_protect = p->flags & IORESOURCE_MEM_WRITEABLE ?
p                 838 drivers/pnp/pnpacpi/rsparser.c 		memory32->minimum = p->start;
p                 839 drivers/pnp/pnpacpi/rsparser.c 		memory32->maximum = p->end;
p                 841 drivers/pnp/pnpacpi/rsparser.c 		memory32->address_length = resource_size(p);
p                 855 drivers/pnp/pnpacpi/rsparser.c 				       struct resource *p)
p                 859 drivers/pnp/pnpacpi/rsparser.c 	if (pnp_resource_enabled(p)) {
p                 861 drivers/pnp/pnpacpi/rsparser.c 		    p->flags & IORESOURCE_MEM_WRITEABLE ?
p                 863 drivers/pnp/pnpacpi/rsparser.c 		fixed_memory32->address = p->start;
p                 864 drivers/pnp/pnpacpi/rsparser.c 		fixed_memory32->address_length = resource_size(p);
p                 241 drivers/pnp/pnpbios/core.c 	unsigned char *p = (char *)node->data;
p                 246 drivers/pnp/pnpbios/core.c 	while ((char *)p < (char *)end) {
p                 247 drivers/pnp/pnpbios/core.c 		if (p[0] & 0x80) {	/* large tag */
p                 248 drivers/pnp/pnpbios/core.c 			len = (p[2] << 8) | p[1];
p                 249 drivers/pnp/pnpbios/core.c 			p += 3;
p                 251 drivers/pnp/pnpbios/core.c 			if (((p[0] >> 3) & 0x0f) == 0x0f)
p                 253 drivers/pnp/pnpbios/core.c 			len = p[0] & 0x07;
p                 254 drivers/pnp/pnpbios/core.c 			p += 1;
p                 257 drivers/pnp/pnpbios/core.c 			p[i] = 0;
p                 258 drivers/pnp/pnpbios/core.c 		p += len;
p                  81 drivers/pnp/pnpbios/rsparser.c 							    unsigned char *p,
p                  87 drivers/pnp/pnpbios/rsparser.c 	if (!p)
p                  94 drivers/pnp/pnpbios/rsparser.c 	while ((char *)p < (char *)end) {
p                  97 drivers/pnp/pnpbios/rsparser.c 		if (p[0] & LARGE_TAG) {	/* large tag */
p                  98 drivers/pnp/pnpbios/rsparser.c 			len = (p[2] << 8) | p[1];
p                  99 drivers/pnp/pnpbios/rsparser.c 			tag = p[0];
p                 101 drivers/pnp/pnpbios/rsparser.c 			len = p[0] & 0x07;
p                 102 drivers/pnp/pnpbios/rsparser.c 			tag = ((p[0] >> 3) & 0x0f);
p                 110 drivers/pnp/pnpbios/rsparser.c 			io = *(short *)&p[4];
p                 111 drivers/pnp/pnpbios/rsparser.c 			size = *(short *)&p[10];
p                 126 drivers/pnp/pnpbios/rsparser.c 			io = *(int *)&p[4];
p                 127 drivers/pnp/pnpbios/rsparser.c 			size = *(int *)&p[16];
p                 134 drivers/pnp/pnpbios/rsparser.c 			io = *(int *)&p[4];
p                 135 drivers/pnp/pnpbios/rsparser.c 			size = *(int *)&p[8];
p                 144 drivers/pnp/pnpbios/rsparser.c 			mask = p[1] + p[2] * 256;
p                 160 drivers/pnp/pnpbios/rsparser.c 			mask = p[1];
p                 172 drivers/pnp/pnpbios/rsparser.c 			io = p[2] + p[3] * 256;
p                 173 drivers/pnp/pnpbios/rsparser.c 			size = p[7];
p                 184 drivers/pnp/pnpbios/rsparser.c 			io = p[1] + p[2] * 256;
p                 185 drivers/pnp/pnpbios/rsparser.c 			size = p[3];
p                 190 drivers/pnp/pnpbios/rsparser.c 			p = p + 2;
p                 191 drivers/pnp/pnpbios/rsparser.c 			return (unsigned char *)p;
p                 202 drivers/pnp/pnpbios/rsparser.c 		if (p[0] & LARGE_TAG)
p                 203 drivers/pnp/pnpbios/rsparser.c 			p += len + 3;
p                 205 drivers/pnp/pnpbios/rsparser.c 			p += len + 1;
p                 218 drivers/pnp/pnpbios/rsparser.c 					    unsigned char *p, int size,
p                 224 drivers/pnp/pnpbios/rsparser.c 	min = ((p[5] << 8) | p[4]) << 8;
p                 225 drivers/pnp/pnpbios/rsparser.c 	max = ((p[7] << 8) | p[6]) << 8;
p                 226 drivers/pnp/pnpbios/rsparser.c 	align = (p[9] << 8) | p[8];
p                 227 drivers/pnp/pnpbios/rsparser.c 	len = ((p[11] << 8) | p[10]) << 8;
p                 228 drivers/pnp/pnpbios/rsparser.c 	flags = p[3];
p                 234 drivers/pnp/pnpbios/rsparser.c 					      unsigned char *p, int size,
p                 240 drivers/pnp/pnpbios/rsparser.c 	min = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4];
p                 241 drivers/pnp/pnpbios/rsparser.c 	max = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8];
p                 242 drivers/pnp/pnpbios/rsparser.c 	align = (p[15] << 24) | (p[14] << 16) | (p[13] << 8) | p[12];
p                 243 drivers/pnp/pnpbios/rsparser.c 	len = (p[19] << 24) | (p[18] << 16) | (p[17] << 8) | p[16];
p                 244 drivers/pnp/pnpbios/rsparser.c 	flags = p[3];
p                 250 drivers/pnp/pnpbios/rsparser.c 						    unsigned char *p, int size,
p                 256 drivers/pnp/pnpbios/rsparser.c 	base = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4];
p                 257 drivers/pnp/pnpbios/rsparser.c 	len = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8];
p                 258 drivers/pnp/pnpbios/rsparser.c 	flags = p[3];
p                 263 drivers/pnp/pnpbios/rsparser.c 					    unsigned char *p, int size,
p                 270 drivers/pnp/pnpbios/rsparser.c 	bits = (p[2] << 8) | p[1];
p                 276 drivers/pnp/pnpbios/rsparser.c 		flags = p[3];
p                 282 drivers/pnp/pnpbios/rsparser.c 					    unsigned char *p, int size,
p                 285 drivers/pnp/pnpbios/rsparser.c 	pnp_register_dma_resource(dev, option_flags, p[1], p[2]);
p                 289 drivers/pnp/pnpbios/rsparser.c 					     unsigned char *p, int size,
p                 295 drivers/pnp/pnpbios/rsparser.c 	min = (p[3] << 8) | p[2];
p                 296 drivers/pnp/pnpbios/rsparser.c 	max = (p[5] << 8) | p[4];
p                 297 drivers/pnp/pnpbios/rsparser.c 	align = p[6];
p                 298 drivers/pnp/pnpbios/rsparser.c 	len = p[7];
p                 299 drivers/pnp/pnpbios/rsparser.c 	flags = p[1] ? IORESOURCE_IO_16BIT_ADDR : 0;
p                 305 drivers/pnp/pnpbios/rsparser.c 						   unsigned char *p, int size,
p                 310 drivers/pnp/pnpbios/rsparser.c 	base = (p[2] << 8) | p[1];
p                 311 drivers/pnp/pnpbios/rsparser.c 	len = p[3];
p                 317 drivers/pnp/pnpbios/rsparser.c pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end,
p                 324 drivers/pnp/pnpbios/rsparser.c 	if (!p)
p                 329 drivers/pnp/pnpbios/rsparser.c 	while ((char *)p < (char *)end) {
p                 332 drivers/pnp/pnpbios/rsparser.c 		if (p[0] & LARGE_TAG) {	/* large tag */
p                 333 drivers/pnp/pnpbios/rsparser.c 			len = (p[2] << 8) | p[1];
p                 334 drivers/pnp/pnpbios/rsparser.c 			tag = p[0];
p                 336 drivers/pnp/pnpbios/rsparser.c 			len = p[0] & 0x07;
p                 337 drivers/pnp/pnpbios/rsparser.c 			tag = ((p[0] >> 3) & 0x0f);
p                 345 drivers/pnp/pnpbios/rsparser.c 			pnpbios_parse_mem_option(dev, p, len, option_flags);
p                 351 drivers/pnp/pnpbios/rsparser.c 			pnpbios_parse_mem32_option(dev, p, len, option_flags);
p                 357 drivers/pnp/pnpbios/rsparser.c 			pnpbios_parse_fixed_mem32_option(dev, p, len,
p                 364 drivers/pnp/pnpbios/rsparser.c 			pnpbios_parse_irq_option(dev, p, len, option_flags);
p                 370 drivers/pnp/pnpbios/rsparser.c 			pnpbios_parse_dma_option(dev, p, len, option_flags);
p                 376 drivers/pnp/pnpbios/rsparser.c 			pnpbios_parse_port_option(dev, p, len, option_flags);
p                 386 drivers/pnp/pnpbios/rsparser.c 			pnpbios_parse_fixed_port_option(dev, p, len,
p                 395 drivers/pnp/pnpbios/rsparser.c 				priority = p[1];
p                 406 drivers/pnp/pnpbios/rsparser.c 			return p + 2;
p                 416 drivers/pnp/pnpbios/rsparser.c 		if (p[0] & LARGE_TAG)
p                 417 drivers/pnp/pnpbios/rsparser.c 			p += len + 3;
p                 419 drivers/pnp/pnpbios/rsparser.c 			p += len + 1;
p                 431 drivers/pnp/pnpbios/rsparser.c static unsigned char *pnpbios_parse_compatible_ids(unsigned char *p,
p                 440 drivers/pnp/pnpbios/rsparser.c 	if (!p)
p                 443 drivers/pnp/pnpbios/rsparser.c 	while ((char *)p < (char *)end) {
p                 446 drivers/pnp/pnpbios/rsparser.c 		if (p[0] & LARGE_TAG) {	/* large tag */
p                 447 drivers/pnp/pnpbios/rsparser.c 			len = (p[2] << 8) | p[1];
p                 448 drivers/pnp/pnpbios/rsparser.c 			tag = p[0];
p                 450 drivers/pnp/pnpbios/rsparser.c 			len = p[0] & 0x07;
p                 451 drivers/pnp/pnpbios/rsparser.c 			tag = ((p[0] >> 3) & 0x0f);
p                 457 drivers/pnp/pnpbios/rsparser.c 			strncpy(dev->name, p + 3,
p                 466 drivers/pnp/pnpbios/rsparser.c 			eisa_id = p[1] | p[2] << 8 | p[3] << 16 | p[4] << 24;
p                 474 drivers/pnp/pnpbios/rsparser.c 			p = p + 2;
p                 475 drivers/pnp/pnpbios/rsparser.c 			return (unsigned char *)p;
p                 486 drivers/pnp/pnpbios/rsparser.c 		if (p[0] & LARGE_TAG)
p                 487 drivers/pnp/pnpbios/rsparser.c 			p += len + 3;
p                 489 drivers/pnp/pnpbios/rsparser.c 			p += len + 1;
p                 501 drivers/pnp/pnpbios/rsparser.c static void pnpbios_encode_mem(struct pnp_dev *dev, unsigned char *p,
p                 515 drivers/pnp/pnpbios/rsparser.c 	p[4] = (base >> 8) & 0xff;
p                 516 drivers/pnp/pnpbios/rsparser.c 	p[5] = ((base >> 8) >> 8) & 0xff;
p                 517 drivers/pnp/pnpbios/rsparser.c 	p[6] = (base >> 8) & 0xff;
p                 518 drivers/pnp/pnpbios/rsparser.c 	p[7] = ((base >> 8) >> 8) & 0xff;
p                 519 drivers/pnp/pnpbios/rsparser.c 	p[10] = (len >> 8) & 0xff;
p                 520 drivers/pnp/pnpbios/rsparser.c 	p[11] = ((len >> 8) >> 8) & 0xff;
p                 525 drivers/pnp/pnpbios/rsparser.c static void pnpbios_encode_mem32(struct pnp_dev *dev, unsigned char *p,
p                 539 drivers/pnp/pnpbios/rsparser.c 	p[4] = base & 0xff;
p                 540 drivers/pnp/pnpbios/rsparser.c 	p[5] = (base >> 8) & 0xff;
p                 541 drivers/pnp/pnpbios/rsparser.c 	p[6] = (base >> 16) & 0xff;
p                 542 drivers/pnp/pnpbios/rsparser.c 	p[7] = (base >> 24) & 0xff;
p                 543 drivers/pnp/pnpbios/rsparser.c 	p[8] = base & 0xff;
p                 544 drivers/pnp/pnpbios/rsparser.c 	p[9] = (base >> 8) & 0xff;
p                 545 drivers/pnp/pnpbios/rsparser.c 	p[10] = (base >> 16) & 0xff;
p                 546 drivers/pnp/pnpbios/rsparser.c 	p[11] = (base >> 24) & 0xff;
p                 547 drivers/pnp/pnpbios/rsparser.c 	p[16] = len & 0xff;
p                 548 drivers/pnp/pnpbios/rsparser.c 	p[17] = (len >> 8) & 0xff;
p                 549 drivers/pnp/pnpbios/rsparser.c 	p[18] = (len >> 16) & 0xff;
p                 550 drivers/pnp/pnpbios/rsparser.c 	p[19] = (len >> 24) & 0xff;
p                 555 drivers/pnp/pnpbios/rsparser.c static void pnpbios_encode_fixed_mem32(struct pnp_dev *dev, unsigned char *p,
p                 569 drivers/pnp/pnpbios/rsparser.c 	p[4] = base & 0xff;
p                 570 drivers/pnp/pnpbios/rsparser.c 	p[5] = (base >> 8) & 0xff;
p                 571 drivers/pnp/pnpbios/rsparser.c 	p[6] = (base >> 16) & 0xff;
p                 572 drivers/pnp/pnpbios/rsparser.c 	p[7] = (base >> 24) & 0xff;
p                 573 drivers/pnp/pnpbios/rsparser.c 	p[8] = len & 0xff;
p                 574 drivers/pnp/pnpbios/rsparser.c 	p[9] = (len >> 8) & 0xff;
p                 575 drivers/pnp/pnpbios/rsparser.c 	p[10] = (len >> 16) & 0xff;
p                 576 drivers/pnp/pnpbios/rsparser.c 	p[11] = (len >> 24) & 0xff;
p                 582 drivers/pnp/pnpbios/rsparser.c static void pnpbios_encode_irq(struct pnp_dev *dev, unsigned char *p,
p                 592 drivers/pnp/pnpbios/rsparser.c 	p[1] = map & 0xff;
p                 593 drivers/pnp/pnpbios/rsparser.c 	p[2] = (map >> 8) & 0xff;
p                 598 drivers/pnp/pnpbios/rsparser.c static void pnpbios_encode_dma(struct pnp_dev *dev, unsigned char *p,
p                 608 drivers/pnp/pnpbios/rsparser.c 	p[1] = map & 0xff;
p                 613 drivers/pnp/pnpbios/rsparser.c static void pnpbios_encode_port(struct pnp_dev *dev, unsigned char *p,
p                 627 drivers/pnp/pnpbios/rsparser.c 	p[2] = base & 0xff;
p                 628 drivers/pnp/pnpbios/rsparser.c 	p[3] = (base >> 8) & 0xff;
p                 629 drivers/pnp/pnpbios/rsparser.c 	p[4] = base & 0xff;
p                 630 drivers/pnp/pnpbios/rsparser.c 	p[5] = (base >> 8) & 0xff;
p                 631 drivers/pnp/pnpbios/rsparser.c 	p[7] = len & 0xff;
p                 636 drivers/pnp/pnpbios/rsparser.c static void pnpbios_encode_fixed_port(struct pnp_dev *dev, unsigned char *p,
p                 650 drivers/pnp/pnpbios/rsparser.c 	p[1] = base & 0xff;
p                 651 drivers/pnp/pnpbios/rsparser.c 	p[2] = (base >> 8) & 0xff;
p                 652 drivers/pnp/pnpbios/rsparser.c 	p[3] = len & 0xff;
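
The pnpbios_encode_* entries above store every multi-byte field one byte at a time, least-significant byte first (the 24-bit memory form first shifts the base right by 8, i.e. it is kept in 256-byte units). A minimal little-endian packing helper in the same spirit, independent of the PnP descriptor layouts:

	#include <stdint.h>
	#include <stdio.h>

	/* Store a 32-bit value little-endian, as the 32-bit encoders above do. */
	static void put_le32(unsigned char *p, uint32_t v)
	{
		p[0] = v & 0xff;
		p[1] = (v >> 8) & 0xff;
		p[2] = (v >> 16) & 0xff;
		p[3] = (v >> 24) & 0xff;
	}

	int main(void)
	{
		unsigned char buf[4];

		put_le32(buf, 0x12345678);
		printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
		return 0;
	}
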
p                 660 drivers/pnp/pnpbios/rsparser.c 							     unsigned char *p,
p                 666 drivers/pnp/pnpbios/rsparser.c 	if (!p)
p                 669 drivers/pnp/pnpbios/rsparser.c 	while ((char *)p < (char *)end) {
p                 672 drivers/pnp/pnpbios/rsparser.c 		if (p[0] & LARGE_TAG) {	/* large tag */
p                 673 drivers/pnp/pnpbios/rsparser.c 			len = (p[2] << 8) | p[1];
p                 674 drivers/pnp/pnpbios/rsparser.c 			tag = p[0];
p                 676 drivers/pnp/pnpbios/rsparser.c 			len = p[0] & 0x07;
p                 677 drivers/pnp/pnpbios/rsparser.c 			tag = ((p[0] >> 3) & 0x0f);
p                 685 drivers/pnp/pnpbios/rsparser.c 			pnpbios_encode_mem(dev, p,
p                 693 drivers/pnp/pnpbios/rsparser.c 			pnpbios_encode_mem32(dev, p,
p                 701 drivers/pnp/pnpbios/rsparser.c 			pnpbios_encode_fixed_mem32(dev, p,
p                 709 drivers/pnp/pnpbios/rsparser.c 			pnpbios_encode_irq(dev, p,
p                 717 drivers/pnp/pnpbios/rsparser.c 			pnpbios_encode_dma(dev, p,
p                 725 drivers/pnp/pnpbios/rsparser.c 			pnpbios_encode_port(dev, p,
p                 737 drivers/pnp/pnpbios/rsparser.c 			pnpbios_encode_fixed_port(dev, p,
p                 743 drivers/pnp/pnpbios/rsparser.c 			p = p + 2;
p                 744 drivers/pnp/pnpbios/rsparser.c 			return (unsigned char *)p;
p                 755 drivers/pnp/pnpbios/rsparser.c 		if (p[0] & LARGE_TAG)
p                 756 drivers/pnp/pnpbios/rsparser.c 			p += len + 3;
p                 758 drivers/pnp/pnpbios/rsparser.c 			p += len + 1;
p                 773 drivers/pnp/pnpbios/rsparser.c 	unsigned char *p = (char *)node->data;
p                 776 drivers/pnp/pnpbios/rsparser.c 	p = pnpbios_parse_allocated_resource_data(dev, p, end);
p                 777 drivers/pnp/pnpbios/rsparser.c 	if (!p)
p                 779 drivers/pnp/pnpbios/rsparser.c 	p = pnpbios_parse_resource_option_data(p, end, dev);
p                 780 drivers/pnp/pnpbios/rsparser.c 	if (!p)
p                 782 drivers/pnp/pnpbios/rsparser.c 	p = pnpbios_parse_compatible_ids(p, end, dev);
p                 783 drivers/pnp/pnpbios/rsparser.c 	if (!p)
p                 791 drivers/pnp/pnpbios/rsparser.c 	unsigned char *p = (char *)node->data;
p                 794 drivers/pnp/pnpbios/rsparser.c 	p = pnpbios_parse_allocated_resource_data(dev, p, end);
p                 795 drivers/pnp/pnpbios/rsparser.c 	if (!p)
p                 803 drivers/pnp/pnpbios/rsparser.c 	unsigned char *p = (char *)node->data;
p                 806 drivers/pnp/pnpbios/rsparser.c 	p = pnpbios_encode_allocated_resource_data(dev, p, end);
p                 807 drivers/pnp/pnpbios/rsparser.c 	if (!p)
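
Several of the pnpbios rsparser.c loops above share one walk: look at p[0], decode a large tag (length in p[1]/p[2], three-byte header) or a small tag (length in the low three bits, tag number in bits 6..3, one-byte header), handle the body, then step past header plus body. A compact userspace sketch of that walk, assuming LARGE_TAG is the top bit of the first byte as the p[0] & 0x80 test in pnpbios/core.c above suggests; the sample blob is hypothetical:

	#include <stdio.h>

	#define LARGE_TAG 0x80

	/* Walk small/large resource tags between p and end, reporting each. */
	static void walk_tags(const unsigned char *p, const unsigned char *end)
	{
		while (p < end) {
			unsigned int len, tag;

			if (p[0] & LARGE_TAG) {		/* large tag */
				len = (p[2] << 8) | p[1];
				tag = p[0];
				if (p + 3 + len > end)
					break;		/* truncated descriptor */
			} else {			/* small tag */
				len = p[0] & 0x07;
				tag = (p[0] >> 3) & 0x0f;
				if (p + 1 + len > end)
					break;
			}

			printf("tag 0x%02x, %u data byte(s)\n", tag, len);

			if (tag == 0x0f)		/* small end tag */
				return;

			p += (p[0] & LARGE_TAG) ? len + 3 : len + 1;
		}
	}

	int main(void)
	{
		/* Hypothetical blob: one small tag with two data bytes,
		 * followed by the small end tag (0x0f) and its checksum byte. */
		const unsigned char blob[] = { 0x22, 0x20, 0x00, 0x79, 0x00 };

		walk_tags(blob, blob + sizeof(blob));
		return 0;
	}
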
p                  76 drivers/power/reset/ltc2952-poweroff.c #define to_ltc2952(p, m) container_of(p, struct ltc2952_poweroff, m)
p                 171 drivers/power/reset/reboot-mode.c 	struct reboot_mode_driver **p = res;
p                 173 drivers/power/reset/reboot-mode.c 	if (WARN_ON(!p || !*p))
p                 176 drivers/power/reset/reboot-mode.c 	return *p == data;
p                 490 drivers/power/supply/test_power.c #define param_check_ac_online(name, p) __param_check(name, p, void);
p                 491 drivers/power/supply/test_power.c #define param_check_usb_online(name, p) __param_check(name, p, void);
p                 492 drivers/power/supply/test_power.c #define param_check_battery_status(name, p) __param_check(name, p, void);
p                 493 drivers/power/supply/test_power.c #define param_check_battery_present(name, p) __param_check(name, p, void);
p                 494 drivers/power/supply/test_power.c #define param_check_battery_technology(name, p) __param_check(name, p, void);
p                 495 drivers/power/supply/test_power.c #define param_check_battery_health(name, p) __param_check(name, p, void);
p                 496 drivers/power/supply/test_power.c #define param_check_battery_capacity(name, p) __param_check(name, p, void);
p                 497 drivers/power/supply/test_power.c #define param_check_battery_voltage(name, p) __param_check(name, p, void);
p                 121 drivers/powercap/intel_rapl_common.c #define PRIMITIVE_INFO_INIT(p, m, s, i, u, f) {	\
p                 122 drivers/powercap/intel_rapl_common.c 		.name = #p,			\
p                 327 drivers/ps3/ps3av_cmd.c u32 ps3av_cmd_set_av_video_cs(void *p, u32 avport, int video_vid, int cs_out,
p                 332 drivers/ps3/ps3av_cmd.c 	av_video_cs = (struct ps3av_pkt_av_video_cs *)p;
p                 366 drivers/ps3/ps3av_cmd.c u32 ps3av_cmd_set_video_mode(void *p, u32 head, int video_vid, int video_fmt,
p                 372 drivers/ps3/ps3av_cmd.c 	video_mode = (struct ps3av_pkt_video_mode *)p;
p                 623 drivers/ps3/ps3av_cmd.c u32 ps3av_cmd_set_av_audio_param(void *p, u32 port,
p                 629 drivers/ps3/ps3av_cmd.c 	param = (struct ps3av_pkt_av_audio_param *)p;
p                 877 drivers/pwm/core.c 	struct pwm_lookup *p, *chosen = NULL;
p                 914 drivers/pwm/core.c 	list_for_each_entry(p, &pwm_lookup_list, list) {
p                 917 drivers/pwm/core.c 		if (p->dev_id) {
p                 918 drivers/pwm/core.c 			if (!dev_id || strcmp(p->dev_id, dev_id))
p                 924 drivers/pwm/core.c 		if (p->con_id) {
p                 925 drivers/pwm/core.c 			if (!con_id || strcmp(p->con_id, con_id))
p                 932 drivers/pwm/core.c 			chosen = p;
p                1117 drivers/pwm/core.c 	struct pwm_device **p = res;
p                1119 drivers/pwm/core.c 	if (WARN_ON(!p || !*p))
p                1122 drivers/pwm/core.c 	return *p == data;
p                  61 drivers/pwm/pwm-brcmstb.c static inline u32 brcmstb_pwm_readl(struct brcmstb_pwm *p,
p                  65 drivers/pwm/pwm-brcmstb.c 		return __raw_readl(p->base + offset);
p                  67 drivers/pwm/pwm-brcmstb.c 		return readl_relaxed(p->base + offset);
p                  70 drivers/pwm/pwm-brcmstb.c static inline void brcmstb_pwm_writel(struct brcmstb_pwm *p, u32 value,
p                  74 drivers/pwm/pwm-brcmstb.c 		__raw_writel(value, p->base + offset);
p                  76 drivers/pwm/pwm-brcmstb.c 		writel_relaxed(value, p->base + offset);
p                 100 drivers/pwm/pwm-brcmstb.c 	struct brcmstb_pwm *p = to_brcmstb_pwm(chip);
p                 123 drivers/pwm/pwm-brcmstb.c 		rate = (u64)clk_get_rate(p->clk) * (u64)cword;
p                 167 drivers/pwm/pwm-brcmstb.c 	spin_lock(&p->lock);
p                 168 drivers/pwm/pwm-brcmstb.c 	brcmstb_pwm_writel(p, cword >> 8, PWM_CWORD_MSB(channel));
p                 169 drivers/pwm/pwm-brcmstb.c 	brcmstb_pwm_writel(p, cword & 0xff, PWM_CWORD_LSB(channel));
p                 172 drivers/pwm/pwm-brcmstb.c 	value = brcmstb_pwm_readl(p, PWM_CTRL2);
p                 174 drivers/pwm/pwm-brcmstb.c 	brcmstb_pwm_writel(p, value, PWM_CTRL2);
p                 177 drivers/pwm/pwm-brcmstb.c 	brcmstb_pwm_writel(p, pc, PWM_PERIOD(channel));
p                 178 drivers/pwm/pwm-brcmstb.c 	brcmstb_pwm_writel(p, dc, PWM_ON(channel));
p                 179 drivers/pwm/pwm-brcmstb.c 	spin_unlock(&p->lock);
p                 184 drivers/pwm/pwm-brcmstb.c static inline void brcmstb_pwm_enable_set(struct brcmstb_pwm *p,
p                 190 drivers/pwm/pwm-brcmstb.c 	spin_lock(&p->lock);
p                 191 drivers/pwm/pwm-brcmstb.c 	value = brcmstb_pwm_readl(p, PWM_CTRL);
p                 201 drivers/pwm/pwm-brcmstb.c 	brcmstb_pwm_writel(p, value, PWM_CTRL);
p                 202 drivers/pwm/pwm-brcmstb.c 	spin_unlock(&p->lock);
p                 207 drivers/pwm/pwm-brcmstb.c 	struct brcmstb_pwm *p = to_brcmstb_pwm(chip);
p                 209 drivers/pwm/pwm-brcmstb.c 	brcmstb_pwm_enable_set(p, pwm->hwpwm, true);
p                 216 drivers/pwm/pwm-brcmstb.c 	struct brcmstb_pwm *p = to_brcmstb_pwm(chip);
p                 218 drivers/pwm/pwm-brcmstb.c 	brcmstb_pwm_enable_set(p, pwm->hwpwm, false);
p                 236 drivers/pwm/pwm-brcmstb.c 	struct brcmstb_pwm *p;
p                 240 drivers/pwm/pwm-brcmstb.c 	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
p                 241 drivers/pwm/pwm-brcmstb.c 	if (!p)
p                 244 drivers/pwm/pwm-brcmstb.c 	spin_lock_init(&p->lock);
p                 246 drivers/pwm/pwm-brcmstb.c 	p->clk = devm_clk_get(&pdev->dev, NULL);
p                 247 drivers/pwm/pwm-brcmstb.c 	if (IS_ERR(p->clk)) {
p                 249 drivers/pwm/pwm-brcmstb.c 		return PTR_ERR(p->clk);
p                 252 drivers/pwm/pwm-brcmstb.c 	ret = clk_prepare_enable(p->clk);
p                 258 drivers/pwm/pwm-brcmstb.c 	platform_set_drvdata(pdev, p);
p                 260 drivers/pwm/pwm-brcmstb.c 	p->chip.dev = &pdev->dev;
p                 261 drivers/pwm/pwm-brcmstb.c 	p->chip.ops = &brcmstb_pwm_ops;
p                 262 drivers/pwm/pwm-brcmstb.c 	p->chip.base = -1;
p                 263 drivers/pwm/pwm-brcmstb.c 	p->chip.npwm = 2;
p                 266 drivers/pwm/pwm-brcmstb.c 	p->base = devm_ioremap_resource(&pdev->dev, res);
p                 267 drivers/pwm/pwm-brcmstb.c 	if (IS_ERR(p->base)) {
p                 268 drivers/pwm/pwm-brcmstb.c 		ret = PTR_ERR(p->base);
p                 272 drivers/pwm/pwm-brcmstb.c 	ret = pwmchip_add(&p->chip);
p                 281 drivers/pwm/pwm-brcmstb.c 	clk_disable_unprepare(p->clk);
p                 287 drivers/pwm/pwm-brcmstb.c 	struct brcmstb_pwm *p = platform_get_drvdata(pdev);
p                 290 drivers/pwm/pwm-brcmstb.c 	ret = pwmchip_remove(&p->chip);
p                 291 drivers/pwm/pwm-brcmstb.c 	clk_disable_unprepare(p->clk);
p                 299 drivers/pwm/pwm-brcmstb.c 	struct brcmstb_pwm *p = dev_get_drvdata(dev);
p                 301 drivers/pwm/pwm-brcmstb.c 	clk_disable(p->clk);
p                 308 drivers/pwm/pwm-brcmstb.c 	struct brcmstb_pwm *p = dev_get_drvdata(dev);
p                 310 drivers/pwm/pwm-brcmstb.c 	clk_enable(p->clk);
p                  90 drivers/pwm/pwm-imx-tpm.c 				   struct imx_tpm_pwm_param *p,
p                 108 drivers/pwm/pwm-imx-tpm.c 	p->prescale = prescale;
p                 111 drivers/pwm/pwm-imx-tpm.c 	p->mod = period_count;
p                 128 drivers/pwm/pwm-imx-tpm.c 	tmp = (u64)p->mod * real_state->duty_cycle;
p                 129 drivers/pwm/pwm-imx-tpm.c 	p->val = DIV_ROUND_CLOSEST_ULL(tmp, real_state->period);
p                 173 drivers/pwm/pwm-imx-tpm.c 				struct imx_tpm_pwm_param *p,
p                 197 drivers/pwm/pwm-imx-tpm.c 		if (cmod && cur_prescale != p->prescale)
p                 202 drivers/pwm/pwm-imx-tpm.c 		val |= FIELD_PREP(PWM_IMX_TPM_SC_PS, p->prescale);
p                 213 drivers/pwm/pwm-imx-tpm.c 		writel(p->mod, tpm->base + PWM_IMX_TPM_MOD);
p                 233 drivers/pwm/pwm-imx-tpm.c 		writel(p->val, tpm->base + PWM_IMX_TPM_CnV(pwm->hwpwm));
p                 241 drivers/pwm/pwm-imx-tpm.c 		while (readl(tpm->base + PWM_IMX_TPM_MOD) != p->mod
p                 243 drivers/pwm/pwm-imx-tpm.c 		       != p->val) {
p                  67 drivers/pwm/pwm-imx1.c 	u32 max, p;
p                  87 drivers/pwm/pwm-imx1.c 	p = max * duty_ns / period_ns;
p                  89 drivers/pwm/pwm-imx1.c 	writel(max - p, imx->mmio_base + MX1_PWMS);
p                  25 drivers/pwm/pwm-mxs.c #define  PERIOD_PERIOD(p)	((p) & 0xffff)
p                1531 drivers/rapidio/rio_cm.c 	u32 __user *p = arg;
p                1536 drivers/rapidio/rio_cm.c 	if (get_user(mport_id, p))
p                1658 drivers/rapidio/rio_cm.c 	u16 __user *p = arg;
p                1662 drivers/rapidio/rio_cm.c 	if (get_user(ch_num, p))
p                1674 drivers/rapidio/rio_cm.c 	return put_user(ch_num, p);
p                1684 drivers/rapidio/rio_cm.c 	u16 __user *p = arg;
p                1688 drivers/rapidio/rio_cm.c 	if (get_user(ch_num, p))
p                1732 drivers/rapidio/rio_cm.c 	u16 __user *p = arg;
p                1735 drivers/rapidio/rio_cm.c 	if (get_user(ch_num, p))
p                  78 drivers/regulator/dbx500-prcmu.c static int ux500_regulator_power_state_cnt_show(struct seq_file *s, void *p)
p                  88 drivers/regulator/dbx500-prcmu.c static int ux500_regulator_status_show(struct seq_file *s, void *p)
p                 163 drivers/regulator/mc13xxx-regulator-core.c 	struct mc13xxx_regulator_init_data *data, *p;
p                 181 drivers/regulator/mc13xxx-regulator-core.c 	p = data;
p                 191 drivers/regulator/mc13xxx-regulator-core.c 				p->id = i;
p                 192 drivers/regulator/mc13xxx-regulator-core.c 				p->init_data = of_get_regulator_init_data(
p                 195 drivers/regulator/mc13xxx-regulator-core.c 				p->node = child;
p                 196 drivers/regulator/mc13xxx-regulator-core.c 				p++;
p                 304 drivers/regulator/tps6586x-regulator.c 			int id, struct regulator_init_data *p)
p                 307 drivers/regulator/tps6586x-regulator.c 	struct tps6586x_settings *setting = p->driver_data;
p                  85 drivers/remoteproc/da8xx_remoteproc.c static irqreturn_t handle_event(int irq, void *p)
p                  87 drivers/remoteproc/da8xx_remoteproc.c 	struct rproc *rproc = (struct rproc *)p;
p                 105 drivers/remoteproc/da8xx_remoteproc.c static irqreturn_t da8xx_rproc_callback(int irq, void *p)
p                 107 drivers/remoteproc/da8xx_remoteproc.c 	struct rproc *rproc = (struct rproc *)p;
p                1994 drivers/remoteproc/remoteproc_core.c 	char *p, *template = "rproc-%s-fw";
p                2006 drivers/remoteproc/remoteproc_core.c 		p = kmalloc(name_len, GFP_KERNEL);
p                2007 drivers/remoteproc/remoteproc_core.c 		if (!p)
p                2009 drivers/remoteproc/remoteproc_core.c 		snprintf(p, name_len, template, name);
p                2011 drivers/remoteproc/remoteproc_core.c 		p = kstrdup(firmware, GFP_KERNEL);
p                2012 drivers/remoteproc/remoteproc_core.c 		if (!p)
p                2018 drivers/remoteproc/remoteproc_core.c 		kfree(p);
p                2024 drivers/remoteproc/remoteproc_core.c 		kfree(p);
p                2029 drivers/remoteproc/remoteproc_core.c 	rproc->firmware = p;
p                 188 drivers/remoteproc/remoteproc_debugfs.c static int rproc_rsc_table_show(struct seq_file *seq, void *p)
p                 285 drivers/remoteproc/remoteproc_debugfs.c static int rproc_carveouts_show(struct seq_file *seq, void *p)
p                  27 drivers/remoteproc/remoteproc_sysfs.c 	char *p;
p                  49 drivers/remoteproc/remoteproc_sysfs.c 	p = kstrndup(buf, len, GFP_KERNEL);
p                  50 drivers/remoteproc/remoteproc_sysfs.c 	if (!p) {
p                  56 drivers/remoteproc/remoteproc_sysfs.c 	rproc->firmware = p;
p                  17 drivers/reset/reset-axs10x.c #define to_axs10x_rst(p)	container_of((p), struct axs10x_rst, rcdev)
p                  28 drivers/reset/reset-berlin.c #define to_berlin_reset_priv(p)		\
p                  29 drivers/reset/reset-berlin.c 	container_of((p), struct berlin_reset_priv, rcdev)
p                  21 drivers/reset/reset-hsdk.c #define to_hsdk_rst(p)	container_of((p), struct hsdk_rst, rcdev)
p                  42 drivers/reset/reset-lpc18xx.c #define to_rgu_data(p) container_of(p, struct lpc18xx_rgu_data, rcdev)
p                  24 drivers/reset/reset-scmi.c #define to_scmi_reset_data(p)	container_of((p), struct scmi_reset_data, rcdev)
p                  25 drivers/reset/reset-scmi.c #define to_scmi_handle(p)	(to_scmi_reset_data(p)->handle)
p                  51 drivers/reset/reset-ti-sci.c #define to_ti_sci_reset_data(p)	\
p                  52 drivers/reset/reset-ti-sci.c 	container_of((p), struct ti_sci_reset_data, rcdev)
p                 257 drivers/reset/reset-uniphier.c 	const struct uniphier_reset_data *p;
p                 259 drivers/reset/reset-uniphier.c 	for (p = priv->data; p->id != UNIPHIER_RESET_ID_END; p++) {
p                 262 drivers/reset/reset-uniphier.c 		if (p->id != id)
p                 265 drivers/reset/reset-uniphier.c 		mask = BIT(p->bit);
p                 272 drivers/reset/reset-uniphier.c 		if (p->flags & UNIPHIER_RESET_ACTIVE_LOW)
p                 275 drivers/reset/reset-uniphier.c 		return regmap_write_bits(priv->regmap, p->reg, mask, val);
p                 298 drivers/reset/reset-uniphier.c 	const struct uniphier_reset_data *p;
p                 300 drivers/reset/reset-uniphier.c 	for (p = priv->data; p->id != UNIPHIER_RESET_ID_END; p++) {
p                 304 drivers/reset/reset-uniphier.c 		if (p->id != id)
p                 307 drivers/reset/reset-uniphier.c 		ret = regmap_read(priv->regmap, p->reg, &val);
p                 311 drivers/reset/reset-uniphier.c 		asserted = !!(val & BIT(p->bit));
p                 313 drivers/reset/reset-uniphier.c 		if (p->flags & UNIPHIER_RESET_ACTIVE_LOW)
p                 333 drivers/reset/reset-uniphier.c 	const struct uniphier_reset_data *p, *data;
p                 355 drivers/reset/reset-uniphier.c 	for (p = data; p->id != UNIPHIER_RESET_ID_END; p++)
p                 356 drivers/reset/reset-uniphier.c 		nr_resets = max(nr_resets, p->id + 1);
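
The reset-uniphier.c entries above scan a descriptor table terminated by a UNIPHIER_RESET_ID_END sentinel, pick the entry whose id matches, build a mask from its bit number, and invert the written value for active-low lines. A small sketch of that sentinel-terminated lookup; the descriptor contents below are made up for illustration:

	#include <stdio.h>

	#define RESET_ID_END	(-1)
	#define ACTIVE_LOW	0x1

	struct reset_desc {
		int id;
		unsigned int reg;
		unsigned int bit;
		unsigned int flags;
	};

	static const struct reset_desc resets[] = {
		{ .id = 2, .reg = 0x200c, .bit = 5 },
		{ .id = 8, .reg = 0x2010, .bit = 1, .flags = ACTIVE_LOW },
		{ .id = RESET_ID_END },		/* sentinel terminates the scan */
	};

	int main(void)
	{
		const struct reset_desc *p;
		int id = 8;			/* line to assert */

		for (p = resets; p->id != RESET_ID_END; p++) {
			unsigned int mask, val;

			if (p->id != id)
				continue;

			mask = 1u << p->bit;
			val = (p->flags & ACTIVE_LOW) ? 0 : mask;	/* active-low asserts by clearing */
			printf("reg 0x%x: mask 0x%x, value 0x%x\n", p->reg, mask, val);
		}
		return 0;
	}
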
p                  26 drivers/reset/reset-zynq.c #define to_zynq_reset_data(p)		\
p                  27 drivers/reset/reset-zynq.c 	container_of((p), struct zynq_reset_data, rcdev)
p                  32 drivers/rpmsg/qcom_glink_rpm.c #define to_rpm_pipe(p) container_of(p, struct glink_rpm_pipe, native)
p                  47 drivers/rpmsg/qcom_glink_smem.c #define to_smem_pipe(p) container_of(p, struct glink_smem_pipe, native)
p                1526 drivers/rpmsg/qcom_smd.c 	void *p;
p                1529 drivers/rpmsg/qcom_smd.c 	p = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL);
p                1530 drivers/rpmsg/qcom_smd.c 	if (PTR_ERR(p) == -EPROBE_DEFER)
p                1531 drivers/rpmsg/qcom_smd.c 		return PTR_ERR(p);
p                  30 drivers/rtc/rtc-bq4802.c static u8 bq4802_read_io(struct bq4802 *p, int off)
p                  32 drivers/rtc/rtc-bq4802.c 	return inb(p->ioport + off);
p                  35 drivers/rtc/rtc-bq4802.c static void bq4802_write_io(struct bq4802 *p, int off, u8 val)
p                  37 drivers/rtc/rtc-bq4802.c 	outb(val, p->ioport + off);
p                  40 drivers/rtc/rtc-bq4802.c static u8 bq4802_read_mem(struct bq4802 *p, int off)
p                  42 drivers/rtc/rtc-bq4802.c 	return readb(p->regs + off);
p                  45 drivers/rtc/rtc-bq4802.c static void bq4802_write_mem(struct bq4802 *p, int off, u8 val)
p                  47 drivers/rtc/rtc-bq4802.c 	writeb(val, p->regs + off);
p                  52 drivers/rtc/rtc-bq4802.c 	struct bq4802 *p = dev_get_drvdata(dev);
p                  57 drivers/rtc/rtc-bq4802.c 	spin_lock_irqsave(&p->lock, flags);
p                  59 drivers/rtc/rtc-bq4802.c 	val = p->read(p, 0x0e);
p                  60 drivers/rtc/rtc-bq4802.c 	p->write(p, 0xe, val | 0x08);
p                  62 drivers/rtc/rtc-bq4802.c 	tm->tm_sec = p->read(p, 0x00);
p                  63 drivers/rtc/rtc-bq4802.c 	tm->tm_min = p->read(p, 0x02);
p                  64 drivers/rtc/rtc-bq4802.c 	tm->tm_hour = p->read(p, 0x04);
p                  65 drivers/rtc/rtc-bq4802.c 	tm->tm_mday = p->read(p, 0x06);
p                  66 drivers/rtc/rtc-bq4802.c 	tm->tm_mon = p->read(p, 0x09);
p                  67 drivers/rtc/rtc-bq4802.c 	tm->tm_year = p->read(p, 0x0a);
p                  68 drivers/rtc/rtc-bq4802.c 	tm->tm_wday = p->read(p, 0x08);
p                  69 drivers/rtc/rtc-bq4802.c 	century = p->read(p, 0x0f);
p                  71 drivers/rtc/rtc-bq4802.c 	p->write(p, 0x0e, val);
p                  73 drivers/rtc/rtc-bq4802.c 	spin_unlock_irqrestore(&p->lock, flags);
p                  94 drivers/rtc/rtc-bq4802.c 	struct bq4802 *p = dev_get_drvdata(dev);
p                 117 drivers/rtc/rtc-bq4802.c 	spin_lock_irqsave(&p->lock, flags);
p                 119 drivers/rtc/rtc-bq4802.c 	val = p->read(p, 0x0e);
p                 120 drivers/rtc/rtc-bq4802.c 	p->write(p, 0x0e, val | 0x08);
p                 122 drivers/rtc/rtc-bq4802.c 	p->write(p, 0x00, sec);
p                 123 drivers/rtc/rtc-bq4802.c 	p->write(p, 0x02, min);
p                 124 drivers/rtc/rtc-bq4802.c 	p->write(p, 0x04, hrs);
p                 125 drivers/rtc/rtc-bq4802.c 	p->write(p, 0x06, day);
p                 126 drivers/rtc/rtc-bq4802.c 	p->write(p, 0x09, mon);
p                 127 drivers/rtc/rtc-bq4802.c 	p->write(p, 0x0a, yrs);
p                 128 drivers/rtc/rtc-bq4802.c 	p->write(p, 0x0f, century);
p                 130 drivers/rtc/rtc-bq4802.c 	p->write(p, 0x0e, val);
p                 132 drivers/rtc/rtc-bq4802.c 	spin_unlock_irqrestore(&p->lock, flags);
p                 144 drivers/rtc/rtc-bq4802.c 	struct bq4802 *p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
p                 147 drivers/rtc/rtc-bq4802.c 	if (!p)
p                 150 drivers/rtc/rtc-bq4802.c 	spin_lock_init(&p->lock);
p                 152 drivers/rtc/rtc-bq4802.c 	p->r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
p                 153 drivers/rtc/rtc-bq4802.c 	if (!p->r) {
p                 154 drivers/rtc/rtc-bq4802.c 		p->r = platform_get_resource(pdev, IORESOURCE_IO, 0);
p                 156 drivers/rtc/rtc-bq4802.c 		if (!p->r)
p                 159 drivers/rtc/rtc-bq4802.c 	if (p->r->flags & IORESOURCE_IO) {
p                 160 drivers/rtc/rtc-bq4802.c 		p->ioport = p->r->start;
p                 161 drivers/rtc/rtc-bq4802.c 		p->read = bq4802_read_io;
p                 162 drivers/rtc/rtc-bq4802.c 		p->write = bq4802_write_io;
p                 163 drivers/rtc/rtc-bq4802.c 	} else if (p->r->flags & IORESOURCE_MEM) {
p                 164 drivers/rtc/rtc-bq4802.c 		p->regs = devm_ioremap(&pdev->dev, p->r->start,
p                 165 drivers/rtc/rtc-bq4802.c 					resource_size(p->r));
p                 166 drivers/rtc/rtc-bq4802.c 		if (!p->regs){
p                 170 drivers/rtc/rtc-bq4802.c 		p->read = bq4802_read_mem;
p                 171 drivers/rtc/rtc-bq4802.c 		p->write = bq4802_write_mem;
p                 177 drivers/rtc/rtc-bq4802.c 	platform_set_drvdata(pdev, p);
p                 179 drivers/rtc/rtc-bq4802.c 	p->rtc = devm_rtc_device_register(&pdev->dev, "bq4802",
p                 181 drivers/rtc/rtc-bq4802.c 	if (IS_ERR(p->rtc)) {
p                 182 drivers/rtc/rtc-bq4802.c 		err = PTR_ERR(p->rtc);
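
The rtc-bq4802.c entries above hide the port-I/O versus memory-mapped difference behind p->read/p->write function pointers selected at probe time from the resource type, so the timekeeping paths just set bit 0x08 in control register 0x0e, access the time registers, and restore the control byte without caring which bus the chip sits on. A userspace sketch of that accessor indirection with a fake register file in place of real hardware:

	#include <stdint.h>
	#include <stdio.h>

	struct fake_rtc {
		uint8_t regs[16];		/* stands in for ioport or MMIO space */
		uint8_t (*read)(struct fake_rtc *p, int off);
		void (*write)(struct fake_rtc *p, int off, uint8_t val);
	};

	static uint8_t mem_read(struct fake_rtc *p, int off)
	{
		return p->regs[off];
	}

	static void mem_write(struct fake_rtc *p, int off, uint8_t val)
	{
		p->regs[off] = val;
	}

	int main(void)
	{
		struct fake_rtc rtc = { .read = mem_read, .write = mem_write };
		uint8_t ctrl;

		/* Same shape as the read path above: set bit 0x08 of register
		 * 0x0e, read a time register, then restore the old value. */
		ctrl = rtc.read(&rtc, 0x0e);
		rtc.write(&rtc, 0x0e, ctrl | 0x08);
		printf("seconds register: 0x%02x\n", rtc.read(&rtc, 0x00));
		rtc.write(&rtc, 0x0e, ctrl);
		return 0;
	}
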
p                 650 drivers/rtc/rtc-cmos.c static irqreturn_t cmos_interrupt(int irq, void *p)
p                 692 drivers/rtc/rtc-cmos.c 		rtc_update_irq(p, 1, irqstat);
p                 479 drivers/rtc/rtc-ds1305.c static irqreturn_t ds1305_irq(int irq, void *p)
p                 481 drivers/rtc/rtc-ds1305.c 	struct ds1305		*ds1305 = p;
p                 278 drivers/rtc/rtc-isl12026.c static int isl12026_nvm_read(void *p, unsigned int offset,
p                 281 drivers/rtc/rtc-isl12026.c 	struct isl12026 *priv = p;
p                 322 drivers/rtc/rtc-isl12026.c static int isl12026_nvm_write(void *p, unsigned int offset,
p                 325 drivers/rtc/rtc-isl12026.c 	struct isl12026 *priv = p;
p                 296 drivers/rtc/rtc-mrst.c static irqreturn_t mrst_rtc_irq(int irq, void *p)
p                 307 drivers/rtc/rtc-mrst.c 		rtc_update_irq(p, 1, irqstat);
p                 685 drivers/s390/block/dasd_int.h 	struct list_head *p, *left;
p                 691 drivers/s390/block/dasd_int.h 	list_for_each(p, chunk_list) {
p                 692 drivers/s390/block/dasd_int.h 		if (list_entry(p, struct dasd_mchunk, list) > chunk)
p                 694 drivers/s390/block/dasd_int.h 		left = p;
p                 439 drivers/s390/char/keyboard.c 	char *p;
p                 452 drivers/s390/char/keyboard.c 		p = kbd->func_table[kb_func];
p                 453 drivers/s390/char/keyboard.c 		if (p) {
p                 454 drivers/s390/char/keyboard.c 			len = strlen(p);
p                 457 drivers/s390/char/keyboard.c 			if (copy_to_user(u_kbs->kb_string, p, len))
p                 467 drivers/s390/char/keyboard.c 		p = strndup_user(u_kbs->kb_string, sizeof(u_kbs->kb_string));
p                 468 drivers/s390/char/keyboard.c 		if (IS_ERR(p))
p                 469 drivers/s390/char/keyboard.c 			return PTR_ERR(p);
p                 471 drivers/s390/char/keyboard.c 		kbd->func_table[kb_func] = p;
p                 432 drivers/s390/char/monreader.c static __poll_t mon_poll(struct file *filp, struct poll_table_struct *p)
p                 436 drivers/s390/char/monreader.c 	poll_wait(filp, &mon_read_wait_queue, p);
p                 247 drivers/s390/char/raw3270.h 	struct list_head *p, *left;
p                 251 drivers/s390/char/raw3270.h 	list_for_each(p, free_list) {
p                 252 drivers/s390/char/raw3270.h 		if (list_entry(p, struct string, list) > cs)
p                 254 drivers/s390/char/raw3270.h 		left = p;
p                 666 drivers/s390/char/sclp_vt220.c 	struct list_head *page, *p;
p                 668 drivers/s390/char/sclp_vt220.c 	list_for_each_safe(page, p, &sclp_vt220_empty) {
p                 139 drivers/s390/char/tape_34xx.c 	struct tape_34xx_work *p =
p                 141 drivers/s390/char/tape_34xx.c 	struct tape_device *device = p->device;
p                 143 drivers/s390/char/tape_34xx.c 	switch(p->op) {
p                 151 drivers/s390/char/tape_34xx.c 	kfree(p);
p                 157 drivers/s390/char/tape_34xx.c 	struct tape_34xx_work *p;
p                 159 drivers/s390/char/tape_34xx.c 	if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
p                 162 drivers/s390/char/tape_34xx.c 	INIT_WORK(&p->work, tape_34xx_work_handler);
p                 164 drivers/s390/char/tape_34xx.c 	p->device = tape_get_device(device);
p                 165 drivers/s390/char/tape_34xx.c 	p->op     = op;
p                 167 drivers/s390/char/tape_34xx.c 	schedule_work(&p->work);
p                 627 drivers/s390/char/tape_3590.c 	struct work_handler_data *p =
p                 630 drivers/s390/char/tape_3590.c 	switch (p->op) {
p                 632 drivers/s390/char/tape_3590.c 		tape_3590_sense_medium_async(p->device);
p                 635 drivers/s390/char/tape_3590.c 		tape_3590_read_attmsg_async(p->device);
p                 638 drivers/s390/char/tape_3590.c 		tape_3592_enable_crypt_async(p->device);
p                 641 drivers/s390/char/tape_3590.c 		tape_3592_disable_crypt_async(p->device);
p                 645 drivers/s390/char/tape_3590.c 			  "operation 0x%02x\n", p->op);
p                 647 drivers/s390/char/tape_3590.c 	tape_put_device(p->device);
p                 648 drivers/s390/char/tape_3590.c 	kfree(p);
p                 654 drivers/s390/char/tape_3590.c 	struct work_handler_data *p;
p                 656 drivers/s390/char/tape_3590.c 	if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
p                 659 drivers/s390/char/tape_3590.c 	INIT_WORK(&p->work, tape_3590_work_handler);
p                 661 drivers/s390/char/tape_3590.c 	p->device = tape_get_device(device);
p                 662 drivers/s390/char/tape_3590.c 	p->op = op;
p                 664 drivers/s390/char/tape_3590.c 	queue_work(tape_3590_wq, &p->work);
p                 223 drivers/s390/char/tape_core.c 	struct tape_med_state_work_data *p =
p                 225 drivers/s390/char/tape_core.c 	struct tape_device *device = p->device;
p                 228 drivers/s390/char/tape_core.c 	switch (p->state) {
p                 245 drivers/s390/char/tape_core.c 	kfree(p);
p                 251 drivers/s390/char/tape_core.c 	struct tape_med_state_work_data *p;
p                 253 drivers/s390/char/tape_core.c 	p = kzalloc(sizeof(*p), GFP_ATOMIC);
p                 254 drivers/s390/char/tape_core.c 	if (p) {
p                 255 drivers/s390/char/tape_core.c 		INIT_WORK(&p->work, tape_med_state_work_handler);
p                 256 drivers/s390/char/tape_core.c 		p->device = tape_get_device(device);
p                 257 drivers/s390/char/tape_core.c 		p->state = state;
p                 258 drivers/s390/char/tape_core.c 		schedule_work(&p->work);
p                  44 drivers/s390/char/vmcp.c static int __init early_parse_vmcp_cma(char *p)
p                  46 drivers/s390/char/vmcp.c 	if (!p)
p                  48 drivers/s390/char/vmcp.c 	vmcp_cma_size = ALIGN(memparse(p, NULL), PAGE_SIZE);
p                 306 drivers/s390/cio/blacklist.c 	loff_t p = *offset;
p                 309 drivers/s390/cio/blacklist.c 	if (p >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
p                 175 drivers/s390/cio/chsc.h 	u32 p:4;
p                 280 drivers/s390/cio/device_pgid.c static int pgid_is_reset(struct pgid *p)
p                 284 drivers/s390/cio/device_pgid.c 	for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
p                 300 drivers/s390/cio/device_pgid.c static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
p                 329 drivers/s390/cio/device_pgid.c 	*p = first;
p                  21 drivers/s390/cio/eadm_sch.h #define set_eadm_private(n, p) (dev_set_drvdata(&n->dev, p))
p                  30 drivers/s390/cio/io_sch.h #define set_io_private(n, p) (dev_set_drvdata(&(n)->dev, p))
p                 141 drivers/s390/cio/scm.c 	scmdev->attrs.persistence = sale->p;
p                 234 drivers/s390/cio/vfio_ccw_fsm.c inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
p                 236 drivers/s390/cio/vfio_ccw_fsm.c 	return p->sch->schid;
p                 102 drivers/s390/crypto/zcrypt_cca_key.h static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
p                 115 drivers/s390/crypto/zcrypt_cca_key.h 	} __packed *key = p;
p                 171 drivers/s390/crypto/zcrypt_cca_key.h static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
p                 184 drivers/s390/crypto/zcrypt_cca_key.h 	} __packed *key = p;
p                  77 drivers/s390/crypto/zcrypt_cex2a.h 	unsigned char	p[64];
p                  90 drivers/s390/crypto/zcrypt_cex2a.h 	unsigned char	p[128];
p                 103 drivers/s390/crypto/zcrypt_cex2a.h 	unsigned char	p[256];
p                 105 drivers/s390/crypto/zcrypt_msgtype50.c 	unsigned char	p[64];
p                 118 drivers/s390/crypto/zcrypt_msgtype50.c 	unsigned char	p[128];
p                 131 drivers/s390/crypto/zcrypt_msgtype50.c 	unsigned char	p[256];
p                 266 drivers/s390/crypto/zcrypt_msgtype50.c 	unsigned char *p, *q, *dp, *dq, *u, *inp;
p                 285 drivers/s390/crypto/zcrypt_msgtype50.c 		p = crb1->p + sizeof(crb1->p) - short_len;
p                 299 drivers/s390/crypto/zcrypt_msgtype50.c 		p = crb2->p + sizeof(crb2->p) - short_len;
p                 314 drivers/s390/crypto/zcrypt_msgtype50.c 		p = crb3->p + sizeof(crb3->p) - short_len;
p                 327 drivers/s390/crypto/zcrypt_msgtype50.c 	if (copy_from_user(p, crt->np_prime + MSGTYPE_ADJUSTMENT, short_len) ||
p                 296 drivers/s390/net/ctcm_main.h #define IS_MPC(p) ((p)->protocol == CTCM_PROTO_MPC)
p                 230 drivers/s390/net/ctcm_mpc.c 	__u8 *p = skb->data;
p                 236 drivers/s390/net/ctcm_mpc.c 	if (p == NULL)
p                 239 drivers/s390/net/ctcm_mpc.c 	p += offset;
p                 240 drivers/s390/net/ctcm_mpc.c 	header = (struct th_header *)p;
p                 261 drivers/s390/net/ctcm_mpc.c 		pheader = (struct pdu *)p;
p                 283 drivers/s390/net/ctcm_mpc.c 		ctcm_pr_debug("%02x%s", *p++, (i % 16) ? " " : "\n");
p                  79 drivers/s390/net/ctcm_sysfs.c 	char *p;
p                  86 drivers/s390/net/ctcm_sysfs.c 	p = sbuf;
p                  88 drivers/s390/net/ctcm_sysfs.c 	p += sprintf(p, "  Device FSM state: %s\n",
p                  90 drivers/s390/net/ctcm_sysfs.c 	p += sprintf(p, "  RX channel FSM state: %s\n",
p                  92 drivers/s390/net/ctcm_sysfs.c 	p += sprintf(p, "  TX channel FSM state: %s\n",
p                  94 drivers/s390/net/ctcm_sysfs.c 	p += sprintf(p, "  Max. TX buffer used: %ld\n",
p                  96 drivers/s390/net/ctcm_sysfs.c 	p += sprintf(p, "  Max. chained SKBs: %ld\n",
p                  98 drivers/s390/net/ctcm_sysfs.c 	p += sprintf(p, "  TX single write ops: %ld\n",
p                 100 drivers/s390/net/ctcm_sysfs.c 	p += sprintf(p, "  TX multi write ops: %ld\n",
p                 102 drivers/s390/net/ctcm_sysfs.c 	p += sprintf(p, "  Netto bytes written: %ld\n",
p                 104 drivers/s390/net/ctcm_sysfs.c 	p += sprintf(p, "  Max. TX IO-time: %u\n",
p                 273 drivers/s390/net/netiucv.c 	char *p = tmp;
p                 276 drivers/s390/net/netiucv.c 	while (*p && ((p - tmp) < len) && (!isspace(*p)))
p                 277 drivers/s390/net/netiucv.c 		p++;
p                 278 drivers/s390/net/netiucv.c 	*p = '\0';
p                1431 drivers/s390/net/netiucv.c 	const char *p;
p                1434 drivers/s390/net/netiucv.c 	p = strchr(buf, '.');
p                1435 drivers/s390/net/netiucv.c 	if ((p && ((count > 26) ||
p                1436 drivers/s390/net/netiucv.c 		   ((p - buf) > 8) ||
p                1437 drivers/s390/net/netiucv.c 		   (buf + count - p > 18))) ||
p                1438 drivers/s390/net/netiucv.c 	    (!p && (count > 9))) {
p                1443 drivers/s390/net/netiucv.c 	for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
p                1444 drivers/s390/net/netiucv.c 		if (isalnum(*p) || *p == '$') {
p                1445 drivers/s390/net/netiucv.c 			username[i] = toupper(*p);
p                1448 drivers/s390/net/netiucv.c 		if (*p == '\n')
p                1452 drivers/s390/net/netiucv.c 			       "conn_write: invalid character %02x\n", *p);
p                1459 drivers/s390/net/netiucv.c 	if (*p == '.') {
p                1460 drivers/s390/net/netiucv.c 		p++;
p                1461 drivers/s390/net/netiucv.c 		for (i = 0; i < 16 && *p; i++, p++) {
p                1462 drivers/s390/net/netiucv.c 			if (*p == '\n')
p                1464 drivers/s390/net/netiucv.c 			userdata[i] = toupper(*p);
p                2079 drivers/s390/net/netiucv.c 	const char *p;
p                2087 drivers/s390/net/netiucv.c 	for (i = 0, p = buf; i < count && *p; i++, p++) {
p                2088 drivers/s390/net/netiucv.c 		if (*p == '\n' || *p == ' ')
p                2091 drivers/s390/net/netiucv.c 		name[i] = *p;
p                 406 drivers/s390/net/qeth_l2_main.c static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
p                 408 drivers/s390/net/qeth_l2_main.c 	struct sockaddr *addr = p;
p                 100 drivers/s390/net/smsgiucv_app.c 	struct smsg_app_event *p, *n;
p                 111 drivers/s390/net/smsgiucv_app.c 	list_for_each_entry_safe(p, n, &event_queue, list) {
p                 112 drivers/s390/net/smsgiucv_app.c 		list_del(&p->list);
p                 113 drivers/s390/net/smsgiucv_app.c 		kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, p->envp);
p                 114 drivers/s390/net/smsgiucv_app.c 		smsg_app_event_free(p);
p                 115 drivers/s390/scsi/zfcp_ccw.c 	struct zfcp_port *port, *p;
p                 127 drivers/s390/scsi/zfcp_ccw.c 	list_for_each_entry_safe(port, p, &adapter->port_list, list) {
p                 140 drivers/s390/scsi/zfcp_ccw.c 	list_for_each_entry_safe(port, p, &port_remove_lh, list)
p                  89 drivers/sbus/char/display7seg.c 		struct d7s *p = d7s_device;
p                  92 drivers/sbus/char/display7seg.c 		regval = readb(p->regs);
p                  93 drivers/sbus/char/display7seg.c 		if (p->flipped)
p                  97 drivers/sbus/char/display7seg.c 		writeb(regval, p->regs);
p                 105 drivers/sbus/char/display7seg.c 	struct d7s *p = d7s_device;
p                 106 drivers/sbus/char/display7seg.c 	u8 regs = readb(p->regs);
p                 129 drivers/sbus/char/display7seg.c 		writeb(ireg, p->regs);
p                 148 drivers/sbus/char/display7seg.c 		writeb(regs, p->regs);
p                 175 drivers/sbus/char/display7seg.c 	struct d7s *p;
p                 181 drivers/sbus/char/display7seg.c 	p = devm_kzalloc(&op->dev, sizeof(*p), GFP_KERNEL);
p                 183 drivers/sbus/char/display7seg.c 	if (!p)
p                 186 drivers/sbus/char/display7seg.c 	p->regs = of_ioremap(&op->resource[0], 0, sizeof(u8), "d7s");
p                 187 drivers/sbus/char/display7seg.c 	if (!p->regs) {
p                 202 drivers/sbus/char/display7seg.c 	regs = readb(p->regs);
p                 206 drivers/sbus/char/display7seg.c 		p->flipped = true;
p                 208 drivers/sbus/char/display7seg.c 	if (p->flipped)
p                 213 drivers/sbus/char/display7seg.c 	writeb(regs,  p->regs);
p                 221 drivers/sbus/char/display7seg.c 	dev_set_drvdata(&op->dev, p);
p                 222 drivers/sbus/char/display7seg.c 	d7s_device = p;
p                 230 drivers/sbus/char/display7seg.c 	of_iounmap(&op->resource[0], p->regs, sizeof(u8));
p                 238 drivers/sbus/char/display7seg.c 	struct d7s *p = dev_get_drvdata(&op->dev);
p                 239 drivers/sbus/char/display7seg.c 	u8 regs = readb(p->regs);
p                 243 drivers/sbus/char/display7seg.c 		if (p->flipped)
p                 247 drivers/sbus/char/display7seg.c 		writeb(regs, p->regs);
p                 251 drivers/sbus/char/display7seg.c 	of_iounmap(&op->resource[0], p->regs, sizeof(u8));
p                 108 drivers/sbus/char/flash.c 	loff_t p = *ppos;
p                 111 drivers/sbus/char/flash.c 	if (count > flash.read_size - p)
p                 112 drivers/sbus/char/flash.c 		count = flash.read_size - p;
p                 115 drivers/sbus/char/flash.c 		u8 data = upa_readb(flash.read_base + p + i);
p                 409 drivers/sbus/char/oradax.c 			struct page *p = ctx->pages[i][j];
p                 411 drivers/sbus/char/oradax.c 			if (p) {
p                 412 drivers/sbus/char/oradax.c 				dax_dbg("freeing page %p", p);
p                 414 drivers/sbus/char/oradax.c 					set_page_dirty(p);
p                 415 drivers/sbus/char/oradax.c 				put_page(p);
p                 422 drivers/sbus/char/oradax.c static int dax_lock_page(void *va, struct page **p)
p                 428 drivers/sbus/char/oradax.c 	ret = get_user_pages_fast((unsigned long)va, 1, FOLL_WRITE, p);
p                 430 drivers/sbus/char/oradax.c 		dax_dbg("locked page %p, for VA %p", *p, va);
p                 352 drivers/sbus/char/uctrl.c 	struct uctrl_driver *p;
p                 355 drivers/sbus/char/uctrl.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 356 drivers/sbus/char/uctrl.c 	if (!p) {
p                 361 drivers/sbus/char/uctrl.c 	p->regs = of_ioremap(&op->resource[0], 0,
p                 364 drivers/sbus/char/uctrl.c 	if (!p->regs) {
p                 369 drivers/sbus/char/uctrl.c 	p->irq = op->archdata.irqs[0];
p                 370 drivers/sbus/char/uctrl.c 	err = request_irq(p->irq, uctrl_interrupt, 0, "uctrl", p);
p                 382 drivers/sbus/char/uctrl.c 	sbus_writel(UCTRL_INTR_RXNE_REQ|UCTRL_INTR_RXNE_MSK, &p->regs->uctrl_intr);
p                 384 drivers/sbus/char/uctrl.c 	       op->dev.of_node, p->regs, p->irq);
p                 385 drivers/sbus/char/uctrl.c 	uctrl_get_event_status(p);
p                 386 drivers/sbus/char/uctrl.c 	uctrl_get_external_status(p);
p                 388 drivers/sbus/char/uctrl.c 	dev_set_drvdata(&op->dev, p);
p                 389 drivers/sbus/char/uctrl.c 	global_driver = p;
p                 395 drivers/sbus/char/uctrl.c 	free_irq(p->irq, p);
p                 398 drivers/sbus/char/uctrl.c 	of_iounmap(&op->resource[0], p->regs, resource_size(&op->resource[0]));
p                 401 drivers/sbus/char/uctrl.c 	kfree(p);
p                 407 drivers/sbus/char/uctrl.c 	struct uctrl_driver *p = dev_get_drvdata(&op->dev);
p                 409 drivers/sbus/char/uctrl.c 	if (p) {
p                 411 drivers/sbus/char/uctrl.c 		free_irq(p->irq, p);
p                 412 drivers/sbus/char/uctrl.c 		of_iounmap(&op->resource[0], p->regs, resource_size(&op->resource[0]));
p                 413 drivers/sbus/char/uctrl.c 		kfree(p);
p                1108 drivers/scsi/53c700.c 			struct NCR_700_Device_Parameters *p = SDp->hostdata;
p                1109 drivers/scsi/53c700.c 			struct scsi_cmnd *SCp = p->current_cmnd;
p                1813 drivers/scsi/53c700.c 		struct NCR_700_Device_Parameters *p = SCp->device->hostdata;
p                1817 drivers/scsi/53c700.c 		p->current_cmnd = SCp;
p                 747 drivers/scsi/NCR5380.c 	unsigned char p;
p                 750 drivers/scsi/NCR5380.c 		p = hostdata->connected->SCp.phase;
p                 751 drivers/scsi/NCR5380.c 		if (p & SR_IO) {
p                 795 drivers/scsi/NCR5380.c 		if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) {
p                 808 drivers/scsi/NCR5380.c 				NCR5380_transfer_pio(instance, &p, &cnt, data);
p                1246 drivers/scsi/NCR5380.c 	unsigned char p = *phase, tmp;
p                1256 drivers/scsi/NCR5380.c 	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
p                1270 drivers/scsi/NCR5380.c 		if ((NCR5380_read(STATUS_REG) & PHASE_MASK) != p) {
p                1277 drivers/scsi/NCR5380.c 		if (!(p & SR_IO))
p                1291 drivers/scsi/NCR5380.c 		if (!(p & SR_IO)) {
p                1292 drivers/scsi/NCR5380.c 			if (!((p & SR_MSG) && c > 1)) {
p                1326 drivers/scsi/NCR5380.c 		if (!(p == PHASE_MSGIN && c == 1)) {
p                1327 drivers/scsi/NCR5380.c 			if (p == PHASE_MSGOUT && c > 1)
p                1348 drivers/scsi/NCR5380.c 	if (!c || (*phase == p))
p                1469 drivers/scsi/NCR5380.c 	unsigned char p = *phase;
p                1474 drivers/scsi/NCR5380.c 	if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) {
p                1479 drivers/scsi/NCR5380.c 	hostdata->connected->SCp.phase = p;
p                1481 drivers/scsi/NCR5380.c 	if (p & SR_IO) {
p                1489 drivers/scsi/NCR5380.c 	         (p & SR_IO) ? "receive" : "send", c, d);
p                1496 drivers/scsi/NCR5380.c 	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
p                1504 drivers/scsi/NCR5380.c 		if (p & SR_IO)
p                1515 drivers/scsi/NCR5380.c 	if (p & SR_IO) {
p                1538 drivers/scsi/NCR5380.c 		if (p & SR_IO)
p                1592 drivers/scsi/NCR5380.c 		if (p & SR_IO) {
p                 642 drivers/scsi/aacraid/commctrl.c 			void *p;
p                 656 drivers/scsi/aacraid/commctrl.c 			p = kmalloc(sg_count[i], GFP_KERNEL);
p                 657 drivers/scsi/aacraid/commctrl.c 			if (!p) {
p                 670 drivers/scsi/aacraid/commctrl.c 			sg_list[i] = p; // save so we can clean up later
p                 674 drivers/scsi/aacraid/commctrl.c 				if (copy_from_user(p, sg_user[i],
p                 680 drivers/scsi/aacraid/commctrl.c 			addr = pci_map_single(dev->pdev, p, sg_count[i],
p                 709 drivers/scsi/aacraid/commctrl.c 				void* p;
p                 721 drivers/scsi/aacraid/commctrl.c 				p = kmalloc(sg_count[i], GFP_KERNEL);
p                 722 drivers/scsi/aacraid/commctrl.c 				if(!p) {
p                 731 drivers/scsi/aacraid/commctrl.c 				sg_list[i] = p; // save so we can clean up later
p                 735 drivers/scsi/aacraid/commctrl.c 					if (copy_from_user(p, sg_user[i],
p                 742 drivers/scsi/aacraid/commctrl.c 				addr = pci_map_single(dev->pdev, p,
p                 764 drivers/scsi/aacraid/commctrl.c 				void* p;
p                 777 drivers/scsi/aacraid/commctrl.c 				p = kmalloc(sg_count[i], GFP_KERNEL);
p                 778 drivers/scsi/aacraid/commctrl.c 				if(!p) {
p                 786 drivers/scsi/aacraid/commctrl.c 				sg_list[i] = p; // save so we can clean up later
p                 790 drivers/scsi/aacraid/commctrl.c 					if (copy_from_user(p, sg_user[i],
p                 798 drivers/scsi/aacraid/commctrl.c 				addr = pci_map_single(dev->pdev, p,
p                 822 drivers/scsi/aacraid/commctrl.c 				void* p;
p                 833 drivers/scsi/aacraid/commctrl.c 				p = kmalloc(sg_count[i], GFP_KERNEL);
p                 834 drivers/scsi/aacraid/commctrl.c 				if (!p) {
p                 843 drivers/scsi/aacraid/commctrl.c 				sg_list[i] = p; // save so we can clean up later
p                 847 drivers/scsi/aacraid/commctrl.c 					if (copy_from_user(p, sg_user[i],
p                 854 drivers/scsi/aacraid/commctrl.c 				addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
p                 863 drivers/scsi/aacraid/commctrl.c 				void* p;
p                 874 drivers/scsi/aacraid/commctrl.c 				p = kmalloc(sg_count[i], GFP_KERNEL);
p                 875 drivers/scsi/aacraid/commctrl.c 				if (!p) {
p                 882 drivers/scsi/aacraid/commctrl.c 				sg_list[i] = p; // save so we can clean up later
p                 886 drivers/scsi/aacraid/commctrl.c 					if (copy_from_user(p, sg_user[i],
p                 893 drivers/scsi/aacraid/commctrl.c 				addr = pci_map_single(dev->pdev, p,
p                3248 drivers/scsi/aha152x.c 			void __iomem *p = ioremap(addresses[i], 0x4000);
p                3249 drivers/scsi/aha152x.c 			if (!p)
p                3252 drivers/scsi/aha152x.c 				ok = check_signature(p + signatures[j].sig_offset,
p                3254 drivers/scsi/aha152x.c 			iounmap(p);
p                  66 drivers/scsi/aha1542.h static inline void any2scsi(u8 *p, u32 v)
p                  68 drivers/scsi/aha1542.h 	p[0] = v >> 16;
p                  69 drivers/scsi/aha1542.h 	p[1] = v >> 8;
p                  70 drivers/scsi/aha1542.h 	p[2] = v;
p                  85 drivers/scsi/aha1740.h #define any2scsi(up, p)				\
p                  86 drivers/scsi/aha1740.h (up)[0] = (((unsigned long)(p)) >> 16)  ;	\
p                  87 drivers/scsi/aha1740.h (up)[1] = (((unsigned long)(p)) >> 8);		\
p                  88 drivers/scsi/aha1740.h (up)[2] = ((unsigned long)(p));
p                  92 drivers/scsi/aha1740.h #define xany2scsi(up, p)	\
p                  93 drivers/scsi/aha1740.h (up)[0] = ((long)(p)) >> 24;	\
p                  94 drivers/scsi/aha1740.h (up)[1] = ((long)(p)) >> 16;	\
p                  95 drivers/scsi/aha1740.h (up)[2] = ((long)(p)) >> 8;	\
p                  96 drivers/scsi/aha1740.h (up)[3] = ((long)(p));
p                 365 drivers/scsi/aic7xxx/aic79xx_osm.c static void ahd_linux_setup_tag_info_global(char *p);
p                1029 drivers/scsi/aic7xxx/aic79xx_osm.c ahd_linux_setup_tag_info_global(char *p)
p                1033 drivers/scsi/aic7xxx/aic79xx_osm.c 	tags = simple_strtoul(p + 1, NULL, 0) & 0xff;
p                1145 drivers/scsi/aic7xxx/aic79xx_osm.c 	char   *p;
p                1178 drivers/scsi/aic7xxx/aic79xx_osm.c 	while ((p = strsep(&s, ",.")) != NULL) {
p                1179 drivers/scsi/aic7xxx/aic79xx_osm.c 		if (*p == '\0')
p                1184 drivers/scsi/aic7xxx/aic79xx_osm.c 			if (strncmp(options[i].name, p, n) == 0)
p                1190 drivers/scsi/aic7xxx/aic79xx_osm.c 		if (strncmp(p, "global_tag_depth", n) == 0) {
p                1191 drivers/scsi/aic7xxx/aic79xx_osm.c 			ahd_linux_setup_tag_info_global(p + n);
p                1192 drivers/scsi/aic7xxx/aic79xx_osm.c 		} else if (strncmp(p, "tag_info", n) == 0) {
p                1193 drivers/scsi/aic7xxx/aic79xx_osm.c 			s = ahd_parse_brace_option("tag_info", p + n, end,
p                1195 drivers/scsi/aic7xxx/aic79xx_osm.c 		} else if (strncmp(p, "slewrate", n) == 0) {
p                1197 drivers/scsi/aic7xxx/aic79xx_osm.c 			    p + n, end, 1, ahd_linux_setup_iocell_info,
p                1199 drivers/scsi/aic7xxx/aic79xx_osm.c 		} else if (strncmp(p, "precomp", n) == 0) {
p                1201 drivers/scsi/aic7xxx/aic79xx_osm.c 			    p + n, end, 1, ahd_linux_setup_iocell_info,
p                1203 drivers/scsi/aic7xxx/aic79xx_osm.c 		} else if (strncmp(p, "amplitude", n) == 0) {
p                1205 drivers/scsi/aic7xxx/aic79xx_osm.c 			    p + n, end, 1, ahd_linux_setup_iocell_info,
p                1207 drivers/scsi/aic7xxx/aic79xx_osm.c 		} else if (p[n] == ':') {
p                1208 drivers/scsi/aic7xxx/aic79xx_osm.c 			*(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
p                1209 drivers/scsi/aic7xxx/aic79xx_osm.c 		} else if (!strncmp(p, "verbose", n)) {
p                 376 drivers/scsi/aic7xxx/aic7xxx_osm.c static void ahc_linux_setup_tag_info_global(char *p);
p                 907 drivers/scsi/aic7xxx/aic7xxx_osm.c ahc_linux_setup_tag_info_global(char *p)
p                 911 drivers/scsi/aic7xxx/aic7xxx_osm.c 	tags = simple_strtoul(p + 1, NULL, 0) & 0xff;
p                1023 drivers/scsi/aic7xxx/aic7xxx_osm.c 	char   *p;
p                1053 drivers/scsi/aic7xxx/aic7xxx_osm.c 	while ((p = strsep(&s, ",.")) != NULL) {
p                1054 drivers/scsi/aic7xxx/aic7xxx_osm.c 		if (*p == '\0')
p                1059 drivers/scsi/aic7xxx/aic7xxx_osm.c 			if (strncmp(options[i].name, p, n) == 0)
p                1065 drivers/scsi/aic7xxx/aic7xxx_osm.c 		if (strncmp(p, "global_tag_depth", n) == 0) {
p                1066 drivers/scsi/aic7xxx/aic7xxx_osm.c 			ahc_linux_setup_tag_info_global(p + n);
p                1067 drivers/scsi/aic7xxx/aic7xxx_osm.c 		} else if (strncmp(p, "tag_info", n) == 0) {
p                1068 drivers/scsi/aic7xxx/aic7xxx_osm.c 			s = ahc_parse_brace_option("tag_info", p + n, end,
p                1070 drivers/scsi/aic7xxx/aic7xxx_osm.c 		} else if (p[n] == ':') {
p                1071 drivers/scsi/aic7xxx/aic7xxx_osm.c 			*(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
p                1072 drivers/scsi/aic7xxx/aic7xxx_osm.c 		} else if (strncmp(p, "verbose", n) == 0) {
p                  45 drivers/scsi/aic94xx/aic94xx.h static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr)
p                  48 drivers/scsi/aic94xx/aic94xx.h 	for (i = 0; i < SAS_ADDR_SIZE; i++, p += 2)
p                  49 drivers/scsi/aic94xx/aic94xx.h 		snprintf(p, 3, "%02X", sas_addr[i]);
p                  50 drivers/scsi/aic94xx/aic94xx.h 	*p = '\0';
p                 287 drivers/scsi/aic94xx/aic94xx_reg.c 	u8 *p = dst;
p                 291 drivers/scsi/aic94xx/aic94xx_reg.c 	for ( ; count > 0; count--, offs++, p++)
p                 292 drivers/scsi/aic94xx/aic94xx_reg.c 		*p = __asd_read_reg_byte(asd_ha, offs);
p                 306 drivers/scsi/aic94xx/aic94xx_reg.c 	u8 *p = src;
p                 310 drivers/scsi/aic94xx/aic94xx_reg.c 	for ( ; count > 0; count--, offs++, p++)
p                 311 drivers/scsi/aic94xx/aic94xx_reg.c 		__asd_write_reg_byte(asd_ha, offs, *p);
p                 119 drivers/scsi/aic94xx/aic94xx_sds.c 	u8 *p = buffer;
p                 123 drivers/scsi/aic94xx/aic94xx_sds.c 		for ( ; size > 0; size--, offs++, p++)
p                 124 drivers/scsi/aic94xx/aic94xx_sds.c 			*p = asd_read_ocm_byte(asd_ha, offs);
p                 164 drivers/scsi/aic94xx/aic94xx_sds.c 	u8 *p = buffer;
p                 168 drivers/scsi/aic94xx/aic94xx_sds.c 		for ( ; size > 0; size--, offs++, p++)
p                 169 drivers/scsi/aic94xx/aic94xx_sds.c 			asd_write_ocm_byte(asd_ha, offs, *p);
p                 633 drivers/scsi/aic94xx/aic94xx_sds.c static u16 asd_calc_flash_chksum(u16 *p, int size)
p                 638 drivers/scsi/aic94xx/aic94xx_sds.c 		chksum += *p++;
p                  49 drivers/scsi/aic94xx/aic94xx_task.c 		void *p = task->scatter;
p                  50 drivers/scsi/aic94xx/aic94xx_task.c 		dma_addr_t dma = dma_map_single(&asd_ha->pcidev->dev, p,
p                2773 drivers/scsi/arm/acornscsi.c     static char string[100], *p;
p                2775 drivers/scsi/arm/acornscsi.c     p = string;
p                2777 drivers/scsi/arm/acornscsi.c     p += sprintf(string, "%s at port %08lX irq %d v%d.%d.%d"
p                1504 drivers/scsi/be2iscsi/be_cmds.c 		phba->port_name = ioctl->p.resp.port_names >>
p                1266 drivers/scsi/be2iscsi/be_cmds.h 	} p;
p                2704 drivers/scsi/be2iscsi/be_main.c 	struct hba_parameters *p = &phba->params;
p                2750 drivers/scsi/be2iscsi/be_main.c 			pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
p                2857 drivers/scsi/be2iscsi/be_main.c 			pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
p                2878 drivers/scsi/be2iscsi/be_main.c 						 (p->defpdu_hdr_sz * index));
p                2882 drivers/scsi/be2iscsi/be_main.c 					address + (p->defpdu_hdr_sz * index);
p                2913 drivers/scsi/be2iscsi/be_main.c 					(p->defpdu_data_sz * num_per_mem));
p                2917 drivers/scsi/be2iscsi/be_main.c 					address + (p->defpdu_data_sz *
p                 186 drivers/scsi/bfa/bfa_fc.h #define FC_GET_DOMAIN(p)	(((p) & FC_DOMAIN_MASK) >> FC_DOMAIN_SHIFT)
p                 187 drivers/scsi/bfa/bfa_fc.h #define FC_GET_AREA(p)		(((p) & FC_AREA_MASK) >> FC_AREA_SHIFT)
p                 188 drivers/scsi/bfa/bfa_fc.h #define FC_GET_PORT(p)		(((p) & FC_PORT_MASK) >> FC_PORT_SHIFT)
p                 190 drivers/scsi/bfa/bfa_fc.h #define FC_DOMAIN_CTRLR(p)	(FC_DOMAIN_CONTROLLER_MASK | (FC_GET_DOMAIN(p)))
p                 558 drivers/scsi/bfa/bfad_im.c 	im_portp->p = im_port;
p                  64 drivers/scsi/bfa/bfad_im.h 	struct bfad_im_port_s *p;
p                  70 drivers/scsi/bfa/bfad_im.h 	return im_portp->p;
p                 648 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	struct bnx2fc_percpu_s *p = arg;
p                 656 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		spin_lock_bh(&p->fp_work_lock);
p                 657 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		while (!list_empty(&p->work_list)) {
p                 658 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 			list_splice_init(&p->work_list, &work_list);
p                 659 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 			spin_unlock_bh(&p->fp_work_lock);
p                 667 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 			spin_lock_bh(&p->fp_work_lock);
p                 670 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		spin_unlock_bh(&p->fp_work_lock);
p                2623 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	struct bnx2fc_percpu_s *p;
p                2626 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	p = &per_cpu(bnx2fc_percpu, cpu);
p                2629 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 					(void *)p, cpu_to_node(cpu),
p                2636 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	p->iothread = thread;
p                2643 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	struct bnx2fc_percpu_s *p;
p                2650 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	p = &per_cpu(bnx2fc_percpu, cpu);
p                2651 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	spin_lock_bh(&p->fp_work_lock);
p                2652 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	thread = p->iothread;
p                2653 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	p->iothread = NULL;
p                2656 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	list_for_each_entry_safe(work, tmp, &p->work_list, list) {
p                2662 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	spin_unlock_bh(&p->fp_work_lock);
p                2692 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	struct bnx2fc_percpu_s *p;
p                2735 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		p = &per_cpu(bnx2fc_percpu, cpu);
p                2736 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		INIT_LIST_HEAD(&p->work_list);
p                2737 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		spin_lock_init(&p->fp_work_lock);
p                1853 drivers/scsi/bnx2i/bnx2i_hwi.c 	struct bnx2i_percpu_s *p = arg;
p                1860 drivers/scsi/bnx2i/bnx2i_hwi.c 		spin_lock_bh(&p->p_work_lock);
p                1861 drivers/scsi/bnx2i/bnx2i_hwi.c 		while (!list_empty(&p->work_list)) {
p                1862 drivers/scsi/bnx2i/bnx2i_hwi.c 			list_splice_init(&p->work_list, &work_list);
p                1863 drivers/scsi/bnx2i/bnx2i_hwi.c 			spin_unlock_bh(&p->p_work_lock);
p                1874 drivers/scsi/bnx2i/bnx2i_hwi.c 			spin_lock_bh(&p->p_work_lock);
p                1877 drivers/scsi/bnx2i/bnx2i_hwi.c 		spin_unlock_bh(&p->p_work_lock);
p                1902 drivers/scsi/bnx2i/bnx2i_hwi.c 	struct bnx2i_percpu_s *p = NULL;
p                1918 drivers/scsi/bnx2i/bnx2i_hwi.c 	p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(sc->request));
p                1919 drivers/scsi/bnx2i/bnx2i_hwi.c 	spin_lock(&p->p_work_lock);
p                1920 drivers/scsi/bnx2i/bnx2i_hwi.c 	if (unlikely(!p->iothread)) {
p                1931 drivers/scsi/bnx2i/bnx2i_hwi.c 		list_add_tail(&bnx2i_work->list, &p->work_list);
p                1933 drivers/scsi/bnx2i/bnx2i_hwi.c 		wake_up_process(p->iothread);
p                1934 drivers/scsi/bnx2i/bnx2i_hwi.c 		spin_unlock(&p->p_work_lock);
p                1939 drivers/scsi/bnx2i/bnx2i_hwi.c 	spin_unlock(&p->p_work_lock);
p                 413 drivers/scsi/bnx2i/bnx2i_init.c 	struct bnx2i_percpu_s *p;
p                 416 drivers/scsi/bnx2i/bnx2i_init.c 	p = &per_cpu(bnx2i_percpu, cpu);
p                 418 drivers/scsi/bnx2i/bnx2i_init.c 	thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p,
p                 426 drivers/scsi/bnx2i/bnx2i_init.c 	p->iothread = thread;
p                 433 drivers/scsi/bnx2i/bnx2i_init.c 	struct bnx2i_percpu_s *p;
p                 438 drivers/scsi/bnx2i/bnx2i_init.c 	p = &per_cpu(bnx2i_percpu, cpu);
p                 439 drivers/scsi/bnx2i/bnx2i_init.c 	spin_lock_bh(&p->p_work_lock);
p                 440 drivers/scsi/bnx2i/bnx2i_init.c 	thread = p->iothread;
p                 441 drivers/scsi/bnx2i/bnx2i_init.c 	p->iothread = NULL;
p                 444 drivers/scsi/bnx2i/bnx2i_init.c 	list_for_each_entry_safe(work, tmp, &p->work_list, list) {
p                 451 drivers/scsi/bnx2i/bnx2i_init.c 	spin_unlock_bh(&p->p_work_lock);
p                 470 drivers/scsi/bnx2i/bnx2i_init.c 	struct bnx2i_percpu_s *p;
p                 495 drivers/scsi/bnx2i/bnx2i_init.c 		p = &per_cpu(bnx2i_percpu, cpu);
p                 496 drivers/scsi/bnx2i/bnx2i_init.c 		INIT_LIST_HEAD(&p->work_list);
p                 497 drivers/scsi/bnx2i/bnx2i_init.c 		spin_lock_init(&p->p_work_lock);
p                 498 drivers/scsi/bnx2i/bnx2i_init.c 		p->iothread = NULL;
p                1483 drivers/scsi/bnx2i/bnx2i_iscsi.c 	struct bnx2i_percpu_s *p;
p                1492 drivers/scsi/bnx2i/bnx2i_iscsi.c 			p = &per_cpu(bnx2i_percpu, cpu);
p                1493 drivers/scsi/bnx2i/bnx2i_iscsi.c 			spin_lock_bh(&p->p_work_lock);
p                1495 drivers/scsi/bnx2i/bnx2i_iscsi.c 						 &p->work_list, list) {
p                1505 drivers/scsi/bnx2i/bnx2i_iscsi.c 			spin_unlock_bh(&p->p_work_lock);
p                 303 drivers/scsi/csiostor/csio_hw.c csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
p                 365 drivers/scsi/csiostor/csio_hw.c 	memcpy(p->id, v->id_data, ID_LEN);
p                 366 drivers/scsi/csiostor/csio_hw.c 	s = strstrip(p->id);
p                 367 drivers/scsi/csiostor/csio_hw.c 	memcpy(p->ec, vpd + ec, EC_LEN);
p                 368 drivers/scsi/csiostor/csio_hw.c 	s = strstrip(p->ec);
p                 370 drivers/scsi/csiostor/csio_hw.c 	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
p                 371 drivers/scsi/csiostor/csio_hw.c 	s = strstrip(p->sn);
p                 675 drivers/scsi/csiostor/csio_hw.c 	const __be32 *p = (const __be32 *)fw_data;
p                 706 drivers/scsi/csiostor/csio_hw.c 		csum += ntohl(p[i]);
p                 201 drivers/scsi/csiostor/csio_mb.c 	__be32 *p = &cmdp->param[0].mnem;
p                 218 drivers/scsi/csiostor/csio_mb.c 			*p++ = htonl(temp_params);
p                 219 drivers/scsi/csiostor/csio_mb.c 			*p++ = htonl(temp_val);
p                 222 drivers/scsi/csiostor/csio_mb.c 		for (i = 0; i < nparams; i++, p += 2) {
p                 224 drivers/scsi/csiostor/csio_mb.c 			*p = htonl(temp_params);
p                 246 drivers/scsi/csiostor/csio_mb.c 	__be32 *p = &rsp->param[0].val;
p                 251 drivers/scsi/csiostor/csio_mb.c 		for (i = 0; i < nparams; i++, p += 2)
p                 252 drivers/scsi/csiostor/csio_mb.c 			*val++ = ntohl(*p);
p                1036 drivers/scsi/cxgbi/libcxgbi.c 		struct sk_buff *p = cxgbi_sock_peek_wr(csk);
p                1038 drivers/scsi/cxgbi/libcxgbi.c 		if (unlikely(!p)) {
p                1045 drivers/scsi/cxgbi/libcxgbi.c 		if (unlikely(credits < p->csum)) {
p                1049 drivers/scsi/cxgbi/libcxgbi.c 				p->csum);
p                1050 drivers/scsi/cxgbi/libcxgbi.c 			p->csum -= credits;
p                1054 drivers/scsi/cxgbi/libcxgbi.c 			credits -= p->csum;
p                1055 drivers/scsi/cxgbi/libcxgbi.c 			kfree_skb(p);
p                 543 drivers/scsi/cxgbi/libcxgbi.h 	void *p = kzalloc(size, gfp | __GFP_NOWARN);
p                 545 drivers/scsi/cxgbi/libcxgbi.h 	if (!p)
p                 546 drivers/scsi/cxgbi/libcxgbi.h 		p = vzalloc(size);
p                 548 drivers/scsi/cxgbi/libcxgbi.h 	return p;
p                3423 drivers/scsi/dc395x.c 		struct scsi_cmnd *p;
p                3429 drivers/scsi/dc395x.c 			p = srb->cmd;
p                3430 drivers/scsi/dc395x.c 			dir = p->sc_data_direction;
p                3432 drivers/scsi/dc395x.c 			printk("G:%p(%02i-%i) ", p,
p                3433 drivers/scsi/dc395x.c 			       p->device->id, (u8)p->device->lun);
p                3437 drivers/scsi/dc395x.c 			p->result = result;
p                3443 drivers/scsi/dc395x.c 				p->scsi_done(p);
p                3459 drivers/scsi/dc395x.c 			p = srb->cmd;
p                3462 drivers/scsi/dc395x.c 			printk("W:%p<%02i-%i>", p, p->device->id,
p                3463 drivers/scsi/dc395x.c 			       (u8)p->device->lun);
p                3465 drivers/scsi/dc395x.c 			p->result = result;
p                3667 drivers/scsi/dc395x.c 		struct DeviceCtlBlk *p;
p                3668 drivers/scsi/dc395x.c 		list_for_each_entry(p, &acb->dcb_list, list)
p                3669 drivers/scsi/dc395x.c 			if (p->target_id == dcb->target_id)
p                3674 drivers/scsi/dc395x.c 		       p->target_id, p->target_lun);
p                3675 drivers/scsi/dc395x.c 		dcb->sync_mode = p->sync_mode;
p                3676 drivers/scsi/dc395x.c 		dcb->sync_period = p->sync_period;
p                3677 drivers/scsi/dc395x.c 		dcb->min_nego_period = p->min_nego_period;
p                3678 drivers/scsi/dc395x.c 		dcb->sync_offset = p->sync_offset;
p                3679 drivers/scsi/dc395x.c 		dcb->inquiry7 = p->inquiry7;
p                 968 drivers/scsi/device_handler/scsi_dh_alua.c 	const char *p = params;
p                 975 drivers/scsi/device_handler/scsi_dh_alua.c 	while (*p++)
p                 977 drivers/scsi/device_handler/scsi_dh_alua.c 	if ((sscanf(p, "%u", &optimize) != 1) || (optimize > 1))
p                 427 drivers/scsi/device_handler/scsi_dh_emc.c 	const char *p = params;
p                 433 drivers/scsi/device_handler/scsi_dh_emc.c 	while (*p++)
p                 435 drivers/scsi/device_handler/scsi_dh_emc.c 	if ((sscanf(p, "%u", &st) != 1) || (st > 1))
p                 438 drivers/scsi/device_handler/scsi_dh_emc.c 	while (*p++)
p                 440 drivers/scsi/device_handler/scsi_dh_emc.c 	if ((sscanf(p, "%u", &hr) != 1) || (hr > 1))
p                 626 drivers/scsi/dpt_i2o.c 	void *p = pHba->ioctl_reply_context[context];
p                 629 drivers/scsi/dpt_i2o.c 	return p;
p                 866 drivers/scsi/dpt_i2o.c 	adpt_hba* p = NULL;
p                 973 drivers/scsi/dpt_i2o.c 		for(p = hba_chain; p->next; p = p->next);
p                 974 drivers/scsi/dpt_i2o.c 		p->next = pHba;
p                1670 drivers/scsi/dpt_i2o.c 	void *p = NULL;
p                1735 drivers/scsi/dpt_i2o.c 			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
p                1736 drivers/scsi/dpt_i2o.c 			if(!p) {
p                1742 drivers/scsi/dpt_i2o.c 			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
p                1746 drivers/scsi/dpt_i2o.c 				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
p                2142 drivers/scsi/dpt_i2o.c 			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
p                2143 drivers/scsi/dpt_i2o.c 			if( p != NULL) {
p                2144 drivers/scsi/dpt_i2o.c 				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
p                 100 drivers/scsi/esas2r/esas2r_flash.c 	u8 *p = (u8 *)&cksum;
p                 119 drivers/scsi/esas2r/esas2r_flash.c 	return p[0] ^ p[1] ^ p[2] ^ p[3];
p                 124 drivers/scsi/esas2r/esas2r_flash.c 	u8 *p = (u8 *)addr;
p                 128 drivers/scsi/esas2r/esas2r_flash.c 		cksum = cksum + p[len];
p                 331 drivers/scsi/esas2r/esas2r_flash.c 	u8 *p, *q;
p                 346 drivers/scsi/esas2r/esas2r_flash.c 		p = fc->scratch;
p                 357 drivers/scsi/esas2r/esas2r_flash.c 			if (*p++ != *q++)
p                1313 drivers/scsi/esas2r/esas2r_main.c 	struct atto_vdapart_info *p;
p                1392 drivers/scsi/esas2r/esas2r_main.c 		p = (struct atto_vdapart_info *)data;
p                1394 drivers/scsi/esas2r/esas2r_main.c 		p->part_size = le64_to_cpu(p->part_size);
p                1395 drivers/scsi/esas2r/esas2r_main.c 		p->start_lba = le32_to_cpu(p->start_lba);
p                1396 drivers/scsi/esas2r/esas2r_main.c 		p->block_size = le32_to_cpu(p->block_size);
p                1397 drivers/scsi/esas2r/esas2r_main.c 		p->target_id = le16_to_cpu(p->target_id);
p                 120 drivers/scsi/esp_scsi.c 			      struct esp_event_ent *p)
p                 122 drivers/scsi/esp_scsi.c 	p->sreg = esp->sreg;
p                 123 drivers/scsi/esp_scsi.c 	p->seqreg = esp->seqreg;
p                 124 drivers/scsi/esp_scsi.c 	p->sreg2 = esp->sreg2;
p                 125 drivers/scsi/esp_scsi.c 	p->ireg = esp->ireg;
p                 126 drivers/scsi/esp_scsi.c 	p->select_state = esp->select_state;
p                 127 drivers/scsi/esp_scsi.c 	p->event = esp->event;
p                 132 drivers/scsi/esp_scsi.c 	struct esp_event_ent *p;
p                 135 drivers/scsi/esp_scsi.c 	p = &esp->esp_event_log[idx];
p                 136 drivers/scsi/esp_scsi.c 	p->type = ESP_EVENT_TYPE_CMD;
p                 137 drivers/scsi/esp_scsi.c 	p->val = val;
p                 138 drivers/scsi/esp_scsi.c 	esp_log_fill_regs(esp, p);
p                 167 drivers/scsi/esp_scsi.c 	struct esp_event_ent *p;
p                 170 drivers/scsi/esp_scsi.c 	p = &esp->esp_event_log[idx];
p                 171 drivers/scsi/esp_scsi.c 	p->type = ESP_EVENT_TYPE_EVENT;
p                 172 drivers/scsi/esp_scsi.c 	p->val = val;
p                 173 drivers/scsi/esp_scsi.c 	esp_log_fill_regs(esp, p);
p                 187 drivers/scsi/esp_scsi.c 		struct esp_event_ent *p = &esp->esp_event_log[idx];
p                 193 drivers/scsi/esp_scsi.c 			     p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
p                 194 drivers/scsi/esp_scsi.c 			     p->val, p->sreg, p->seqreg,
p                 195 drivers/scsi/esp_scsi.c 			     p->sreg2, p->ireg, p->select_state, p->event);
p                 404 drivers/scsi/esp_scsi.c 	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
p                 411 drivers/scsi/esp_scsi.c 	return sg_dma_address(p->cur_sg) +
p                 412 drivers/scsi/esp_scsi.c 		(sg_dma_len(p->cur_sg) -
p                 413 drivers/scsi/esp_scsi.c 		 p->cur_residue);
p                 419 drivers/scsi/esp_scsi.c 	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
p                 425 drivers/scsi/esp_scsi.c 	return p->cur_residue;
p                 431 drivers/scsi/esp_scsi.c 	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
p                 438 drivers/scsi/esp_scsi.c 	p->cur_residue -= len;
p                 439 drivers/scsi/esp_scsi.c 	p->tot_residue -= len;
p                 440 drivers/scsi/esp_scsi.c 	if (p->cur_residue < 0 || p->tot_residue < 0) {
p                 445 drivers/scsi/esp_scsi.c 			     p->cur_residue, p->tot_residue, len);
p                 446 drivers/scsi/esp_scsi.c 		p->cur_residue = 0;
p                 447 drivers/scsi/esp_scsi.c 		p->tot_residue = 0;
p                 449 drivers/scsi/esp_scsi.c 	if (!p->cur_residue && p->tot_residue) {
p                 450 drivers/scsi/esp_scsi.c 		p->prv_sg = p->cur_sg;
p                 451 drivers/scsi/esp_scsi.c 		p->cur_sg = sg_next(p->cur_sg);
p                 452 drivers/scsi/esp_scsi.c 		p->cur_residue = sg_dma_len(p->cur_sg);
p                 659 drivers/scsi/esp_scsi.c 	u8 *p, val;
p                 674 drivers/scsi/esp_scsi.c 	p = esp->command_block;
p                 677 drivers/scsi/esp_scsi.c 	*p++ = IDENTIFY(0, lun);
p                 678 drivers/scsi/esp_scsi.c 	*p++ = REQUEST_SENSE;
p                 679 drivers/scsi/esp_scsi.c 	*p++ = ((dev->scsi_level <= SCSI_2) ?
p                 681 drivers/scsi/esp_scsi.c 	*p++ = 0;
p                 682 drivers/scsi/esp_scsi.c 	*p++ = 0;
p                 683 drivers/scsi/esp_scsi.c 	*p++ = SCSI_SENSE_BUFFERSIZE;
p                 684 drivers/scsi/esp_scsi.c 	*p++ = 0;
p                 696 drivers/scsi/esp_scsi.c 	val = (p - esp->command_block);
p                 741 drivers/scsi/esp_scsi.c 	u8 *p;
p                 772 drivers/scsi/esp_scsi.c 	p = esp->command_block;
p                 817 drivers/scsi/esp_scsi.c 	*p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);
p                 844 drivers/scsi/esp_scsi.c 			*p++ = ent->tag[0];
p                 845 drivers/scsi/esp_scsi.c 			*p++ = ent->tag[1];
p                 851 drivers/scsi/esp_scsi.c 			*p++ = cmd->cmnd[i];
p                 863 drivers/scsi/esp_scsi.c 	val = (p - esp->command_block);
p                1360 drivers/scsi/esp_scsi.c 			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
p                1363 drivers/scsi/esp_scsi.c 			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg,
p                1289 drivers/scsi/fcoe/fcoe.c 	struct fcoe_percpu_s *p;
p                1291 drivers/scsi/fcoe/fcoe.c 	p = per_cpu_ptr(&fcoe_percpu, cpu);
p                1292 drivers/scsi/fcoe/fcoe.c 	spin_lock_bh(&p->fcoe_rx_list.lock);
p                1293 drivers/scsi/fcoe/fcoe.c 	crc_eof = p->crc_eof_page;
p                1294 drivers/scsi/fcoe/fcoe.c 	p->crc_eof_page = NULL;
p                1295 drivers/scsi/fcoe/fcoe.c 	p->crc_eof_offset = 0;
p                1296 drivers/scsi/fcoe/fcoe.c 	spin_unlock_bh(&p->fcoe_rx_list.lock);
p                1300 drivers/scsi/fcoe/fcoe.c 	flush_work(&p->work);
p                1738 drivers/scsi/fcoe/fcoe.c 	struct fcoe_percpu_s *p;
p                1742 drivers/scsi/fcoe/fcoe.c 	p = container_of(work, struct fcoe_percpu_s, work);
p                1745 drivers/scsi/fcoe/fcoe.c 	spin_lock_bh(&p->fcoe_rx_list.lock);
p                1746 drivers/scsi/fcoe/fcoe.c 	skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
p                1747 drivers/scsi/fcoe/fcoe.c 	spin_unlock_bh(&p->fcoe_rx_list.lock);
p                2473 drivers/scsi/fcoe/fcoe.c 	struct fcoe_percpu_s *p;
p                2492 drivers/scsi/fcoe/fcoe.c 		p = per_cpu_ptr(&fcoe_percpu, cpu);
p                2493 drivers/scsi/fcoe/fcoe.c 		INIT_WORK(&p->work, fcoe_receive_work);
p                2494 drivers/scsi/fcoe/fcoe.c 		skb_queue_head_init(&p->fcoe_rx_list);
p                 466 drivers/scsi/fdomain.c 	unsigned char *p = scsi_bios_ptable(bdev);
p                 468 drivers/scsi/fdomain.c 	if (p && p[65] == 0xaa && p[64] == 0x55 /* Partition table valid */
p                 469 drivers/scsi/fdomain.c 	    && p[4]) {	 /* Partition type */
p                 470 drivers/scsi/fdomain.c 		geom[0] = p[5] + 1;	/* heads */
p                 471 drivers/scsi/fdomain.c 		geom[1] = p[6] & 0x3f;	/* sectors */
p                 485 drivers/scsi/fdomain.c 	kfree(p);
p                  92 drivers/scsi/fdomain_isa.c 	void __iomem *p;
p                  97 drivers/scsi/fdomain_isa.c 		p = ioremap(addresses[ndev], FDOMAIN_BIOS_SIZE);
p                  98 drivers/scsi/fdomain_isa.c 		if (!p)
p                 101 drivers/scsi/fdomain_isa.c 			if (check_signature(p + signatures[i].offset,
p                 111 drivers/scsi/fdomain_isa.c 			base = readb(p + sig->base_offset) +
p                 112 drivers/scsi/fdomain_isa.c 			      (readb(p + sig->base_offset + 1) << 8);
p                 113 drivers/scsi/fdomain_isa.c 		iounmap(p);
p                 148 drivers/scsi/fdomain_isa.c 	iounmap(p);
p                 941 drivers/scsi/fnic/vnic_dev.c 	void *p;
p                 943 drivers/scsi/fnic/vnic_dev.c 	p = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
p                 944 drivers/scsi/fnic/vnic_dev.c 	if (p) {
p                 160 drivers/scsi/gdth.h #define SPECIAL_SCP(p)  (p==UNUSED_CMND || p==INTERNAL_CMND || p==SCREEN_CMND)
p                 326 drivers/scsi/hisi_sas/hisi_sas.h 	struct sas_ha_struct *p;
p                2851 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p)
p                2877 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_debugfs_axi_show(struct seq_file *s, void *p)
p                2903 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_debugfs_ras_show(struct seq_file *s, void *p)
p                2929 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p)
p                2999 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p)
p                3034 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_debugfs_dq_show(struct seq_file *s, void *p)
p                3057 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p)
p                3085 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_debugfs_iost_cache_show(struct seq_file *s, void *p)
p                3125 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p)
p                3153 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_debugfs_itct_cache_show(struct seq_file *s, void *p)
p                3198 drivers/scsi/hisi_sas/hisi_sas_main.c 	int p;
p                3211 drivers/scsi/hisi_sas/hisi_sas_main.c 	for (p = 0; p < hisi_hba->n_phy; p++) {
p                3212 drivers/scsi/hisi_sas/hisi_sas_main.c 		snprintf(name, 256, "%d", p);
p                3214 drivers/scsi/hisi_sas/hisi_sas_main.c 		debugfs_create_file(name, 0400, dentry, &hisi_hba->phy[p],
p                3336 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_debugfs_bist_linkrate_show(struct seq_file *s, void *p)
p                3425 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_debugfs_bist_code_mode_show(struct seq_file *s, void *p)
p                3521 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_debugfs_bist_phy_show(struct seq_file *s, void *p)
p                3555 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_debugfs_bist_mode_show(struct seq_file *s, void *p)
p                3656 drivers/scsi/hisi_sas/hisi_sas_main.c static int hisi_sas_debugfs_bist_enable_show(struct seq_file *s, void *p)
p                3720 drivers/scsi/hisi_sas/hisi_sas_main.c 	int p, c, d;
p                3731 drivers/scsi/hisi_sas/hisi_sas_main.c 	for (p = 0; p < hisi_hba->n_phy; p++) {
p                3732 drivers/scsi/hisi_sas/hisi_sas_main.c 		hisi_hba->debugfs_port_reg[p] =
p                3735 drivers/scsi/hisi_sas/hisi_sas_main.c 		if (!hisi_hba->debugfs_port_reg[p])
p                1319 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
p                1321 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c 	struct hisi_sas_phy *phy = p;
p                1406 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c static irqreturn_t int_bcast_v1_hw(int irq, void *p)
p                1408 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c 	struct hisi_sas_phy *phy = p;
p                1436 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c static irqreturn_t int_abnormal_v1_hw(int irq, void *p)
p                1438 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c 	struct hisi_sas_phy *phy = p;
p                1491 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
p                1493 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c 	struct hisi_sas_cq *cq = p;
p                1538 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c static irqreturn_t fatal_ecc_int_v1_hw(int irq, void *p)
p                1540 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c 	struct hisi_hba *hisi_hba = p;
p                1597 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c static irqreturn_t fatal_axi_int_v1_hw(int irq, void *p)
p                1599 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c 	struct hisi_hba *hisi_hba = p;
p                2754 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
p                2756 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 	struct hisi_hba *hisi_hba = p;
p                2858 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
p                2860 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 	struct hisi_hba *hisi_hba = p;
p                2979 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c static irqreturn_t fatal_ecc_int_v2_hw(int irq_no, void *p)
p                2981 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 	struct hisi_hba *hisi_hba = p;
p                3053 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
p                3055 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 	struct hisi_hba *hisi_hba = p;
p                3179 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
p                3181 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 	struct hisi_sas_cq *cq = p;
p                3192 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
p                3194 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 	struct hisi_sas_phy *phy = p;
p                1585 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
p                1587 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 	struct hisi_hba *hisi_hba = p;
p                1806 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
p                1808 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 	struct hisi_hba *hisi_hba = p;
p                2019 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
p                2022 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 	struct hisi_hba *hisi_hba = p;
p                2334 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
p                2336 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 	struct hisi_sas_cq *cq = p;
p                 511 drivers/scsi/hosts.c 	struct Scsi_Host *p;
p                 514 drivers/scsi/hosts.c 	p = class_to_shost(dev);
p                 515 drivers/scsi/hosts.c 	return p->host_no == *hostnum;
p                6202 drivers/scsi/hpsa.c 	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
p                6217 drivers/scsi/hpsa.c 	err |= copy_to_user(p, &arg64, sizeof(arg64));
p                6222 drivers/scsi/hpsa.c 	err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
p                6225 drivers/scsi/hpsa.c 	err |= copy_in_user(&arg32->error_info, &p->error_info,
p                6238 drivers/scsi/hpsa.c 	BIG_IOCTL_Command_struct __user *p =
p                6255 drivers/scsi/hpsa.c 	err |= copy_to_user(p, &arg64, sizeof(arg64));
p                6260 drivers/scsi/hpsa.c 	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
p                6263 drivers/scsi/hpsa.c 	err |= copy_in_user(&arg32->error_info, &p->error_info,
p                7131 drivers/scsi/hpsa.c #define hpsa_noop(p) hpsa_message(p, 3, 0)
p                  96 drivers/scsi/hptiop.c 			struct hpt_iop_request_header __iomem * p;
p                  98 drivers/scsi/hptiop.c 			p = (struct hpt_iop_request_header __iomem *)
p                 101 drivers/scsi/hptiop.c 			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
p                 102 drivers/scsi/hptiop.c 				if (readl(&p->context))
p                 105 drivers/scsi/hptiop.c 					writel(1, &p->context);
p                 148 drivers/scsi/hptiop.c 		u64 p;
p                 150 drivers/scsi/hptiop.c 		memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
p                 156 drivers/scsi/hptiop.c 		return p;
p                 161 drivers/scsi/hptiop.c static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
p                 169 drivers/scsi/hptiop.c 	memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
p                 798 drivers/scsi/hptiop.c 	struct hpt_iop_request_ioctl_command __iomem *p;
p                 811 drivers/scsi/hptiop.c 	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
p                 821 drivers/scsi/hptiop.c 				&p->buf[(readl(&p->inbuf_size) + 3)& ~3],
p                1200 drivers/scsi/hptiop.c 	char *p;
p                1218 drivers/scsi/hptiop.c 	p = dma_alloc_coherent(&hba->pcidev->dev,
p                1220 drivers/scsi/hptiop.c 	if (!p)
p                1223 drivers/scsi/hptiop.c 	hba->u.mvfrey.internal_req.req_virt = p;
p                1228 drivers/scsi/hptiop.c 	p += 0x800;
p                1231 drivers/scsi/hptiop.c 	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
p                1234 drivers/scsi/hptiop.c 	p += list_count * sizeof(struct mvfrey_inlist_entry);
p                1237 drivers/scsi/hptiop.c 	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
p                1240 drivers/scsi/hptiop.c 	p += list_count * sizeof(struct mvfrey_outlist_entry);
p                1243 drivers/scsi/hptiop.c 	hba->u.mvfrey.outlist_cptr = (__le32 *)p;
p                1299 drivers/scsi/ipr.c 	char *p = buffer;
p                1301 drivers/scsi/ipr.c 	*p = '\0';
p                1302 drivers/scsi/ipr.c 	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
p                1304 drivers/scsi/ipr.c 		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
p                1322 drivers/scsi/ipr.c 	char *p = buffer;
p                1324 drivers/scsi/ipr.c 	*p = '\0';
p                1325 drivers/scsi/ipr.c 	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
p                1326 drivers/scsi/ipr.c 	__ipr_format_res_path(res_path, p, len - (buffer - p));
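The ipr.c excerpts above show the common pattern of building a string piecewise with snprintf while tracking how much of the buffer remains. A minimal, self-contained sketch of that pattern follows; it uses only standard C, and the function name, separator, and bounds handling are illustrative rather than copied from ipr.c:

	#include <stdio.h>
	#include <stddef.h>

	/* Format a byte array as "XX-YY-ZZ..." into buf of size len. */
	static char *format_res_path(const unsigned char *res, int count,
				     char *buf, size_t len)
	{
		size_t off = 0;
		int i;

		buf[0] = '\0';
		for (i = 0; i < count && off < len; i++) {
			int n = snprintf(buf + off, len - off,
					 i ? "-%02X" : "%02X", res[i]);
			if (n < 0)
				break;
			off += (size_t)n;
		}
		return buf;
	}

The running offset keeps each snprintf call bounded by the space that is actually left, so the loop stops cleanly when the buffer fills.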
p                9876 drivers/scsi/ipr.c 	const struct ipr_interrupt_offsets *p;
p                9880 drivers/scsi/ipr.c 	p = &ioa_cfg->chip_cfg->regs;
p                9884 drivers/scsi/ipr.c 	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
p                9885 drivers/scsi/ipr.c 	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
p                9886 drivers/scsi/ipr.c 	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
p                9887 drivers/scsi/ipr.c 	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
p                9888 drivers/scsi/ipr.c 	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
p                9889 drivers/scsi/ipr.c 	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
p                9890 drivers/scsi/ipr.c 	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
p                9891 drivers/scsi/ipr.c 	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
p                9892 drivers/scsi/ipr.c 	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
p                9893 drivers/scsi/ipr.c 	t->ioarrin_reg = base + p->ioarrin_reg;
p                9894 drivers/scsi/ipr.c 	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
p                9895 drivers/scsi/ipr.c 	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
p                9896 drivers/scsi/ipr.c 	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
p                9897 drivers/scsi/ipr.c 	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
p                9898 drivers/scsi/ipr.c 	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
p                9899 drivers/scsi/ipr.c 	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
p                9902 drivers/scsi/ipr.c 		t->init_feedback_reg = base + p->init_feedback_reg;
p                9903 drivers/scsi/ipr.c 		t->dump_addr_reg = base + p->dump_addr_reg;
p                9904 drivers/scsi/ipr.c 		t->dump_data_reg = base + p->dump_data_reg;
p                9905 drivers/scsi/ipr.c 		t->endian_swap_reg = base + p->endian_swap_reg;
p                2526 drivers/scsi/ips.c 	struct scsi_cmnd *p;
p                2627 drivers/scsi/ips.c 	p = ha->scb_waitlist.head;
p                2628 drivers/scsi/ips.c 	while ((p) && (scb = ips_getscb(ha))) {
p                2629 drivers/scsi/ips.c 		if ((scmd_channel(p) > 0)
p                2631 drivers/scsi/ips.c 			dcdb_active[scmd_channel(p) -
p                2632 drivers/scsi/ips.c 				    1] & (1 << scmd_id(p)))) {
p                2634 drivers/scsi/ips.c 			p = (struct scsi_cmnd *) p->host_scribble;
p                2638 drivers/scsi/ips.c 		q = p;
p                2733 drivers/scsi/ips.c 		p = (struct scsi_cmnd *) p->host_scribble;
p                2818 drivers/scsi/ips.c 	ips_scb_t *p;
p                2829 drivers/scsi/ips.c 	p = queue->head;
p                2831 drivers/scsi/ips.c 	while ((p) && (item != p->q_next))
p                2832 drivers/scsi/ips.c 		p = p->q_next;
p                2834 drivers/scsi/ips.c 	if (p) {
p                2836 drivers/scsi/ips.c 		p->q_next = item->q_next;
p                2839 drivers/scsi/ips.c 			queue->tail = p;
p                2929 drivers/scsi/ips.c 	struct scsi_cmnd *p;
p                2940 drivers/scsi/ips.c 	p = queue->head;
p                2942 drivers/scsi/ips.c 	while ((p) && (item != (struct scsi_cmnd *) p->host_scribble))
p                2943 drivers/scsi/ips.c 		p = (struct scsi_cmnd *) p->host_scribble;
p                2945 drivers/scsi/ips.c 	if (p) {
p                2947 drivers/scsi/ips.c 		p->host_scribble = item->host_scribble;
p                2950 drivers/scsi/ips.c 			queue->tail = p;
p                3042 drivers/scsi/ips.c 	ips_copp_wait_item_t *p;
p                3053 drivers/scsi/ips.c 	p = queue->head;
p                3055 drivers/scsi/ips.c 	while ((p) && (item != p->next))
p                3056 drivers/scsi/ips.c 		p = p->next;
p                3058 drivers/scsi/ips.c 	if (p) {
p                3060 drivers/scsi/ips.c 		p->next = item->next;
p                3063 drivers/scsi/ips.c 			queue->tail = p;
p                 361 drivers/scsi/libsas/sas_discover.c 	struct sas_port *sas_port, *p;
p                 363 drivers/scsi/libsas/sas_discover.c 	list_for_each_entry_safe(sas_port, p, &port->sas_port_del_list, del_list) {
p                 155 drivers/scsi/libsas/sas_expander.c 	u8 *p = kzalloc(size, GFP_KERNEL);
p                 156 drivers/scsi/libsas/sas_expander.c 	if (p)
p                 157 drivers/scsi/libsas/sas_expander.c 		p[0] = SMP_REQUEST;
p                 158 drivers/scsi/libsas/sas_expander.c 	return p;
p                 142 drivers/scsi/libsas/sas_internal.h static inline void sas_phy_set_target(struct asd_sas_phy *p, struct domain_device *dev)
p                 144 drivers/scsi/libsas/sas_internal.h 	struct sas_phy *phy = p->phy;
p                 743 drivers/scsi/lpfc/lpfc_hbadisc.c lpfc_do_work(void *p)
p                 745 drivers/scsi/lpfc/lpfc_hbadisc.c 	struct lpfc_hba *phba = p;
p                3313 drivers/scsi/megaraid/megaraid_sas_fusion.c 	u8 *p;
p                3317 drivers/scsi/megaraid/megaraid_sas_fusion.c 	p = fusion->req_frames_desc +
p                3320 drivers/scsi/megaraid/megaraid_sas_fusion.c 	return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
p                 115 drivers/scsi/mesh.h #define SYNC_PARAMS(o, p)	(((o) << 4) | (p))
p                 608 drivers/scsi/mpt3sas/mpt3sas_base.c 	struct task_struct *p;
p                 648 drivers/scsi/mpt3sas/mpt3sas_base.c 		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
p                 650 drivers/scsi/mpt3sas/mpt3sas_base.c 		if (IS_ERR(p))
p                 620 drivers/scsi/mpt3sas/mpt3sas_base.h static inline void pcie_device_get(struct _pcie_device *p)
p                 622 drivers/scsi/mpt3sas/mpt3sas_base.h 	kref_get(&p->refcount);
p                 648 drivers/scsi/mpt3sas/mpt3sas_base.h static inline void pcie_device_put(struct _pcie_device *p)
p                 650 drivers/scsi/mpt3sas/mpt3sas_base.h 	kref_put(&p->refcount, pcie_device_free);
p                 436 drivers/scsi/mpt3sas/mpt3sas_config.c 		u8 *p = (u8 *)mem.page;
p                 439 drivers/scsi/mpt3sas/mpt3sas_config.c 		if (p) {
p                 441 drivers/scsi/mpt3sas/mpt3sas_config.c 			    (p[3] & 0xF)) {
p                 444 drivers/scsi/mpt3sas/mpt3sas_config.c 				_debug_dump_config(p, min_t(u16, mem.sz,
p                 449 drivers/scsi/mpt3sas/mpt3sas_config.c 				      p[3] & 0xF);
p                 454 drivers/scsi/mpt3sas/mpt3sas_config.c 			    (mpi_request->ExtPageType != p[6])) {
p                 457 drivers/scsi/mpt3sas/mpt3sas_config.c 				_debug_dump_config(p, min_t(u16, mem.sz,
p                 461 drivers/scsi/mpt3sas/mpt3sas_config.c 				      mpi_request->ExtPageType, p[6]);
p                1792 drivers/scsi/mpt3sas/mpt3sas_transport.c 		dma_addr_t *dma_addr, size_t *dma_len, void **p)
p                1796 drivers/scsi/mpt3sas/mpt3sas_transport.c 		*p = dma_alloc_coherent(dev, buf->payload_len, dma_addr,
p                1798 drivers/scsi/mpt3sas/mpt3sas_transport.c 		if (!*p)
p                1806 drivers/scsi/mpt3sas/mpt3sas_transport.c 		*p = NULL;
p                1814 drivers/scsi/mpt3sas/mpt3sas_transport.c 		dma_addr_t dma_addr, void *p)
p                1816 drivers/scsi/mpt3sas/mpt3sas_transport.c 	if (p)
p                1817 drivers/scsi/mpt3sas/mpt3sas_transport.c 		dma_free_coherent(dev, buf->payload_len, p, dma_addr);
p                 914 drivers/scsi/mvumi.c 	dma_addr_t p;
p                 934 drivers/scsi/mvumi.c 	p = res_mgnt->bus_addr;
p                 937 drivers/scsi/mvumi.c 	offset = round_up(p, 128) - p;
p                 938 drivers/scsi/mvumi.c 	p += offset;
p                 941 drivers/scsi/mvumi.c 	mhba->ib_list_phys = p;
p                 944 drivers/scsi/mvumi.c 		p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
p                 946 drivers/scsi/mvumi.c 		mhba->ib_frame_phys = p;
p                 949 drivers/scsi/mvumi.c 	p += mhba->ib_max_size * mhba->max_io;
p                 952 drivers/scsi/mvumi.c 	offset = round_up(p, 8) - p;
p                 953 drivers/scsi/mvumi.c 	p += offset;
p                 956 drivers/scsi/mvumi.c 	mhba->ib_shadow_phys = p;
p                 957 drivers/scsi/mvumi.c 	p += sizeof(u32)*2;
p                 961 drivers/scsi/mvumi.c 		offset = round_up(p, 8) - p;
p                 962 drivers/scsi/mvumi.c 		p += offset;
p                 965 drivers/scsi/mvumi.c 		mhba->ob_shadow_phys = p;
p                 966 drivers/scsi/mvumi.c 		p += 8;
p                 969 drivers/scsi/mvumi.c 		offset = round_up(p, 4) - p;
p                 970 drivers/scsi/mvumi.c 		p += offset;
p                 973 drivers/scsi/mvumi.c 		mhba->ob_shadow_phys = p;
p                 974 drivers/scsi/mvumi.c 		p += 4;
p                 979 drivers/scsi/mvumi.c 	offset = round_up(p, 128) - p;
p                 980 drivers/scsi/mvumi.c 	p += offset;
p                 984 drivers/scsi/mvumi.c 	mhba->ob_list_phys = p;
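The mvumi.c excerpts walk a single DMA-coherent allocation and carve it into sub-regions by repeatedly rounding the running address up to the required alignment. A self-contained sketch of that carving idiom follows; the region names, alignments, and sizes are illustrative and do not reproduce the driver's actual layout:

	#include <stdint.h>
	#include <stddef.h>

	/* Round x up to a power-of-two boundary a. */
	static uintptr_t round_up_pow2(uintptr_t x, uintptr_t a)
	{
		return (x + a - 1) & ~(a - 1);
	}

	struct carved_layout {
		uintptr_t list;		/* 128-byte aligned ring */
		uintptr_t shadow;	/* 8-byte aligned shadow words */
	};

	/* Carve two aligned regions out of one base allocation of 'size' bytes. */
	static int carve_regions(uintptr_t base, size_t size, size_t list_bytes,
				 struct carved_layout *out)
	{
		uintptr_t p = base;

		p = round_up_pow2(p, 128);
		out->list = p;
		p += list_bytes;

		p = round_up_pow2(p, 8);
		out->shadow = p;
		p += sizeof(uint32_t) * 2;

		return (p - base) <= size ? 0 : -1;	/* -1: allocation too small */
	}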
p                 315 drivers/scsi/ncr53c8xx.c 	void *p;
p                 317 drivers/scsi/ncr53c8xx.c 	p = ___m_alloc(mp, size);
p                 320 drivers/scsi/ncr53c8xx.c 		printk ("new %-10s[%4d] @%p.\n", name, size, p);
p                 322 drivers/scsi/ncr53c8xx.c 	if (p)
p                 323 drivers/scsi/ncr53c8xx.c 		memset(p, 0, size);
p                 327 drivers/scsi/ncr53c8xx.c 	return p;
p                 437 drivers/scsi/ncr53c8xx.c static void ___del_dma_pool(m_pool_s *p)
p                 441 drivers/scsi/ncr53c8xx.c 	while (*pp && *pp != p)
p                 445 drivers/scsi/ncr53c8xx.c 		__m_free(&mp0, p, sizeof(*p), "MPOOL");
p                 502 drivers/scsi/ncr53c8xx.c #define _m_free_dma(np, p, s, n)	__m_free_dma(np->dev, p, s, n)
p                 504 drivers/scsi/ncr53c8xx.c #define m_free_dma(p, s, n)		_m_free_dma(np, p, s, n)
p                 505 drivers/scsi/ncr53c8xx.c #define _vtobus(np, p)			__vtobus(np->dev, p)
p                 506 drivers/scsi/ncr53c8xx.c #define vtobus(p)			_vtobus(np, p)
p                 636 drivers/scsi/ncr53c8xx.c static int __init get_setup_token(char *p)
p                 645 drivers/scsi/ncr53c8xx.c 		if (!strncmp(p, cur, pc - cur))
p                 790 drivers/scsi/ncr53c8xx.c 	char *p = driver_setup.tag_ctrl;
p                 796 drivers/scsi/ncr53c8xx.c 	while ((c = *p++) != 0) {
p                 797 drivers/scsi/ncr53c8xx.c 		v = simple_strtoul(p, &ep, 0);
p                 826 drivers/scsi/ncr53c8xx.c 		p = ep;
p                3436 drivers/scsi/ncr53c8xx.c 	ncrcmd	*p;
p                3438 drivers/scsi/ncr53c8xx.c 	p = scrh->tryloop;
p                3440 drivers/scsi/ncr53c8xx.c 		*p++ =SCR_CALL;
p                3441 drivers/scsi/ncr53c8xx.c 		*p++ =PADDR (idle);
p                3444 drivers/scsi/ncr53c8xx.c 	BUG_ON((u_long)p != (u_long)&scrh->tryloop + sizeof (scrh->tryloop));
p                3448 drivers/scsi/ncr53c8xx.c 	p = scrh->done_queue;
p                3450 drivers/scsi/ncr53c8xx.c 		*p++ =SCR_COPY (sizeof(struct ccb *));
p                3451 drivers/scsi/ncr53c8xx.c 		*p++ =NADDR (header.cp);
p                3452 drivers/scsi/ncr53c8xx.c 		*p++ =NADDR (ccb_done[i]);
p                3453 drivers/scsi/ncr53c8xx.c 		*p++ =SCR_CALL;
p                3454 drivers/scsi/ncr53c8xx.c 		*p++ =PADDR (done_end);
p                3457 drivers/scsi/ncr53c8xx.c 	BUG_ON((u_long)p != (u_long)&scrh->done_queue+sizeof(scrh->done_queue));
p                3461 drivers/scsi/ncr53c8xx.c 	p = scrh->hdata_in;
p                3463 drivers/scsi/ncr53c8xx.c 		*p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN));
p                3464 drivers/scsi/ncr53c8xx.c 		*p++ =PADDR (dispatch);
p                3465 drivers/scsi/ncr53c8xx.c 		*p++ =SCR_MOVE_TBL ^ SCR_DATA_IN;
p                3466 drivers/scsi/ncr53c8xx.c 		*p++ =offsetof (struct dsb, data[i]);
p                3469 drivers/scsi/ncr53c8xx.c 	BUG_ON((u_long)p != (u_long)&scrh->hdata_in + sizeof (scrh->hdata_in));
p                3471 drivers/scsi/ncr53c8xx.c 	p = scr->data_in;
p                3473 drivers/scsi/ncr53c8xx.c 		*p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN));
p                3474 drivers/scsi/ncr53c8xx.c 		*p++ =PADDR (dispatch);
p                3475 drivers/scsi/ncr53c8xx.c 		*p++ =SCR_MOVE_TBL ^ SCR_DATA_IN;
p                3476 drivers/scsi/ncr53c8xx.c 		*p++ =offsetof (struct dsb, data[i]);
p                3479 drivers/scsi/ncr53c8xx.c 	BUG_ON((u_long)p != (u_long)&scr->data_in + sizeof (scr->data_in));
p                3481 drivers/scsi/ncr53c8xx.c 	p = scrh->hdata_out;
p                3483 drivers/scsi/ncr53c8xx.c 		*p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT));
p                3484 drivers/scsi/ncr53c8xx.c 		*p++ =PADDR (dispatch);
p                3485 drivers/scsi/ncr53c8xx.c 		*p++ =SCR_MOVE_TBL ^ SCR_DATA_OUT;
p                3486 drivers/scsi/ncr53c8xx.c 		*p++ =offsetof (struct dsb, data[i]);
p                3489 drivers/scsi/ncr53c8xx.c 	BUG_ON((u_long)p != (u_long)&scrh->hdata_out + sizeof (scrh->hdata_out));
p                3491 drivers/scsi/ncr53c8xx.c 	p = scr->data_out;
p                3493 drivers/scsi/ncr53c8xx.c 		*p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT));
p                3494 drivers/scsi/ncr53c8xx.c 		*p++ =PADDR (dispatch);
p                3495 drivers/scsi/ncr53c8xx.c 		*p++ =SCR_MOVE_TBL ^ SCR_DATA_OUT;
p                3496 drivers/scsi/ncr53c8xx.c 		*p++ =offsetof (struct dsb, data[i]);
p                3499 drivers/scsi/ncr53c8xx.c 	BUG_ON((u_long) p != (u_long)&scr->data_out + sizeof (scr->data_out));
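The ncr53c8xx.c excerpts fill fixed-size script arrays through a moving cursor pointer and then assert that the cursor landed exactly at the end of each array, catching table-size mismatches at initialization time. A minimal sketch of that pattern using a plain assert follows; the opcode values are placeholders, not real SCRIPTS opcodes:

	#include <assert.h>
	#include <stdint.h>

	#define ENTRIES 4

	static uint32_t tryloop[ENTRIES * 2];

	static void fill_tryloop(void)
	{
		uint32_t *p = tryloop;
		int i;

		for (i = 0; i < ENTRIES; i++) {
			*p++ = 0x88080000u;	/* placeholder: "CALL" opcode  */
			*p++ = 0x00001000u;	/* placeholder: target address */
		}
		/* Same idea as the driver's BUG_ON(): the cursor must end
		 * exactly at the end of the array, or the table size is wrong. */
		assert((char *)p == (char *)tryloop + sizeof(tryloop));
	}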
p                4961 drivers/scsi/ncr53c8xx.c 			u_char *p = cmd->sense_buffer;
p                4964 drivers/scsi/ncr53c8xx.c 			for (i=0; i<14; i++) printk (" %x", *p++);
p                5021 drivers/scsi/ncr53c8xx.c 		u_char * p;
p                5024 drivers/scsi/ncr53c8xx.c 		p = (u_char*) &cmd->cmnd[0];
p                5025 drivers/scsi/ncr53c8xx.c 		for (i=0; i<cmd->cmd_len; i++) printk (" %x", *p++);
p                5034 drivers/scsi/ncr53c8xx.c 				p = (u_char*) &cmd->sense_buffer;
p                5036 drivers/scsi/ncr53c8xx.c 					printk (" %x", *p++);
p                 898 drivers/scsi/ncr53c8xx.h #define REGJ(p,r) (offsetof(struct ncr_reg, p ## r))
p                1139 drivers/scsi/qedi/qedi_main.c 			  u16 que_idx, struct qedi_percpu_s *p)
p                1171 drivers/scsi/qedi/qedi_main.c 		list_add_tail(&qedi_cmd->cqe_work.list, &p->work_list);
p                1186 drivers/scsi/qedi/qedi_main.c 		list_add_tail(&qedi_work->list, &p->work_list);
p                1200 drivers/scsi/qedi/qedi_main.c 	struct qedi_percpu_s *p = NULL;
p                1221 drivers/scsi/qedi/qedi_main.c 	p = &per_cpu(qedi_percpu, cpu);
p                1223 drivers/scsi/qedi/qedi_main.c 	if (unlikely(!p->iothread))
p                1226 drivers/scsi/qedi/qedi_main.c 	spin_lock_irqsave(&p->p_work_lock, flags);
p                1234 drivers/scsi/qedi/qedi_main.c 		ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
p                1244 drivers/scsi/qedi/qedi_main.c 	wake_up_process(p->iothread);
p                1245 drivers/scsi/qedi/qedi_main.c 	spin_unlock_irqrestore(&p->p_work_lock, flags);
p                1859 drivers/scsi/qedi/qedi_main.c 	struct qedi_percpu_s *p = arg;
p                1867 drivers/scsi/qedi/qedi_main.c 		spin_lock_irqsave(&p->p_work_lock, flags);
p                1868 drivers/scsi/qedi/qedi_main.c 		while (!list_empty(&p->work_list)) {
p                1869 drivers/scsi/qedi/qedi_main.c 			list_splice_init(&p->work_list, &work_list);
p                1870 drivers/scsi/qedi/qedi_main.c 			spin_unlock_irqrestore(&p->p_work_lock, flags);
p                1879 drivers/scsi/qedi/qedi_main.c 			spin_lock_irqsave(&p->p_work_lock, flags);
p                1882 drivers/scsi/qedi/qedi_main.c 		spin_unlock_irqrestore(&p->p_work_lock, flags);
p                1892 drivers/scsi/qedi/qedi_main.c 	struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
p                1895 drivers/scsi/qedi/qedi_main.c 	thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
p                1902 drivers/scsi/qedi/qedi_main.c 	p->iothread = thread;
p                1909 drivers/scsi/qedi/qedi_main.c 	struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
p                1913 drivers/scsi/qedi/qedi_main.c 	spin_lock_bh(&p->p_work_lock);
p                1914 drivers/scsi/qedi/qedi_main.c 	thread = p->iothread;
p                1915 drivers/scsi/qedi/qedi_main.c 	p->iothread = NULL;
p                1917 drivers/scsi/qedi/qedi_main.c 	list_for_each_entry_safe(work, tmp, &p->work_list, list) {
p                1924 drivers/scsi/qedi/qedi_main.c 	spin_unlock_bh(&p->p_work_lock);
p                2704 drivers/scsi/qedi/qedi_main.c 	struct qedi_percpu_s *p;
p                2725 drivers/scsi/qedi/qedi_main.c 		p = &per_cpu(qedi_percpu, cpu);
p                2726 drivers/scsi/qedi/qedi_main.c 		INIT_LIST_HEAD(&p->work_list);
p                2727 drivers/scsi/qedi/qedi_main.c 		spin_lock_init(&p->p_work_lock);
p                2728 drivers/scsi/qedi/qedi_main.c 		p->iothread = NULL;
p                2609 drivers/scsi/qla2xxx/qla_attr.c 	struct fc_host_statistics *p = &vha->fc_host_stat;
p                2611 drivers/scsi/qla2xxx/qla_attr.c 	memset(p, -1, sizeof(*p));
p                2646 drivers/scsi/qla2xxx/qla_attr.c 	p->link_failure_count = stats->link_fail_cnt;
p                2647 drivers/scsi/qla2xxx/qla_attr.c 	p->loss_of_sync_count = stats->loss_sync_cnt;
p                2648 drivers/scsi/qla2xxx/qla_attr.c 	p->loss_of_signal_count = stats->loss_sig_cnt;
p                2649 drivers/scsi/qla2xxx/qla_attr.c 	p->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
p                2650 drivers/scsi/qla2xxx/qla_attr.c 	p->invalid_tx_word_count = stats->inval_xmit_word_cnt;
p                2651 drivers/scsi/qla2xxx/qla_attr.c 	p->invalid_crc_count = stats->inval_crc_cnt;
p                2653 drivers/scsi/qla2xxx/qla_attr.c 		p->lip_count = stats->lip_cnt;
p                2654 drivers/scsi/qla2xxx/qla_attr.c 		p->tx_frames = stats->tx_frames;
p                2655 drivers/scsi/qla2xxx/qla_attr.c 		p->rx_frames = stats->rx_frames;
p                2656 drivers/scsi/qla2xxx/qla_attr.c 		p->dumped_frames = stats->discarded_frames;
p                2657 drivers/scsi/qla2xxx/qla_attr.c 		p->nos_count = stats->nos_rcvd;
p                2658 drivers/scsi/qla2xxx/qla_attr.c 		p->error_frames =
p                2660 drivers/scsi/qla2xxx/qla_attr.c 		p->rx_words = vha->qla_stats.input_bytes;
p                2661 drivers/scsi/qla2xxx/qla_attr.c 		p->tx_words = vha->qla_stats.output_bytes;
p                2663 drivers/scsi/qla2xxx/qla_attr.c 	p->fcp_control_requests = vha->qla_stats.control_requests;
p                2664 drivers/scsi/qla2xxx/qla_attr.c 	p->fcp_input_requests = vha->qla_stats.input_requests;
p                2665 drivers/scsi/qla2xxx/qla_attr.c 	p->fcp_output_requests = vha->qla_stats.output_requests;
p                2666 drivers/scsi/qla2xxx/qla_attr.c 	p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
p                2667 drivers/scsi/qla2xxx/qla_attr.c 	p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
p                2668 drivers/scsi/qla2xxx/qla_attr.c 	p->seconds_since_last_reset =
p                2670 drivers/scsi/qla2xxx/qla_attr.c 	do_div(p->seconds_since_last_reset, HZ);
p                2676 drivers/scsi/qla2xxx/qla_attr.c 	return p;
p                3028 drivers/scsi/qla2xxx/qla_def.h 	} p;
p                3035 drivers/scsi/qla2xxx/qla_def.h 	} p;
p                3116 drivers/scsi/qla2xxx/qla_def.h 	} p;
p                 105 drivers/scsi/qla2xxx/qla_gs.c qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
p                 107 drivers/scsi/qla2xxx/qla_gs.c 	memset(p, 0, sizeof(struct ct_sns_pkt));
p                 109 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.header.revision = 0x01;
p                 110 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.header.gs_type = 0xFC;
p                 111 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.header.gs_subtype = 0x02;
p                 112 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.command = cpu_to_be16(cmd);
p                 113 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
p                 115 drivers/scsi/qla2xxx/qla_gs.c 	return &p->p.req;
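The qla_gs.c excerpts build a CT (common transport) request header in big-endian byte order before handing it to firmware. A self-contained sketch of the byte-order handling follows; the gs_type/gs_subtype values and the 16-byte header adjustment come from the excerpt, but the struct layout here is illustrative and is not the driver's real ct_sns_pkt:

	#include <stdint.h>
	#include <string.h>

	/* Store a 16-bit value big-endian, independent of host endianness. */
	static void put_be16(uint8_t *dst, uint16_t v)
	{
		dst[0] = v >> 8;
		dst[1] = v & 0xff;
	}

	struct ct_req_hdr {			/* illustrative layout only */
		uint8_t revision;
		uint8_t gs_type;
		uint8_t gs_subtype;
		uint8_t command_be[2];
		uint8_t max_rsp_size_be[2];
	};

	static void prep_ct_req(struct ct_req_hdr *h, uint16_t cmd, uint16_t rsp_size)
	{
		memset(h, 0, sizeof(*h));
		h->revision   = 0x01;
		h->gs_type    = 0xFC;		/* directory service, per excerpt */
		h->gs_subtype = 0x02;		/* name server, per excerpt */
		put_be16(h->command_be, cmd);
		/* Response size is expressed in 4-byte words, minus the header. */
		put_be16(h->max_rsp_size_be, (uint16_t)((rsp_size - 16) / 4));
	}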
p                 226 drivers/scsi/qla2xxx/qla_gs.c 	ct_rsp = &ha->ct_sns->p.rsp;
p                 317 drivers/scsi/qla2xxx/qla_gs.c 	ct_rsp = &ha->ct_sns->p.rsp;
p                 397 drivers/scsi/qla2xxx/qla_gs.c 		ct_rsp = &ha->ct_sns->p.rsp;
p                 464 drivers/scsi/qla2xxx/qla_gs.c 		ct_rsp = &ha->ct_sns->p.rsp;
p                 981 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
p                 982 drivers/scsi/qla2xxx/qla_gs.c 	put_unaligned_le64(ha->sns_cmd_dma, &sns_cmd->p.cmd.buffer_address);
p                 983 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
p                 984 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
p                 986 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.size = cpu_to_le16(wc);
p                1015 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa;
p                1016 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.param[1] = fcport->d_id.b.area;
p                1017 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
p                1026 drivers/scsi/qla2xxx/qla_gs.c 	} else if (sns_cmd->p.gan_data[8] != 0x80 ||
p                1027 drivers/scsi/qla2xxx/qla_gs.c 	    sns_cmd->p.gan_data[9] != 0x02) {
p                1031 drivers/scsi/qla2xxx/qla_gs.c 		    sns_cmd->p.gan_data, 16);
p                1035 drivers/scsi/qla2xxx/qla_gs.c 		fcport->d_id.b.domain = sns_cmd->p.gan_data[17];
p                1036 drivers/scsi/qla2xxx/qla_gs.c 		fcport->d_id.b.area = sns_cmd->p.gan_data[18];
p                1037 drivers/scsi/qla2xxx/qla_gs.c 		fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19];
p                1039 drivers/scsi/qla2xxx/qla_gs.c 		memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE);
p                1040 drivers/scsi/qla2xxx/qla_gs.c 		memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE);
p                1042 drivers/scsi/qla2xxx/qla_gs.c 		if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE &&
p                1043 drivers/scsi/qla2xxx/qla_gs.c 		    sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
p                1086 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
p                1095 drivers/scsi/qla2xxx/qla_gs.c 	} else if (sns_cmd->p.gid_data[8] != 0x80 ||
p                1096 drivers/scsi/qla2xxx/qla_gs.c 	    sns_cmd->p.gid_data[9] != 0x02) {
p                1100 drivers/scsi/qla2xxx/qla_gs.c 		    sns_cmd->p.gid_data, 16);
p                1105 drivers/scsi/qla2xxx/qla_gs.c 			entry = &sns_cmd->p.gid_data[(i * 4) + 16];
p                1154 drivers/scsi/qla2xxx/qla_gs.c 		sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
p                1155 drivers/scsi/qla2xxx/qla_gs.c 		sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
p                1156 drivers/scsi/qla2xxx/qla_gs.c 		sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
p                1165 drivers/scsi/qla2xxx/qla_gs.c 		} else if (sns_cmd->p.gpn_data[8] != 0x80 ||
p                1166 drivers/scsi/qla2xxx/qla_gs.c 		    sns_cmd->p.gpn_data[9] != 0x02) {
p                1170 drivers/scsi/qla2xxx/qla_gs.c 			    sns_cmd->p.gpn_data, 16);
p                1174 drivers/scsi/qla2xxx/qla_gs.c 			memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16],
p                1210 drivers/scsi/qla2xxx/qla_gs.c 		sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
p                1211 drivers/scsi/qla2xxx/qla_gs.c 		sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
p                1212 drivers/scsi/qla2xxx/qla_gs.c 		sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
p                1221 drivers/scsi/qla2xxx/qla_gs.c 		} else if (sns_cmd->p.gnn_data[8] != 0x80 ||
p                1222 drivers/scsi/qla2xxx/qla_gs.c 		    sns_cmd->p.gnn_data[9] != 0x02) {
p                1226 drivers/scsi/qla2xxx/qla_gs.c 			    sns_cmd->p.gnn_data, 16);
p                1230 drivers/scsi/qla2xxx/qla_gs.c 			memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
p                1270 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
p                1271 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
p                1272 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
p                1274 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.param[5] = 0x01;			/* FCP-3 */
p                1283 drivers/scsi/qla2xxx/qla_gs.c 	} else if (sns_cmd->p.rft_data[8] != 0x80 ||
p                1284 drivers/scsi/qla2xxx/qla_gs.c 	    sns_cmd->p.rft_data[9] != 0x02) {
p                1288 drivers/scsi/qla2xxx/qla_gs.c 		    sns_cmd->p.rft_data, 16);
p                1319 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
p                1320 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
p                1321 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
p                1323 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.param[4] = vha->node_name[7];
p                1324 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.param[5] = vha->node_name[6];
p                1325 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.param[6] = vha->node_name[5];
p                1326 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.param[7] = vha->node_name[4];
p                1327 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.param[8] = vha->node_name[3];
p                1328 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.param[9] = vha->node_name[2];
p                1329 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.param[10] = vha->node_name[1];
p                1330 drivers/scsi/qla2xxx/qla_gs.c 	sns_cmd->p.cmd.param[11] = vha->node_name[0];
p                1339 drivers/scsi/qla2xxx/qla_gs.c 	} else if (sns_cmd->p.rnn_data[8] != 0x80 ||
p                1340 drivers/scsi/qla2xxx/qla_gs.c 	    sns_cmd->p.rnn_data[9] != 0x02) {
p                1344 drivers/scsi/qla2xxx/qla_gs.c 		    sns_cmd->p.rnn_data, 16);
p                1490 drivers/scsi/qla2xxx/qla_gs.c qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
p                1493 drivers/scsi/qla2xxx/qla_gs.c 	memset(p, 0, sizeof(struct ct_sns_pkt));
p                1495 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.header.revision = 0x01;
p                1496 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.header.gs_type = 0xFA;
p                1497 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.header.gs_subtype = 0x10;
p                1498 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.command = cpu_to_be16(cmd);
p                1499 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
p                1501 drivers/scsi/qla2xxx/qla_gs.c 	return &p->p.req;
p                1530 drivers/scsi/qla2xxx/qla_gs.c 	ct_rsp = &ha->ct_sns->p.rsp;
p                1742 drivers/scsi/qla2xxx/qla_gs.c 	ct_rsp = &ha->ct_sns->p.rsp;
p                1953 drivers/scsi/qla2xxx/qla_gs.c 	ct_rsp = &ha->ct_sns->p.rsp;
p                2261 drivers/scsi/qla2xxx/qla_gs.c 	ct_rsp = &ha->ct_sns->p.rsp;
p                2314 drivers/scsi/qla2xxx/qla_gs.c 	ct_rsp = &ha->ct_sns->p.rsp;
p                2711 drivers/scsi/qla2xxx/qla_gs.c 		ct_rsp = &ha->ct_sns->p.rsp;
p                2744 drivers/scsi/qla2xxx/qla_gs.c qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
p                2747 drivers/scsi/qla2xxx/qla_gs.c 	memset(p, 0, sizeof(struct ct_sns_pkt));
p                2749 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.header.revision = 0x01;
p                2750 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.header.gs_type = 0xFA;
p                2751 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.header.gs_subtype = 0x01;
p                2752 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.command = cpu_to_be16(cmd);
p                2753 drivers/scsi/qla2xxx/qla_gs.c 	p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
p                2755 drivers/scsi/qla2xxx/qla_gs.c 	return &p->p.req;
p                2825 drivers/scsi/qla2xxx/qla_gs.c 		ct_rsp = &ha->ct_sns->p.rsp;
p                2915 drivers/scsi/qla2xxx/qla_gs.c 		ct_rsp = &ha->ct_sns->p.rsp;
p                2999 drivers/scsi/qla2xxx/qla_gs.c 	ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
p                3444 drivers/scsi/qla2xxx/qla_gs.c 	ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
p                4239 drivers/scsi/qla2xxx/qla_gs.c 	u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
p                4370 drivers/scsi/qla2xxx/qla_gs.c 	u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
p                3441 drivers/scsi/qla2xxx/qla_init.c 		if (p) {\
p                3447 drivers/scsi/qla2xxx/qla_init.c 		p = 1;\
p                3457 drivers/scsi/qla2xxx/qla_init.c 	u8 str[STR_LEN], *ptr, p;
p                3474 drivers/scsi/qla2xxx/qla_init.c 	p = len = 0;
p                3489 drivers/scsi/qla2xxx/qla_init.c 	p = len = 0;
p                3501 drivers/scsi/qla2xxx/qla_init.c 	p = len = 0;
p                7374 drivers/scsi/qla2xxx/qla_init.c 	uint32_t *p = (void *)image_status;
p                7375 drivers/scsi/qla2xxx/qla_init.c 	uint n = sizeof(*image_status) / sizeof(*p);
p                7378 drivers/scsi/qla2xxx/qla_init.c 	for ( ; n--; p++)
p                7379 drivers/scsi/qla2xxx/qla_init.c 		sum += le32_to_cpup(p);
p                3189 drivers/scsi/qla2xxx/qla_mbx.c 	} p;
p                3225 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
p                3226 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.entry_count = 1;
p                3227 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
p                3228 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
p                3229 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
p                3230 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.control_flags = cpu_to_le32(type);
p                3231 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
p                3232 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
p                3233 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
p                3234 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.vp_index = fcport->vha->vp_idx;
p                3236 drivers/scsi/qla2xxx/qla_mbx.c 		int_to_scsilun(l, &tsk->p.tsk.lun);
p                3237 drivers/scsi/qla2xxx/qla_mbx.c 		host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
p                3238 drivers/scsi/qla2xxx/qla_mbx.c 		    sizeof(tsk->p.tsk.lun));
p                3241 drivers/scsi/qla2xxx/qla_mbx.c 	sts = &tsk->p.sts;
p                4238 drivers/scsi/qla2xxx/qla_mbx.c 	} p;
p                4269 drivers/scsi/qla2xxx/qla_mbx.c 		mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
p                4270 drivers/scsi/qla2xxx/qla_mbx.c 		mn->p.req.entry_count = 1;
p                4271 drivers/scsi/qla2xxx/qla_mbx.c 		mn->p.req.options = cpu_to_le16(options);
p                4290 drivers/scsi/qla2xxx/qla_mbx.c 		status[0] = le16_to_cpu(mn->p.rsp.comp_status);
p                4292 drivers/scsi/qla2xxx/qla_mbx.c 		    le16_to_cpu(mn->p.rsp.failure_code) : 0;
p                4309 drivers/scsi/qla2xxx/qla_mbx.c 			    le32_to_cpu(mn->p.rsp.fw_ver));
p                4314 drivers/scsi/qla2xxx/qla_mbx.c 			    le32_to_cpu(mn->p.rsp.fw_ver);
p                2626 drivers/scsi/qla2xxx/qla_sup.c 	uint32_t *p, check_sum = 0;
p                2629 drivers/scsi/qla2xxx/qla_sup.c 	p = buf + buf_size_without_sfub;
p                2632 drivers/scsi/qla2xxx/qla_sup.c 	memcpy(sfub_buf, (uint8_t *)p,
p                2636 drivers/scsi/qla2xxx/qla_sup.c 		check_sum += p[i];
p                2640 drivers/scsi/qla2xxx/qla_sup.c 	if (check_sum != p[i]) {
p                2643 drivers/scsi/qla2xxx/qla_sup.c 		    check_sum, p[i]);
p                1721 drivers/scsi/qla2xxx/qla_target.c 	uint8_t *p;
p                1764 drivers/scsi/qla2xxx/qla_target.c 	p = (uint8_t *)&f_ctl;
p                1765 drivers/scsi/qla2xxx/qla_target.c 	resp->fcp_hdr_le.f_ctl[0] = *p++;
p                1766 drivers/scsi/qla2xxx/qla_target.c 	resp->fcp_hdr_le.f_ctl[1] = *p++;
p                1767 drivers/scsi/qla2xxx/qla_target.c 	resp->fcp_hdr_le.f_ctl[2] = *p;
p                1810 drivers/scsi/qla2xxx/qla_target.c 	uint8_t *p;
p                1836 drivers/scsi/qla2xxx/qla_target.c 	p = (uint8_t *)&f_ctl;
p                1837 drivers/scsi/qla2xxx/qla_target.c 	resp->fcp_hdr_le.f_ctl[0] = *p++;
p                1838 drivers/scsi/qla2xxx/qla_target.c 	resp->fcp_hdr_le.f_ctl[1] = *p++;
p                1839 drivers/scsi/qla2xxx/qla_target.c 	resp->fcp_hdr_le.f_ctl[2] = *p;
p                 895 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_template_checksum(void *p, ulong size)
p                 897 drivers/scsi/qla2xxx/qla_tmpl.c 	__le32 *buf = p;
p                 939 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p)
p                 941 drivers/scsi/qla2xxx/qla_tmpl.c 	struct qla27xx_fwdt_template *tmp = p;
p                 953 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_fwdt_template_size(void *p)
p                 955 drivers/scsi/qla2xxx/qla_tmpl.c 	struct qla27xx_fwdt_template *tmp = p;
p                 961 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_fwdt_template_valid(void *p)
p                 963 drivers/scsi/qla2xxx/qla_tmpl.c 	struct qla27xx_fwdt_template *tmp = p;
p                1393 drivers/scsi/qla2xxx/tcm_qla2xxx.c 				  struct se_session *se_sess, void *p)
p                1402 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	struct fc_port *qlat_sess = p;
p                1726 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	char *p, tmp[128];
p                1731 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	p = strchr(tmp, '@');
p                1732 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	if (!p) {
p                1736 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	*p++ = '\0';
p                1741 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1,
p                 186 drivers/scsi/qla4xxx/ql4_def.h #define OP_STATE(o, f, p) {			\
p                 187 drivers/scsi/qla4xxx/ql4_def.h 	p = (o & f) ? "enable" : "disable";	\
p                1951 drivers/scsi/scsi_debug.c static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
p                1956 drivers/scsi/scsi_debug.c 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
p                1958 drivers/scsi/scsi_debug.c 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
p                1962 drivers/scsi/scsi_debug.c static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
p                1967 drivers/scsi/scsi_debug.c 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
p                1969 drivers/scsi/scsi_debug.c 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
p                1973 drivers/scsi/scsi_debug.c static int resp_format_pg(unsigned char *p, int pcontrol, int target)
p                1979 drivers/scsi/scsi_debug.c 	memcpy(p, format_pg, sizeof(format_pg));
p                1980 drivers/scsi/scsi_debug.c 	put_unaligned_be16(sdebug_sectors_per, p + 10);
p                1981 drivers/scsi/scsi_debug.c 	put_unaligned_be16(sdebug_sector_size, p + 12);
p                1983 drivers/scsi/scsi_debug.c 		p[20] |= 0x20; /* should agree with INQUIRY */
p                1985 drivers/scsi/scsi_debug.c 		memset(p + 2, 0, sizeof(format_pg) - 2);
p                1993 drivers/scsi/scsi_debug.c static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
p                2002 drivers/scsi/scsi_debug.c 	memcpy(p, caching_pg, sizeof(caching_pg));
p                2004 drivers/scsi/scsi_debug.c 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
p                2006 drivers/scsi/scsi_debug.c 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
p                2013 drivers/scsi/scsi_debug.c static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
p                2028 drivers/scsi/scsi_debug.c 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
p                2030 drivers/scsi/scsi_debug.c 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
p                2032 drivers/scsi/scsi_debug.c 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
p                2037 drivers/scsi/scsi_debug.c static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
p                2044 drivers/scsi/scsi_debug.c 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
p                2046 drivers/scsi/scsi_debug.c 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
p                2048 drivers/scsi/scsi_debug.c 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
p                2052 drivers/scsi/scsi_debug.c static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
p                2057 drivers/scsi/scsi_debug.c 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
p                2059 drivers/scsi/scsi_debug.c 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
p                2064 drivers/scsi/scsi_debug.c static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
p                2089 drivers/scsi/scsi_debug.c 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
p                2090 drivers/scsi/scsi_debug.c 	put_unaligned_be32(port_a, p + 20);
p                2091 drivers/scsi/scsi_debug.c 	put_unaligned_be32(port_b, p + 48 + 20);
p                2093 drivers/scsi/scsi_debug.c 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
p                2097 drivers/scsi/scsi_debug.c static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
p                2103 drivers/scsi/scsi_debug.c 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
p                2105 drivers/scsi/scsi_debug.c 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
p                 847 drivers/scsi/scsi_devinfo.c 	struct proc_dir_entry *p;
p                 870 drivers/scsi/scsi_devinfo.c 	p = proc_create("scsi/device_info", 0, NULL, &scsi_devinfo_proc_fops);
p                 871 drivers/scsi/scsi_devinfo.c 	if (!p) {
p                 141 drivers/scsi/scsi_proc.c 	struct proc_dir_entry *p;
p                 148 drivers/scsi/scsi_proc.c 	p = proc_create_data(name, S_IRUGO | S_IWUSR,
p                 150 drivers/scsi/scsi_proc.c 	if (!p)
p                 314 drivers/scsi/scsi_proc.c 	char *buffer, *p;
p                 339 drivers/scsi/scsi_proc.c 		p = buffer + 23;
p                 341 drivers/scsi/scsi_proc.c 		host = simple_strtoul(p, &p, 0);
p                 342 drivers/scsi/scsi_proc.c 		channel = simple_strtoul(p + 1, &p, 0);
p                 343 drivers/scsi/scsi_proc.c 		id = simple_strtoul(p + 1, &p, 0);
p                 344 drivers/scsi/scsi_proc.c 		lun = simple_strtoul(p + 1, &p, 0);
p                 353 drivers/scsi/scsi_proc.c 		p = buffer + 26;
p                 355 drivers/scsi/scsi_proc.c 		host = simple_strtoul(p, &p, 0);
p                 356 drivers/scsi/scsi_proc.c 		channel = simple_strtoul(p + 1, &p, 0);
p                 357 drivers/scsi/scsi_proc.c 		id = simple_strtoul(p + 1, &p, 0);
p                 358 drivers/scsi/scsi_proc.c 		lun = simple_strtoul(p + 1, &p, 0);
p                  18 drivers/scsi/scsi_trace.c scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
p                  20 drivers/scsi/scsi_trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  32 drivers/scsi/scsi_trace.c 	trace_seq_printf(p, "lba=%u txlen=%u", lba, txlen);
p                  33 drivers/scsi/scsi_trace.c 	trace_seq_putc(p, 0);
p                  39 drivers/scsi/scsi_trace.c scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
p                  41 drivers/scsi/scsi_trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  47 drivers/scsi/scsi_trace.c 	trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen,
p                  51 drivers/scsi/scsi_trace.c 		trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
p                  53 drivers/scsi/scsi_trace.c 	trace_seq_putc(p, 0);
p                  59 drivers/scsi/scsi_trace.c scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
p                  61 drivers/scsi/scsi_trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  67 drivers/scsi/scsi_trace.c 	trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen,
p                  69 drivers/scsi/scsi_trace.c 	trace_seq_putc(p, 0);
p                  75 drivers/scsi/scsi_trace.c scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
p                  77 drivers/scsi/scsi_trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  84 drivers/scsi/scsi_trace.c 	trace_seq_printf(p, "lba=%llu txlen=%u protect=%u", lba, txlen,
p                  88 drivers/scsi/scsi_trace.c 		trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
p                  90 drivers/scsi/scsi_trace.c 	trace_seq_putc(p, 0);
p                  96 drivers/scsi/scsi_trace.c scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
p                  98 drivers/scsi/scsi_trace.c 	const char *ret = trace_seq_buffer_ptr(p), *cmd;
p                 116 drivers/scsi/scsi_trace.c 		trace_seq_puts(p, "UNKNOWN");
p                 124 drivers/scsi/scsi_trace.c 	trace_seq_printf(p, "%s_32 lba=%llu txlen=%u protect=%u ei_lbrt=%u",
p                 128 drivers/scsi/scsi_trace.c 		trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1);
p                 131 drivers/scsi/scsi_trace.c 	trace_seq_putc(p, 0);
p                 137 drivers/scsi/scsi_trace.c scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
p                 139 drivers/scsi/scsi_trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 142 drivers/scsi/scsi_trace.c 	trace_seq_printf(p, "regions=%u", (regions - 8) / 16);
p                 143 drivers/scsi/scsi_trace.c 	trace_seq_putc(p, 0);
p                 149 drivers/scsi/scsi_trace.c scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
p                 151 drivers/scsi/scsi_trace.c 	const char *ret = trace_seq_buffer_ptr(p), *cmd;
p                 163 drivers/scsi/scsi_trace.c 		trace_seq_puts(p, "UNKNOWN");
p                 170 drivers/scsi/scsi_trace.c 	trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd, lba, alloc_len);
p                 173 drivers/scsi/scsi_trace.c 	trace_seq_putc(p, 0);
p                 179 drivers/scsi/scsi_trace.c scsi_trace_maintenance_in(struct trace_seq *p, unsigned char *cdb, int len)
p                 181 drivers/scsi/scsi_trace.c 	const char *ret = trace_seq_buffer_ptr(p), *cmd;
p                 210 drivers/scsi/scsi_trace.c 		trace_seq_puts(p, "UNKNOWN");
p                 216 drivers/scsi/scsi_trace.c 	trace_seq_printf(p, "%s alloc_len=%u", cmd, alloc_len);
p                 219 drivers/scsi/scsi_trace.c 	trace_seq_putc(p, 0);
p                 225 drivers/scsi/scsi_trace.c scsi_trace_maintenance_out(struct trace_seq *p, unsigned char *cdb, int len)
p                 227 drivers/scsi/scsi_trace.c 	const char *ret = trace_seq_buffer_ptr(p), *cmd;
p                 250 drivers/scsi/scsi_trace.c 		trace_seq_puts(p, "UNKNOWN");
p                 256 drivers/scsi/scsi_trace.c 	trace_seq_printf(p, "%s alloc_len=%u", cmd, alloc_len);
p                 259 drivers/scsi/scsi_trace.c 	trace_seq_putc(p, 0);
p                 265 drivers/scsi/scsi_trace.c scsi_trace_zbc_in(struct trace_seq *p, unsigned char *cdb, int len)
p                 267 drivers/scsi/scsi_trace.c 	const char *ret = trace_seq_buffer_ptr(p), *cmd;
p                 277 drivers/scsi/scsi_trace.c 		trace_seq_puts(p, "UNKNOWN");
p                 285 drivers/scsi/scsi_trace.c 	trace_seq_printf(p, "%s zone=%llu alloc_len=%u options=%u partial=%u",
p                 290 drivers/scsi/scsi_trace.c 	trace_seq_putc(p, 0);
p                 296 drivers/scsi/scsi_trace.c scsi_trace_zbc_out(struct trace_seq *p, unsigned char *cdb, int len)
p                 298 drivers/scsi/scsi_trace.c 	const char *ret = trace_seq_buffer_ptr(p), *cmd;
p                 315 drivers/scsi/scsi_trace.c 		trace_seq_puts(p, "UNKNOWN");
p                 321 drivers/scsi/scsi_trace.c 	trace_seq_printf(p, "%s zone=%llu all=%u", cmd,
p                 325 drivers/scsi/scsi_trace.c 	trace_seq_putc(p, 0);
p                 331 drivers/scsi/scsi_trace.c scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len)
p                 338 drivers/scsi/scsi_trace.c 		return scsi_trace_rw32(p, cdb, len);
p                 340 drivers/scsi/scsi_trace.c 		return scsi_trace_misc(p, cdb, len);
p                 345 drivers/scsi/scsi_trace.c scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len)
p                 347 drivers/scsi/scsi_trace.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 349 drivers/scsi/scsi_trace.c 	trace_seq_putc(p, '-');
p                 350 drivers/scsi/scsi_trace.c 	trace_seq_putc(p, 0);
p                 356 drivers/scsi/scsi_trace.c scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len)
p                 361 drivers/scsi/scsi_trace.c 		return scsi_trace_rw6(p, cdb, len);
p                 366 drivers/scsi/scsi_trace.c 		return scsi_trace_rw10(p, cdb, len);
p                 370 drivers/scsi/scsi_trace.c 		return scsi_trace_rw12(p, cdb, len);
p                 375 drivers/scsi/scsi_trace.c 		return scsi_trace_rw16(p, cdb, len);
p                 377 drivers/scsi/scsi_trace.c 		return scsi_trace_unmap(p, cdb, len);
p                 379 drivers/scsi/scsi_trace.c 		return scsi_trace_service_action_in(p, cdb, len);
p                 381 drivers/scsi/scsi_trace.c 		return scsi_trace_varlen(p, cdb, len);
p                 383 drivers/scsi/scsi_trace.c 		return scsi_trace_maintenance_in(p, cdb, len);
p                 385 drivers/scsi/scsi_trace.c 		return scsi_trace_maintenance_out(p, cdb, len);
p                 387 drivers/scsi/scsi_trace.c 		return scsi_trace_zbc_in(p, cdb, len);
p                 389 drivers/scsi/scsi_trace.c 		return scsi_trace_zbc_out(p, cdb, len);
p                 391 drivers/scsi/scsi_trace.c 		return scsi_trace_misc(p, cdb, len);
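The scsi_trace.c excerpts dispatch on the CDB opcode and print the LBA and transfer length extracted from big-endian CDB fields. A small standalone sketch that decodes a READ(10)/WRITE(10) CDB the same way follows, in plain C and without the kernel's trace_seq machinery:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t get_be32(const unsigned char *b)
	{
		return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
		       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
	}

	static uint16_t get_be16(const unsigned char *b)
	{
		return (uint16_t)((b[0] << 8) | b[1]);
	}

	/* Decode a 10-byte READ/WRITE CDB: bytes 2..5 carry the LBA,
	 * bytes 7..8 the transfer length, byte 1 bits 5..7 the protect field. */
	static void print_rw10(const unsigned char *cdb)
	{
		uint32_t lba     = get_be32(cdb + 2);
		uint16_t txlen   = get_be16(cdb + 7);
		unsigned protect = cdb[1] >> 5;

		printf("lba=%u txlen=%u protect=%u\n", lba, txlen, protect);
	}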
p                  70 drivers/scsi/scsicam.c 	unsigned char *p;
p                  74 drivers/scsi/scsicam.c 	p = scsi_bios_ptable(bdev);
p                  75 drivers/scsi/scsicam.c 	if (!p)
p                  79 drivers/scsi/scsicam.c 	ret = scsi_partsize(p, (unsigned long)capacity, (unsigned int *)ip + 2,
p                  81 drivers/scsi/scsicam.c 	kfree(p);
p                 129 drivers/scsi/scsicam.c 	struct partition *p = (struct partition *)buf, *largest = NULL;
p                 136 drivers/scsi/scsicam.c 		for (largest_cyl = -1, i = 0; i < 4; ++i, ++p) {
p                 137 drivers/scsi/scsicam.c 			if (!p->sys_ind)
p                 143 drivers/scsi/scsicam.c 			cyl = p->cyl + ((p->sector & 0xc0) << 2);
p                 146 drivers/scsi/scsicam.c 				largest = p;
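The scsicam.c excerpt picks the partition with the largest ending cylinder, reassembling the 10-bit CHS cylinder value whose top two bits live in the sector byte. A tiny sketch of that decoding follows; it states only the standard MBR CHS packing and nothing driver-specific:

	#include <stdint.h>

	/* MBR CHS "end" fields: the cylinder is 10 bits wide; its top two bits
	 * are stored in bits 6..7 of the sector byte. */
	static unsigned int chs_end_cylinder(uint8_t end_cyl, uint8_t end_sector)
	{
		return end_cyl + ((end_sector & 0xc0) << 2);
	}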
p                1468 drivers/scsi/sd.c 	void __user *p = (void __user *)arg;
p                1490 drivers/scsi/sd.c 		return sed_ioctl(sdkp->opal_dev, cmd, p);
p                1500 drivers/scsi/sd.c 			error = scsi_ioctl(sdp, cmd, p);
p                1503 drivers/scsi/sd.c 			error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
p                1506 drivers/scsi/sd.c 			error = scsi_ioctl(sdp, cmd, p);
p                1700 drivers/scsi/sd.c 	void __user *p = compat_ptr(arg);
p                1713 drivers/scsi/sd.c 		return sed_ioctl(sdkp->opal_dev, cmd, p);
p                1720 drivers/scsi/sd.c 	return sdev->host->hostt->compat_ioctl(sdev, cmd, p);
p                3249 drivers/scsi/sd.c 	char *p;
p                3252 drivers/scsi/sd.c 	p = end - 1;
p                3253 drivers/scsi/sd.c 	*p = '\0';
p                3256 drivers/scsi/sd.c 		if (p == begin)
p                3258 drivers/scsi/sd.c 		*--p = 'a' + (index % unit);
p                3262 drivers/scsi/sd.c 	memmove(begin, p, end - p);
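The sd.c excerpts above build the disk-name suffix right to left in base 26 and then memmove it to the front of the buffer, which is how names like sda, sdz, sdaa are produced. A self-contained sketch of that naming scheme follows; the buffer handling mirrors the excerpt, while the function name and error convention are illustrative:

	#include <string.h>

	/* Build names like sda, sdb, ..., sdz, sdaa, ... for a 0-based index. */
	static int format_disk_name(const char *prefix, int index, char *buf, int len)
	{
		const int unit = 26;
		char *begin = buf + strlen(prefix);
		char *end = buf + len;
		char *p = end - 1;

		*p = '\0';
		while (index >= 0) {
			if (p == begin)
				return -1;		/* buffer too small */
			*--p = 'a' + (index % unit);
			index = (index / unit) - 1;
		}
		memmove(begin, p, end - p);
		memcpy(buf, prefix, strlen(prefix));
		return 0;
	}

	/* e.g. format_disk_name("sd", 0, buf, 32) yields "sda"; index 26 yields "sdaa". */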
p                 903 drivers/scsi/sg.c 	void __user *p = (void __user *)arg;
p                 904 drivers/scsi/sg.c 	int __user *ip = p;
p                 924 drivers/scsi/sg.c 		if (!access_ok(p, SZ_SG_IO_HDR))
p                 926 drivers/scsi/sg.c 		result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
p                 938 drivers/scsi/sg.c 			result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
p                 970 drivers/scsi/sg.c 		if (!access_ok(p, sizeof (sg_scsi_id_t)))
p                 973 drivers/scsi/sg.c 			sg_scsi_id_t __user *sg_idp = p;
p                1080 drivers/scsi/sg.c 		if (!access_ok(p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
p                1092 drivers/scsi/sg.c 			result = __copy_to_user(p, rinfo,
p                1105 drivers/scsi/sg.c 		return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
p                1119 drivers/scsi/sg.c 				       NULL, p);
p                1144 drivers/scsi/sg.c 	return scsi_ioctl(sdp->device, cmd_in, p);
p                2246 drivers/scsi/sg.c sg_idr_max_id(int id, void *p, void *data)
p                2356 drivers/scsi/sg.c 	struct proc_dir_entry *p;
p                2358 drivers/scsi/sg.c 	p = proc_mkdir("scsi/sg", NULL);
p                2359 drivers/scsi/sg.c 	if (!p)
p                2362 drivers/scsi/sg.c 	proc_create("allow_dio", S_IRUGO | S_IWUSR, p, &adio_fops);
p                2363 drivers/scsi/sg.c 	proc_create_seq("debug", S_IRUGO, p, &debug_seq_ops);
p                2364 drivers/scsi/sg.c 	proc_create("def_reserved_size", S_IRUGO | S_IWUSR, p, &dressz_fops);
p                2365 drivers/scsi/sg.c 	proc_create_single("device_hdr", S_IRUGO, p, sg_proc_seq_show_devhdr);
p                2366 drivers/scsi/sg.c 	proc_create_seq("devices", S_IRUGO, p, &dev_seq_ops);
p                2367 drivers/scsi/sg.c 	proc_create_seq("device_strs", S_IRUGO, p, &devstrs_seq_ops);
p                2368 drivers/scsi/sg.c 	proc_create_single("version", S_IRUGO, p, sg_proc_seq_show_version);
p                  64 drivers/scsi/snic/snic_ctl.c 	const char *p = s;
p                  70 drivers/scsi/snic/snic_ctl.c 	while ((c = *p++)) {
p                 360 drivers/scsi/snic/vnic_dev.c 	void __iomem *p;
p                 365 drivers/scsi/snic/vnic_dev.c 	p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
p                 366 drivers/scsi/snic/vnic_dev.c 	if (!p)
p                 754 drivers/scsi/snic/vnic_dev.c 	void __iomem *p;
p                 756 drivers/scsi/snic/vnic_dev.c 	p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
p                 757 drivers/scsi/snic/vnic_dev.c 	if (p)
p                3168 drivers/scsi/st.c 	int result, p;
p                3189 drivers/scsi/st.c 		if (get_location(STp, &blk, &p, 1))
p                3239 drivers/scsi/st.c 		    (p = find_partition(STp)) >= 0)
p                3240 drivers/scsi/st.c 			STp->partition = p;
p                3511 drivers/scsi/st.c 	void __user *p = (void __user *)arg;
p                3548 drivers/scsi/st.c 		i = copy_from_user(&mtc, p, sizeof(struct mtop));
p                3803 drivers/scsi/st.c 		i = copy_to_user(p, &mt_status, sizeof(struct mtget));
p                3824 drivers/scsi/st.c 		i = copy_to_user(p, &mt_pos, sizeof(struct mtpos));
p                3842 drivers/scsi/st.c 						   file->f_mode, cmd_in, p);
p                3847 drivers/scsi/st.c 	retval = scsi_ioctl(STp->device, cmd_in, p);
p                 492 drivers/scsi/stex.c 	struct st_frame *p;
p                 495 drivers/scsi/stex.c 	p = hba->copy_buffer;
p                 496 drivers/scsi/stex.c 	scsi_sg_copy_to_buffer(ccb->cmd, p, count);
p                 497 drivers/scsi/stex.c 	memset(p->base, 0, sizeof(u32)*6);
p                 498 drivers/scsi/stex.c 	*(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
p                 499 drivers/scsi/stex.c 	p->rom_addr = 0;
p                 501 drivers/scsi/stex.c 	p->drv_ver.major = ST_VER_MAJOR;
p                 502 drivers/scsi/stex.c 	p->drv_ver.minor = ST_VER_MINOR;
p                 503 drivers/scsi/stex.c 	p->drv_ver.oem = ST_OEM;
p                 504 drivers/scsi/stex.c 	p->drv_ver.build = ST_BUILD_VER;
p                 506 drivers/scsi/stex.c 	p->bus = hba->pdev->bus->number;
p                 507 drivers/scsi/stex.c 	p->slot = hba->pdev->devfn;
p                 508 drivers/scsi/stex.c 	p->irq_level = 0;
p                 509 drivers/scsi/stex.c 	p->irq_vec = hba->pdev->irq;
p                 510 drivers/scsi/stex.c 	p->id = hba->pdev->vendor << 16 | hba->pdev->device;
p                 511 drivers/scsi/stex.c 	p->subid =
p                 514 drivers/scsi/stex.c 	scsi_sg_copy_from_buffer(ccb->cmd, p, count);
p                 371 drivers/scsi/sym53c8xx_2/sym_defs.h #define REGJ(p,r) (offsetof(struct sym_reg, p ## r))
p                  83 drivers/scsi/sym53c8xx_2/sym_glue.c 	char *p = excl_string;
p                  86 drivers/scsi/sym53c8xx_2/sym_glue.c 	while (p && (xi < 8)) {
p                  88 drivers/scsi/sym53c8xx_2/sym_glue.c 		int val = (int) simple_strtoul(p, &next_p, 0);
p                  90 drivers/scsi/sym53c8xx_2/sym_glue.c 		p = next_p;
p                 207 drivers/scsi/sym53c8xx_2/sym_glue.c 				u_char *p;
p                 208 drivers/scsi/sym53c8xx_2/sym_glue.c 				p  = (u_char *) cmd->sense_data;
p                 209 drivers/scsi/sym53c8xx_2/sym_glue.c 				if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29)
p                  53 drivers/scsi/sym53c8xx_2/sym_hipd.c static void sym_printl_hex(u_char *p, int n)
p                  56 drivers/scsi/sym53c8xx_2/sym_hipd.c 		printf (" %x", *p++);
p                1114 drivers/scsi/sym53c8xx_2/sym_hipd.h #define sym_free_mem_cluster(p)	\
p                1115 drivers/scsi/sym53c8xx_2/sym_hipd.h 	free_pages((unsigned long)p, SYM_MEM_PAGE_ORDER)
p                1157 drivers/scsi/sym53c8xx_2/sym_hipd.h #define M_FREE_MEM_CLUSTER(p)		mp->free_mem_cluster(mp, p)
p                1177 drivers/scsi/sym53c8xx_2/sym_hipd.h #define _uvptv_(p) ((void *)((u_long)(p)))
p                1180 drivers/scsi/sym53c8xx_2/sym_hipd.h #define _sym_mfree_dma(np, p, l, n)	\
p                1181 drivers/scsi/sym53c8xx_2/sym_hipd.h 			__sym_mfree_dma(np->bus_dmat, _uvptv_(p), l, n)
p                1183 drivers/scsi/sym53c8xx_2/sym_hipd.h #define sym_mfree_dma(p, l, n)		_sym_mfree_dma(np, p, l, n)
p                1184 drivers/scsi/sym53c8xx_2/sym_hipd.h #define vtobus(p)			__vtobus(np->bus_dmat, _uvptv_(p))
p                 147 drivers/scsi/sym53c8xx_2/sym_malloc.c 	void *p;
p                 149 drivers/scsi/sym53c8xx_2/sym_malloc.c 	p = ___sym_malloc(mp, size);
p                 152 drivers/scsi/sym53c8xx_2/sym_malloc.c 		printf ("new %-10s[%4d] @%p.\n", name, size, p);
p                 155 drivers/scsi/sym53c8xx_2/sym_malloc.c 	if (p)
p                 156 drivers/scsi/sym53c8xx_2/sym_malloc.c 		memset(p, 0, size);
p                 159 drivers/scsi/sym53c8xx_2/sym_malloc.c 	return p;
p                 280 drivers/scsi/sym53c8xx_2/sym_malloc.c static void ___del_dma_pool(m_pool_p p)
p                 284 drivers/scsi/sym53c8xx_2/sym_malloc.c 	while (*pp && *pp != p)
p                 288 drivers/scsi/sym53c8xx_2/sym_malloc.c 		__sym_mfree(&mp0, p, sizeof(*p), "MPOOL");
p                 152 drivers/scsi/sym53c8xx_2/sym_misc.h #define sym_qptr(p)	((struct sym_quehead *) (p))
p                 157 drivers/scsi/sym53c8xx_2/sym_misc.h #define sym_set_bit(p, n)	(((u32 *)(p))[(n)>>5] |=  (1<<((n)&0x1f)))
p                 158 drivers/scsi/sym53c8xx_2/sym_misc.h #define sym_clr_bit(p, n)	(((u32 *)(p))[(n)>>5] &= ~(1<<((n)&0x1f)))
p                 159 drivers/scsi/sym53c8xx_2/sym_misc.h #define sym_is_bit(p, n)	(((u32 *)(p))[(n)>>5] &   (1<<((n)&0x1f)))
p                 605 drivers/scsi/ufs/ufs-qcom.c static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
p                 607 drivers/scsi/ufs/ufs-qcom.c 	int gear = max_t(u32, p->gear_rx, p->gear_tx);
p                 608 drivers/scsi/ufs/ufs-qcom.c 	int lanes = max_t(u32, p->lane_rx, p->lane_tx);
p                 618 drivers/scsi/ufs/ufs-qcom.c 	if (!p->pwr_rx && !p->pwr_tx) {
p                 621 drivers/scsi/ufs/ufs-qcom.c 	} else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
p                 622 drivers/scsi/ufs/ufs-qcom.c 		 p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
p                 625 drivers/scsi/ufs/ufs-qcom.c 			 p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
p                 391 drivers/scsi/ufs/ufshcd.c 		int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;
p                 393 drivers/scsi/ufs/ufshcd.c 		if (err_hist->reg[p] == 0)
p                 395 drivers/scsi/ufs/ufshcd.c 		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
p                 396 drivers/scsi/ufs/ufshcd.c 			err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
p                 333 drivers/sh/maple/maple.c 	char *p, *recvbuf;
p                 352 drivers/sh/maple/maple.c 	for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
p                 353 drivers/sh/maple/maple.c 		if (*p == ' ')
p                 354 drivers/sh/maple/maple.c 			*p = '\0';
p                 357 drivers/sh/maple/maple.c 	for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
p                 358 drivers/sh/maple/maple.c 		if (*p == ' ')
p                 359 drivers/sh/maple/maple.c 			*p = '\0';
p                 399 drivers/slimbus/qcom-ctrl.c 	} __packed p;
p                 405 drivers/slimbus/qcom-ctrl.c 	p.manf_id = cpu_to_be16(ead->manf_id);
p                 406 drivers/slimbus/qcom-ctrl.c 	p.prod_code = cpu_to_be16(ead->prod_code);
p                 407 drivers/slimbus/qcom-ctrl.c 	p.dev_index = ead->dev_index;
p                 408 drivers/slimbus/qcom-ctrl.c 	p.instance = ead->instance;
p                 409 drivers/slimbus/qcom-ctrl.c 	p.laddr = laddr;
p                 411 drivers/slimbus/qcom-ctrl.c 	msg.wbuf = (void *)&p;
p                  68 drivers/soc/aspeed/aspeed-lpc-ctrl.c 	void __user *p = (void __user *)param;
p                  74 drivers/soc/aspeed/aspeed-lpc-ctrl.c 	if (copy_from_user(&map, p, sizeof(map)))
p                  98 drivers/soc/aspeed/aspeed-lpc-ctrl.c 		return copy_to_user(p, &map, sizeof(map)) ? -EFAULT : 0;
p                  63 drivers/soc/fsl/dpio/qbman-portal.c #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
p                  95 drivers/soc/fsl/dpio/qbman-portal.c static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
p                  97 drivers/soc/fsl/dpio/qbman-portal.c 	return readl_relaxed(p->addr_cinh + offset);
p                 100 drivers/soc/fsl/dpio/qbman-portal.c static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
p                 103 drivers/soc/fsl/dpio/qbman-portal.c 	writel_relaxed(value, p->addr_cinh + offset);
p                 106 drivers/soc/fsl/dpio/qbman-portal.c static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
p                 108 drivers/soc/fsl/dpio/qbman-portal.c 	return p->addr_cena + offset;
p                 159 drivers/soc/fsl/dpio/qbman-portal.c 	struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
p                 162 drivers/soc/fsl/dpio/qbman-portal.c 	if (!p)
p                 164 drivers/soc/fsl/dpio/qbman-portal.c 	p->desc = d;
p                 165 drivers/soc/fsl/dpio/qbman-portal.c 	p->mc.valid_bit = QB_VALID_BIT;
p                 166 drivers/soc/fsl/dpio/qbman-portal.c 	p->sdq = 0;
p                 167 drivers/soc/fsl/dpio/qbman-portal.c 	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
p                 168 drivers/soc/fsl/dpio/qbman-portal.c 	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
p                 169 drivers/soc/fsl/dpio/qbman-portal.c 	p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
p                 170 drivers/soc/fsl/dpio/qbman-portal.c 	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
p                 171 drivers/soc/fsl/dpio/qbman-portal.c 		p->mr.valid_bit = QB_VALID_BIT;
p                 173 drivers/soc/fsl/dpio/qbman-portal.c 	atomic_set(&p->vdq.available, 1);
p                 174 drivers/soc/fsl/dpio/qbman-portal.c 	p->vdq.valid_bit = QB_VALID_BIT;
p                 175 drivers/soc/fsl/dpio/qbman-portal.c 	p->dqrr.next_idx = 0;
p                 176 drivers/soc/fsl/dpio/qbman-portal.c 	p->dqrr.valid_bit = QB_VALID_BIT;
p                 178 drivers/soc/fsl/dpio/qbman-portal.c 	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
p                 179 drivers/soc/fsl/dpio/qbman-portal.c 		p->dqrr.dqrr_size = 4;
p                 180 drivers/soc/fsl/dpio/qbman-portal.c 		p->dqrr.reset_bug = 1;
p                 182 drivers/soc/fsl/dpio/qbman-portal.c 		p->dqrr.dqrr_size = 8;
p                 183 drivers/soc/fsl/dpio/qbman-portal.c 		p->dqrr.reset_bug = 0;
p                 186 drivers/soc/fsl/dpio/qbman-portal.c 	p->addr_cena = d->cena_bar;
p                 187 drivers/soc/fsl/dpio/qbman-portal.c 	p->addr_cinh = d->cinh_bar;
p                 189 drivers/soc/fsl/dpio/qbman-portal.c 	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
p                 190 drivers/soc/fsl/dpio/qbman-portal.c 		memset(p->addr_cena, 0, 64 * 1024);
p                 192 drivers/soc/fsl/dpio/qbman-portal.c 	reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
p                 204 drivers/soc/fsl/dpio/qbman-portal.c 	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
p                 209 drivers/soc/fsl/dpio/qbman-portal.c 	qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
p                 210 drivers/soc/fsl/dpio/qbman-portal.c 	reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
p                 213 drivers/soc/fsl/dpio/qbman-portal.c 		kfree(p);
p                 217 drivers/soc/fsl/dpio/qbman-portal.c 	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
p                 218 drivers/soc/fsl/dpio/qbman-portal.c 		qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
p                 219 drivers/soc/fsl/dpio/qbman-portal.c 		qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
p                 227 drivers/soc/fsl/dpio/qbman-portal.c 	qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
p                 228 drivers/soc/fsl/dpio/qbman-portal.c 	return p;
p                 236 drivers/soc/fsl/dpio/qbman-portal.c void qbman_swp_finish(struct qbman_swp *p)
p                 238 drivers/soc/fsl/dpio/qbman-portal.c 	kfree(p);
p                 247 drivers/soc/fsl/dpio/qbman-portal.c u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
p                 249 drivers/soc/fsl/dpio/qbman-portal.c 	return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
p                 257 drivers/soc/fsl/dpio/qbman-portal.c void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
p                 259 drivers/soc/fsl/dpio/qbman-portal.c 	qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
p                 268 drivers/soc/fsl/dpio/qbman-portal.c u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
p                 270 drivers/soc/fsl/dpio/qbman-portal.c 	return qbman_read_register(p, QBMAN_CINH_SWP_IER);
p                 278 drivers/soc/fsl/dpio/qbman-portal.c void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
p                 280 drivers/soc/fsl/dpio/qbman-portal.c 	qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
p                 289 drivers/soc/fsl/dpio/qbman-portal.c int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
p                 291 drivers/soc/fsl/dpio/qbman-portal.c 	return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
p                 299 drivers/soc/fsl/dpio/qbman-portal.c void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
p                 301 drivers/soc/fsl/dpio/qbman-portal.c 	qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
p                 313 drivers/soc/fsl/dpio/qbman-portal.c void *qbman_swp_mc_start(struct qbman_swp *p)
p                 315 drivers/soc/fsl/dpio/qbman-portal.c 	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
p                 316 drivers/soc/fsl/dpio/qbman-portal.c 		return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
p                 318 drivers/soc/fsl/dpio/qbman-portal.c 		return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
p                 325 drivers/soc/fsl/dpio/qbman-portal.c void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
p                 329 drivers/soc/fsl/dpio/qbman-portal.c 	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
p                 331 drivers/soc/fsl/dpio/qbman-portal.c 		*v = cmd_verb | p->mc.valid_bit;
p                 333 drivers/soc/fsl/dpio/qbman-portal.c 		*v = cmd_verb | p->mc.valid_bit;
p                 335 drivers/soc/fsl/dpio/qbman-portal.c 		qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
p                 343 drivers/soc/fsl/dpio/qbman-portal.c void *qbman_swp_mc_result(struct qbman_swp *p)
p                 347 drivers/soc/fsl/dpio/qbman-portal.c 	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
p                 348 drivers/soc/fsl/dpio/qbman-portal.c 		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
p                 355 drivers/soc/fsl/dpio/qbman-portal.c 		p->mc.valid_bit ^= QB_VALID_BIT;
p                 357 drivers/soc/fsl/dpio/qbman-portal.c 		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
p                 359 drivers/soc/fsl/dpio/qbman-portal.c 		if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
p                 365 drivers/soc/fsl/dpio/qbman-portal.c 		p->mr.valid_bit ^= QB_VALID_BIT;
p                 444 drivers/soc/fsl/dpio/qbman-portal.c static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
p                 448 drivers/soc/fsl/dpio/qbman-portal.c 		qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
p                 451 drivers/soc/fsl/dpio/qbman-portal.c 		qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT2 +
p                 470 drivers/soc/fsl/dpio/qbman-portal.c 	struct qbman_eq_desc *p;
p                 476 drivers/soc/fsl/dpio/qbman-portal.c 	p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
p                 477 drivers/soc/fsl/dpio/qbman-portal.c 	memcpy(&p->dca, &d->dca, 31);
p                 478 drivers/soc/fsl/dpio/qbman-portal.c 	memcpy(&p->fd, fd, sizeof(*fd));
p                 483 drivers/soc/fsl/dpio/qbman-portal.c 		p->verb = d->verb | EQAR_VB(eqar);
p                 485 drivers/soc/fsl/dpio/qbman-portal.c 		p->verb = d->verb | EQAR_VB(eqar);
p                 658 drivers/soc/fsl/dpio/qbman-portal.c 	struct qbman_pull_desc *p;
p                 666 drivers/soc/fsl/dpio/qbman-portal.c 		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
p                 668 drivers/soc/fsl/dpio/qbman-portal.c 		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
p                 669 drivers/soc/fsl/dpio/qbman-portal.c 	p->numf = d->numf;
p                 670 drivers/soc/fsl/dpio/qbman-portal.c 	p->tok = QMAN_DQ_TOKEN_VALID;
p                 671 drivers/soc/fsl/dpio/qbman-portal.c 	p->dq_src = d->dq_src;
p                 672 drivers/soc/fsl/dpio/qbman-portal.c 	p->rsp_addr = d->rsp_addr;
p                 673 drivers/soc/fsl/dpio/qbman-portal.c 	p->rsp_addr_virt = d->rsp_addr_virt;
p                 678 drivers/soc/fsl/dpio/qbman-portal.c 		p->verb = d->verb | s->vdq.valid_bit;
p                 681 drivers/soc/fsl/dpio/qbman-portal.c 		p->verb = d->verb | s->vdq.valid_bit;
p                 705 drivers/soc/fsl/dpio/qbman-portal.c 	struct dpaa2_dq *p;
p                 744 drivers/soc/fsl/dpio/qbman-portal.c 		p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
p                 746 drivers/soc/fsl/dpio/qbman-portal.c 		p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
p                 747 drivers/soc/fsl/dpio/qbman-portal.c 	verb = p->dq.verb;
p                 775 drivers/soc/fsl/dpio/qbman-portal.c 	flags = p->dq.stat;
p                 784 drivers/soc/fsl/dpio/qbman-portal.c 	return p;
p                 887 drivers/soc/fsl/dpio/qbman-portal.c 	struct qbman_release_desc *p;
p                 899 drivers/soc/fsl/dpio/qbman-portal.c 		p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
p                 901 drivers/soc/fsl/dpio/qbman-portal.c 		p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
p                 904 drivers/soc/fsl/dpio/qbman-portal.c 		p->buf[i] = cpu_to_le64(buffers[i]);
p                 905 drivers/soc/fsl/dpio/qbman-portal.c 	p->bpid = d->bpid;
p                 913 drivers/soc/fsl/dpio/qbman-portal.c 		p->verb = d->verb | RAR_VB(rar) | num_buffers;
p                 915 drivers/soc/fsl/dpio/qbman-portal.c 		p->verb = d->verb | RAR_VB(rar) | num_buffers;
p                 954 drivers/soc/fsl/dpio/qbman-portal.c 	struct qbman_acquire_desc *p;
p                 962 drivers/soc/fsl/dpio/qbman-portal.c 	p = qbman_swp_mc_start(s);
p                 964 drivers/soc/fsl/dpio/qbman-portal.c 	if (!p)
p                 968 drivers/soc/fsl/dpio/qbman-portal.c 	p->bpid = cpu_to_le16(bpid);
p                 969 drivers/soc/fsl/dpio/qbman-portal.c 	p->num = num_buffers;
p                 972 drivers/soc/fsl/dpio/qbman-portal.c 	r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
p                1016 drivers/soc/fsl/dpio/qbman-portal.c 	struct qbman_alt_fq_state_desc *p;
p                1020 drivers/soc/fsl/dpio/qbman-portal.c 	p = qbman_swp_mc_start(s);
p                1021 drivers/soc/fsl/dpio/qbman-portal.c 	if (!p)
p                1024 drivers/soc/fsl/dpio/qbman-portal.c 	p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);
p                1027 drivers/soc/fsl/dpio/qbman-portal.c 	r = qbman_swp_mc_complete(s, p, alt_fq_verb);
p                1070 drivers/soc/fsl/dpio/qbman-portal.c 	struct qbman_cdan_ctrl_desc *p = NULL;
p                1074 drivers/soc/fsl/dpio/qbman-portal.c 	p = qbman_swp_mc_start(s);
p                1075 drivers/soc/fsl/dpio/qbman-portal.c 	if (!p)
p                1079 drivers/soc/fsl/dpio/qbman-portal.c 	p->ch = cpu_to_le16(channelid);
p                1080 drivers/soc/fsl/dpio/qbman-portal.c 	p->we = we_mask;
p                1082 drivers/soc/fsl/dpio/qbman-portal.c 		p->ctrl = 1;
p                1084 drivers/soc/fsl/dpio/qbman-portal.c 		p->ctrl = 0;
p                1085 drivers/soc/fsl/dpio/qbman-portal.c 	p->cdan_ctx = cpu_to_le64(ctx);
p                1088 drivers/soc/fsl/dpio/qbman-portal.c 	r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
p                1120 drivers/soc/fsl/dpio/qbman-portal.c 	struct qbman_fq_query_desc *p;
p                1123 drivers/soc/fsl/dpio/qbman-portal.c 	p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
p                1124 drivers/soc/fsl/dpio/qbman-portal.c 	if (!p)
p                1128 drivers/soc/fsl/dpio/qbman-portal.c 	p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
p                1129 drivers/soc/fsl/dpio/qbman-portal.c 	resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
p                1142 drivers/soc/fsl/dpio/qbman-portal.c 		       p->fqid, r->rslt);
p                1169 drivers/soc/fsl/dpio/qbman-portal.c 	struct qbman_bp_query_desc *p;
p                1172 drivers/soc/fsl/dpio/qbman-portal.c 	p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
p                1173 drivers/soc/fsl/dpio/qbman-portal.c 	if (!p)
p                1176 drivers/soc/fsl/dpio/qbman-portal.c 	p->bpid = cpu_to_le16(bpid);
p                1177 drivers/soc/fsl/dpio/qbman-portal.c 	resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
p                 138 drivers/soc/fsl/dpio/qbman-portal.h void qbman_swp_finish(struct qbman_swp *p);
p                 139 drivers/soc/fsl/dpio/qbman-portal.h u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
p                 140 drivers/soc/fsl/dpio/qbman-portal.h void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
p                 141 drivers/soc/fsl/dpio/qbman-portal.h u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
p                 142 drivers/soc/fsl/dpio/qbman-portal.h void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
p                 143 drivers/soc/fsl/dpio/qbman-portal.h int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
p                 144 drivers/soc/fsl/dpio/qbman-portal.h void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
p                 146 drivers/soc/fsl/dpio/qbman-portal.h void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
p                 147 drivers/soc/fsl/dpio/qbman-portal.h void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);
p                 161 drivers/soc/fsl/dpio/qbman-portal.h int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);
p                 166 drivers/soc/fsl/dpio/qbman-portal.h int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
p                 175 drivers/soc/fsl/dpio/qbman-portal.h int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
p                 192 drivers/soc/fsl/dpio/qbman-portal.h void *qbman_swp_mc_start(struct qbman_swp *p);
p                 193 drivers/soc/fsl/dpio/qbman-portal.h void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
p                 194 drivers/soc/fsl/dpio/qbman-portal.h void *qbman_swp_mc_result(struct qbman_swp *p);
p                 191 drivers/soc/fsl/qbman/bman.c static inline u32 bm_in(struct bm_portal *p, u32 offset)
p                 193 drivers/soc/fsl/qbman/bman.c 	return ioread32be(p->addr.ci + offset);
p                 196 drivers/soc/fsl/qbman/bman.c static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
p                 198 drivers/soc/fsl/qbman/bman.c 	iowrite32be(val, p->addr.ci + offset);
p                 202 drivers/soc/fsl/qbman/bman.c static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
p                 204 drivers/soc/fsl/qbman/bman.c 	dpaa_invalidate(p->addr.ce + offset);
p                 207 drivers/soc/fsl/qbman/bman.c static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
p                 209 drivers/soc/fsl/qbman/bman.c 	dpaa_touch_ro(p->addr.ce + offset);
p                 212 drivers/soc/fsl/qbman/bman.c static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
p                 214 drivers/soc/fsl/qbman/bman.c 	return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
p                 218 drivers/soc/fsl/qbman/bman.c 	struct bm_portal p;
p                 253 drivers/soc/fsl/qbman/bman.c static u32 poll_portal_slow(struct bman_portal *p, u32 is);
p                 257 drivers/soc/fsl/qbman/bman.c 	struct bman_portal *p = ptr;
p                 258 drivers/soc/fsl/qbman/bman.c 	struct bm_portal *portal = &p->p;
p                 259 drivers/soc/fsl/qbman/bman.c 	u32 clear = p->irq_sources;
p                 260 drivers/soc/fsl/qbman/bman.c 	u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;
p                 265 drivers/soc/fsl/qbman/bman.c 	clear |= poll_portal_slow(p, is);
p                 276 drivers/soc/fsl/qbman/bman.c static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
p                 278 drivers/soc/fsl/qbman/bman.c 	uintptr_t addr = (uintptr_t)p;
p                 529 drivers/soc/fsl/qbman/bman.c 	struct bm_portal *p;
p                 532 drivers/soc/fsl/qbman/bman.c 	p = &portal->p;
p                 538 drivers/soc/fsl/qbman/bman.c 	p->addr.ce = c->addr_virt_ce;
p                 539 drivers/soc/fsl/qbman/bman.c 	p->addr.ce_be = c->addr_virt_ce;
p                 540 drivers/soc/fsl/qbman/bman.c 	p->addr.ci = c->addr_virt_ci;
p                 541 drivers/soc/fsl/qbman/bman.c 	if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
p                 545 drivers/soc/fsl/qbman/bman.c 	if (bm_mc_init(p)) {
p                 553 drivers/soc/fsl/qbman/bman.c 	bm_isr_bscn_disable(p);
p                 556 drivers/soc/fsl/qbman/bman.c 	bm_out(p, BM_REG_ISDR, 0xffffffff);
p                 558 drivers/soc/fsl/qbman/bman.c 	bm_out(p, BM_REG_IER, 0);
p                 559 drivers/soc/fsl/qbman/bman.c 	bm_out(p, BM_REG_ISR, 0xffffffff);
p                 570 drivers/soc/fsl/qbman/bman.c 	ret = bm_rcr_get_fill(p);
p                 578 drivers/soc/fsl/qbman/bman.c 	bm_out(p, BM_REG_ISDR, 0);
p                 579 drivers/soc/fsl/qbman/bman.c 	bm_out(p, BM_REG_IIR, 0);
p                 587 drivers/soc/fsl/qbman/bman.c 	bm_mc_finish(p);
p                 589 drivers/soc/fsl/qbman/bman.c 	bm_rcr_finish(p);
p                 611 drivers/soc/fsl/qbman/bman.c static u32 poll_portal_slow(struct bman_portal *p, u32 is)
p                 616 drivers/soc/fsl/qbman/bman.c 		bm_rcr_cce_update(&p->p);
p                 617 drivers/soc/fsl/qbman/bman.c 		bm_rcr_set_ithresh(&p->p, 0);
p                 618 drivers/soc/fsl/qbman/bman.c 		bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
p                 627 drivers/soc/fsl/qbman/bman.c int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
p                 632 drivers/soc/fsl/qbman/bman.c 	p->irq_sources |= bits & BM_PIRQ_VISIBLE;
p                 633 drivers/soc/fsl/qbman/bman.c 	bm_out(&p->p, BM_REG_IER, p->irq_sources);
p                 645 drivers/soc/fsl/qbman/bman.c 	struct bman_portal *p = get_affine_portal();
p                 648 drivers/soc/fsl/qbman/bman.c 		bm_cmd = bm_mc_start(&p->p);
p                 650 drivers/soc/fsl/qbman/bman.c 		bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
p                 651 drivers/soc/fsl/qbman/bman.c 		if (!bm_mc_result_timeout(&p->p, &bm_res)) {
p                 731 drivers/soc/fsl/qbman/bman.c static void update_rcr_ci(struct bman_portal *p, int avail)
p                 734 drivers/soc/fsl/qbman/bman.c 		bm_rcr_cce_prefetch(&p->p);
p                 736 drivers/soc/fsl/qbman/bman.c 		bm_rcr_cce_update(&p->p);
p                 741 drivers/soc/fsl/qbman/bman.c 	struct bman_portal *p;
p                 750 drivers/soc/fsl/qbman/bman.c 		p = get_affine_portal();
p                 752 drivers/soc/fsl/qbman/bman.c 		avail = bm_rcr_get_avail(&p->p);
p                 754 drivers/soc/fsl/qbman/bman.c 			update_rcr_ci(p, avail);
p                 755 drivers/soc/fsl/qbman/bman.c 		r = bm_rcr_start(&p->p);
p                 767 drivers/soc/fsl/qbman/bman.c 	p = get_affine_portal();
p                 778 drivers/soc/fsl/qbman/bman.c 	bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
p                 789 drivers/soc/fsl/qbman/bman.c 	struct bman_portal *p = get_affine_portal();
p                 796 drivers/soc/fsl/qbman/bman.c 	mcc = bm_mc_start(&p->p);
p                 798 drivers/soc/fsl/qbman/bman.c 	bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
p                 800 drivers/soc/fsl/qbman/bman.c 	if (!bm_mc_result_timeout(&p->p, &mcr)) {
p                  41 drivers/soc/fsl/qbman/bman_portal.c 	struct bman_portal *p = bman_create_affine_portal(pcfg);
p                  43 drivers/soc/fsl/qbman/bman_portal.c 	if (!p) {
p                  49 drivers/soc/fsl/qbman/bman_portal.c 	bman_p_irqsource_add(p, BM_PIRQ_RCRI);
p                  50 drivers/soc/fsl/qbman/bman_portal.c 	affine_bportals[pcfg->cpu] = p;
p                  54 drivers/soc/fsl/qbman/bman_portal.c 	return p;
p                  59 drivers/soc/fsl/qbman/bman_portal.c 	struct bman_portal *p = affine_bportals[cpu];
p                  62 drivers/soc/fsl/qbman/bman_portal.c 	if (!p)
p                  65 drivers/soc/fsl/qbman/bman_portal.c 	pcfg = bman_get_bm_portal_config(p);
p                  77 drivers/soc/fsl/qbman/bman_portal.c 	struct bman_portal *p = affine_bportals[cpu];
p                  80 drivers/soc/fsl/qbman/bman_portal.c 	if (!p)
p                  83 drivers/soc/fsl/qbman/bman_portal.c 	pcfg = bman_get_bm_portal_config(p);
p                  69 drivers/soc/fsl/qbman/bman_priv.h int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
p                  54 drivers/soc/fsl/qbman/dpaa_sys.h static inline void dpaa_flush(void *p)
p                  61 drivers/soc/fsl/qbman/dpaa_sys.h 	flush_dcache_range((unsigned long)p, (unsigned long)p+64);
p                  65 drivers/soc/fsl/qbman/dpaa_sys.h #define dpaa_invalidate(p) dpaa_flush(p)
p                  67 drivers/soc/fsl/qbman/dpaa_sys.h #define dpaa_zero(p) memset(p, 0, 64)
p                  69 drivers/soc/fsl/qbman/dpaa_sys.h static inline void dpaa_touch_ro(void *p)
p                  72 drivers/soc/fsl/qbman/dpaa_sys.h 	prefetch(p+32);
p                  74 drivers/soc/fsl/qbman/dpaa_sys.h 	prefetch(p);
p                  78 drivers/soc/fsl/qbman/dpaa_sys.h static inline void dpaa_invalidate_touch_ro(void *p)
p                  80 drivers/soc/fsl/qbman/dpaa_sys.h 	dpaa_invalidate(p);
p                  81 drivers/soc/fsl/qbman/dpaa_sys.h 	dpaa_touch_ro(p);
p                 363 drivers/soc/fsl/qbman/qman.c static inline u32 qm_in(struct qm_portal *p, u32 offset)
p                 365 drivers/soc/fsl/qbman/qman.c 	return ioread32be(p->addr.ci + offset);
p                 368 drivers/soc/fsl/qbman/qman.c static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
p                 370 drivers/soc/fsl/qbman/qman.c 	iowrite32be(val, p->addr.ci + offset);
p                 374 drivers/soc/fsl/qbman/qman.c static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
p                 376 drivers/soc/fsl/qbman/qman.c 	dpaa_invalidate(p->addr.ce + offset);
p                 379 drivers/soc/fsl/qbman/qman.c static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
p                 381 drivers/soc/fsl/qbman/qman.c 	dpaa_touch_ro(p->addr.ce + offset);
p                 384 drivers/soc/fsl/qbman/qman.c static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
p                 386 drivers/soc/fsl/qbman/qman.c 	return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
p                 395 drivers/soc/fsl/qbman/qman.c static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
p                 397 drivers/soc/fsl/qbman/qman.c 	uintptr_t addr = (uintptr_t)p;
p                 581 drivers/soc/fsl/qbman/qman.c 					const struct qm_dqrr_entry *p)
p                 583 drivers/soc/fsl/qbman/qman.c 	uintptr_t addr = (uintptr_t)p;
p                 748 drivers/soc/fsl/qbman/qman.c static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
p                 750 drivers/soc/fsl/qbman/qman.c 	uintptr_t addr = (uintptr_t)p;
p                 983 drivers/soc/fsl/qbman/qman.c 	struct qm_portal p;
p                1044 drivers/soc/fsl/qbman/qman.c 	res = qm_dqrr_set_ithresh(&portal->p, ithresh);
p                1048 drivers/soc/fsl/qbman/qman.c 	portal->p.dqrr.ithresh = ithresh;
p                1057 drivers/soc/fsl/qbman/qman.c 		*ithresh = qm_in(&portal->p, QM_REG_DQRR_ITR);
p                1064 drivers/soc/fsl/qbman/qman.c 		*iperiod = qm_in(&portal->p, QM_REG_ITPR);
p                1073 drivers/soc/fsl/qbman/qman.c 	qm_out(&portal->p, QM_REG_ITPR, iperiod);
p                1094 drivers/soc/fsl/qbman/qman.c 			qm_out(&affine_portals[i]->p, QM_REG_ISR, 0xffffffff);
p                1095 drivers/soc/fsl/qbman/qman.c 			qm_out(&affine_portals[i]->p, QM_REG_IIR, 0);
p                1165 drivers/soc/fsl/qbman/qman.c static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
p                1166 drivers/soc/fsl/qbman/qman.c static inline unsigned int __poll_portal_fast(struct qman_portal *p,
p                1173 drivers/soc/fsl/qbman/qman.c 	struct qman_portal *p = ptr;
p                1174 drivers/soc/fsl/qbman/qman.c 	u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
p                1182 drivers/soc/fsl/qbman/qman.c 		__poll_portal_fast(p, QMAN_POLL_LIMIT);
p                1186 drivers/soc/fsl/qbman/qman.c 	clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
p                1187 drivers/soc/fsl/qbman/qman.c 	qm_out(&p->p, QM_REG_ISR, clear);
p                1191 drivers/soc/fsl/qbman/qman.c static int drain_mr_fqrni(struct qm_portal *p)
p                1195 drivers/soc/fsl/qbman/qman.c 	qm_mr_pvb_update(p);
p                1196 drivers/soc/fsl/qbman/qman.c 	msg = qm_mr_current(p);
p                1213 drivers/soc/fsl/qbman/qman.c 		qm_mr_pvb_update(p);
p                1214 drivers/soc/fsl/qbman/qman.c 		msg = qm_mr_current(p);
p                1223 drivers/soc/fsl/qbman/qman.c 	qm_mr_next(p);
p                1224 drivers/soc/fsl/qbman/qman.c 	qm_mr_cci_consume(p, 1);
p                1232 drivers/soc/fsl/qbman/qman.c 	struct qm_portal *p;
p                1236 drivers/soc/fsl/qbman/qman.c 	p = &portal->p;
p                1249 drivers/soc/fsl/qbman/qman.c 	p->addr.ce = c->addr_virt_ce;
p                1250 drivers/soc/fsl/qbman/qman.c 	p->addr.ce_be = c->addr_virt_ce;
p                1251 drivers/soc/fsl/qbman/qman.c 	p->addr.ci = c->addr_virt_ci;
p                1256 drivers/soc/fsl/qbman/qman.c 	if (qm_eqcr_init(p, qm_eqcr_pvb,
p                1261 drivers/soc/fsl/qbman/qman.c 	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
p                1266 drivers/soc/fsl/qbman/qman.c 	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
p                1270 drivers/soc/fsl/qbman/qman.c 	if (qm_mc_init(p)) {
p                1275 drivers/soc/fsl/qbman/qman.c 	qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
p                1276 drivers/soc/fsl/qbman/qman.c 	qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
p                1277 drivers/soc/fsl/qbman/qman.c 	qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
p                1297 drivers/soc/fsl/qbman/qman.c 	qm_out(p, QM_REG_ISDR, isdr);
p                1299 drivers/soc/fsl/qbman/qman.c 	qm_out(p, QM_REG_IER, 0);
p                1301 drivers/soc/fsl/qbman/qman.c 	qm_out(p, QM_REG_IIR, 1);
p                1312 drivers/soc/fsl/qbman/qman.c 	qm_out(p, QM_REG_ISDR, isdr);
p                1313 drivers/soc/fsl/qbman/qman.c 	ret = qm_eqcr_get_fill(p);
p                1319 drivers/soc/fsl/qbman/qman.c 	qm_out(p, QM_REG_ISDR, isdr);
p                1320 drivers/soc/fsl/qbman/qman.c 	if (qm_dqrr_current(p)) {
p                1322 drivers/soc/fsl/qbman/qman.c 		qm_dqrr_cdc_consume_n(p, 0xffff);
p                1324 drivers/soc/fsl/qbman/qman.c 	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
p                1326 drivers/soc/fsl/qbman/qman.c 		const union qm_mr_entry *e = qm_mr_current(p);
p                1334 drivers/soc/fsl/qbman/qman.c 	qm_out(p, QM_REG_ISR, 0xffffffff);
p                1335 drivers/soc/fsl/qbman/qman.c 	qm_out(p, QM_REG_ISDR, 0);
p                1337 drivers/soc/fsl/qbman/qman.c 		qm_out(p, QM_REG_IIR, 0);
p                1339 drivers/soc/fsl/qbman/qman.c 	qm_dqrr_sdqcr_set(p, portal->sdqcr);
p                1349 drivers/soc/fsl/qbman/qman.c 	qm_mc_finish(p);
p                1351 drivers/soc/fsl/qbman/qman.c 	qm_mr_finish(p);
p                1353 drivers/soc/fsl/qbman/qman.c 	qm_dqrr_finish(p);
p                1355 drivers/soc/fsl/qbman/qman.c 	qm_eqcr_finish(p);
p                1385 drivers/soc/fsl/qbman/qman.c 	qm_dqrr_sdqcr_set(&qm->p, 0);
p                1396 drivers/soc/fsl/qbman/qman.c 	qm_eqcr_cce_update(&qm->p);
p                1397 drivers/soc/fsl/qbman/qman.c 	qm_eqcr_cce_update(&qm->p);
p                1403 drivers/soc/fsl/qbman/qman.c 	qm_mc_finish(&qm->p);
p                1404 drivers/soc/fsl/qbman/qman.c 	qm_mr_finish(&qm->p);
p                1405 drivers/soc/fsl/qbman/qman.c 	qm_dqrr_finish(&qm->p);
p                1406 drivers/soc/fsl/qbman/qman.c 	qm_eqcr_finish(&qm->p);
p                1430 drivers/soc/fsl/qbman/qman.c static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
p                1458 drivers/soc/fsl/qbman/qman.c 	struct qman_portal *p = container_of(work, struct qman_portal,
p                1464 drivers/soc/fsl/qbman/qman.c 	spin_lock(&p->cgr_lock);
p                1465 drivers/soc/fsl/qbman/qman.c 	qm_mc_start(&p->p);
p                1466 drivers/soc/fsl/qbman/qman.c 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
p                1467 drivers/soc/fsl/qbman/qman.c 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
p                1468 drivers/soc/fsl/qbman/qman.c 		spin_unlock(&p->cgr_lock);
p                1469 drivers/soc/fsl/qbman/qman.c 		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
p                1470 drivers/soc/fsl/qbman/qman.c 		qman_p_irqsource_add(p, QM_PIRQ_CSCI);
p                1475 drivers/soc/fsl/qbman/qman.c 		      &p->cgrs[0]);
p                1477 drivers/soc/fsl/qbman/qman.c 	qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
p                1479 drivers/soc/fsl/qbman/qman.c 	qman_cgrs_cp(&p->cgrs[1], &rr);
p                1481 drivers/soc/fsl/qbman/qman.c 	list_for_each_entry(cgr, &p->cgr_cbs, node)
p                1483 drivers/soc/fsl/qbman/qman.c 			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
p                1484 drivers/soc/fsl/qbman/qman.c 	spin_unlock(&p->cgr_lock);
p                1485 drivers/soc/fsl/qbman/qman.c 	qman_p_irqsource_add(p, QM_PIRQ_CSCI);
p                1490 drivers/soc/fsl/qbman/qman.c 	struct qman_portal *p = container_of(work, struct qman_portal,
p                1499 drivers/soc/fsl/qbman/qman.c 		qm_mr_pvb_update(&p->p);
p                1500 drivers/soc/fsl/qbman/qman.c 		msg = qm_mr_current(&p->p);
p                1517 drivers/soc/fsl/qbman/qman.c 				fq_state_change(p, fq, msg, verb);
p                1519 drivers/soc/fsl/qbman/qman.c 					fq->cb.fqs(p, fq, msg);
p                1524 drivers/soc/fsl/qbman/qman.c 				fq_state_change(p, fq, msg, verb);
p                1526 drivers/soc/fsl/qbman/qman.c 					fq->cb.fqs(p, fq, msg);
p                1538 drivers/soc/fsl/qbman/qman.c 			fq->cb.ern(p, fq, msg);
p                1541 drivers/soc/fsl/qbman/qman.c 		qm_mr_next(&p->p);
p                1544 drivers/soc/fsl/qbman/qman.c 	qm_mr_cci_consume(&p->p, num);
p                1545 drivers/soc/fsl/qbman/qman.c 	qman_p_irqsource_add(p, QM_PIRQ_MRI);
p                1549 drivers/soc/fsl/qbman/qman.c static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
p                1552 drivers/soc/fsl/qbman/qman.c 		qman_p_irqsource_remove(p, QM_PIRQ_CSCI);
p                1554 drivers/soc/fsl/qbman/qman.c 			      &p->congestion_work);
p                1558 drivers/soc/fsl/qbman/qman.c 		qm_eqcr_cce_update(&p->p);
p                1559 drivers/soc/fsl/qbman/qman.c 		qm_eqcr_set_ithresh(&p->p, 0);
p                1564 drivers/soc/fsl/qbman/qman.c 		qman_p_irqsource_remove(p, QM_PIRQ_MRI);
p                1566 drivers/soc/fsl/qbman/qman.c 			      &p->mr_work);
p                1576 drivers/soc/fsl/qbman/qman.c static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
p                1578 drivers/soc/fsl/qbman/qman.c 	p->vdqcr_owned = NULL;
p                1609 drivers/soc/fsl/qbman/qman.c static inline unsigned int __poll_portal_fast(struct qman_portal *p,
p                1618 drivers/soc/fsl/qbman/qman.c 		qm_dqrr_pvb_update(&p->p);
p                1619 drivers/soc/fsl/qbman/qman.c 		dq = qm_dqrr_current(&p->p);
p                1629 drivers/soc/fsl/qbman/qman.c 			fq = p->vdqcr_owned;
p                1644 drivers/soc/fsl/qbman/qman.c 			res = fq->cb.dqrr(p, fq, dq);
p                1649 drivers/soc/fsl/qbman/qman.c 				clear_vdqcr(p, fq);
p                1654 drivers/soc/fsl/qbman/qman.c 			res = fq->cb.dqrr(p, fq, dq);
p                1672 drivers/soc/fsl/qbman/qman.c 			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
p                1675 drivers/soc/fsl/qbman/qman.c 		qm_dqrr_next(&p->p);
p                1687 drivers/soc/fsl/qbman/qman.c void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
p                1692 drivers/soc/fsl/qbman/qman.c 	p->irq_sources |= bits & QM_PIRQ_VISIBLE;
p                1693 drivers/soc/fsl/qbman/qman.c 	qm_out(&p->p, QM_REG_IER, p->irq_sources);
p                1698 drivers/soc/fsl/qbman/qman.c void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
p                1715 drivers/soc/fsl/qbman/qman.c 	p->irq_sources &= ~bits;
p                1716 drivers/soc/fsl/qbman/qman.c 	qm_out(&p->p, QM_REG_IER, p->irq_sources);
p                1717 drivers/soc/fsl/qbman/qman.c 	ier = qm_in(&p->p, QM_REG_IER);
p                1722 drivers/soc/fsl/qbman/qman.c 	qm_out(&p->p, QM_REG_ISR, ~ier);
p                1752 drivers/soc/fsl/qbman/qman.c int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
p                1754 drivers/soc/fsl/qbman/qman.c 	return __poll_portal_fast(p, limit);
p                1758 drivers/soc/fsl/qbman/qman.c void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
p                1763 drivers/soc/fsl/qbman/qman.c 	pools &= p->config->pools;
p                1764 drivers/soc/fsl/qbman/qman.c 	p->sdqcr |= pools;
p                1765 drivers/soc/fsl/qbman/qman.c 	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
p                1855 drivers/soc/fsl/qbman/qman.c 	struct qman_portal *p;
p                1875 drivers/soc/fsl/qbman/qman.c 	p = get_affine_portal();
p                1882 drivers/soc/fsl/qbman/qman.c 	mcc = qm_mc_start(&p->p);
p                1908 drivers/soc/fsl/qbman/qman.c 			struct qman_portal *p = qman_dma_portal;
p                1910 drivers/soc/fsl/qbman/qman.c 			phys_fq = dma_map_single(p->config->dev, fq,
p                1912 drivers/soc/fsl/qbman/qman.c 			if (dma_mapping_error(p->config->dev, phys_fq)) {
p                1913 drivers/soc/fsl/qbman/qman.c 				dev_err(p->config->dev, "dma_mapping failed\n");
p                1930 drivers/soc/fsl/qbman/qman.c 		qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
p                1932 drivers/soc/fsl/qbman/qman.c 	qm_mc_commit(&p->p, myverb);
p                1933 drivers/soc/fsl/qbman/qman.c 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
p                1934 drivers/soc/fsl/qbman/qman.c 		dev_err(p->config->dev, "MCR timeout\n");
p                1968 drivers/soc/fsl/qbman/qman.c 	struct qman_portal *p;
p                1978 drivers/soc/fsl/qbman/qman.c 	p = get_affine_portal();
p                1984 drivers/soc/fsl/qbman/qman.c 	mcc = qm_mc_start(&p->p);
p                1986 drivers/soc/fsl/qbman/qman.c 	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
p                1987 drivers/soc/fsl/qbman/qman.c 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
p                1988 drivers/soc/fsl/qbman/qman.c 		dev_err(p->config->dev, "ALTER_SCHED timeout\n");
p                2009 drivers/soc/fsl/qbman/qman.c 	struct qman_portal *p;
p                2020 drivers/soc/fsl/qbman/qman.c 	p = get_affine_portal();
p                2027 drivers/soc/fsl/qbman/qman.c 	mcc = qm_mc_start(&p->p);
p                2029 drivers/soc/fsl/qbman/qman.c 	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
p                2030 drivers/soc/fsl/qbman/qman.c 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
p                2031 drivers/soc/fsl/qbman/qman.c 		dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
p                2073 drivers/soc/fsl/qbman/qman.c 			fq->cb.fqs(p, fq, &msg);
p                2091 drivers/soc/fsl/qbman/qman.c 	struct qman_portal *p;
p                2100 drivers/soc/fsl/qbman/qman.c 	p = get_affine_portal();
p                2106 drivers/soc/fsl/qbman/qman.c 	mcc = qm_mc_start(&p->p);
p                2108 drivers/soc/fsl/qbman/qman.c 	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
p                2109 drivers/soc/fsl/qbman/qman.c 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
p                2129 drivers/soc/fsl/qbman/qman.c 	struct qman_portal *p = get_affine_portal();
p                2132 drivers/soc/fsl/qbman/qman.c 	mcc = qm_mc_start(&p->p);
p                2134 drivers/soc/fsl/qbman/qman.c 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
p                2135 drivers/soc/fsl/qbman/qman.c 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
p                2154 drivers/soc/fsl/qbman/qman.c 	struct qman_portal *p = get_affine_portal();
p                2157 drivers/soc/fsl/qbman/qman.c 	mcc = qm_mc_start(&p->p);
p                2159 drivers/soc/fsl/qbman/qman.c 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
p                2160 drivers/soc/fsl/qbman/qman.c 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
p                2183 drivers/soc/fsl/qbman/qman.c 	struct qman_portal *p = get_affine_portal();
p                2186 drivers/soc/fsl/qbman/qman.c 	mcc = qm_mc_start(&p->p);
p                2188 drivers/soc/fsl/qbman/qman.c 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
p                2189 drivers/soc/fsl/qbman/qman.c 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
p                2197 drivers/soc/fsl/qbman/qman.c 		dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
p                2221 drivers/soc/fsl/qbman/qman.c static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
p                2227 drivers/soc/fsl/qbman/qman.c 	if (p->vdqcr_owned)
p                2233 drivers/soc/fsl/qbman/qman.c 	p->vdqcr_owned = fq;
p                2234 drivers/soc/fsl/qbman/qman.c 	qm_dqrr_vdqcr_set(&p->p, vdqcr);
p                2241 drivers/soc/fsl/qbman/qman.c static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
p                2245 drivers/soc/fsl/qbman/qman.c 	*p = get_affine_portal();
p                2246 drivers/soc/fsl/qbman/qman.c 	ret = set_p_vdqcr(*p, fq, vdqcr);
p                2251 drivers/soc/fsl/qbman/qman.c static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
p                2258 drivers/soc/fsl/qbman/qman.c 				!set_vdqcr(p, fq, vdqcr));
p                2260 drivers/soc/fsl/qbman/qman.c 		wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
p                2266 drivers/soc/fsl/qbman/qman.c 	struct qman_portal *p;
p                2278 drivers/soc/fsl/qbman/qman.c 		ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
p                2280 drivers/soc/fsl/qbman/qman.c 		ret = set_vdqcr(&p, fq, vdqcr);
p                2302 drivers/soc/fsl/qbman/qman.c static void update_eqcr_ci(struct qman_portal *p, u8 avail)
p                2305 drivers/soc/fsl/qbman/qman.c 		qm_eqcr_cce_prefetch(&p->p);
p                2307 drivers/soc/fsl/qbman/qman.c 		qm_eqcr_cce_update(&p->p);
p                2312 drivers/soc/fsl/qbman/qman.c 	struct qman_portal *p;
p                2317 drivers/soc/fsl/qbman/qman.c 	p = get_affine_portal();
p                2320 drivers/soc/fsl/qbman/qman.c 	if (p->use_eqcr_ci_stashing) {
p                2325 drivers/soc/fsl/qbman/qman.c 		eq = qm_eqcr_start_stash(&p->p);
p                2331 drivers/soc/fsl/qbman/qman.c 		avail = qm_eqcr_get_avail(&p->p);
p                2333 drivers/soc/fsl/qbman/qman.c 			update_eqcr_ci(p, avail);
p                2334 drivers/soc/fsl/qbman/qman.c 		eq = qm_eqcr_start_no_stash(&p->p);
p                2344 drivers/soc/fsl/qbman/qman.c 	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
p                2357 drivers/soc/fsl/qbman/qman.c 	struct qman_portal *p = get_affine_portal();
p                2361 drivers/soc/fsl/qbman/qman.c 	mcc = qm_mc_start(&p->p);
p                2367 drivers/soc/fsl/qbman/qman.c 	qm_mc_commit(&p->p, verb);
p                2368 drivers/soc/fsl/qbman/qman.c 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
p                2424 drivers/soc/fsl/qbman/qman.c 	struct qman_portal *p;
p                2436 drivers/soc/fsl/qbman/qman.c 	p = get_affine_portal();
p                2440 drivers/soc/fsl/qbman/qman.c 	cgr->chan = p->config->channel;
p                2441 drivers/soc/fsl/qbman/qman.c 	spin_lock(&p->cgr_lock);
p                2450 drivers/soc/fsl/qbman/qman.c 		qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
p                2464 drivers/soc/fsl/qbman/qman.c 	list_add(&cgr->node, &p->cgr_cbs);
p                2470 drivers/soc/fsl/qbman/qman.c 		dev_err(p->config->dev, "CGR HW state partially modified\n");
p                2475 drivers/soc/fsl/qbman/qman.c 	    qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
p                2476 drivers/soc/fsl/qbman/qman.c 		cgr->cb(p, cgr, 1);
p                2478 drivers/soc/fsl/qbman/qman.c 	spin_unlock(&p->cgr_lock);
p                2491 drivers/soc/fsl/qbman/qman.c 	struct qman_portal *p = get_affine_portal();
p                2493 drivers/soc/fsl/qbman/qman.c 	if (cgr->chan != p->config->channel) {
p                2495 drivers/soc/fsl/qbman/qman.c 		dev_err(p->config->dev, "CGR not owned by current portal");
p                2496 drivers/soc/fsl/qbman/qman.c 		dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
p                2497 drivers/soc/fsl/qbman/qman.c 			cgr->chan, p->config->channel);
p                2503 drivers/soc/fsl/qbman/qman.c 	spin_lock_irqsave(&p->cgr_lock, irqflags);
p                2509 drivers/soc/fsl/qbman/qman.c 	list_for_each_entry(i, &p->cgr_cbs, node)
p                2515 drivers/soc/fsl/qbman/qman.c 		list_add(&cgr->node, &p->cgr_cbs);
p                2520 drivers/soc/fsl/qbman/qman.c 	qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
p                2526 drivers/soc/fsl/qbman/qman.c 		list_add(&cgr->node, &p->cgr_cbs);
p                2528 drivers/soc/fsl/qbman/qman.c 	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
p                2540 drivers/soc/fsl/qbman/qman.c static void qman_delete_cgr_smp_call(void *p)
p                2542 drivers/soc/fsl/qbman/qman.c 	qman_delete_cgr((struct qman_cgr *)p);
p                2562 drivers/soc/fsl/qbman/qman.c static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
p                2567 drivers/soc/fsl/qbman/qman.c 	qm_mr_pvb_update(p);
p                2568 drivers/soc/fsl/qbman/qman.c 	msg = qm_mr_current(p);
p                2572 drivers/soc/fsl/qbman/qman.c 		qm_mr_next(p);
p                2573 drivers/soc/fsl/qbman/qman.c 		qm_mr_cci_consume_to_current(p);
p                2574 drivers/soc/fsl/qbman/qman.c 		qm_mr_pvb_update(p);
p                2575 drivers/soc/fsl/qbman/qman.c 		msg = qm_mr_current(p);
p                2580 drivers/soc/fsl/qbman/qman.c static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
p                2587 drivers/soc/fsl/qbman/qman.c 		qm_dqrr_pvb_update(p);
p                2588 drivers/soc/fsl/qbman/qman.c 		dqrr = qm_dqrr_current(p);
p                2596 drivers/soc/fsl/qbman/qman.c 		qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
p                2597 drivers/soc/fsl/qbman/qman.c 		qm_dqrr_pvb_update(p);
p                2598 drivers/soc/fsl/qbman/qman.c 		qm_dqrr_next(p);
p                2599 drivers/soc/fsl/qbman/qman.c 		dqrr = qm_dqrr_current(p);
p                2604 drivers/soc/fsl/qbman/qman.c #define qm_mr_drain(p, V) \
p                2605 drivers/soc/fsl/qbman/qman.c 	_qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)
p                2607 drivers/soc/fsl/qbman/qman.c #define qm_dqrr_drain(p, f, S) \
p                2608 drivers/soc/fsl/qbman/qman.c 	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)
p                2610 drivers/soc/fsl/qbman/qman.c #define qm_dqrr_drain_wait(p, f, S) \
p                2611 drivers/soc/fsl/qbman/qman.c 	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)
p                2613 drivers/soc/fsl/qbman/qman.c #define qm_dqrr_drain_nomatch(p) \
p                2614 drivers/soc/fsl/qbman/qman.c 	_qm_dqrr_consume_and_match(p, 0, 0, false)
p                2618 drivers/soc/fsl/qbman/qman.c 	struct qman_portal *p, *channel_portal;
p                2626 drivers/soc/fsl/qbman/qman.c 	p = get_affine_portal();
p                2627 drivers/soc/fsl/qbman/qman.c 	dev = p->config->dev;
p                2629 drivers/soc/fsl/qbman/qman.c 	mcc = qm_mc_start(&p->p);
p                2631 drivers/soc/fsl/qbman/qman.c 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
p                2632 drivers/soc/fsl/qbman/qman.c 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
p                2644 drivers/soc/fsl/qbman/qman.c 	mcc = qm_mc_start(&p->p);
p                2646 drivers/soc/fsl/qbman/qman.c 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
p                2647 drivers/soc/fsl/qbman/qman.c 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
p                2667 drivers/soc/fsl/qbman/qman.c 		channel_portal = p;
p                2675 drivers/soc/fsl/qbman/qman.c 		mcc = qm_mc_start(&channel_portal->p);
p                2677 drivers/soc/fsl/qbman/qman.c 		qm_mc_commit(&channel_portal->p, QM_MCC_VERB_ALTER_RETIRE);
p                2678 drivers/soc/fsl/qbman/qman.c 		if (!qm_mc_result_timeout(&channel_portal->p, &mcr)) {
p                2688 drivers/soc/fsl/qbman/qman.c 			drain_mr_fqrni(&channel_portal->p);
p                2719 drivers/soc/fsl/qbman/qman.c 				qm_dqrr_sdqcr_set(&channel_portal->p,
p                2723 drivers/soc/fsl/qbman/qman.c 				qm_dqrr_sdqcr_set(&channel_portal->p,
p                2729 drivers/soc/fsl/qbman/qman.c 				qm_dqrr_drain_nomatch(&channel_portal->p);
p                2731 drivers/soc/fsl/qbman/qman.c 				found_fqrn = qm_mr_drain(&channel_portal->p,
p                2736 drivers/soc/fsl/qbman/qman.c 			qm_dqrr_sdqcr_set(&channel_portal->p,
p                2763 drivers/soc/fsl/qbman/qman.c 				qm_dqrr_vdqcr_set(&p->p, vdqcr);
p                2768 drivers/soc/fsl/qbman/qman.c 			} while (!qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
p                2773 drivers/soc/fsl/qbman/qman.c 			orl_empty = qm_mr_drain(&p->p, FQRL);
p                2776 drivers/soc/fsl/qbman/qman.c 		mcc = qm_mc_start(&p->p);
p                2778 drivers/soc/fsl/qbman/qman.c 		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
p                2779 drivers/soc/fsl/qbman/qman.c 		if (!qm_mc_result_timeout(&p->p, &mcr)) {
p                2796 drivers/soc/fsl/qbman/qman.c 		mcc = qm_mc_start(&p->p);
p                2798 drivers/soc/fsl/qbman/qman.c 		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
p                2799 drivers/soc/fsl/qbman/qman.c 		if (!qm_mc_result_timeout(&p->p, &mcr)) {
p                2838 drivers/soc/fsl/qbman/qman.c static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
p                2842 drivers/soc/fsl/qbman/qman.c 	if (!p)
p                2845 drivers/soc/fsl/qbman/qman.c 	addr = gen_pool_alloc(p, cnt);
p                 161 drivers/soc/fsl/qbman/qman_ccsr.c static bool qm_ecir_is_dcp(const struct qm_ecir *p)
p                 163 drivers/soc/fsl/qbman/qman_ccsr.c 	return p->info & BIT(29);
p                 166 drivers/soc/fsl/qbman/qman_ccsr.c static int qm_ecir_get_pnum(const struct qm_ecir *p)
p                 168 drivers/soc/fsl/qbman/qman_ccsr.c 	return (p->info >> 24) & 0x1f;
p                 171 drivers/soc/fsl/qbman/qman_ccsr.c static int qm_ecir_get_fqid(const struct qm_ecir *p)
p                 173 drivers/soc/fsl/qbman/qman_ccsr.c 	return p->info & (BIT(24) - 1);
p                 180 drivers/soc/fsl/qbman/qman_ccsr.c static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p)
p                 182 drivers/soc/fsl/qbman/qman_ccsr.c 	return p->info & BIT(31);
p                 185 drivers/soc/fsl/qbman/qman_ccsr.c static int qm_ecir2_get_pnum(const struct qm_ecir2 *p)
p                 187 drivers/soc/fsl/qbman/qman_ccsr.c 	return p->info & (BIT(10) - 1);
p                 195 drivers/soc/fsl/qbman/qman_ccsr.c static int qm_eadr_get_memid(const struct qm_eadr *p)
p                 197 drivers/soc/fsl/qbman/qman_ccsr.c 	return (p->info >> 24) & 0xf;
p                 200 drivers/soc/fsl/qbman/qman_ccsr.c static int qm_eadr_get_eadr(const struct qm_eadr *p)
p                 202 drivers/soc/fsl/qbman/qman_ccsr.c 	return p->info & (BIT(12) - 1);
p                 205 drivers/soc/fsl/qbman/qman_ccsr.c static int qm_eadr_v3_get_memid(const struct qm_eadr *p)
p                 207 drivers/soc/fsl/qbman/qman_ccsr.c 	return (p->info >> 24) & 0x1f;
p                 210 drivers/soc/fsl/qbman/qman_ccsr.c static int qm_eadr_v3_get_eadr(const struct qm_eadr *p)
p                 212 drivers/soc/fsl/qbman/qman_ccsr.c 	return p->info & (BIT(16) - 1);
p                 126 drivers/soc/fsl/qbman/qman_portal.c 	struct qman_portal *p;
p                 135 drivers/soc/fsl/qbman/qman_portal.c 	p = qman_create_affine_portal(pcfg, NULL);
p                 136 drivers/soc/fsl/qbman/qman_portal.c 	if (!p) {
p                 150 drivers/soc/fsl/qbman/qman_portal.c 	qman_p_irqsource_add(p, irq_sources);
p                 159 drivers/soc/fsl/qbman/qman_portal.c 		qman_dma_portal = p;
p                 165 drivers/soc/fsl/qbman/qman_portal.c 	return p;
p                 192 drivers/soc/fsl/qbman/qman_portal.c 	struct qman_portal *p;
p                 195 drivers/soc/fsl/qbman/qman_portal.c 	p = affine_portals[cpu];
p                 196 drivers/soc/fsl/qbman/qman_portal.c 	if (p) {
p                 197 drivers/soc/fsl/qbman/qman_portal.c 		pcfg = qman_get_qm_portal_config(p);
p                 210 drivers/soc/fsl/qbman/qman_portal.c 	struct qman_portal *p;
p                 213 drivers/soc/fsl/qbman/qman_portal.c 	p = affine_portals[cpu];
p                 214 drivers/soc/fsl/qbman/qman_portal.c 	if (p) {
p                 215 drivers/soc/fsl/qbman/qman_portal.c 		pcfg = qman_get_qm_portal_config(p);
p                 209 drivers/soc/fsl/qbman/qman_test_api.c static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
p                 225 drivers/soc/fsl/qbman/qman_test_api.c static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
p                 232 drivers/soc/fsl/qbman/qman_test_api.c static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
p                 255 drivers/soc/fsl/qbman/qman_test_stash.c 	u32 *p = handler->frame_ptr;
p                 264 drivers/soc/fsl/qbman/qman_test_stash.c 	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
p                 265 drivers/soc/fsl/qbman/qman_test_stash.c 		*p ^= handler->rx_mixer;
p                 266 drivers/soc/fsl/qbman/qman_test_stash.c 		if (*p != lfsr) {
p                 270 drivers/soc/fsl/qbman/qman_test_stash.c 		*p ^= handler->tx_mixer;
p                 519 drivers/soc/fsl/qbman/qman_test_stash.c 	u32 *p = special_handler->frame_ptr;
p                 531 drivers/soc/fsl/qbman/qman_test_stash.c 	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
p                 532 drivers/soc/fsl/qbman/qman_test_stash.c 		if (*p != lfsr) {
p                 537 drivers/soc/fsl/qbman/qman_test_stash.c 		*p ^= special_handler->tx_mixer;
p                 357 drivers/soc/fsl/qe/qe.c 	const u8 *p = memchr(snums, snum, qe_num_of_snum);
p                 359 drivers/soc/fsl/qe/qe.c 	if (p)
p                 360 drivers/soc/fsl/qe/qe.c 		clear_bit(p - snums, snum_state);
p                  37 drivers/soc/qcom/llcc-sdm845.c #define SCT_ENTRY(uid, sid, mc, p, fs, bway, rway, cmod, ptw, dca, rp, a) \
p                  42 drivers/soc/qcom/llcc-sdm845.c 		.priority = p,			\
p                 207 drivers/soc/qcom/rpmh-rsc.c static irqreturn_t tcs_tx_done(int irq, void *p)
p                 209 drivers/soc/qcom/rpmh-rsc.c 	struct rsc_drv *drv = p;
p                 104 drivers/soc/qcom/rpmh.c 	struct cache_req *p, *req = NULL;
p                 106 drivers/soc/qcom/rpmh.c 	list_for_each_entry(p, &ctrlr->cache, list) {
p                 107 drivers/soc/qcom/rpmh.c 		if (p->addr == addr) {
p                 108 drivers/soc/qcom/rpmh.c 			req = p;
p                 459 drivers/soc/qcom/rpmh.c 	struct cache_req *p;
p                 477 drivers/soc/qcom/rpmh.c 	list_for_each_entry(p, &ctrlr->cache, list) {
p                 478 drivers/soc/qcom/rpmh.c 		if (!is_req_valid(p)) {
p                 480 drivers/soc/qcom/rpmh.c 				 __func__, p->addr, p->sleep_val, p->wake_val);
p                 483 drivers/soc/qcom/rpmh.c 		ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
p                 487 drivers/soc/qcom/rpmh.c 				  p->addr, p->wake_val);
p                 280 drivers/soc/qcom/smem.c 	void *p = phdr;
p                 282 drivers/soc/qcom/smem.c 	return p + le32_to_cpu(phdr->offset_free_uncached);
p                 289 drivers/soc/qcom/smem.c 	void *p = phdr;
p                 292 drivers/soc/qcom/smem.c 	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
p                 298 drivers/soc/qcom/smem.c 	void *p = phdr;
p                 300 drivers/soc/qcom/smem.c 	return p + le32_to_cpu(phdr->offset_free_cached);
p                 306 drivers/soc/qcom/smem.c 	void *p = phdr;
p                 308 drivers/soc/qcom/smem.c 	return p + sizeof(*phdr);
p                 314 drivers/soc/qcom/smem.c 	void *p = e;
p                 316 drivers/soc/qcom/smem.c 	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
p                 323 drivers/soc/qcom/smem.c 	void *p = e;
p                 325 drivers/soc/qcom/smem.c 	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
p                 330 drivers/soc/qcom/smem.c 	void *p = e;
p                 332 drivers/soc/qcom/smem.c 	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
p                 337 drivers/soc/qcom/smem.c 	void *p = e;
p                 339 drivers/soc/qcom/smem.c 	return p - le32_to_cpu(e->size);
p                 654 drivers/soc/qcom/smem.c phys_addr_t qcom_smem_virt_to_phys(void *p)
p                 661 drivers/soc/qcom/smem.c 		if (p < region->virt_base)
p                 663 drivers/soc/qcom/smem.c 		if (p < region->virt_base + region->size) {
p                 664 drivers/soc/qcom/smem.c 			u64 offset = p - region->virt_base;
p                 236 drivers/soc/qcom/socinfo.c static int qcom_show_build_id(struct seq_file *seq, void *p)
p                 245 drivers/soc/qcom/socinfo.c static int qcom_show_pmic_model(struct seq_file *seq, void *p)
p                 258 drivers/soc/qcom/socinfo.c static int qcom_show_pmic_die_revision(struct seq_file *seq, void *p)
p                 274 drivers/soc/qcom/socinfo.c static int show_image_##type(struct seq_file *seq, void *p)		  \
p                  90 drivers/spi/spi-axi-spi-engine.c 	struct spi_engine_program *p;
p                 108 drivers/spi/spi-axi-spi-engine.c static void spi_engine_program_add_cmd(struct spi_engine_program *p,
p                 112 drivers/spi/spi-axi-spi-engine.c 		p->instructions[p->length] = cmd;
p                 113 drivers/spi/spi-axi-spi-engine.c 	p->length++;
p                 145 drivers/spi/spi-axi-spi-engine.c static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
p                 159 drivers/spi/spi-axi-spi-engine.c 		spi_engine_program_add_cmd(p, dry,
p                 165 drivers/spi/spi-axi-spi-engine.c static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
p                 178 drivers/spi/spi-axi-spi-engine.c 		spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
p                 183 drivers/spi/spi-axi-spi-engine.c static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
p                 191 drivers/spi/spi-axi-spi-engine.c 	spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(1, mask));
p                 195 drivers/spi/spi-axi-spi-engine.c 	struct spi_message *msg, bool dry, struct spi_engine_program *p)
p                 204 drivers/spi/spi-axi-spi-engine.c 	spi_engine_program_add_cmd(p, dry,
p                 212 drivers/spi/spi-axi-spi-engine.c 			spi_engine_program_add_cmd(p, dry,
p                 218 drivers/spi/spi-axi-spi-engine.c 			spi_engine_gen_cs(p, dry, spi, true);
p                 220 drivers/spi/spi-axi-spi-engine.c 		spi_engine_gen_xfer(p, dry, xfer);
p                 221 drivers/spi/spi-axi-spi-engine.c 		spi_engine_gen_sleep(p, dry, spi_engine, clk_div,
p                 229 drivers/spi/spi-axi-spi-engine.c 			spi_engine_gen_cs(p, dry, spi, false);
p                 389 drivers/spi/spi-axi-spi-engine.c 			kfree(spi_engine->p);
p                 412 drivers/spi/spi-axi-spi-engine.c 	struct spi_engine_program p_dry, *p;
p                 421 drivers/spi/spi-axi-spi-engine.c 	size = sizeof(*p->instructions) * (p_dry.length + 1);
p                 422 drivers/spi/spi-axi-spi-engine.c 	p = kzalloc(sizeof(*p) + size, GFP_KERNEL);
p                 423 drivers/spi/spi-axi-spi-engine.c 	if (!p)
p                 425 drivers/spi/spi-axi-spi-engine.c 	spi_engine_compile_message(spi_engine, msg, false, p);
p                 429 drivers/spi/spi-axi-spi-engine.c 	spi_engine_program_add_cmd(p, false,
p                 433 drivers/spi/spi-axi-spi-engine.c 	spi_engine->p = p;
p                 435 drivers/spi/spi-axi-spi-engine.c 	spi_engine->cmd_buf = p->instructions;
p                 436 drivers/spi/spi-axi-spi-engine.c 	spi_engine->cmd_length = p->length;
p                 176 drivers/spi/spi-butterfly.c static void butterfly_attach(struct parport *p)
p                 182 drivers/spi/spi-butterfly.c 	struct device		*dev = p->physport->dev;
p                 215 drivers/spi/spi-butterfly.c 	pp->port = p;
p                 218 drivers/spi/spi-butterfly.c 	pd = parport_register_dev_model(p, "spi_butterfly", &butterfly_cb, 0);
p                 232 drivers/spi/spi-butterfly.c 	pr_debug("%s: powerup/reset Butterfly\n", p->name);
p                 268 drivers/spi/spi-butterfly.c 		pr_debug("%s: dataflash at %s\n", p->name,
p                 271 drivers/spi/spi-butterfly.c 	pr_info("%s: AVR Butterfly\n", p->name);
p                 285 drivers/spi/spi-butterfly.c 	pr_debug("%s: butterfly probe, fail %d\n", p->name, status);
p                 288 drivers/spi/spi-butterfly.c static void butterfly_detach(struct parport *p)
p                 296 drivers/spi/spi-butterfly.c 	if (!butterfly || butterfly->port != p)
p                  23 drivers/spi/spi-cavium-octeon.c 	struct octeon_spi *p;
p                  29 drivers/spi/spi-cavium-octeon.c 	p = spi_master_get_devdata(master);
p                  38 drivers/spi/spi-cavium-octeon.c 	p->register_base = reg_base;
p                  39 drivers/spi/spi-cavium-octeon.c 	p->sys_freq = octeon_get_io_clock_rate();
p                  41 drivers/spi/spi-cavium-octeon.c 	p->regs.config = 0;
p                  42 drivers/spi/spi-cavium-octeon.c 	p->regs.status = 0x08;
p                  43 drivers/spi/spi-cavium-octeon.c 	p->regs.tx = 0x10;
p                  44 drivers/spi/spi-cavium-octeon.c 	p->regs.data = 0x80;
p                  75 drivers/spi/spi-cavium-octeon.c 	struct octeon_spi *p = spi_master_get_devdata(master);
p                  78 drivers/spi/spi-cavium-octeon.c 	writeq(0, p->register_base + OCTEON_SPI_CFG(p));
p                  24 drivers/spi/spi-cavium-thunderx.c 	struct octeon_spi *p;
p                  31 drivers/spi/spi-cavium-thunderx.c 	p = spi_master_get_devdata(master);
p                  41 drivers/spi/spi-cavium-thunderx.c 	p->register_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
p                  42 drivers/spi/spi-cavium-thunderx.c 	if (!p->register_base) {
p                  47 drivers/spi/spi-cavium-thunderx.c 	p->regs.config = 0x1000;
p                  48 drivers/spi/spi-cavium-thunderx.c 	p->regs.status = 0x1008;
p                  49 drivers/spi/spi-cavium-thunderx.c 	p->regs.tx = 0x1010;
p                  50 drivers/spi/spi-cavium-thunderx.c 	p->regs.data = 0x1080;
p                  52 drivers/spi/spi-cavium-thunderx.c 	p->clk = devm_clk_get(dev, NULL);
p                  53 drivers/spi/spi-cavium-thunderx.c 	if (IS_ERR(p->clk)) {
p                  54 drivers/spi/spi-cavium-thunderx.c 		ret = PTR_ERR(p->clk);
p                  58 drivers/spi/spi-cavium-thunderx.c 	ret = clk_prepare_enable(p->clk);
p                  62 drivers/spi/spi-cavium-thunderx.c 	p->sys_freq = clk_get_rate(p->clk);
p                  63 drivers/spi/spi-cavium-thunderx.c 	if (!p->sys_freq)
p                  64 drivers/spi/spi-cavium-thunderx.c 		p->sys_freq = SYS_FREQ_DEFAULT;
p                  65 drivers/spi/spi-cavium-thunderx.c 	dev_info(dev, "Set system clock to %u\n", p->sys_freq);
p                  84 drivers/spi/spi-cavium-thunderx.c 	clk_disable_unprepare(p->clk);
p                  93 drivers/spi/spi-cavium-thunderx.c 	struct octeon_spi *p;
p                  95 drivers/spi/spi-cavium-thunderx.c 	p = spi_master_get_devdata(master);
p                  96 drivers/spi/spi-cavium-thunderx.c 	if (!p)
p                  99 drivers/spi/spi-cavium-thunderx.c 	clk_disable_unprepare(p->clk);
p                 102 drivers/spi/spi-cavium-thunderx.c 	writeq(0, p->register_base + OCTEON_SPI_CFG(p));
p                  16 drivers/spi/spi-cavium.c static void octeon_spi_wait_ready(struct octeon_spi *p)
p                  24 drivers/spi/spi-cavium.c 		mpi_sts.u64 = readq(p->register_base + OCTEON_SPI_STS(p));
p                  28 drivers/spi/spi-cavium.c static int octeon_spi_do_transfer(struct octeon_spi *p,
p                  48 drivers/spi/spi-cavium.c 	clkdiv = p->sys_freq / (2 * xfer->speed_hz);
p                  61 drivers/spi/spi-cavium.c 		p->cs_enax |= 1ull << (12 + spi->chip_select);
p                  62 drivers/spi/spi-cavium.c 	mpi_cfg.u64 |= p->cs_enax;
p                  64 drivers/spi/spi-cavium.c 	if (mpi_cfg.u64 != p->last_cfg) {
p                  65 drivers/spi/spi-cavium.c 		p->last_cfg = mpi_cfg.u64;
p                  66 drivers/spi/spi-cavium.c 		writeq(mpi_cfg.u64, p->register_base + OCTEON_SPI_CFG(p));
p                  78 drivers/spi/spi-cavium.c 			writeq(d, p->register_base + OCTEON_SPI_DAT0(p) + (8 * i));
p                  85 drivers/spi/spi-cavium.c 		writeq(mpi_tx.u64, p->register_base + OCTEON_SPI_TX(p));
p                  87 drivers/spi/spi-cavium.c 		octeon_spi_wait_ready(p);
p                  90 drivers/spi/spi-cavium.c 				u64 v = readq(p->register_base + OCTEON_SPI_DAT0(p) + (8 * i));
p                 102 drivers/spi/spi-cavium.c 		writeq(d, p->register_base + OCTEON_SPI_DAT0(p) + (8 * i));
p                 113 drivers/spi/spi-cavium.c 	writeq(mpi_tx.u64, p->register_base + OCTEON_SPI_TX(p));
p                 115 drivers/spi/spi-cavium.c 	octeon_spi_wait_ready(p);
p                 118 drivers/spi/spi-cavium.c 			u64 v = readq(p->register_base + OCTEON_SPI_DAT0(p) + (8 * i));
p                 131 drivers/spi/spi-cavium.c 	struct octeon_spi *p = spi_master_get_devdata(master);
p                 139 drivers/spi/spi-cavium.c 		int r = octeon_spi_do_transfer(p, msg, xfer, last_xfer);
p                 188 drivers/spi/spi-lm70llp.c static void spi_lm70llp_attach(struct parport *p)
p                 205 drivers/spi/spi-lm70llp.c 	master = spi_alloc_master(p->physport->dev, sizeof *pp);
p                 223 drivers/spi/spi-lm70llp.c 	pp->port = p;
p                 227 drivers/spi/spi-lm70llp.c 	pd = parport_register_dev_model(p, DRVNAME, &lm70llp_cb, 0);
p                 296 drivers/spi/spi-lm70llp.c static void spi_lm70llp_detach(struct parport *p)
p                 300 drivers/spi/spi-lm70llp.c 	if (!lm70llp || lm70llp->port != p)
p                 195 drivers/spi/spi-sh-msiof.c static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
p                 200 drivers/spi/spi-sh-msiof.c 		return ioread16(p->mapbase + reg_offs);
p                 202 drivers/spi/spi-sh-msiof.c 		return ioread32(p->mapbase + reg_offs);
p                 206 drivers/spi/spi-sh-msiof.c static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
p                 212 drivers/spi/spi-sh-msiof.c 		iowrite16(value, p->mapbase + reg_offs);
p                 215 drivers/spi/spi-sh-msiof.c 		iowrite32(value, p->mapbase + reg_offs);
p                 220 drivers/spi/spi-sh-msiof.c static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
p                 226 drivers/spi/spi-sh-msiof.c 	data = sh_msiof_read(p, CTR);
p                 229 drivers/spi/spi-sh-msiof.c 	sh_msiof_write(p, CTR, data);
p                 231 drivers/spi/spi-sh-msiof.c 	return readl_poll_timeout_atomic(p->mapbase + CTR, data,
p                 237 drivers/spi/spi-sh-msiof.c 	struct sh_msiof_spi_priv *p = data;
p                 240 drivers/spi/spi-sh-msiof.c 	sh_msiof_write(p, IER, 0);
p                 241 drivers/spi/spi-sh-msiof.c 	complete(&p->done);
p                 246 drivers/spi/spi-sh-msiof.c static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p)
p                 251 drivers/spi/spi-sh-msiof.c 	data = sh_msiof_read(p, CTR);
p                 253 drivers/spi/spi-sh-msiof.c 	sh_msiof_write(p, CTR, data);
p                 255 drivers/spi/spi-sh-msiof.c 	readl_poll_timeout_atomic(p->mapbase + CTR, data, !(data & mask), 1,
p                 264 drivers/spi/spi-sh-msiof.c static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
p                 269 drivers/spi/spi-sh-msiof.c 	unsigned int div_pow = p->min_div_pow;
p                 292 drivers/spi/spi-sh-msiof.c 		dev_err(&p->pdev->dev,
p                 299 drivers/spi/spi-sh-msiof.c 	sh_msiof_write(p, TSCR, scr);
p                 300 drivers/spi/spi-sh-msiof.c 	if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
p                 301 drivers/spi/spi-sh-msiof.c 		sh_msiof_write(p, RSCR, scr);
p                 321 drivers/spi/spi-sh-msiof.c static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
p                 325 drivers/spi/spi-sh-msiof.c 	if (!p->info)
p                 329 drivers/spi/spi-sh-msiof.c 	if (p->info->dtdl > 200 || p->info->syncdl > 300) {
p                 330 drivers/spi/spi-sh-msiof.c 		dev_warn(&p->pdev->dev, "DTDL or SYNCDL is too large\n");
p                 335 drivers/spi/spi-sh-msiof.c 	if ((p->info->dtdl + p->info->syncdl) % 100) {
p                 336 drivers/spi/spi-sh-msiof.c 		dev_warn(&p->pdev->dev, "the sum of DTDL/SYNCDL is not good\n");
p                 340 drivers/spi/spi-sh-msiof.c 	val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT;
p                 341 drivers/spi/spi-sh-msiof.c 	val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT;
p                 346 drivers/spi/spi-sh-msiof.c static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
p                 363 drivers/spi/spi-sh-msiof.c 	tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
p                 364 drivers/spi/spi-sh-msiof.c 	if (spi_controller_is_slave(p->ctlr)) {
p                 365 drivers/spi/spi-sh-msiof.c 		sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
p                 367 drivers/spi/spi-sh-msiof.c 		sh_msiof_write(p, TMDR1,
p                 371 drivers/spi/spi-sh-msiof.c 	if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) {
p                 375 drivers/spi/spi-sh-msiof.c 	sh_msiof_write(p, RMDR1, tmp);
p                 386 drivers/spi/spi-sh-msiof.c 	sh_msiof_write(p, CTR, tmp);
p                 389 drivers/spi/spi-sh-msiof.c static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
p                 395 drivers/spi/spi-sh-msiof.c 	if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
p                 396 drivers/spi/spi-sh-msiof.c 		sh_msiof_write(p, TMDR2, dr2);
p                 398 drivers/spi/spi-sh-msiof.c 		sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
p                 401 drivers/spi/spi-sh-msiof.c 		sh_msiof_write(p, RMDR2, dr2);
p                 404 drivers/spi/spi-sh-msiof.c static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
p                 406 drivers/spi/spi-sh-msiof.c 	sh_msiof_write(p, STR,
p                 407 drivers/spi/spi-sh-msiof.c 		       sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
p                 410 drivers/spi/spi-sh-msiof.c static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
p                 417 drivers/spi/spi-sh-msiof.c 		sh_msiof_write(p, TFDR, buf_8[k] << fs);
p                 420 drivers/spi/spi-sh-msiof.c static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
p                 427 drivers/spi/spi-sh-msiof.c 		sh_msiof_write(p, TFDR, buf_16[k] << fs);
p                 430 drivers/spi/spi-sh-msiof.c static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
p                 437 drivers/spi/spi-sh-msiof.c 		sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs);
p                 440 drivers/spi/spi-sh-msiof.c static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
p                 447 drivers/spi/spi-sh-msiof.c 		sh_msiof_write(p, TFDR, buf_32[k] << fs);
p                 450 drivers/spi/spi-sh-msiof.c static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
p                 457 drivers/spi/spi-sh-msiof.c 		sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs);
p                 460 drivers/spi/spi-sh-msiof.c static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
p                 467 drivers/spi/spi-sh-msiof.c 		sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs));
p                 470 drivers/spi/spi-sh-msiof.c static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
p                 477 drivers/spi/spi-sh-msiof.c 		sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs));
p                 480 drivers/spi/spi-sh-msiof.c static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
p                 487 drivers/spi/spi-sh-msiof.c 		buf_8[k] = sh_msiof_read(p, RFDR) >> fs;
p                 490 drivers/spi/spi-sh-msiof.c static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
p                 497 drivers/spi/spi-sh-msiof.c 		buf_16[k] = sh_msiof_read(p, RFDR) >> fs;
p                 500 drivers/spi/spi-sh-msiof.c static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
p                 507 drivers/spi/spi-sh-msiof.c 		put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]);
p                 510 drivers/spi/spi-sh-msiof.c static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
p                 517 drivers/spi/spi-sh-msiof.c 		buf_32[k] = sh_msiof_read(p, RFDR) >> fs;
p                 520 drivers/spi/spi-sh-msiof.c static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
p                 527 drivers/spi/spi-sh-msiof.c 		put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]);
p                 530 drivers/spi/spi-sh-msiof.c static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
p                 537 drivers/spi/spi-sh-msiof.c 		buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs);
p                 540 drivers/spi/spi-sh-msiof.c static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
p                 547 drivers/spi/spi-sh-msiof.c 		put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]);
p                 552 drivers/spi/spi-sh-msiof.c 	struct sh_msiof_spi_priv *p =
p                 556 drivers/spi/spi-sh-msiof.c 	if (spi->cs_gpiod || spi_controller_is_slave(p->ctlr))
p                 559 drivers/spi/spi-sh-msiof.c 	if (p->native_cs_inited &&
p                 560 drivers/spi/spi-sh-msiof.c 	    (p->native_cs_high == !!(spi->mode & SPI_CS_HIGH)))
p                 570 drivers/spi/spi-sh-msiof.c 	pm_runtime_get_sync(&p->pdev->dev);
p                 571 drivers/spi/spi-sh-msiof.c 	tmp = sh_msiof_read(p, TMDR1) & ~clr;
p                 572 drivers/spi/spi-sh-msiof.c 	sh_msiof_write(p, TMDR1, tmp | set | MDR1_TRMD | TMDR1_PCON);
p                 573 drivers/spi/spi-sh-msiof.c 	tmp = sh_msiof_read(p, RMDR1) & ~clr;
p                 574 drivers/spi/spi-sh-msiof.c 	sh_msiof_write(p, RMDR1, tmp | set);
p                 575 drivers/spi/spi-sh-msiof.c 	pm_runtime_put(&p->pdev->dev);
p                 576 drivers/spi/spi-sh-msiof.c 	p->native_cs_high = spi->mode & SPI_CS_HIGH;
p                 577 drivers/spi/spi-sh-msiof.c 	p->native_cs_inited = true;
p                 584 drivers/spi/spi-sh-msiof.c 	struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
p                 590 drivers/spi/spi-sh-msiof.c 		ss = p->unused_ss;
p                 591 drivers/spi/spi-sh-msiof.c 		cs_high = p->native_cs_high;
p                 596 drivers/spi/spi-sh-msiof.c 	sh_msiof_spi_set_pin_regs(p, ss, !!(spi->mode & SPI_CPOL),
p                 603 drivers/spi/spi-sh-msiof.c static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
p                 605 drivers/spi/spi-sh-msiof.c 	bool slave = spi_controller_is_slave(p->ctlr);
p                 610 drivers/spi/spi-sh-msiof.c 		ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
p                 612 drivers/spi/spi-sh-msiof.c 		ret = sh_msiof_modify_ctr_wait(p, 0, CTR_RXE);
p                 614 drivers/spi/spi-sh-msiof.c 		ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
p                 618 drivers/spi/spi-sh-msiof.c 		ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
p                 623 drivers/spi/spi-sh-msiof.c static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
p                 625 drivers/spi/spi-sh-msiof.c 	bool slave = spi_controller_is_slave(p->ctlr);
p                 630 drivers/spi/spi-sh-msiof.c 		ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
p                 632 drivers/spi/spi-sh-msiof.c 		ret = sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
p                 634 drivers/spi/spi-sh-msiof.c 		ret = sh_msiof_modify_ctr_wait(p, CTR_RXE, 0);
p                 636 drivers/spi/spi-sh-msiof.c 		ret = sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0);
p                 643 drivers/spi/spi-sh-msiof.c 	struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
p                 645 drivers/spi/spi-sh-msiof.c 	p->slave_aborted = true;
p                 646 drivers/spi/spi-sh-msiof.c 	complete(&p->done);
p                 647 drivers/spi/spi-sh-msiof.c 	complete(&p->done_txdma);
p                 651 drivers/spi/spi-sh-msiof.c static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p,
p                 654 drivers/spi/spi-sh-msiof.c 	if (spi_controller_is_slave(p->ctlr)) {
p                 656 drivers/spi/spi-sh-msiof.c 		    p->slave_aborted) {
p                 657 drivers/spi/spi-sh-msiof.c 			dev_dbg(&p->pdev->dev, "interrupted\n");
p                 662 drivers/spi/spi-sh-msiof.c 			dev_err(&p->pdev->dev, "timeout\n");
p                 670 drivers/spi/spi-sh-msiof.c static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
p                 683 drivers/spi/spi-sh-msiof.c 		words = min_t(int, words, p->tx_fifo_size);
p                 685 drivers/spi/spi-sh-msiof.c 		words = min_t(int, words, p->rx_fifo_size);
p                 691 drivers/spi/spi-sh-msiof.c 	sh_msiof_write(p, FCTR, 0);
p                 694 drivers/spi/spi-sh-msiof.c 	sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
p                 695 drivers/spi/spi-sh-msiof.c 	sh_msiof_write(p, IER, IER_TEOFE | IER_REOFE);
p                 699 drivers/spi/spi-sh-msiof.c 		tx_fifo(p, tx_buf, words, fifo_shift);
p                 701 drivers/spi/spi-sh-msiof.c 	reinit_completion(&p->done);
p                 702 drivers/spi/spi-sh-msiof.c 	p->slave_aborted = false;
p                 704 drivers/spi/spi-sh-msiof.c 	ret = sh_msiof_spi_start(p, rx_buf);
p                 706 drivers/spi/spi-sh-msiof.c 		dev_err(&p->pdev->dev, "failed to start hardware\n");
p                 711 drivers/spi/spi-sh-msiof.c 	ret = sh_msiof_wait_for_completion(p, &p->done);
p                 717 drivers/spi/spi-sh-msiof.c 		rx_fifo(p, rx_buf, words, fifo_shift);
p                 720 drivers/spi/spi-sh-msiof.c 	sh_msiof_reset_str(p);
p                 722 drivers/spi/spi-sh-msiof.c 	ret = sh_msiof_spi_stop(p, rx_buf);
p                 724 drivers/spi/spi-sh-msiof.c 		dev_err(&p->pdev->dev, "failed to shut down hardware\n");
p                 731 drivers/spi/spi-sh-msiof.c 	sh_msiof_reset_str(p);
p                 732 drivers/spi/spi-sh-msiof.c 	sh_msiof_spi_stop(p, rx_buf);
p                 734 drivers/spi/spi-sh-msiof.c 	sh_msiof_write(p, IER, 0);
p                 743 drivers/spi/spi-sh-msiof.c static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
p                 754 drivers/spi/spi-sh-msiof.c 		desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx,
p                 755 drivers/spi/spi-sh-msiof.c 					p->rx_dma_addr, len, DMA_DEV_TO_MEM,
p                 761 drivers/spi/spi-sh-msiof.c 		desc_rx->callback_param = &p->done;
p                 769 drivers/spi/spi-sh-msiof.c 		dma_sync_single_for_device(p->ctlr->dma_tx->device->dev,
p                 770 drivers/spi/spi-sh-msiof.c 					   p->tx_dma_addr, len, DMA_TO_DEVICE);
p                 771 drivers/spi/spi-sh-msiof.c 		desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx,
p                 772 drivers/spi/spi-sh-msiof.c 					p->tx_dma_addr, len, DMA_MEM_TO_DEV,
p                 780 drivers/spi/spi-sh-msiof.c 		desc_tx->callback_param = &p->done_txdma;
p                 789 drivers/spi/spi-sh-msiof.c 	sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1);
p                 792 drivers/spi/spi-sh-msiof.c 	sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);
p                 794 drivers/spi/spi-sh-msiof.c 	sh_msiof_write(p, IER, ier_bits);
p                 796 drivers/spi/spi-sh-msiof.c 	reinit_completion(&p->done);
p                 798 drivers/spi/spi-sh-msiof.c 		reinit_completion(&p->done_txdma);
p                 799 drivers/spi/spi-sh-msiof.c 	p->slave_aborted = false;
p                 803 drivers/spi/spi-sh-msiof.c 		dma_async_issue_pending(p->ctlr->dma_rx);
p                 805 drivers/spi/spi-sh-msiof.c 		dma_async_issue_pending(p->ctlr->dma_tx);
p                 807 drivers/spi/spi-sh-msiof.c 	ret = sh_msiof_spi_start(p, rx);
p                 809 drivers/spi/spi-sh-msiof.c 		dev_err(&p->pdev->dev, "failed to start hardware\n");
p                 815 drivers/spi/spi-sh-msiof.c 		ret = sh_msiof_wait_for_completion(p, &p->done_txdma);
p                 822 drivers/spi/spi-sh-msiof.c 		ret = sh_msiof_wait_for_completion(p, &p->done);
p                 826 drivers/spi/spi-sh-msiof.c 		sh_msiof_write(p, IER, 0);
p                 829 drivers/spi/spi-sh-msiof.c 		sh_msiof_write(p, IER, IER_TEOFE);
p                 830 drivers/spi/spi-sh-msiof.c 		ret = sh_msiof_wait_for_completion(p, &p->done);
p                 836 drivers/spi/spi-sh-msiof.c 	sh_msiof_reset_str(p);
p                 838 drivers/spi/spi-sh-msiof.c 	ret = sh_msiof_spi_stop(p, rx);
p                 840 drivers/spi/spi-sh-msiof.c 		dev_err(&p->pdev->dev, "failed to shut down hardware\n");
p                 845 drivers/spi/spi-sh-msiof.c 		dma_sync_single_for_cpu(p->ctlr->dma_rx->device->dev,
p                 846 drivers/spi/spi-sh-msiof.c 					p->rx_dma_addr, len, DMA_FROM_DEVICE);
p                 851 drivers/spi/spi-sh-msiof.c 	sh_msiof_reset_str(p);
p                 852 drivers/spi/spi-sh-msiof.c 	sh_msiof_spi_stop(p, rx);
p                 855 drivers/spi/spi-sh-msiof.c 		dmaengine_terminate_all(p->ctlr->dma_tx);
p                 858 drivers/spi/spi-sh-msiof.c 		dmaengine_terminate_all(p->ctlr->dma_rx);
p                 859 drivers/spi/spi-sh-msiof.c 	sh_msiof_write(p, IER, 0);
p                 910 drivers/spi/spi-sh-msiof.c 	struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
p                 925 drivers/spi/spi-sh-msiof.c 	sh_msiof_spi_reset_regs(p);
p                 928 drivers/spi/spi-sh-msiof.c 	if (!spi_controller_is_slave(p->ctlr))
p                 929 drivers/spi/spi-sh-msiof.c 		sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
p                 939 drivers/spi/spi-sh-msiof.c 			l = min(round_down(len, 4), p->tx_fifo_size * 4);
p                 941 drivers/spi/spi-sh-msiof.c 			l = min(round_down(len, 4), p->rx_fifo_size * 4);
p                 952 drivers/spi/spi-sh-msiof.c 			copy32(p->tx_dma_page, tx_buf, l / 4);
p                 954 drivers/spi/spi-sh-msiof.c 		ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l);
p                 956 drivers/spi/spi-sh-msiof.c 			dev_warn_once(&p->pdev->dev,
p                 964 drivers/spi/spi-sh-msiof.c 			copy32(rx_buf, p->rx_dma_page, l / 4);
p                1026 drivers/spi/spi-sh-msiof.c 		n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, tx_buf, rx_buf,
p                1127 drivers/spi/spi-sh-msiof.c static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
p                1129 drivers/spi/spi-sh-msiof.c 	struct device *dev = &p->pdev->dev;
p                1139 drivers/spi/spi-sh-msiof.c 	num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect);
p                1159 drivers/spi/spi-sh-msiof.c 	p->unused_ss = ffz(used_ss_mask);
p                1160 drivers/spi/spi-sh-msiof.c 	if (cs_gpios && p->unused_ss >= MAX_SS) {
p                1206 drivers/spi/spi-sh-msiof.c static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
p                1208 drivers/spi/spi-sh-msiof.c 	struct platform_device *pdev = p->pdev;
p                1210 drivers/spi/spi-sh-msiof.c 	const struct sh_msiof_spi_info *info = p->info;
p                1233 drivers/spi/spi-sh-msiof.c 	ctlr = p->ctlr;
p                1244 drivers/spi/spi-sh-msiof.c 	p->tx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
p                1245 drivers/spi/spi-sh-msiof.c 	if (!p->tx_dma_page)
p                1248 drivers/spi/spi-sh-msiof.c 	p->rx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
p                1249 drivers/spi/spi-sh-msiof.c 	if (!p->rx_dma_page)
p                1253 drivers/spi/spi-sh-msiof.c 	p->tx_dma_addr = dma_map_single(tx_dev, p->tx_dma_page, PAGE_SIZE,
p                1255 drivers/spi/spi-sh-msiof.c 	if (dma_mapping_error(tx_dev, p->tx_dma_addr))
p                1259 drivers/spi/spi-sh-msiof.c 	p->rx_dma_addr = dma_map_single(rx_dev, p->rx_dma_page, PAGE_SIZE,
p                1261 drivers/spi/spi-sh-msiof.c 	if (dma_mapping_error(rx_dev, p->rx_dma_addr))
p                1268 drivers/spi/spi-sh-msiof.c 	dma_unmap_single(tx_dev, p->tx_dma_addr, PAGE_SIZE, DMA_TO_DEVICE);
p                1270 drivers/spi/spi-sh-msiof.c 	free_page((unsigned long)p->rx_dma_page);
p                1272 drivers/spi/spi-sh-msiof.c 	free_page((unsigned long)p->tx_dma_page);
p                1281 drivers/spi/spi-sh-msiof.c static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
p                1283 drivers/spi/spi-sh-msiof.c 	struct spi_controller *ctlr = p->ctlr;
p                1288 drivers/spi/spi-sh-msiof.c 	dma_unmap_single(ctlr->dma_rx->device->dev, p->rx_dma_addr, PAGE_SIZE,
p                1290 drivers/spi/spi-sh-msiof.c 	dma_unmap_single(ctlr->dma_tx->device->dev, p->tx_dma_addr, PAGE_SIZE,
p                1292 drivers/spi/spi-sh-msiof.c 	free_page((unsigned long)p->rx_dma_page);
p                1293 drivers/spi/spi-sh-msiof.c 	free_page((unsigned long)p->tx_dma_page);
p                1303 drivers/spi/spi-sh-msiof.c 	struct sh_msiof_spi_priv *p;
p                1329 drivers/spi/spi-sh-msiof.c 	p = spi_controller_get_devdata(ctlr);
p                1331 drivers/spi/spi-sh-msiof.c 	platform_set_drvdata(pdev, p);
p                1332 drivers/spi/spi-sh-msiof.c 	p->ctlr = ctlr;
p                1333 drivers/spi/spi-sh-msiof.c 	p->info = info;
p                1334 drivers/spi/spi-sh-msiof.c 	p->min_div_pow = chipdata->min_div_pow;
p                1336 drivers/spi/spi-sh-msiof.c 	init_completion(&p->done);
p                1337 drivers/spi/spi-sh-msiof.c 	init_completion(&p->done_txdma);
p                1339 drivers/spi/spi-sh-msiof.c 	p->clk = devm_clk_get(&pdev->dev, NULL);
p                1340 drivers/spi/spi-sh-msiof.c 	if (IS_ERR(p->clk)) {
p                1342 drivers/spi/spi-sh-msiof.c 		ret = PTR_ERR(p->clk);
p                1352 drivers/spi/spi-sh-msiof.c 	p->mapbase = devm_platform_ioremap_resource(pdev, 0);
p                1353 drivers/spi/spi-sh-msiof.c 	if (IS_ERR(p->mapbase)) {
p                1354 drivers/spi/spi-sh-msiof.c 		ret = PTR_ERR(p->mapbase);
p                1359 drivers/spi/spi-sh-msiof.c 			       dev_name(&pdev->dev), p);
p                1365 drivers/spi/spi-sh-msiof.c 	p->pdev = pdev;
p                1369 drivers/spi/spi-sh-msiof.c 	p->tx_fifo_size = chipdata->tx_fifo_size;
p                1370 drivers/spi/spi-sh-msiof.c 	p->rx_fifo_size = chipdata->rx_fifo_size;
p                1371 drivers/spi/spi-sh-msiof.c 	if (p->info->tx_fifo_override)
p                1372 drivers/spi/spi-sh-msiof.c 		p->tx_fifo_size = p->info->tx_fifo_override;
p                1373 drivers/spi/spi-sh-msiof.c 	if (p->info->rx_fifo_override)
p                1374 drivers/spi/spi-sh-msiof.c 		p->rx_fifo_size = p->info->rx_fifo_override;
p                1377 drivers/spi/spi-sh-msiof.c 	ctlr->num_chipselect = p->info->num_chipselect;
p                1378 drivers/spi/spi-sh-msiof.c 	ret = sh_msiof_get_cs_gpios(p);
p                1396 drivers/spi/spi-sh-msiof.c 	ret = sh_msiof_request_dma(p);
p                1409 drivers/spi/spi-sh-msiof.c 	sh_msiof_release_dma(p);
p                1418 drivers/spi/spi-sh-msiof.c 	struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
p                1420 drivers/spi/spi-sh-msiof.c 	sh_msiof_release_dma(p);
p                1434 drivers/spi/spi-sh-msiof.c 	struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
p                1436 drivers/spi/spi-sh-msiof.c 	return spi_controller_suspend(p->ctlr);
p                1441 drivers/spi/spi-sh-msiof.c 	struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
p                1443 drivers/spi/spi-sh-msiof.c 	return spi_controller_resume(p->ctlr);
p                 295 drivers/spi/spi-sprd-adi.c 		u32 *p = (u32 *)t->tx_buf;
p                 302 drivers/spi/spi-sprd-adi.c 		phy_reg = *p++ + sadi->slave_pbase;
p                 307 drivers/spi/spi-sprd-adi.c 		val = *p;
p                 746 drivers/staging/android/ashmem.c 			    void __user *p)
p                 753 drivers/staging/android/ashmem.c 	if (copy_from_user(&pin, p, sizeof(pin)))
p                 115 drivers/staging/android/ion/ion_heap.c 	int p = 0;
p                 121 drivers/staging/android/ion/ion_heap.c 		pages[p++] = sg_page_iter_page(&piter);
p                 122 drivers/staging/android/ion/ion_heap.c 		if (p == ARRAY_SIZE(pages)) {
p                 123 drivers/staging/android/ion/ion_heap.c 			ret = ion_heap_clear_pages(pages, p, pgprot);
p                 126 drivers/staging/android/ion/ion_heap.c 			p = 0;
p                 129 drivers/staging/android/ion/ion_heap.c 	if (p)
p                 130 drivers/staging/android/ion/ion_heap.c 		ret = ion_heap_clear_pages(pages, p, pgprot);
p                   7 drivers/staging/comedi/drivers/jr3_pci.h static inline u16 get_u16(const u32 __iomem *p)
p                   9 drivers/staging/comedi/drivers/jr3_pci.h 	return (u16)readl(p);
p                  12 drivers/staging/comedi/drivers/jr3_pci.h static inline void set_u16(u32 __iomem *p, u16 val)
p                  14 drivers/staging/comedi/drivers/jr3_pci.h 	writel(val, p);
p                  17 drivers/staging/comedi/drivers/jr3_pci.h static inline s16 get_s16(const s32 __iomem *p)
p                  19 drivers/staging/comedi/drivers/jr3_pci.h 	return (s16)readl(p);
p                  22 drivers/staging/comedi/drivers/jr3_pci.h static inline void set_s16(s32 __iomem *p, s16 val)
p                  24 drivers/staging/comedi/drivers/jr3_pci.h 	writel(val, p);
p                 612 drivers/staging/comedi/drivers/s626.h #define S626_MAKE(x, w, p)	(((x) & ((1 << (w)) - 1)) << (p))
p                 613 drivers/staging/comedi/drivers/s626.h #define S626_UNMAKE(v, w, p)	(((v) >> (p)) & ((1 << (w)) - 1))
p                 589 drivers/staging/comedi/drivers/usbdux.c 	u8 p = (range <= 1);
p                 592 drivers/staging/comedi/drivers/usbdux.c 	return (chan << 4) | ((p == 1) << 2) | ((r == 1) << 3);
p                 803 drivers/staging/comedi/drivers/usbdux.c 	__le16 *p = (__le16 *)&devpriv->dux_commands[2];
p                 821 drivers/staging/comedi/drivers/usbdux.c 		*p = cpu_to_le16(val);
p                1089 drivers/staging/comedi/drivers/usbdux.c 	__le16 *p = (__le16 *)&devpriv->dux_commands[2];
p                1098 drivers/staging/comedi/drivers/usbdux.c 		*p = cpu_to_le16(data[i]);
p                 489 drivers/staging/emxx_udc/emxx_udc.c 	u8		*p;
p                 495 drivers/staging/emxx_udc/emxx_udc.c 			p = req->req.buf;
p                 496 drivers/staging/emxx_udc/emxx_udc.c 			p += (req->req.actual - count);
p                 497 drivers/staging/emxx_udc/emxx_udc.c 			memcpy(data, p, count);
p                 525 drivers/staging/emxx_udc/emxx_udc.c 		p = req->req.buf;
p                 526 drivers/staging/emxx_udc/emxx_udc.c 		p += (req->req.actual - count);
p                 527 drivers/staging/emxx_udc/emxx_udc.c 		memcpy(p, data, count);
p                2133 drivers/staging/emxx_udc/emxx_udc.c 	struct fc_regs __iomem *p = udc->p_regs;
p                2140 drivers/staging/emxx_udc/emxx_udc.c 		_nbu2ss_bitset(&p->EP0_CONTROL, EP0_BCLR);
p                2145 drivers/staging/emxx_udc/emxx_udc.c 		_nbu2ss_bitset(&p->EP_REGS[ep->epnum - 1].EP_CONTROL, EPN_BCLR);
p                 127 drivers/staging/exfat/exfat_nls.c 	u16 *p, *last_period;
p                 152 drivers/staging/exfat/exfat_nls.c 	for (p = uniname; *p; p++) {
p                 153 drivers/staging/exfat/exfat_nls.c 		if (*p == (u16)'.')
p                 154 drivers/staging/exfat/exfat_nls.c 			last_period = p;
p                3670 drivers/staging/exfat/exfat_super.c 	char *p;
p                3691 drivers/staging/exfat/exfat_super.c 	while ((p = strsep(&options, ","))) {
p                3694 drivers/staging/exfat/exfat_super.c 		if (!*p)
p                3697 drivers/staging/exfat/exfat_super.c 		token = match_token(p, exfat_tokens, args);
p                3764 drivers/staging/exfat/exfat_super.c 				       p);
p                 280 drivers/staging/fbtft/fb_agm1264k-fl.c 				signed short p = *write_pos + error * coeff;
p                 282 drivers/staging/fbtft/fb_agm1264k-fl.c 				if (p > WHITE)
p                 283 drivers/staging/fbtft/fb_agm1264k-fl.c 					p = WHITE;
p                 284 drivers/staging/fbtft/fb_agm1264k-fl.c 				if (p < BLACK)
p                 285 drivers/staging/fbtft/fb_agm1264k-fl.c 					p = BLACK;
p                 286 drivers/staging/fbtft/fb_agm1264k-fl.c 				*write_pos = p;
p                 911 drivers/staging/fbtft/fbtft-core.c 	const __be32 *p;
p                 919 drivers/staging/fbtft/fbtft-core.c 	p = of_prop_next_u32(prop, NULL, &val);
p                 920 drivers/staging/fbtft/fbtft-core.c 	if (!p)
p                 927 drivers/staging/fbtft/fbtft-core.c 	while (p) {
p                 931 drivers/staging/fbtft/fbtft-core.c 			while (p && !(val & 0xFFFF0000)) {
p                 939 drivers/staging/fbtft/fbtft-core.c 				p = of_prop_next_u32(prop, p, &val);
p                 969 drivers/staging/fbtft/fbtft-core.c 			p = of_prop_next_u32(prop, p, &val);
p                  96 drivers/staging/fwserial/fwserial.c #define fwtty_err(p, fmt, ...)						\
p                  97 drivers/staging/fwserial/fwserial.c 	dev_err(to_device(p, device), fmt, ##__VA_ARGS__)
p                  98 drivers/staging/fwserial/fwserial.c #define fwtty_info(p, fmt, ...)						\
p                  99 drivers/staging/fwserial/fwserial.c 	dev_info(to_device(p, device), fmt, ##__VA_ARGS__)
p                 100 drivers/staging/fwserial/fwserial.c #define fwtty_notice(p, fmt, ...)					\
p                 101 drivers/staging/fwserial/fwserial.c 	dev_notice(to_device(p, device), fmt, ##__VA_ARGS__)
p                 102 drivers/staging/fwserial/fwserial.c #define fwtty_dbg(p, fmt, ...)						\
p                 103 drivers/staging/fwserial/fwserial.c 	dev_dbg(to_device(p, device), "%s: " fmt, __func__, ##__VA_ARGS__)
p                 104 drivers/staging/fwserial/fwserial.c #define fwtty_err_ratelimited(p, fmt, ...)				\
p                 105 drivers/staging/fwserial/fwserial.c 	dev_err_ratelimited(to_device(p, device), fmt, ##__VA_ARGS__)
p                  34 drivers/staging/greybus/gbphy.h #define GBPHY_PROTOCOL(p)		\
p                  35 drivers/staging/greybus/gbphy.h 	.protocol_id	= (p),
p                 711 drivers/staging/isdn/avm/b1.c 	avmcard_dmainfo *p;
p                 714 drivers/staging/isdn/avm/b1.c 	p = kzalloc(sizeof(avmcard_dmainfo), GFP_KERNEL);
p                 715 drivers/staging/isdn/avm/b1.c 	if (!p) {
p                 720 drivers/staging/isdn/avm/b1.c 	p->recvbuf.size = rsize;
p                 721 drivers/staging/isdn/avm/b1.c 	buf = pci_alloc_consistent(pdev, rsize, &p->recvbuf.dmaaddr);
p                 726 drivers/staging/isdn/avm/b1.c 	p->recvbuf.dmabuf = buf;
p                 728 drivers/staging/isdn/avm/b1.c 	p->sendbuf.size = ssize;
p                 729 drivers/staging/isdn/avm/b1.c 	buf = pci_alloc_consistent(pdev, ssize, &p->sendbuf.dmaaddr);
p                 735 drivers/staging/isdn/avm/b1.c 	p->sendbuf.dmabuf = buf;
p                 736 drivers/staging/isdn/avm/b1.c 	skb_queue_head_init(&p->send_queue);
p                 738 drivers/staging/isdn/avm/b1.c 	return p;
p                 741 drivers/staging/isdn/avm/b1.c 	pci_free_consistent(p->pcidev, p->recvbuf.size,
p                 742 drivers/staging/isdn/avm/b1.c 			    p->recvbuf.dmabuf, p->recvbuf.dmaaddr);
p                 744 drivers/staging/isdn/avm/b1.c 	kfree(p);
p                 749 drivers/staging/isdn/avm/b1.c void avmcard_dma_free(avmcard_dmainfo *p)
p                 751 drivers/staging/isdn/avm/b1.c 	pci_free_consistent(p->pcidev, p->recvbuf.size,
p                 752 drivers/staging/isdn/avm/b1.c 			    p->recvbuf.dmabuf, p->recvbuf.dmaaddr);
p                 753 drivers/staging/isdn/avm/b1.c 	pci_free_consistent(p->pcidev, p->sendbuf.size,
p                 754 drivers/staging/isdn/avm/b1.c 			    p->sendbuf.dmabuf, p->sendbuf.dmaaddr);
p                 755 drivers/staging/isdn/avm/b1.c 	skb_queue_purge(&p->send_queue);
p                 756 drivers/staging/isdn/avm/b1.c 	kfree(p);
p                 784 drivers/staging/isdn/avm/b1.c 	char *p;
p                 787 drivers/staging/isdn/avm/b1.c 	if ((p = strchr(revision, ':')) != NULL && p[1]) {
p                 788 drivers/staging/isdn/avm/b1.c 		strlcpy(rev, p + 2, 32);
p                 789 drivers/staging/isdn/avm/b1.c 		if ((p = strchr(rev, '$')) != NULL && p > rev)
p                 790 drivers/staging/isdn/avm/b1.c 			*(p - 1) = 0;
p                 375 drivers/staging/isdn/avm/b1dma.c 	void *p;
p                 385 drivers/staging/isdn/avm/b1dma.c 		p = dma->sendbuf.dmabuf;
p                 389 drivers/staging/isdn/avm/b1dma.c 			_put_byte(&p, SEND_DATA_B3_REQ);
p                 390 drivers/staging/isdn/avm/b1dma.c 			_put_slice(&p, skb->data, len);
p                 391 drivers/staging/isdn/avm/b1dma.c 			_put_slice(&p, skb->data + len, dlen);
p                 393 drivers/staging/isdn/avm/b1dma.c 			_put_byte(&p, SEND_MESSAGE);
p                 394 drivers/staging/isdn/avm/b1dma.c 			_put_slice(&p, skb->data, len);
p                 396 drivers/staging/isdn/avm/b1dma.c 		txlen = (u8 *)p - (u8 *)dma->sendbuf.dmabuf;
p                 428 drivers/staging/isdn/avm/b1dma.c 	void *p;
p                 436 drivers/staging/isdn/avm/b1dma.c 	p = skb->data;
p                 437 drivers/staging/isdn/avm/b1dma.c 	_put_byte(&p, 0);
p                 438 drivers/staging/isdn/avm/b1dma.c 	_put_byte(&p, 0);
p                 439 drivers/staging/isdn/avm/b1dma.c 	_put_byte(&p, SEND_POLLACK);
p                 440 drivers/staging/isdn/avm/b1dma.c 	skb_put(skb, (u8 *)p - (u8 *)skb->data);
p                 453 drivers/staging/isdn/avm/b1dma.c 	void *p = dma->recvbuf.dmabuf + 4;
p                 455 drivers/staging/isdn/avm/b1dma.c 	u8 b1cmd =  _get_byte(&p);
p                 464 drivers/staging/isdn/avm/b1dma.c 		ApplId = (unsigned) _get_word(&p);
p                 465 drivers/staging/isdn/avm/b1dma.c 		MsgLen = _get_slice(&p, card->msgbuf);
p                 466 drivers/staging/isdn/avm/b1dma.c 		DataB3Len = _get_slice(&p, card->databuf);
p                 485 drivers/staging/isdn/avm/b1dma.c 		ApplId = (unsigned) _get_word(&p);
p                 486 drivers/staging/isdn/avm/b1dma.c 		MsgLen = _get_slice(&p, card->msgbuf);
p                 505 drivers/staging/isdn/avm/b1dma.c 		ApplId = _get_word(&p);
p                 506 drivers/staging/isdn/avm/b1dma.c 		NCCI = _get_word(&p);
p                 507 drivers/staging/isdn/avm/b1dma.c 		WindowSize = _get_word(&p);
p                 515 drivers/staging/isdn/avm/b1dma.c 		ApplId = _get_word(&p);
p                 516 drivers/staging/isdn/avm/b1dma.c 		NCCI = _get_word(&p);
p                 540 drivers/staging/isdn/avm/b1dma.c 		cinfo->versionlen = _get_slice(&p, cinfo->versionbuf);
p                 550 drivers/staging/isdn/avm/b1dma.c 		ApplId = (unsigned) _get_word(&p);
p                 551 drivers/staging/isdn/avm/b1dma.c 		MsgLen = _get_slice(&p, card->msgbuf);
p                 564 drivers/staging/isdn/avm/b1dma.c 		MsgLen = _get_slice(&p, card->msgbuf);
p                 684 drivers/staging/isdn/avm/b1dma.c 	void *p;
p                 692 drivers/staging/isdn/avm/b1dma.c 	p = skb->data;
p                 693 drivers/staging/isdn/avm/b1dma.c 	_put_byte(&p, 0);
p                 694 drivers/staging/isdn/avm/b1dma.c 	_put_byte(&p, 0);
p                 695 drivers/staging/isdn/avm/b1dma.c 	_put_byte(&p, SEND_INIT);
p                 696 drivers/staging/isdn/avm/b1dma.c 	_put_word(&p, CAPI_MAXAPPL);
p                 697 drivers/staging/isdn/avm/b1dma.c 	_put_word(&p, AVM_NCCI_PER_CHANNEL * 30);
p                 698 drivers/staging/isdn/avm/b1dma.c 	_put_word(&p, card->cardnr - 1);
p                 699 drivers/staging/isdn/avm/b1dma.c 	skb_put(skb, (u8 *)p - (u8 *)skb->data);
p                 779 drivers/staging/isdn/avm/b1dma.c 	void *p;
p                 791 drivers/staging/isdn/avm/b1dma.c 	p = skb->data;
p                 792 drivers/staging/isdn/avm/b1dma.c 	_put_byte(&p, 0);
p                 793 drivers/staging/isdn/avm/b1dma.c 	_put_byte(&p, 0);
p                 794 drivers/staging/isdn/avm/b1dma.c 	_put_byte(&p, SEND_REGISTER);
p                 795 drivers/staging/isdn/avm/b1dma.c 	_put_word(&p, appl);
p                 796 drivers/staging/isdn/avm/b1dma.c 	_put_word(&p, 1024 * (nconn + 1));
p                 797 drivers/staging/isdn/avm/b1dma.c 	_put_word(&p, nconn);
p                 798 drivers/staging/isdn/avm/b1dma.c 	_put_word(&p, rp->datablkcnt);
p                 799 drivers/staging/isdn/avm/b1dma.c 	_put_word(&p, rp->datablklen);
p                 800 drivers/staging/isdn/avm/b1dma.c 	skb_put(skb, (u8 *)p - (u8 *)skb->data);
p                 812 drivers/staging/isdn/avm/b1dma.c 	void *p;
p                 825 drivers/staging/isdn/avm/b1dma.c 	p = skb->data;
p                 826 drivers/staging/isdn/avm/b1dma.c 	_put_byte(&p, 0);
p                 827 drivers/staging/isdn/avm/b1dma.c 	_put_byte(&p, 0);
p                 828 drivers/staging/isdn/avm/b1dma.c 	_put_byte(&p, SEND_RELEASE);
p                 829 drivers/staging/isdn/avm/b1dma.c 	_put_word(&p, appl);
p                 831 drivers/staging/isdn/avm/b1dma.c 	skb_put(skb, (u8 *)p - (u8 *)skb->data);
p                 961 drivers/staging/isdn/avm/b1dma.c 	char *p;
p                 964 drivers/staging/isdn/avm/b1dma.c 	if ((p = strchr(revision, ':')) != NULL && p[1]) {
p                 965 drivers/staging/isdn/avm/b1dma.c 		strlcpy(rev, p + 2, sizeof(rev));
p                 966 drivers/staging/isdn/avm/b1dma.c 		if ((p = strchr(rev, '$')) != NULL && p > rev)
p                 967 drivers/staging/isdn/avm/b1dma.c 			*(p - 1) = 0;
p                 202 drivers/staging/isdn/avm/b1isa.c 	char *p;
p                 206 drivers/staging/isdn/avm/b1isa.c 	if ((p = strchr(revision, ':')) != NULL && p[1]) {
p                 207 drivers/staging/isdn/avm/b1isa.c 		strlcpy(rev, p + 2, 32);
p                 208 drivers/staging/isdn/avm/b1isa.c 		if ((p = strchr(rev, '$')) != NULL && p > rev)
p                 209 drivers/staging/isdn/avm/b1isa.c 			*(p - 1) = 0;
p                  64 drivers/staging/isdn/avm/b1pci.c static int b1pci_probe(struct capicardparams *p, struct pci_dev *pdev)
p                  78 drivers/staging/isdn/avm/b1pci.c 	sprintf(card->name, "b1pci-%x", p->port);
p                  79 drivers/staging/isdn/avm/b1pci.c 	card->port = p->port;
p                  80 drivers/staging/isdn/avm/b1pci.c 	card->irq = p->irq;
p                 183 drivers/staging/isdn/avm/b1pci.c static int b1pciv4_probe(struct capicardparams *p, struct pci_dev *pdev)
p                 204 drivers/staging/isdn/avm/b1pci.c 	sprintf(card->name, "b1pciv4-%x", p->port);
p                 205 drivers/staging/isdn/avm/b1pci.c 	card->port = p->port;
p                 206 drivers/staging/isdn/avm/b1pci.c 	card->irq = p->irq;
p                 207 drivers/staging/isdn/avm/b1pci.c 	card->membase = p->membase;
p                 381 drivers/staging/isdn/avm/b1pci.c 	char *p;
p                 385 drivers/staging/isdn/avm/b1pci.c 	if ((p = strchr(revision, ':')) != NULL && p[1]) {
p                 386 drivers/staging/isdn/avm/b1pci.c 		strlcpy(rev, p + 2, 32);
p                 387 drivers/staging/isdn/avm/b1pci.c 		if ((p = strchr(rev, '$')) != NULL && p > rev)
p                 388 drivers/staging/isdn/avm/b1pci.c 			*(p - 1) = 0;
p                 201 drivers/staging/isdn/avm/b1pcmcia.c 	char *p;
p                 204 drivers/staging/isdn/avm/b1pcmcia.c 	if ((p = strchr(revision, ':')) != NULL && p[1]) {
p                 205 drivers/staging/isdn/avm/b1pcmcia.c 		strlcpy(rev, p + 2, 32);
p                 206 drivers/staging/isdn/avm/b1pcmcia.c 		if ((p = strchr(rev, '$')) != NULL && p > rev)
p                 207 drivers/staging/isdn/avm/b1pcmcia.c 			*(p - 1) = 0;
p                 417 drivers/staging/isdn/avm/c4.c 	void *p;
p                 438 drivers/staging/isdn/avm/c4.c 		p = dma->sendbuf.dmabuf;
p                 442 drivers/staging/isdn/avm/c4.c 			_put_byte(&p, SEND_DATA_B3_REQ);
p                 443 drivers/staging/isdn/avm/c4.c 			_put_slice(&p, skb->data, len);
p                 444 drivers/staging/isdn/avm/c4.c 			_put_slice(&p, skb->data + len, dlen);
p                 446 drivers/staging/isdn/avm/c4.c 			_put_byte(&p, SEND_MESSAGE);
p                 447 drivers/staging/isdn/avm/c4.c 			_put_slice(&p, skb->data, len);
p                 449 drivers/staging/isdn/avm/c4.c 		txlen = (u8 *)p - (u8 *)dma->sendbuf.dmabuf;
p                 483 drivers/staging/isdn/avm/c4.c 	void *p;
p                 491 drivers/staging/isdn/avm/c4.c 	p = skb->data;
p                 492 drivers/staging/isdn/avm/c4.c 	_put_byte(&p, 0);
p                 493 drivers/staging/isdn/avm/c4.c 	_put_byte(&p, 0);
p                 494 drivers/staging/isdn/avm/c4.c 	_put_byte(&p, SEND_POLLACK);
p                 495 drivers/staging/isdn/avm/c4.c 	skb_put(skb, (u8 *)p - (u8 *)skb->data);
p                 509 drivers/staging/isdn/avm/c4.c 	void *p = dma->recvbuf.dmabuf;
p                 511 drivers/staging/isdn/avm/c4.c 	u8 b1cmd =  _get_byte(&p);
p                 523 drivers/staging/isdn/avm/c4.c 		ApplId = (unsigned) _get_word(&p);
p                 524 drivers/staging/isdn/avm/c4.c 		MsgLen = _get_slice(&p, card->msgbuf);
p                 525 drivers/staging/isdn/avm/c4.c 		DataB3Len = _get_slice(&p, card->databuf);
p                 547 drivers/staging/isdn/avm/c4.c 		ApplId = (unsigned) _get_word(&p);
p                 548 drivers/staging/isdn/avm/c4.c 		MsgLen = _get_slice(&p, card->msgbuf);
p                 570 drivers/staging/isdn/avm/c4.c 		ApplId = _get_word(&p);
p                 571 drivers/staging/isdn/avm/c4.c 		NCCI = _get_word(&p);
p                 572 drivers/staging/isdn/avm/c4.c 		WindowSize = _get_word(&p);
p                 582 drivers/staging/isdn/avm/c4.c 		ApplId = _get_word(&p);
p                 583 drivers/staging/isdn/avm/c4.c 		NCCI = _get_word(&p);
p                 622 drivers/staging/isdn/avm/c4.c 		cinfo->versionlen = _get_slice(&p, cinfo->versionbuf);
p                 632 drivers/staging/isdn/avm/c4.c 		ApplId = (unsigned) _get_word(&p);
p                 633 drivers/staging/isdn/avm/c4.c 		MsgLen = _get_slice(&p, card->msgbuf);
p                 646 drivers/staging/isdn/avm/c4.c 		MsgLen = _get_slice(&p, card->msgbuf);
p                 734 drivers/staging/isdn/avm/c4.c 	void *p;
p                 743 drivers/staging/isdn/avm/c4.c 	p = skb->data;
p                 744 drivers/staging/isdn/avm/c4.c 	_put_byte(&p, 0);
p                 745 drivers/staging/isdn/avm/c4.c 	_put_byte(&p, 0);
p                 746 drivers/staging/isdn/avm/c4.c 	_put_byte(&p, SEND_INIT);
p                 747 drivers/staging/isdn/avm/c4.c 	_put_word(&p, CAPI_MAXAPPL);
p                 748 drivers/staging/isdn/avm/c4.c 	_put_word(&p, AVM_NCCI_PER_CHANNEL * 30);
p                 749 drivers/staging/isdn/avm/c4.c 	_put_word(&p, card->cardnr - 1);
p                 750 drivers/staging/isdn/avm/c4.c 	skb_put(skb, (u8 *)p - (u8 *)skb->data);
p                 762 drivers/staging/isdn/avm/c4.c 	void *p;
p                 770 drivers/staging/isdn/avm/c4.c 	p = skb->data;
p                 771 drivers/staging/isdn/avm/c4.c 	_put_byte(&p, 0);
p                 772 drivers/staging/isdn/avm/c4.c 	_put_byte(&p, 0);
p                 773 drivers/staging/isdn/avm/c4.c 	_put_byte(&p, SEND_CONFIG);
p                 774 drivers/staging/isdn/avm/c4.c 	_put_word(&p, val);
p                 775 drivers/staging/isdn/avm/c4.c 	skb_put(skb, (u8 *)p - (u8 *)skb->data);
p                 788 drivers/staging/isdn/avm/c4.c 	void *p;
p                 796 drivers/staging/isdn/avm/c4.c 	p = skb->data;
p                 797 drivers/staging/isdn/avm/c4.c 	_put_byte(&p, 0);
p                 798 drivers/staging/isdn/avm/c4.c 	_put_byte(&p, 0);
p                 799 drivers/staging/isdn/avm/c4.c 	_put_byte(&p, SEND_CONFIG);
p                 800 drivers/staging/isdn/avm/c4.c 	_put_byte(&p, cval[0]);
p                 801 drivers/staging/isdn/avm/c4.c 	_put_byte(&p, cval[1]);
p                 802 drivers/staging/isdn/avm/c4.c 	_put_byte(&p, cval[2]);
p                 803 drivers/staging/isdn/avm/c4.c 	_put_byte(&p, cval[3]);
p                 804 drivers/staging/isdn/avm/c4.c 	skb_put(skb, (u8 *)p - (u8 *)skb->data);
p                 957 drivers/staging/isdn/avm/c4.c 	void *p;
p                 971 drivers/staging/isdn/avm/c4.c 		p = skb->data;
p                 972 drivers/staging/isdn/avm/c4.c 		_put_byte(&p, 0);
p                 973 drivers/staging/isdn/avm/c4.c 		_put_byte(&p, 0);
p                 974 drivers/staging/isdn/avm/c4.c 		_put_byte(&p, SEND_REGISTER);
p                 975 drivers/staging/isdn/avm/c4.c 		_put_word(&p, appl);
p                 976 drivers/staging/isdn/avm/c4.c 		_put_word(&p, 1024 * (nconn + 1));
p                 977 drivers/staging/isdn/avm/c4.c 		_put_word(&p, nconn);
p                 978 drivers/staging/isdn/avm/c4.c 		_put_word(&p, rp->datablkcnt);
p                 979 drivers/staging/isdn/avm/c4.c 		_put_word(&p, rp->datablklen);
p                 980 drivers/staging/isdn/avm/c4.c 		skb_put(skb, (u8 *)p - (u8 *)skb->data);
p                 998 drivers/staging/isdn/avm/c4.c 	void *p;
p                1011 drivers/staging/isdn/avm/c4.c 		p = skb->data;
p                1012 drivers/staging/isdn/avm/c4.c 		_put_byte(&p, 0);
p                1013 drivers/staging/isdn/avm/c4.c 		_put_byte(&p, 0);
p                1014 drivers/staging/isdn/avm/c4.c 		_put_byte(&p, SEND_RELEASE);
p                1015 drivers/staging/isdn/avm/c4.c 		_put_word(&p, appl);
p                1017 drivers/staging/isdn/avm/c4.c 		skb_put(skb, (u8 *)p - (u8 *)skb->data);
p                1132 drivers/staging/isdn/avm/c4.c static int c4_add_card(struct capicardparams *p, struct pci_dev *dev,
p                1153 drivers/staging/isdn/avm/c4.c 	sprintf(card->name, "c%d-%x", nr_controllers, p->port);
p                1154 drivers/staging/isdn/avm/c4.c 	card->port = p->port;
p                1155 drivers/staging/isdn/avm/c4.c 	card->irq = p->irq;
p                1156 drivers/staging/isdn/avm/c4.c 	card->membase = p->membase;
p                1287 drivers/staging/isdn/avm/c4.c 	char *p;
p                1291 drivers/staging/isdn/avm/c4.c 	if ((p = strchr(revision, ':')) != NULL && p[1]) {
p                1292 drivers/staging/isdn/avm/c4.c 		strlcpy(rev, p + 2, 32);
p                1293 drivers/staging/isdn/avm/c4.c 		if ((p = strchr(rev, '$')) != NULL && p > rev)
p                1294 drivers/staging/isdn/avm/c4.c 			*(p - 1) = 0;
p                 552 drivers/staging/isdn/avm/t1isa.c 	char *p;
p                 555 drivers/staging/isdn/avm/t1isa.c 	if ((p = strchr(revision, ':')) != NULL && p[1]) {
p                 556 drivers/staging/isdn/avm/t1isa.c 		strlcpy(rev, p + 2, 32);
p                 557 drivers/staging/isdn/avm/t1isa.c 		if ((p = strchr(rev, '$')) != NULL && p > rev)
p                 558 drivers/staging/isdn/avm/t1isa.c 			*(p - 1) = 0;
p                  49 drivers/staging/isdn/avm/t1pci.c static int t1pci_add_card(struct capicardparams *p, struct pci_dev *pdev)
p                  70 drivers/staging/isdn/avm/t1pci.c 	sprintf(card->name, "t1pci-%x", p->port);
p                  71 drivers/staging/isdn/avm/t1pci.c 	card->port = p->port;
p                  72 drivers/staging/isdn/avm/t1pci.c 	card->irq = p->irq;
p                  73 drivers/staging/isdn/avm/t1pci.c 	card->membase = p->membase;
p                 232 drivers/staging/isdn/avm/t1pci.c 	char *p;
p                 236 drivers/staging/isdn/avm/t1pci.c 	if ((p = strchr(revision, ':')) != NULL && p[1]) {
p                 237 drivers/staging/isdn/avm/t1pci.c 		strlcpy(rev, p + 2, 32);
p                 238 drivers/staging/isdn/avm/t1pci.c 		if ((p = strchr(rev, '$')) != NULL && p > rev)
p                 239 drivers/staging/isdn/avm/t1pci.c 			*(p - 1) = 0;
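
The identical block at c4.c:1287, t1isa.c:552 and t1pci.c:232 extracts a printable version from a CVS "$Revision: x.y $" keyword: it copies the text two characters past the ':' and terminates the copy one character before the trailing '$'. A minimal sketch of that idiom, with a hypothetical function name and an assumed fallback string:

static void extract_revision(const char *revision, char rev[32])
{
	char *p;

	strlcpy(rev, "1.0", 32);		/* assumed default when no keyword */
	p = strchr(revision, ':');
	if (p && p[1]) {
		strlcpy(rev, p + 2, 32);	/* text after ": " */
		p = strchr(rev, '$');
		if (p && p > rev)
			*(p - 1) = 0;		/* chop the trailing " $" */
	}
}
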
p                 205 drivers/staging/isdn/gigaset/capi.c static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p)
p                 217 drivers/staging/isdn/gigaset/capi.c 	cdb = capi_cmsg2str(p);
p                 219 drivers/staging/isdn/gigaset/capi.c 		gig_dbg(level, "%s: [%d] %s", tag, p->ApplId, cdb->buf);
p                 222 drivers/staging/isdn/gigaset/capi.c 		gig_dbg(level, "%s: [%d] %s", tag, p->ApplId,
p                 223 drivers/staging/isdn/gigaset/capi.c 			capi_cmd2str(p->Command, p->Subcommand));
p                 609 drivers/staging/isdn/gigaset/ser-gigaset.c 	int __user *p = (int __user *)arg;
p                 619 drivers/staging/isdn/gigaset/ser-gigaset.c 		rc = put_user(val, p);
p                 341 drivers/staging/isdn/hysdn/hysdn_boot.c 	u_char *p;
p                 351 drivers/staging/isdn/hysdn/hysdn_boot.c 	for (p = cp, crc = 0; p < (cp + len - 2); p++)
p                 353 drivers/staging/isdn/hysdn/hysdn_boot.c 			crc = (((u_char) (crc << 1)) + 1) + *p;
p                 355 drivers/staging/isdn/hysdn/hysdn_boot.c 			crc = ((u_char) (crc << 1)) + *p;
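
The hysdn_boot.c entries above (source lines 351..355) are a byte-wise checksum over all but the last two bytes of a block. The selecting condition itself is not shown in the index; the sketch below assumes it tests the high bit of the running value, turning the left shift into a rotate-left before each byte is added. Function name and that condition are assumptions.

static unsigned char pof_checksum(const unsigned char *cp, int len)
{
	const unsigned char *p;
	unsigned char crc = 0;

	for (p = cp; p < cp + len - 2; p++)
		if (crc & 0x80)				/* assumed condition */
			crc = (unsigned char)(crc << 1) + 1 + *p;
		else
			crc = (unsigned char)(crc << 1) + *p;

	return crc;
}
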
p                  48 drivers/staging/kpc2000/kpc2000_i2c.c #define SMBHSTSTS(p)    ((0  * REG_SIZE) + (p)->smba)
p                  49 drivers/staging/kpc2000/kpc2000_i2c.c #define SMBHSTCNT(p)    ((2  * REG_SIZE) + (p)->smba)
p                  50 drivers/staging/kpc2000/kpc2000_i2c.c #define SMBHSTCMD(p)    ((3  * REG_SIZE) + (p)->smba)
p                  51 drivers/staging/kpc2000/kpc2000_i2c.c #define SMBHSTADD(p)    ((4  * REG_SIZE) + (p)->smba)
p                  52 drivers/staging/kpc2000/kpc2000_i2c.c #define SMBHSTDAT0(p)   ((5  * REG_SIZE) + (p)->smba)
p                  53 drivers/staging/kpc2000/kpc2000_i2c.c #define SMBHSTDAT1(p)   ((6  * REG_SIZE) + (p)->smba)
p                  54 drivers/staging/kpc2000/kpc2000_i2c.c #define SMBBLKDAT(p)    ((7  * REG_SIZE) + (p)->smba)
p                  55 drivers/staging/kpc2000/kpc2000_i2c.c #define SMBPEC(p)       ((8  * REG_SIZE) + (p)->smba)   /* ICH3 and later */
p                  56 drivers/staging/kpc2000/kpc2000_i2c.c #define SMBAUXSTS(p)    ((12 * REG_SIZE) + (p)->smba)   /* ICH4 and later */
p                  57 drivers/staging/kpc2000/kpc2000_i2c.c #define SMBAUXCTL(p)    ((13 * REG_SIZE) + (p)->smba)   /* ICH4 and later */
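
Each SMB*() macro above computes a register address as the adapter base (->smba) plus a fixed register index scaled by REG_SIZE, so SMBHSTCNT, for example, sits two register slots past the base. Minimal sketch of the same base-plus-stride mapping; the struct layout, names and stride value here are assumptions, not the driver's definitions.

#define SKETCH_REG_SIZE 4	/* assumed stride; the driver defines REG_SIZE itself */

struct i2c_base_sketch {
	unsigned long smba;	/* base address of the SMBus register block */
};

/* address of register slot 'idx', mirroring the SMBxxx(p) macros above */
static unsigned long smb_reg_sketch(const struct i2c_base_sketch *p, int idx)
{
	return p->smba + idx * SKETCH_REG_SIZE;
}
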
p                  47 drivers/staging/kpc2000/kpc_dma/fileops.c 	unsigned int p;
p                 127 drivers/staging/kpc2000/kpc_dma/fileops.c 		for (p = 0 ; p < pcnt ; p++) {
p                 131 drivers/staging/kpc2000/kpc_dma/fileops.c 			if (p != pcnt-1) {
p                 134 drivers/staging/kpc2000/kpc_dma/fileops.c 				desc->DescByteCount = sg_dma_len(sg) - (p * 0x80000);
p                 139 drivers/staging/kpc2000/kpc_dma/fileops.c 			if (i == 0 && p == 0)
p                 141 drivers/staging/kpc2000/kpc_dma/fileops.c 			if (i == acd->mapped_entry_count-1 && p == pcnt-1)
p                 148 drivers/staging/kpc2000/kpc_dma/fileops.c 			dma_addr  = sg_dma_address(sg) + (p * 0x80000);
p                 153 drivers/staging/kpc2000/kpc_dma/fileops.c 			if (i == acd->mapped_entry_count-1 && p == pcnt-1) {
p                 159 drivers/staging/kpc2000/kpc_dma/fileops.c 			if (i == acd->mapped_entry_count-1 && p == pcnt-1)
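
The kpc_dma fileops loop above splits every scatter-gather entry into 0x80000-byte (512 KiB) descriptor chunks: every chunk but the last carries the full chunk size, the last carries the remainder, and chunk p starts at the entry's DMA address plus p * 0x80000. Sketch of that arithmetic only; program_desc() is a placeholder standing in for the driver's descriptor setup.

#define KPC_CHUNK_SZ 0x80000UL

static void fill_chunks_sketch(unsigned long sg_addr, unsigned long sg_len,
			       void (*program_desc)(unsigned long addr,
						    unsigned long bytes))
{
	unsigned long pcnt = DIV_ROUND_UP(sg_len, KPC_CHUNK_SZ);
	unsigned long p;

	for (p = 0; p < pcnt; p++) {
		unsigned long addr  = sg_addr + p * KPC_CHUNK_SZ;
		unsigned long bytes = (p != pcnt - 1) ?
				      KPC_CHUNK_SZ : sg_len - p * KPC_CHUNK_SZ;

		program_desc(addr, bytes);	/* one hardware descriptor per chunk */
	}
}
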
p                 283 drivers/staging/ks7010/ks7010_sdio.c static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p,
p                 304 drivers/staging/ks7010/ks7010_sdio.c 	sp->sendp = p;
p                 313 drivers/staging/ks7010/ks7010_sdio.c 	kfree(p);
p                 378 drivers/staging/ks7010/ks7010_sdio.c int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
p                 386 drivers/staging/ks7010/ks7010_sdio.c 	hdr = (struct hostif_hdr *)p;
p                 399 drivers/staging/ks7010/ks7010_sdio.c 	result = enqueue_txdev(priv, p, size, complete_handler, skb);
p                1051 drivers/staging/ks7010/ks_hostif.c 	struct hostif_hdr *p;
p                1053 drivers/staging/ks7010/ks_hostif.c 	p = kzalloc(hif_align_size(size), GFP_ATOMIC);
p                1054 drivers/staging/ks7010/ks_hostif.c 	if (!p)
p                1057 drivers/staging/ks7010/ks_hostif.c 	p->size = cpu_to_le16(size - sizeof(p->size));
p                1058 drivers/staging/ks7010/ks_hostif.c 	p->event = cpu_to_le16(event);
p                1060 drivers/staging/ks7010/ks_hostif.c 	return p;
p                1069 drivers/staging/ks7010/ks_hostif.c 	unsigned char *p;
p                1110 drivers/staging/ks7010/ks_hostif.c 	p = (unsigned char *)pp->data;
p                1127 drivers/staging/ks7010/ks_hostif.c 	memcpy(p, buffer, size);
p                1128 drivers/staging/ks7010/ks_hostif.c 	p += size;
p                1136 drivers/staging/ks7010/ks_hostif.c 		*p++ = 0xAA;	/* DSAP */
p                1137 drivers/staging/ks7010/ks_hostif.c 		*p++ = 0xAA;	/* SSAP */
p                1138 drivers/staging/ks7010/ks_hostif.c 		*p++ = 0x03;	/* CTL */
p                1139 drivers/staging/ks7010/ks_hostif.c 		*p++ = 0x00;	/* OUI ("000000") */
p                1140 drivers/staging/ks7010/ks_hostif.c 		*p++ = 0x00;	/* OUI ("000000") */
p                1141 drivers/staging/ks7010/ks_hostif.c 		*p++ = 0x00;	/* OUI ("000000") */
p                1151 drivers/staging/ks7010/ks_hostif.c 	memcpy(p, buffer, length);
p                1153 drivers/staging/ks7010/ks_hostif.c 	p += length;
p                1186 drivers/staging/ks7010/ks_hostif.c 				memcpy(p, mic, sizeof(mic));
p                1189 drivers/staging/ks7010/ks_hostif.c 				p += sizeof(mic);
p                1584 drivers/staging/ks7010/ks_hostif.c static void devio_rec_ind(struct ks_wlan_private *priv, unsigned char *p,
p                1591 drivers/staging/ks7010/ks_hostif.c 	priv->dev_data[atomic_read(&priv->rec_count)] = p;
p                1607 drivers/staging/ks7010/ks_hostif.c void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
p                1610 drivers/staging/ks7010/ks_hostif.c 	devio_rec_ind(priv, p, size);
p                1612 drivers/staging/ks7010/ks_hostif.c 	priv->rxp = p;
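
The byte-cursor writes in ks_hostif.c above (source lines 1136..1141) emit an 802.2 LLC/SNAP header in front of the payload. Minimal sketch of the same six-byte prefix; the function name is hypothetical, and the EtherType that normally follows the OUI is left to the caller, as in the driver.

static u8 *put_llc_snap_hdr(u8 *p)
{
	*p++ = 0xAA;	/* DSAP */
	*p++ = 0xAA;	/* SSAP */
	*p++ = 0x03;	/* CTL: unnumbered information */
	*p++ = 0x00;	/* OUI 00:00:00 (encapsulated Ethernet) */
	*p++ = 0x00;
	*p++ = 0x00;
	return p;	/* cursor now points at the EtherType position */
}
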
p                 594 drivers/staging/ks7010/ks_hostif.h void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
p                 599 drivers/staging/ks7010/ks_hostif.h int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
p                2087 drivers/staging/ks7010/ks_wlan_net.c 	u8 *p = extra;
p                2095 drivers/staging/ks7010/ks_wlan_net.c 	if (p[1] + 2 != dwrq->length || dwrq->length > 256)
p                2098 drivers/staging/ks7010/ks_wlan_net.c 	priv->wps.ielen = p[1] + 2 + 1;	/* IE header + IE + sizeof(len) */
p                2099 drivers/staging/ks7010/ks_wlan_net.c 	len = p[1] + 2;	/* IE header + IE */
p                2102 drivers/staging/ks7010/ks_wlan_net.c 	p = memcpy(priv->wps.ie + 1, p, len);
p                2105 drivers/staging/ks7010/ks_wlan_net.c 		   priv->wps.ielen, priv->wps.ielen, p[0], p[1], p[2], p[3],
p                2106 drivers/staging/ks7010/ks_wlan_net.c 		   p[priv->wps.ielen - 3], p[priv->wps.ielen - 2],
p                2107 drivers/staging/ks7010/ks_wlan_net.c 		   p[priv->wps.ielen - 1]);
p                 435 drivers/staging/media/allegro-dvt/nal-h264.c 	u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
p                 443 drivers/staging/media/allegro-dvt/nal-h264.c 	p[0] = 0x00;
p                 444 drivers/staging/media/allegro-dvt/nal-h264.c 	p[1] = 0x00;
p                 445 drivers/staging/media/allegro-dvt/nal-h264.c 	p[2] = 0x00;
p                 446 drivers/staging/media/allegro-dvt/nal-h264.c 	p[3] = 0x01;
p                 453 drivers/staging/media/allegro-dvt/nal-h264.c 	u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
p                 461 drivers/staging/media/allegro-dvt/nal-h264.c 	if (p[0] != 0x00 || p[1] != 0x00 || p[2] != 0x00 || p[3] != 0x01) {
p                 471 drivers/staging/media/allegro-dvt/nal-h264.c 	u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
p                 476 drivers/staging/media/allegro-dvt/nal-h264.c 	memset(p, 0xff, i);
p                 482 drivers/staging/media/allegro-dvt/nal-h264.c 	u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
p                 484 drivers/staging/media/allegro-dvt/nal-h264.c 	while (*p == 0xff) {
p                 490 drivers/staging/media/allegro-dvt/nal-h264.c 		p++;
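
The nal-h264.c entries above keep the RBSP write position in bits, derive the byte pointer with DIV_ROUND_UP(pos, 8), and introduce each NAL unit with the four-byte Annex B start code 00 00 00 01 (the same bytes are checked on the read path, and 0xff filler bytes are written or skipped around it). Sketch under those assumptions; the struct below is a stand-in for the driver's rbsp type.

struct rbsp_sketch {
	u8 *data;
	size_t pos;	/* write position in bits */
};

static void put_startcode_sketch(struct rbsp_sketch *rbsp)
{
	u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);

	p[0] = 0x00;	/* Annex B start code prefix */
	p[1] = 0x00;
	p[2] = 0x00;
	p[3] = 0x01;

	rbsp->pos = ((p - rbsp->data) + 4) * 8;	/* advance past the code */
}
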
p                  43 drivers/staging/media/hantro/hantro_drv.c 	return ctrl ? ctrl->p_cur.p : NULL;
p                 178 drivers/staging/media/hantro/hantro_g1_h264_dec.c 	p_reflist = ctx->h264_dec.reflists.p;
p                 602 drivers/staging/media/hantro/hantro_h264.c 	build_p_ref_list(&reflist_builder, h264_ctx->reflists.p);
p                  72 drivers/staging/media/hantro/hantro_hw.h 	u8 p[HANTRO_H264_DPB_SIZE];
p                1470 drivers/staging/media/ipu3/ipu3-css.c 	unsigned int p, q, i;
p                1473 drivers/staging/media/ipu3/ipu3-css.c 	for (p = 0; p < IPU3_CSS_PIPE_ID_NUM; p++)
p                1477 drivers/staging/media/ipu3/ipu3-css.c 					       xmem_sp_stage_ptrs[p][i],
p                1482 drivers/staging/media/ipu3/ipu3-css.c 					       xmem_isp_stage_ptrs[p][i],
p                1514 drivers/staging/media/ipu3/ipu3-css.c 	unsigned int p, q, i, abi_buf_num;
p                1524 drivers/staging/media/ipu3/ipu3-css.c 	for (p = 0; p < IPU3_CSS_PIPE_ID_NUM; p++)
p                1527 drivers/staging/media/ipu3/ipu3-css.c 					 &css_pipe->xmem_sp_stage_ptrs[p][i]);
p                1529 drivers/staging/media/ipu3/ipu3-css.c 					 &css_pipe->xmem_isp_stage_ptrs[p][i]);
p                 417 drivers/staging/media/ipu3/ipu3-v4l2.c 	unsigned int i, pipe, p;
p                 428 drivers/staging/media/ipu3/ipu3-v4l2.c 	for_each_set_bit(p, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
p                 430 drivers/staging/media/ipu3/ipu3-v4l2.c 			node = &imgu->imgu_pipe[p].nodes[i];
p                 432 drivers/staging/media/ipu3/ipu3-v4l2.c 				__func__, p, i, node->name, node->enabled);
p                 505 drivers/staging/media/ipu3/ipu3.c 	int p;
p                 573 drivers/staging/media/ipu3/ipu3.c 		for_each_set_bit(p, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
p                 574 drivers/staging/media/ipu3/ipu3.c 			imgu_queue_buffers(imgu, false, p);
p                 357 drivers/staging/media/soc_camera/soc_camera.c 			      struct v4l2_requestbuffers *p)
p                 367 drivers/staging/media/soc_camera/soc_camera.c 	ret = vb2_reqbufs(&icd->vb2_vidq, p);
p                 369 drivers/staging/media/soc_camera/soc_camera.c 		icd->streamer = p->count ? file : NULL;
p                 374 drivers/staging/media/soc_camera/soc_camera.c 			       struct v4l2_buffer *p)
p                 380 drivers/staging/media/soc_camera/soc_camera.c 	return vb2_querybuf(&icd->vb2_vidq, p);
p                 384 drivers/staging/media/soc_camera/soc_camera.c 			   struct v4l2_buffer *p)
p                 393 drivers/staging/media/soc_camera/soc_camera.c 	return vb2_qbuf(&icd->vb2_vidq, NULL, p);
p                 397 drivers/staging/media/soc_camera/soc_camera.c 			    struct v4l2_buffer *p)
p                 406 drivers/staging/media/soc_camera/soc_camera.c 	return vb2_dqbuf(&icd->vb2_vidq, p, file->f_flags & O_NONBLOCK);
p                 433 drivers/staging/media/soc_camera/soc_camera.c 			     struct v4l2_exportbuffer *p)
p                 439 drivers/staging/media/soc_camera/soc_camera.c 	return vb2_expbuf(&icd->vb2_vidq, p);
p                 108 drivers/staging/media/sunxi/cedrus/cedrus.c 			return ctx->ctrls[i]->p_cur.p;
p                 196 drivers/staging/media/sunxi/cedrus/cedrus.h vb2_v4l2_to_cedrus_buffer(const struct vb2_v4l2_buffer *p)
p                 198 drivers/staging/media/sunxi/cedrus/cedrus.h 	return container_of(p, struct cedrus_buffer, m2m_buf.vb);
p                 202 drivers/staging/media/sunxi/cedrus/cedrus.h vb2_to_cedrus_buffer(const struct vb2_buffer *p)
p                 204 drivers/staging/media/sunxi/cedrus/cedrus.h 	return vb2_v4l2_to_cedrus_buffer(to_vb2_v4l2_buffer(p));
p                  80 drivers/staging/media/sunxi/cedrus/cedrus_regs.h #define VE_DEC_MPEG_MP12HDR_INTRA_DC_PRECISION(p) \
p                  81 drivers/staging/media/sunxi/cedrus/cedrus_regs.h 	SHIFT_AND_MASK_BITS(p, 11, 10)
p                 481 drivers/staging/most/core.c 	list_for_each_entry(c, &iface->p->channel_list, list) {
p                 580 drivers/staging/most/core.c 	list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
p                1041 drivers/staging/most/core.c 	struct most_channel *c = iface->p->channel[id];
p                1077 drivers/staging/most/core.c 	c = iface->p->channel[id];
p                1180 drivers/staging/most/core.c 	struct most_channel *c = iface->p->channel[id];
p                1254 drivers/staging/most/core.c 	c = iface->p->channel[id];
p                1323 drivers/staging/most/core.c 	list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
p                1387 drivers/staging/most/core.c 	iface->p = kzalloc(sizeof(*iface->p), GFP_KERNEL);
p                1388 drivers/staging/most/core.c 	if (!iface->p) {
p                1393 drivers/staging/most/core.c 	INIT_LIST_HEAD(&iface->p->channel_list);
p                1394 drivers/staging/most/core.c 	iface->p->dev_id = id;
p                1395 drivers/staging/most/core.c 	strscpy(iface->p->name, iface->description, sizeof(iface->p->name));
p                1396 drivers/staging/most/core.c 	iface->dev.init_name = iface->p->name;
p                1403 drivers/staging/most/core.c 		kfree(iface->p);
p                1422 drivers/staging/most/core.c 		iface->p->channel[i] = c;
p                1443 drivers/staging/most/core.c 		list_add_tail(&c->list, &iface->p->channel_list);
p                1459 drivers/staging/most/core.c 		c = iface->p->channel[--i];
p                1463 drivers/staging/most/core.c 	kfree(iface->p);
p                1485 drivers/staging/most/core.c 		c = iface->p->channel[i];
p                1499 drivers/staging/most/core.c 	ida_simple_remove(&mdev_id, iface->p->dev_id);
p                1500 drivers/staging/most/core.c 	kfree(iface->p);
p                1517 drivers/staging/most/core.c 	struct most_channel *c = iface->p->channel[id];
p                1538 drivers/staging/most/core.c 	struct most_channel *c = iface->p->channel[id];
p                 251 drivers/staging/most/core.h 	struct interface_private *p;
p                 115 drivers/staging/most/dim2/dim2.c #define PACKET_IS_NET_INFO(p)  \
p                 116 drivers/staging/most/dim2/dim2.c 	(((p)[1] == 0x18) && ((p)[2] == 0x05) && ((p)[3] == 0x0C) && \
p                 117 drivers/staging/most/dim2/dim2.c 	 ((p)[13] == 0x3C) && ((p)[14] == 0x00) && ((p)[15] == 0x0A))
p                 162 drivers/staging/most/net/net.c static int most_nd_set_mac_address(struct net_device *dev, void *p)
p                 165 drivers/staging/most/net/net.c 	int err = eth_mac_addr(dev, p);
p                  72 drivers/staging/most/usb/usb.c #define to_dci_obj(p) container_of(p, struct most_dci_obj, dev)
p                  48 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
p                  50 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	return p->group_count;
p                  56 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
p                  58 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	return (group >= p->group_count) ? NULL : p->group_names[group];
p                  66 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
p                  68 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	if (group >= p->group_count)
p                  71 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	*pins = p->groups[group].func[0].pins;
p                  72 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	*num_pins = p->groups[group].func[0].pin_count;
p                  87 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
p                  89 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	return p->func_count;
p                  95 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
p                  97 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	return p->func[func]->name;
p                 105 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
p                 107 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	if (p->func[func]->group_count == 1)
p                 108 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		*groups = &p->group_names[p->func[func]->groups[0]];
p                 110 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		*groups = p->group_names;
p                 112 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	*num_groups = p->func[func]->group_count;
p                 120 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
p                 127 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	if (p->groups[group].enabled) {
p                 128 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		dev_err(p->dev, "%s is already enabled\n",
p                 129 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 			p->groups[group].name);
p                 133 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	p->groups[group].enabled = 1;
p                 134 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	p->func[func]->enabled = 1;
p                 136 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	shift = p->groups[group].shift;
p                 142 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	mode &= ~(p->groups[group].mask << shift);
p                 145 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	for (i = 0; i < p->groups[group].func[0].pin_count; i++)
p                 146 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		p->gpio[p->groups[group].func[0].pins[i]] = 1;
p                 150 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		mode |= p->groups[group].gpio << shift;
p                 152 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		for (i = 0; i < p->func[func]->pin_count; i++)
p                 153 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 			p->gpio[p->func[func]->pins[i]] = 0;
p                 154 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		mode |= p->func[func]->value << shift;
p                 165 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
p                 167 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	if (!p->gpio[pin]) {
p                 168 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		dev_err(p->dev, "pin %d is not set to gpio mux\n", pin);
p                 194 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c static int rt2880_pinmux_index(struct rt2880_priv *p)
p                 197 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	struct rt2880_pmx_group *mux = p->groups;
p                 202 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		p->group_count++;
p                 207 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	p->group_names = devm_kcalloc(p->dev, p->group_count,
p                 209 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	if (!p->group_names)
p                 212 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	for (i = 0; i < p->group_count; i++) {
p                 213 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		p->group_names[i] = p->groups[i].name;
p                 214 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		p->func_count += p->groups[i].func_count;
p                 218 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	p->func_count++;
p                 221 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	f = p->func = devm_kcalloc(p->dev,
p                 222 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 				   p->func_count,
p                 225 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	gpio_func.groups = devm_kcalloc(p->dev, p->group_count, sizeof(int),
p                 231 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	gpio_func.group_count = p->group_count;
p                 239 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	for (i = 0; i < p->group_count; i++) {
p                 240 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		for (j = 0; j < p->groups[i].func_count; j++) {
p                 241 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 			f[c] = &p->groups[i].func[j];
p                 242 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 			f[c]->groups = devm_kzalloc(p->dev, sizeof(int),
p                 252 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c static int rt2880_pinmux_pins(struct rt2880_priv *p)
p                 260 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	for (i = 0; i < p->func_count; i++) {
p                 263 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		if (!p->func[i]->pin_count)
p                 266 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		p->func[i]->pins = devm_kcalloc(p->dev,
p                 267 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 						p->func[i]->pin_count,
p                 270 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		for (j = 0; j < p->func[i]->pin_count; j++)
p                 271 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 			p->func[i]->pins[j] = p->func[i]->pin_first + j;
p                 273 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		pin = p->func[i]->pin_first + p->func[i]->pin_count;
p                 274 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		if (pin > p->max_pins)
p                 275 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 			p->max_pins = pin;
p                 279 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	p->gpio = devm_kcalloc(p->dev, p->max_pins, sizeof(u8), GFP_KERNEL);
p                 281 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	p->pads = devm_kcalloc(p->dev, p->max_pins,
p                 283 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	if (!p->pads || !p->gpio) {
p                 284 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		dev_err(p->dev, "Failed to allocate gpio data\n");
p                 288 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	memset(p->gpio, 1, sizeof(u8) * p->max_pins);
p                 289 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	for (i = 0; i < p->func_count; i++) {
p                 290 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		if (!p->func[i]->pin_count)
p                 293 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		for (j = 0; j < p->func[i]->pin_count; j++)
p                 294 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 			p->gpio[p->func[i]->pins[j]] = 0;
p                 298 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	p->gpio[0] = 1;
p                 301 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	for (i = 0; i < p->max_pins; i++) {
p                 303 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		char *name = devm_kzalloc(p->dev, 5, GFP_KERNEL);
p                 308 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		p->pads[i].number = i;
p                 309 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		p->pads[i].name = name;
p                 311 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	p->desc->pins = p->pads;
p                 312 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	p->desc->npins = p->max_pins;
p                 319 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	struct rt2880_priv *p;
p                 327 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	p = devm_kzalloc(&pdev->dev, sizeof(struct rt2880_priv), GFP_KERNEL);
p                 328 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	if (!p)
p                 331 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	p->dev = &pdev->dev;
p                 332 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	p->desc = &rt2880_pctrl_desc;
p                 333 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	p->groups = rt2880_pinmux_data;
p                 334 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	platform_set_drvdata(pdev, p);
p                 337 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	if (rt2880_pinmux_index(p)) {
p                 341 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	if (rt2880_pinmux_pins(p)) {
p                 345 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 	dev = pinctrl_register(p->desc, &pdev->dev, p);
p                 365 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c 		range = devm_kzalloc(p->dev, sizeof(*range), GFP_KERNEL);
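
The core of the mux programming in pinctrl-rt2880.c above (source lines 136..154): each pin group owns a field in the mode register described by a mask and a shift, and selecting a function clears that field and ORs in either the group's GPIO value or the function's value. Tiny sketch of that read-modify-write step only; the register read/write around it is left out.

static u32 rt2880_mux_field_sketch(u32 mode, u32 mask, u32 shift, u32 value)
{
	mode &= ~(mask << shift);	/* clear the group's field */
	mode |= value << shift;		/* gpio value or function value */
	return mode;
}
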
p                 412 drivers/staging/octeon-usb/octeon-hcd.c static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p)
p                 414 drivers/staging/octeon-usb/octeon-hcd.c 	return container_of((void *)p, struct usb_hcd, hcd_priv);
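
octeon_to_hcd() above recovers the enclosing usb_hcd from a pointer to its hcd_priv member with container_of(), i.e. it subtracts the member's offset from the member's address. A generic sketch of the same back-mapping with hypothetical struct and function names:

struct outer_sketch {
	int other;
	int member;
};

static struct outer_sketch *to_outer_sketch(int *p)
{
	/* p points at ->member; step back to the start of the container */
	return container_of(p, struct outer_sketch, member);
}
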
p                   4 drivers/staging/octeon/octeon-stubs.h # define XKPHYS_TO_PHYS(p)			(p)
p                 567 drivers/staging/olpc_dcon/olpc_dcon.c 			     unsigned long e, void *p)
p                1373 drivers/staging/qlge/qlge.h 	} p;
p                 372 drivers/staging/qlge/qlge_ethtool.c 		char *p = (char *)qdev +
p                 375 drivers/staging/qlge/qlge_ethtool.c 			sizeof(u64)) ? *(u64 *)p : (*(u32 *)p);
p                 609 drivers/staging/qlge/qlge_ethtool.c 			struct ethtool_regs *regs, void *p)
p                 613 drivers/staging/qlge/qlge_ethtool.c 	ql_get_dump(qdev, p);
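
The qlge ethtool statistics read above works from a table of member offsets: each entry's offset is added to the device structure's address and the counter is read back as a u64 or u32 depending on the recorded size. Minimal sketch; the table-entry layout and names are assumptions.

struct stat_desc_sketch {
	size_t offset;		/* offsetof() the counter in the device struct */
	size_t size;		/* sizeof(u64) or sizeof(u32) */
};

static u64 read_stat_sketch(const void *qdev, const struct stat_desc_sketch *d)
{
	const char *p = (const char *)qdev + d->offset;

	return d->size == sizeof(u64) ? *(const u64 *)p : *(const u32 *)p;
}
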
p                 749 drivers/staging/qlge/qlge_main.c 	__le32 *p = (__le32 *)&qdev->flash;
p                 765 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < size; i++, p++) {
p                 766 drivers/staging/qlge/qlge_main.c 		status = ql_read_flash_word(qdev, i+offset, p);
p                 814 drivers/staging/qlge/qlge_main.c 	__le32 *p = (__le32 *)&qdev->flash;
p                 827 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < size; i++, p++) {
p                 828 drivers/staging/qlge/qlge_main.c 		status = ql_read_flash_word(qdev, i+offset, p);
p                1054 drivers/staging/qlge/qlge_main.c 	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
p                1057 drivers/staging/qlge/qlge_main.c 				lbq_desc->p.pg_chunk.map,
p                1121 drivers/staging/qlge/qlge_main.c 	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
p                1129 drivers/staging/qlge/qlge_main.c 		lbq_desc->p.pg_chunk.last_flag = 1;
p                1133 drivers/staging/qlge/qlge_main.c 		lbq_desc->p.pg_chunk.last_flag = 0;
p                1160 drivers/staging/qlge/qlge_main.c 			map = lbq_desc->p.pg_chunk.map +
p                1161 drivers/staging/qlge/qlge_main.c 				lbq_desc->p.pg_chunk.offset;
p                1206 drivers/staging/qlge/qlge_main.c 			if (sbq_desc->p.skb == NULL) {
p                1211 drivers/staging/qlge/qlge_main.c 				sbq_desc->p.skb =
p                1214 drivers/staging/qlge/qlge_main.c 				if (sbq_desc->p.skb == NULL) {
p                1218 drivers/staging/qlge/qlge_main.c 				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
p                1220 drivers/staging/qlge/qlge_main.c 						     sbq_desc->p.skb->data,
p                1227 drivers/staging/qlge/qlge_main.c 					dev_kfree_skb_any(sbq_desc->p.skb);
p                1228 drivers/staging/qlge/qlge_main.c 					sbq_desc->p.skb = NULL;
p                1504 drivers/staging/qlge/qlge_main.c 		put_page(lbq_desc->p.pg_chunk.page);
p                1514 drivers/staging/qlge/qlge_main.c 		put_page(lbq_desc->p.pg_chunk.page);
p                1517 drivers/staging/qlge/qlge_main.c 	prefetch(lbq_desc->p.pg_chunk.va);
p                1519 drivers/staging/qlge/qlge_main.c 			     lbq_desc->p.pg_chunk.page,
p                1520 drivers/staging/qlge/qlge_main.c 			     lbq_desc->p.pg_chunk.offset,
p                1554 drivers/staging/qlge/qlge_main.c 		put_page(lbq_desc->p.pg_chunk.page);
p                1558 drivers/staging/qlge/qlge_main.c 	addr = lbq_desc->p.pg_chunk.va;
p                1583 drivers/staging/qlge/qlge_main.c 	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
p                1584 drivers/staging/qlge/qlge_main.c 				lbq_desc->p.pg_chunk.offset + hlen,
p                1627 drivers/staging/qlge/qlge_main.c 	put_page(lbq_desc->p.pg_chunk.page);
p                1642 drivers/staging/qlge/qlge_main.c 	skb = sbq_desc->p.skb;
p                1784 drivers/staging/qlge/qlge_main.c 		skb = sbq_desc->p.skb;
p                1787 drivers/staging/qlge/qlge_main.c 		sbq_desc->p.skb = NULL;
p                1818 drivers/staging/qlge/qlge_main.c 			skb_put_data(skb, sbq_desc->p.skb->data, length);
p                1832 drivers/staging/qlge/qlge_main.c 			skb = sbq_desc->p.skb;
p                1841 drivers/staging/qlge/qlge_main.c 			sbq_desc->p.skb = NULL;
p                1856 drivers/staging/qlge/qlge_main.c 				     lbq_desc->p.pg_chunk.offset, length);
p                1857 drivers/staging/qlge/qlge_main.c 			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
p                1858 drivers/staging/qlge/qlge_main.c 						lbq_desc->p.pg_chunk.offset,
p                1886 drivers/staging/qlge/qlge_main.c 						lbq_desc->p.pg_chunk.page,
p                1887 drivers/staging/qlge/qlge_main.c 						lbq_desc->p.pg_chunk.offset,
p                1893 drivers/staging/qlge/qlge_main.c 					      lbq_desc->p.pg_chunk.va,
p                1928 drivers/staging/qlge/qlge_main.c 			skb = sbq_desc->p.skb;
p                1929 drivers/staging/qlge/qlge_main.c 			sbq_desc->p.skb = NULL;
p                1941 drivers/staging/qlge/qlge_main.c 						lbq_desc->p.pg_chunk.page,
p                1942 drivers/staging/qlge/qlge_main.c 						lbq_desc->p.pg_chunk.offset,
p                1950 drivers/staging/qlge/qlge_main.c 		ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
p                2832 drivers/staging/qlge/qlge_main.c 		if (lbq_desc->p.pg_chunk.last_flag) {
p                2834 drivers/staging/qlge/qlge_main.c 				lbq_desc->p.pg_chunk.map,
p                2837 drivers/staging/qlge/qlge_main.c 			lbq_desc->p.pg_chunk.last_flag = 0;
p                2840 drivers/staging/qlge/qlge_main.c 		put_page(lbq_desc->p.pg_chunk.page);
p                2841 drivers/staging/qlge/qlge_main.c 		lbq_desc->p.pg_chunk.page = NULL;
p                2867 drivers/staging/qlge/qlge_main.c 		if (sbq_desc->p.skb) {
p                2872 drivers/staging/qlge/qlge_main.c 			dev_kfree_skb(sbq_desc->p.skb);
p                2873 drivers/staging/qlge/qlge_main.c 			sbq_desc->p.skb = NULL;
p                4416 drivers/staging/qlge/qlge_main.c static int qlge_set_mac_address(struct net_device *ndev, void *p)
p                4419 drivers/staging/qlge/qlge_main.c 	struct sockaddr *addr = p;
p                  65 drivers/staging/rtl8188eu/core/rtw_ap.c 	u8 *p, *dst_ie, *premainder_ie = NULL;
p                  70 drivers/staging/rtl8188eu/core/rtw_ap.c 	p = rtw_get_ie(pie + _FIXED_IE_LENGTH_, _TIM_IE_, &tim_ielen,
p                  72 drivers/staging/rtl8188eu/core/rtw_ap.c 	if (p && tim_ielen > 0) {
p                  74 drivers/staging/rtl8188eu/core/rtw_ap.c 		premainder_ie = p + tim_ielen;
p                  75 drivers/staging/rtl8188eu/core/rtw_ap.c 		tim_ie_offset = (int)(p - pie);
p                  79 drivers/staging/rtl8188eu/core/rtw_ap.c 		dst_ie = p;
p                  88 drivers/staging/rtl8188eu/core/rtw_ap.c 		p = rtw_get_ie(pie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_,
p                  91 drivers/staging/rtl8188eu/core/rtw_ap.c 		if (p)
p                 593 drivers/staging/rtl8188eu/core/rtw_ap.c 	u8 *p;
p                 671 drivers/staging/rtl8188eu/core/rtw_ap.c 	p = rtw_get_ie((pnetwork->ies + sizeof(struct ndis_802_11_fixed_ie)), _HT_ADD_INFO_IE_, &ie_len, (pnetwork->ie_length - sizeof(struct ndis_802_11_fixed_ie)));
p                 672 drivers/staging/rtl8188eu/core/rtw_ap.c 	if (p && ie_len) {
p                 673 drivers/staging/rtl8188eu/core/rtw_ap.c 		pht_info = (struct HT_info_element *)(p + 2);
p                 728 drivers/staging/rtl8188eu/core/rtw_ap.c 	u8 *p;
p                 778 drivers/staging/rtl8188eu/core/rtw_ap.c 	p = rtw_get_beacon_interval_from_ie(ie);/* 8: TimeStamp, 2: Beacon Interval 2:Capability */
p                 779 drivers/staging/rtl8188eu/core/rtw_ap.c 	pbss_network->Configuration.BeaconPeriod = get_unaligned_le16(p);
p                 785 drivers/staging/rtl8188eu/core/rtw_ap.c 	p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SSID_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
p                 786 drivers/staging/rtl8188eu/core/rtw_ap.c 	if (p && ie_len > 0) {
p                 788 drivers/staging/rtl8188eu/core/rtw_ap.c 		memcpy(pbss_network->ssid.ssid, (p + 2), ie_len);
p                 795 drivers/staging/rtl8188eu/core/rtw_ap.c 	p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _DSSET_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
p                 796 drivers/staging/rtl8188eu/core/rtw_ap.c 	if (p && ie_len > 0)
p                 797 drivers/staging/rtl8188eu/core/rtw_ap.c 		channel = *(p + 2);
p                 803 drivers/staging/rtl8188eu/core/rtw_ap.c 	p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
p                 804 drivers/staging/rtl8188eu/core/rtw_ap.c 	if (p) {
p                 805 drivers/staging/rtl8188eu/core/rtw_ap.c 		memcpy(supportRate, p + 2, ie_len);
p                 810 drivers/staging/rtl8188eu/core/rtw_ap.c 	p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_, &ie_len, pbss_network->ie_length - _BEACON_IE_OFFSET_);
p                 811 drivers/staging/rtl8188eu/core/rtw_ap.c 	if (p) {
p                 812 drivers/staging/rtl8188eu/core/rtw_ap.c 		memcpy(supportRate + supportRateNum, p + 2, ie_len);
p                 821 drivers/staging/rtl8188eu/core/rtw_ap.c 	p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _ERPINFO_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
p                 822 drivers/staging/rtl8188eu/core/rtw_ap.c 	if (p && ie_len > 0)
p                 823 drivers/staging/rtl8188eu/core/rtw_ap.c 		ERP_IE_handler(padapter, (struct ndis_802_11_var_ie *)p);
p                 838 drivers/staging/rtl8188eu/core/rtw_ap.c 	p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _RSN_IE_2_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
p                 839 drivers/staging/rtl8188eu/core/rtw_ap.c 	if (p && ie_len > 0) {
p                 840 drivers/staging/rtl8188eu/core/rtw_ap.c 		if (rtw_parse_wpa2_ie(p, ie_len + 2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
p                 856 drivers/staging/rtl8188eu/core/rtw_ap.c 	for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
p                 857 drivers/staging/rtl8188eu/core/rtw_ap.c 		p = rtw_get_ie(p, _SSN_IE_1_, &ie_len,
p                 859 drivers/staging/rtl8188eu/core/rtw_ap.c 		if ((p) && (!memcmp(p + 2, OUI1, 4))) {
p                 860 drivers/staging/rtl8188eu/core/rtw_ap.c 			if (rtw_parse_wpa_ie(p, ie_len + 2, &group_cipher,
p                 873 drivers/staging/rtl8188eu/core/rtw_ap.c 		if ((!p) || (ie_len == 0))
p                 881 drivers/staging/rtl8188eu/core/rtw_ap.c 		for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
p                 882 drivers/staging/rtl8188eu/core/rtw_ap.c 			p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len,
p                 884 drivers/staging/rtl8188eu/core/rtw_ap.c 			if ((p) && !memcmp(p + 2, WMM_PARA_IE, 6)) {
p                 888 drivers/staging/rtl8188eu/core/rtw_ap.c 				*(p + 8) |= BIT(7);
p                 893 drivers/staging/rtl8188eu/core/rtw_ap.c 				*(p + 10) &= ~BIT(4); /* BE */
p                 894 drivers/staging/rtl8188eu/core/rtw_ap.c 				*(p + 14) &= ~BIT(4); /* BK */
p                 895 drivers/staging/rtl8188eu/core/rtw_ap.c 				*(p + 18) &= ~BIT(4); /* VI */
p                 896 drivers/staging/rtl8188eu/core/rtw_ap.c 				*(p + 22) &= ~BIT(4); /* VO */
p                 900 drivers/staging/rtl8188eu/core/rtw_ap.c 			if ((!p) || (ie_len == 0))
p                 905 drivers/staging/rtl8188eu/core/rtw_ap.c 	p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_CAPABILITY_IE_, &ie_len,
p                 907 drivers/staging/rtl8188eu/core/rtw_ap.c 	if (p && ie_len > 0) {
p                 908 drivers/staging/rtl8188eu/core/rtw_ap.c 		struct ieee80211_ht_cap *pht_cap = (struct ieee80211_ht_cap *)(p + 2);
p                 910 drivers/staging/rtl8188eu/core/rtw_ap.c 		pHT_caps_ie = p;
p                 925 drivers/staging/rtl8188eu/core/rtw_ap.c 		memcpy(&pmlmepriv->htpriv.ht_cap, p + 2, ie_len);
p                 929 drivers/staging/rtl8188eu/core/rtw_ap.c 	p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_ADD_INFO_IE_, &ie_len,
p                 931 drivers/staging/rtl8188eu/core/rtw_ap.c 	if (p && ie_len > 0)
p                 932 drivers/staging/rtl8188eu/core/rtw_ap.c 		pHT_info_ie = p;
p                1110 drivers/staging/rtl8188eu/core/rtw_ap.c 	unsigned char *p, *ie = pnetwork->ies;
p                1119 drivers/staging/rtl8188eu/core/rtw_ap.c 	p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _ERPINFO_IE_, &len,
p                1121 drivers/staging/rtl8188eu/core/rtw_ap.c 	if (p && len > 0) {
p                1122 drivers/staging/rtl8188eu/core/rtw_ap.c 		struct ndis_802_11_var_ie *pIE = (struct ndis_802_11_var_ie *)p;
p                 148 drivers/staging/rtl8188eu/core/rtw_ieee80211.c 	u8 *p;
p                 153 drivers/staging/rtl8188eu/core/rtw_ieee80211.c 	p = pbuf;
p                 157 drivers/staging/rtl8188eu/core/rtw_ieee80211.c 		if (*p == index) {
p                 158 drivers/staging/rtl8188eu/core/rtw_ieee80211.c 			*len = *(p + 1);
p                 159 drivers/staging/rtl8188eu/core/rtw_ieee80211.c 			return p;
p                 161 drivers/staging/rtl8188eu/core/rtw_ieee80211.c 			tmp = *(p + 1);
p                 162 drivers/staging/rtl8188eu/core/rtw_ieee80211.c 			p += (tmp + 2);
p                 965 drivers/staging/rtl8188eu/core/rtw_ieee80211.c 	unsigned char *p;
p                 997 drivers/staging/rtl8188eu/core/rtw_ieee80211.c 	p = rtw_get_ie(pnetwork->network.ies + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pnetwork->network.ie_length - _FIXED_IE_LENGTH_);
p                 998 drivers/staging/rtl8188eu/core/rtw_ieee80211.c 	if (p && len > 0) {
p                1000 drivers/staging/rtl8188eu/core/rtw_ieee80211.c 			(struct ieee80211_ht_cap *)(p + 2);
p                1007 drivers/staging/rtl8188eu/core/rtw_ieee80211.c 	p = rtw_get_ie(pnetwork->network.ies + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, pnetwork->network.ie_length - _FIXED_IE_LENGTH_);
p                1008 drivers/staging/rtl8188eu/core/rtw_ieee80211.c 	if (p && len > 0) {
p                1009 drivers/staging/rtl8188eu/core/rtw_ieee80211.c 		pht_info = (struct HT_info_element *)(p + 2);
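
rtw_get_ie(), whose body is partly visible in the rtw_ieee80211.c entries above (source lines 148..164) and which nearly every rtl8188eu entry in this listing calls, is a plain TLV walk over 802.11 information elements: byte 0 of each element is the ID, byte 1 the length, and a non-matching element is skipped by advancing length + 2 bytes. Reconstruction under those assumptions; the bounds handling of the real helper may differ.

static u8 *get_ie_sketch(u8 *pbuf, u8 index, u32 *len, int limit)
{
	u8 *p = pbuf;
	int i = 0;

	*len = 0;
	while (i + 2 <= limit) {		/* need at least ID + length */
		if (*p == index) {
			*len = *(p + 1);
			return p;		/* points at the matching IE */
		}
		i += *(p + 1) + 2;		/* skip header and body */
		p += *(p + 1) + 2;
	}
	return NULL;
}
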
p                 537 drivers/staging/rtl8188eu/core/rtw_ioctl_set.c 	u8 *p;
p                 552 drivers/staging/rtl8188eu/core/rtw_ioctl_set.c 		p = rtw_get_ie(&pcur_bss->ies[12], _HT_CAPABILITY_IE_,
p                 554 drivers/staging/rtl8188eu/core/rtw_ioctl_set.c 		if (p && ht_ielen > 0) {
p                1859 drivers/staging/rtl8188eu/core/rtw_mlme.c 	unsigned char *p;
p                1868 drivers/staging/rtl8188eu/core/rtw_mlme.c 	p = rtw_get_ie(in_ie+12, _HT_CAPABILITY_IE_, &ielen, in_len-12);
p                1870 drivers/staging/rtl8188eu/core/rtw_mlme.c 	if (p && ielen > 0) {
p                1913 drivers/staging/rtl8188eu/core/rtw_mlme.c 		p = rtw_get_ie(in_ie+12, _HT_ADD_INFO_IE_, &ielen, in_len-12);
p                1914 drivers/staging/rtl8188eu/core/rtw_mlme.c 		if (p && (ielen == sizeof(struct ieee80211_ht_addt_info))) {
p                1916 drivers/staging/rtl8188eu/core/rtw_mlme.c 			rtw_set_ie(out_ie+out_len, _HT_ADD_INFO_IE_, ielen, p+2, pout_len);
p                1009 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	unsigned char *pframe, *p;
p                1116 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	p = rtw_get_ie((pmlmeinfo->network.ies + sizeof(struct ndis_802_11_fixed_ie)), _RSN_IE_2_, &ie_len, (pmlmeinfo->network.ie_length - sizeof(struct ndis_802_11_fixed_ie)));
p                1117 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	if (p)
p                1118 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		pframe = rtw_set_ie(pframe, _RSN_IE_2_, ie_len, p + 2, &pattrib->pktlen);
p                1122 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		p = rtw_get_ie((pmlmeinfo->network.ies + sizeof(struct ndis_802_11_fixed_ie)), _HT_CAPABILITY_IE_, &ie_len, (pmlmeinfo->network.ie_length - sizeof(struct ndis_802_11_fixed_ie)));
p                1123 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		if ((p != NULL) && (!(is_ap_in_tkip(padapter)))) {
p                1124 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 			memcpy(&pmlmeinfo->HT_caps, p + 2, sizeof(struct ieee80211_ht_cap));
p                1773 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 			u8 *p;
p                1782 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 			p = rtw_get_ie(pbss_network->ies + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pbss_network->ie_length - _FIXED_IE_LENGTH_);
p                1783 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 			if ((p == NULL) || (len == 0)) { /* non-HT */
p                2015 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	u8 *p;
p                2063 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	p = rtw_get_ie(bssid->ies + ie_offset, _SSID_IE_, &len, bssid->ie_length - ie_offset);
p                2064 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	if (!p) {
p                2074 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		memcpy(bssid->ssid.ssid, (p + 2), len);
p                2084 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	p = rtw_get_ie(bssid->ies + ie_offset, _SUPPORTEDRATES_IE_, &len, bssid->ie_length - ie_offset);
p                2085 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	if (p != NULL) {
p                2090 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		memcpy(bssid->SupportedRates, (p + 2), len);
p                2094 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	p = rtw_get_ie(bssid->ies + ie_offset, _EXT_SUPPORTEDRATES_IE_, &len, bssid->ie_length - ie_offset);
p                2095 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	if (p) {
p                2100 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		memcpy(bssid->SupportedRates + i, (p + 2), len);
p                2110 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	p = rtw_get_ie(bssid->ies + ie_offset, _DSSET_IE_, &len, bssid->ie_length - ie_offset);
p                2115 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	if (p) {
p                2116 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		bssid->Configuration.DSConfig = *(p + 2);
p                2119 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		p = rtw_get_ie(bssid->ies + ie_offset, _HT_ADD_INFO_IE_, &len, bssid->ie_length - ie_offset);
p                2120 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		if (p) {
p                2121 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 			struct HT_info_element *HT_info = (struct HT_info_element *)(p + 2);
p                2161 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		p = rtw_get_ie(bssid->ies + ie_offset, _HT_CAPABILITY_IE_, &len, bssid->ie_length - ie_offset);
p                2162 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		if (p && len > 0) {
p                2164 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 				(struct ieee80211_ht_cap *)(p + 2);
p                2370 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		u8 *ie, *p;
p                2385 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		p = ie;
p                2389 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		memcpy(country, p, 3);
p                2390 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		p += 3;
p                2395 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		while ((ie - p) >= 3) {
p                2396 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 			fcn = *(p++);
p                2397 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 			noc = *(p++);
p                2398 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 			p++;
p                2509 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	unsigned char *p;
p                2524 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _SSID_IE_, &ielen,
p                2528 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	if (p) {
p                2529 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		if ((ielen != 0 && memcmp((void *)(p+2), (void *)cur->ssid.ssid, cur->ssid.ssid_length)) ||
p                2650 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	unsigned char *sa, *p;
p                2761 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 			p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, &ie_len,
p                2764 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 			if ((p == NULL) || (ie_len <= 0)) {
p                2770 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 			if (!memcmp((void *)(p + 2), pstat->chg_txt, 128)) {
p                2818 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	unsigned char *p;
p                2855 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 			p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, &len,
p                2858 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 			if (p == NULL)
p                2861 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 			memcpy((void *)(pmlmeinfo->chg_txt), (void *)(p + 2), len);
p                2897 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	unsigned char reassoc, *p, *pos, *wpa_ie;
p                2969 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _SSID_IE_, &ie_len,
p                2972 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	if (!p || ie_len == 0) {
p                2978 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		if (memcmp((void *)(p+2), cur->ssid.ssid, cur->ssid.ssid_length))
p                2989 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _SUPPORTEDRATES_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
p                2990 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	if (p == NULL) {
p                2999 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		memcpy(supportRate, p+2, ie_len);
p                3002 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _EXT_SUPPORTEDRATES_IE_, &ie_len,
p                3004 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		if (p !=  NULL) {
p                3006 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 				memcpy(supportRate+supportRateNum, p+2, ie_len);
p                3146 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		p = pframe + WLAN_HDR_A3_LEN + ie_offset; ie_len = 0;
p                3148 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 			p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
p                3149 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 			if (p != NULL) {
p                3150 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 				if (!memcmp(p+2, WMM_IE, 6)) {
p                3154 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 					pstat->qos_info = *(p+8);
p                3189 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 			p = p + ie_len + 2;
p                  92 drivers/staging/rtl8188eu/core/rtw_security.c 	u8 *p = (u8 *)&c, *p1;
p                 106 drivers/staging/rtl8188eu/core/rtw_security.c 		p1[0] = crc32_reverseBit(p[3]);
p                 107 drivers/staging/rtl8188eu/core/rtw_security.c 		p1[1] = crc32_reverseBit(p[2]);
p                 108 drivers/staging/rtl8188eu/core/rtw_security.c 		p1[2] = crc32_reverseBit(p[1]);
p                 109 drivers/staging/rtl8188eu/core/rtw_security.c 		p1[3] = crc32_reverseBit(p[0]);
p                 116 drivers/staging/rtl8188eu/core/rtw_security.c 	u8 *p;
p                 124 drivers/staging/rtl8188eu/core/rtw_security.c 	for (p = buf; len > 0; ++p, --len)
p                 125 drivers/staging/rtl8188eu/core/rtw_security.c 		crc = crc32_table[(crc ^ *p) & 0xff] ^ (crc >> 8);
p                 256 drivers/staging/rtl8188eu/core/rtw_security.c static u32 secmicgetuint32(u8 *p)
p                 263 drivers/staging/rtl8188eu/core/rtw_security.c 		res |= ((u32)(*p++)) << (8*i);
p                 267 drivers/staging/rtl8188eu/core/rtw_security.c static void secmicputuint32(u8 *p, u32 val)
p                 273 drivers/staging/rtl8188eu/core/rtw_security.c 		*p++ = (u8)(val & 0xff);
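
The TKIP MIC helpers partly shown above (rtw_security.c lines 256..273) convert between byte streams and 32-bit words in little-endian order, one byte per loop iteration. Reconstruction; the exact loop framing of the originals is assumed from the visible lines.

static u32 get_le32_sketch(const u8 *p)
{
	u32 res = 0;
	int i;

	for (i = 0; i < 4; i++)
		res |= ((u32)(*p++)) << (8 * i);	/* LSB first */
	return res;
}

static void put_le32_sketch(u8 *p, u32 val)
{
	int i;

	for (i = 0; i < 4; i++) {
		*p++ = (u8)(val & 0xff);		/* LSB first */
		val >>= 8;
	}
}
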
p                 813 drivers/staging/rtl8188eu/core/rtw_wlan_util.c 	unsigned char *p;
p                 862 drivers/staging/rtl8188eu/core/rtw_wlan_util.c 	p = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, bssid->ie_length - _FIXED_IE_LENGTH_);
p                 863 drivers/staging/rtl8188eu/core/rtw_wlan_util.c 	if (p && len > 0) {
p                 865 drivers/staging/rtl8188eu/core/rtw_wlan_util.c 			(struct ieee80211_ht_cap *)(p + 2);
p                 872 drivers/staging/rtl8188eu/core/rtw_wlan_util.c 	p = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, bssid->ie_length - _FIXED_IE_LENGTH_);
p                 873 drivers/staging/rtl8188eu/core/rtw_wlan_util.c 	if (p && len > 0) {
p                 874 drivers/staging/rtl8188eu/core/rtw_wlan_util.c 			pht_info = (struct HT_info_element *)(p + 2);
p                 894 drivers/staging/rtl8188eu/core/rtw_wlan_util.c 	p = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, _DSSET_IE_, &len, bssid->ie_length - _FIXED_IE_LENGTH_);
p                 895 drivers/staging/rtl8188eu/core/rtw_wlan_util.c 	if (p) {
p                 896 drivers/staging/rtl8188eu/core/rtw_wlan_util.c 			bcn_channel = *(p + 2);
p                 898 drivers/staging/rtl8188eu/core/rtw_wlan_util.c 			p = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, bssid->ie_length - _FIXED_IE_LENGTH_);
p                 914 drivers/staging/rtl8188eu/core/rtw_wlan_util.c 	p = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->ie_length - _FIXED_IE_LENGTH_);
p                 915 drivers/staging/rtl8188eu/core/rtw_wlan_util.c 	if (p) {
p                 916 drivers/staging/rtl8188eu/core/rtw_wlan_util.c 		ssid_len = *(p + 1);
p                 920 drivers/staging/rtl8188eu/core/rtw_wlan_util.c 	memcpy(bssid->ssid.ssid, (p + 2), ssid_len);
p                 102 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	char *p;
p                 123 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	p = rtw_get_ie(&pnetwork->network.ies[12], _HT_CAPABILITY_IE_, &ht_ielen, pnetwork->network.ie_length-12);
p                 125 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	if (p && ht_ielen > 0) {
p                 129 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		pht_capie = (struct ieee80211_ht_cap *)(p + 2);
p                 202 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	p = custom;
p                 203 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
p                 208 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
p                 235 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		u8 *p;
p                 246 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 			p = buf;
p                 247 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 			p += sprintf(p, "wpa_ie=");
p                 249 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 				p += sprintf(p, "%02x", wpa_ie[i]);
p                 262 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 			p = buf;
p                 263 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 			p += sprintf(p, "rsn_ie=");
p                 265 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 				p += sprintf(p, "%02x", rsn_ie[i]);
p                 629 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	char *p;
p                 639 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		p = rtw_get_ie(&pcur_bss->ies[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->ie_length-12);
p                 640 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		if (p && ht_ielen > 0)
p                2023 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
p                2028 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	if (!p->pointer || p->length != sizeof(struct ieee_param)) {
p                2033 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	param = (struct ieee_param *)rtw_malloc(p->length);
p                2039 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	if (copy_from_user(param, p->pointer, p->length)) {
p                2056 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		ret = wpa_set_encryption(dev, param, p->length);
p                2069 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	if (ret == 0 && copy_to_user(p->pointer, param, p->length))
p                2799 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
p                2815 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	if (!p->pointer || p->length != sizeof(struct ieee_param)) {
p                2820 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	param = (struct ieee_param *)rtw_malloc(p->length);
p                2826 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	if (copy_from_user(param, p->pointer, p->length)) {
p                2843 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		ret = rtw_set_beacon(dev, param, p->length);
p                2846 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		ret = rtw_set_encryption(dev, param, p->length);
p                2852 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		ret = rtw_set_wps_beacon(dev, param, p->length);
p                2855 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		ret = rtw_set_wps_probe_resp(dev, param, p->length);
p                2858 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		ret = rtw_set_wps_assoc_resp(dev, param, p->length);
p                2861 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		ret = rtw_set_hidden_ssid(dev, param, p->length);
p                2864 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		ret = rtw_ioctl_get_sta_data(dev, param, p->length);
p                2867 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		ret = rtw_ioctl_set_macaddr_acl(dev, param, p->length);
p                2870 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		ret = rtw_ioctl_acl_add_sta(dev, param, p->length);
p                2873 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		ret = rtw_ioctl_acl_remove_sta(dev, param, p->length);
p                2881 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	if (ret == 0 && copy_to_user(p->pointer, param, p->length))
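
Both private-ioctl paths above (wpa_supplicant_ioctl() and rtw_hostapd_ioctl()) follow the same marshalling shape: validate the iw_point length against the expected parameter struct, copy the block in from user space, dispatch on the embedded command, and copy the possibly updated block back out when the handler succeeded. Skeleton only; kmalloc() stands in for the driver's rtw_malloc(), and the ieee_param dispatch is elided.

static int priv_ioctl_sketch(struct iw_point *p, size_t param_size)
{
	void *param;
	int ret = 0;

	if (!p->pointer || p->length != param_size)
		return -EINVAL;

	param = kmalloc(p->length, GFP_KERNEL);
	if (!param)
		return -ENOMEM;

	if (copy_from_user(param, p->pointer, p->length)) {
		kfree(param);
		return -EFAULT;
	}

	/* ... dispatch on the command field of the copied block ... */

	if (ret == 0 && copy_to_user(p->pointer, param, p->length))
		ret = -EFAULT;

	kfree(param);
	return ret;
}
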
p                  87 drivers/staging/rtl8188eu/os_dep/mlme_linux.c 	u8 *buff, *p, i;
p                 100 drivers/staging/rtl8188eu/os_dep/mlme_linux.c 		p = buff;
p                 101 drivers/staging/rtl8188eu/os_dep/mlme_linux.c 		p += sprintf(p, "ASSOCINFO(ReqIEs =");
p                 105 drivers/staging/rtl8188eu/os_dep/mlme_linux.c 			p += sprintf(p, "%02x", sec_ie[i]);
p                 106 drivers/staging/rtl8188eu/os_dep/mlme_linux.c 		p += sprintf(p, ")");
p                 108 drivers/staging/rtl8188eu/os_dep/mlme_linux.c 		wrqu.data.length = min_t(__u16, p - buff, IW_CUSTOM_MAX);
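
The wireless-event string in mlme_linux.c above is built with a printf cursor: each call appends at p and returns the number of characters written, so p always sits on the terminating NUL and the final length is p - buff. Sketch of that idiom; scnprintf() stands in for the driver's sprintf() so the sketch stays bounded, and the function name is hypothetical.

static int format_assoc_info_sketch(char *buf, size_t buflen,
				    const u8 *ie, int ie_len)
{
	char *p = buf;
	int i;

	p += scnprintf(p, buflen - (p - buf), "ASSOCINFO(ReqIEs =");
	for (i = 0; i < ie_len; i++)
		p += scnprintf(p, buflen - (p - buf), "%02x", ie[i]);
	p += scnprintf(p, buflen - (p - buf), ")");

	return p - buf;		/* length of the assembled string */
}
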
p                 188 drivers/staging/rtl8188eu/os_dep/os_intfs.c static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p)
p                 191 drivers/staging/rtl8188eu/os_dep/os_intfs.c 	struct sockaddr *addr = p;
p                 531 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 	struct r8192_priv *p = rtllib_priv(dev);
p                 534 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 		if ((p->rfa_txpowertrackingindex > 0) &&
p                 535 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 		    (p->rfc_txpowertrackingindex > 0)) {
p                 536 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 			p->rfa_txpowertrackingindex--;
p                 537 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 			if (p->rfa_txpowertrackingindex_real > 4) {
p                 538 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 				p->rfa_txpowertrackingindex_real--;
p                 541 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 						  dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
p                 544 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 			p->rfc_txpowertrackingindex--;
p                 545 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 			if (p->rfc_txpowertrackingindex_real > 4) {
p                 546 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 				p->rfc_txpowertrackingindex_real--;
p                 550 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 						  dm_tx_bb_gain[p->rfc_txpowertrackingindex_real]);
p                 561 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 		if (p->rfa_txpowertrackingindex > 0) {
p                 562 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 			p->rfa_txpowertrackingindex--;
p                 563 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 			if (p->rfa_txpowertrackingindex_real > 4) {
p                 564 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 				p->rfa_txpowertrackingindex_real--;
p                 568 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 						  dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
p                 580 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 	struct r8192_priv *p = rtllib_priv(dev);
p                 583 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 		if ((p->rfa_txpowertrackingindex < TxBBGainTableLength - 1) &&
p                 584 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 		    (p->rfc_txpowertrackingindex < TxBBGainTableLength - 1)) {
p                 585 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 			p->rfa_txpowertrackingindex++;
p                 586 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 			p->rfa_txpowertrackingindex_real++;
p                 589 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 					  dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
p                 590 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 			p->rfc_txpowertrackingindex++;
p                 591 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 			p->rfc_txpowertrackingindex_real++;
p                 594 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 					  dm_tx_bb_gain[p->rfc_txpowertrackingindex_real]);
p                 604 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 		if (p->rfa_txpowertrackingindex < (TxBBGainTableLength - 1)) {
p                 605 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 			p->rfa_txpowertrackingindex++;
p                 606 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 			p->rfa_txpowertrackingindex_real++;
p                 609 drivers/staging/rtl8192e/rtl8192e/rtl_dm.c 					  dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
p                 750 drivers/staging/rtl8192e/rtl8192e/rtl_wx.c 				    union iwreq_data *wrqu, char *p)
p                 753 drivers/staging/rtl8192e/rtl8192e/rtl_wx.c 	int *parms = (int *)p;
p                  80 drivers/staging/rtl8192e/rtllib.h #define iwe_stream_add_point_rsl(info, start, stop, iwe, p)	\
p                  81 drivers/staging/rtl8192e/rtllib.h 	iwe_stream_add_point(info, start, stop, iwe, p)
p                 392 drivers/staging/rtl8192e/rtllib_rx.c 		struct list_head *p;
p                 397 drivers/staging/rtl8192e/rtllib_rx.c 		list_for_each(p, &ieee->ibss_mac_hash[index]) {
p                 398 drivers/staging/rtl8192e/rtllib_rx.c 			entry = list_entry(p, struct ieee_ibss_seq, list);
p                 402 drivers/staging/rtl8192e/rtllib_rx.c 		if (p == &ieee->ibss_mac_hash[index]) {
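The ibss_mac_hash walk above (and its rtl8192u twin later in this listing) searches a per-bucket list with list_for_each and detects "not found" by checking whether the cursor came back around to the list head. A sketch of the same lookup-or-insert idiom; seq_entry, seq_hash and the hash function are invented for illustration, and the locking the real code needs is omitted.

	#include <linux/etherdevice.h>
	#include <linux/list.h>
	#include <linux/slab.h>

	#define HASH_BUCKETS 32

	struct seq_entry {
		struct list_head list;
		u8 mac[ETH_ALEN];
		u16 last_seq;
	};

	static struct list_head seq_hash[HASH_BUCKETS];

	static void seq_hash_init(void)
	{
		int i;

		for (i = 0; i < HASH_BUCKETS; i++)
			INIT_LIST_HEAD(&seq_hash[i]);
	}

	static struct seq_entry *seq_lookup(const u8 *mac)
	{
		unsigned int index = mac[ETH_ALEN - 1] % HASH_BUCKETS;	/* toy hash */
		struct seq_entry *entry = NULL;
		struct list_head *p;

		list_for_each(p, &seq_hash[index]) {
			entry = list_entry(p, struct seq_entry, list);
			if (ether_addr_equal(entry->mac, mac))
				break;
		}

		if (p == &seq_hash[index]) {
			/* the walk reached the head again: no match, add a fresh entry */
			entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				return NULL;
			ether_addr_copy(entry->mac, mac);
			list_add(&entry->list, &seq_hash[index]);
		}
		return entry;
	}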
p                2054 drivers/staging/rtl8192e/rtllib_rx.c 	char *p;
p                2090 drivers/staging/rtl8192e/rtllib_rx.c 			p = rates_str;
p                2095 drivers/staging/rtl8192e/rtllib_rx.c 				p += snprintf(p, sizeof(rates_str) -
p                2096 drivers/staging/rtl8192e/rtllib_rx.c 					      (p - rates_str), "%02X ",
p                2118 drivers/staging/rtl8192e/rtllib_rx.c 			p = rates_str;
p                2123 drivers/staging/rtl8192e/rtllib_rx.c 				p += snprintf(p, sizeof(rates_str) -
p                2124 drivers/staging/rtl8192e/rtllib_rx.c 					      (p - rates_str), "%02X ",
p                  42 drivers/staging/rtl8192e/rtllib_wx.c 	char *p;
p                 116 drivers/staging/rtl8192e/rtllib_wx.c 	p = custom;
p                 117 drivers/staging/rtl8192e/rtllib_wx.c 	p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
p                 127 drivers/staging/rtl8192e/rtllib_wx.c 		p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
p                 132 drivers/staging/rtl8192e/rtllib_wx.c 		p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
p                 166 drivers/staging/rtl8192e/rtllib_wx.c 	iwe.u.data.length = p - custom;
p                 188 drivers/staging/rtl8192e/rtllib_wx.c 	p = custom;
p                 189 drivers/staging/rtl8192e/rtllib_wx.c 	iwe.u.data.length = p - custom;
p                 228 drivers/staging/rtl8192e/rtllib_wx.c 	p = custom;
p                 229 drivers/staging/rtl8192e/rtllib_wx.c 	p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
p                 232 drivers/staging/rtl8192e/rtllib_wx.c 	iwe.u.data.length = p - custom;
p                2254 drivers/staging/rtl8192u/ieee80211/ieee80211.h 				   struct iw_point *p);
p                  58 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h 	char * (*print_stats)(char *p, void *priv);
p                 381 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c static char *ieee80211_ccmp_print_stats(char *p, void *priv)
p                 385 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 	p += sprintf(p, "key[%d] alg=CCMP key_set=%d tx_pn=%pm rx_pn=%pm format_errors=%d replays=%d decrypt_errors=%d\n",
p                 392 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c 	return p;
p                 720 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c static char *ieee80211_tkip_print_stats(char *p, void *priv)
p                 724 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	p += sprintf(p, "key[%d] alg=TKIP key_set=%d "
p                 744 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c 	return p;
p                 252 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c static char *prism2_wep_print_stats(char *p, void *priv)
p                 256 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 	p += sprintf(p, "key[%d] alg=WEP len=%d\n",
p                 258 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c 	return p;
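The three print_stats hooks above share one convention: the callback receives a cursor into a caller-owned buffer, appends its lines with sprintf, and returns the advanced cursor so several crypto instances can be chained into a single procfs dump. A userspace restatement of that convention; wep_data and the field values are invented, and the caller is assumed to have sized the buffer generously.

	#include <stdio.h>

	struct wep_data {
		int key_idx;
		int key_len;
	};

	/* append one status line at *p and hand back the advanced cursor */
	static char *wep_print_stats(char *p, void *priv)
	{
		struct wep_data *wep = priv;

		p += sprintf(p, "key[%d] alg=WEP len=%d\n", wep->key_idx, wep->key_len);
		return p;
	}

	int main(void)
	{
		struct wep_data a = { .key_idx = 0, .key_len = 13 };
		struct wep_data b = { .key_idx = 1, .key_len = 5 };
		char buf[128], *p = buf;

		p = wep_print_stats(p, &a);	/* chained calls share the buffer */
		p = wep_print_stats(p, &b);
		printf("%s", buf);
		return 0;
	}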
p                 444 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		struct list_head *p;
p                 449 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		list_for_each(p, &ieee->ibss_mac_hash[index]) {
p                 450 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			entry = list_entry(p, struct ieee_ibss_seq, list);
p                 455 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 		if (p == &ieee->ibss_mac_hash[index]) {
p                1578 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 	char *p;
p                1616 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			p = rates_str;
p                1623 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 				p += snprintf(p, sizeof(rates_str) -
p                1624 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 					      (p - rates_str), "%02X ",
p                1643 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 			p = rates_str;
p                1650 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 				p += snprintf(p, sizeof(rates_str) -
p                1651 drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c 					      (p - rates_str), "%02X ",
p                2977 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_point *p)
p                2985 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (p->length < sizeof(struct ieee_param) || !p->pointer) {
p                2990 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	param = memdup_user(p->pointer, p->length);
p                3003 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		ret = ieee80211_wpa_set_wpa_ie(ieee, param, p->length);
p                3007 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 		ret = ieee80211_wpa_set_encryption(ieee, param, p->length);
p                3021 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c 	if (ret == 0 && copy_to_user(p->pointer, param, p->length))
p                  47 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c 	char *p;
p                 111 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c 	p = custom;
p                 112 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c 	p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
p                 122 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c 		p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
p                 127 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c 		p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
p                 157 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c 	iwe.u.data.length = p - custom;
p                 176 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c 	p = custom;
p                 178 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c 	iwe.u.data.length = p - custom;
p                 185 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c 		u8 *p = buf;
p                 186 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c 		p += sprintf(p, "wpa_ie=");
p                 188 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c 			p += sprintf(p, "%02x", network->wpa_ie[i]);
p                 200 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c 		u8 *p = buf;
p                 201 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c 		p += sprintf(p, "rsn_ie=");
p                 203 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c 			p += sprintf(p, "%02x", network->rsn_ie[i]);
p                 216 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c 	p = custom;
p                 217 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c 	p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
p                 219 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c 	iwe.u.data.length = p - custom;
p                3530 drivers/staging/rtl8192u/r8192U_core.c 	struct iw_point *p = &wrq->u.data;
p                3536 drivers/staging/rtl8192u/r8192U_core.c 	if (p->length < sizeof(struct ieee_param) || !p->pointer) {
p                3541 drivers/staging/rtl8192u/r8192U_core.c 	ipw = memdup_user(p->pointer, p->length);
p                 601 drivers/staging/rtl8192u/r8192U_wx.c 					union iwreq_data *wrqu, char *p)
p                 605 drivers/staging/rtl8192u/r8192U_wx.c 	int *parms = (int *)p;
p                 105 drivers/staging/rtl8712/ieee80211.c 	u8 *p;
p                 109 drivers/staging/rtl8712/ieee80211.c 	p = pbuf;
p                 113 drivers/staging/rtl8712/ieee80211.c 		if (*p == index) {
p                 114 drivers/staging/rtl8712/ieee80211.c 			*len = *(p + 1);
p                 115 drivers/staging/rtl8712/ieee80211.c 			return p;
p                 117 drivers/staging/rtl8712/ieee80211.c 		tmp = *(p + 1);
p                 118 drivers/staging/rtl8712/ieee80211.c 		p += (tmp + 2);
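The rtl8712 r8712_get_ie loop above walks an 802.11 information-element stream as TLV records: one ID byte, one length byte, then the payload, advancing by length + 2 until the wanted ID is found. A bounds-checked sketch of the same scan; find_ie is an invented name, and the drivers' rtw_get_ie/r8712_get_ie variants instead take a signed limit and return the length through a pointer argument.

	#include <stddef.h>
	#include <stdint.h>

	/* return a pointer to the first element with ID @id (pointing at the ID
	 * byte, so the payload starts at p + 2) or NULL; *len gets the payload
	 * length; truncated elements stop the scan */
	static uint8_t *find_ie(uint8_t *buf, size_t buflen, uint8_t id, size_t *len)
	{
		uint8_t *p = buf;

		while ((size_t)(p - buf) + 2 <= buflen) {
			uint8_t elen = p[1];

			if ((size_t)(p - buf) + 2 + elen > buflen)
				break;			/* element runs past the buffer */
			if (p[0] == id) {
				*len = elen;
				return p;
			}
			p += 2 + elen;			/* skip ID, length and payload */
		}
		return NULL;
	}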
p                 138 drivers/staging/rtl8712/mlme_linux.c 	u8 *buff, *p, i;
p                 146 drivers/staging/rtl8712/mlme_linux.c 		p = buff;
p                 147 drivers/staging/rtl8712/mlme_linux.c 		p += sprintf(p, "ASSOCINFO(ReqIEs=");
p                 151 drivers/staging/rtl8712/mlme_linux.c 			p += sprintf(p, "%02x", sec_ie[i]);
p                 152 drivers/staging/rtl8712/mlme_linux.c 		p += sprintf(p, ")");
p                 154 drivers/staging/rtl8712/mlme_linux.c 		wrqu.data.length = p - buff;
p                 163 drivers/staging/rtl8712/os_intfs.c static int r871x_net_set_mac_address(struct net_device *pnetdev, void *p)
p                 166 drivers/staging/rtl8712/os_intfs.c 	struct sockaddr *addr = p;
p                 216 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	s8 *p;
p                 240 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	p = r8712_get_ie(&pnetwork->network.IEs[12], _HT_CAPABILITY_IE_,
p                 242 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	if (p && ht_ielen > 0) {
p                 244 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 		pht_capie = (struct ieee80211_ht_cap *)(p + 2);
p                 607 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	char *p;
p                 616 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 		p = r8712_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_,
p                 618 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 		if (p && ht_ielen > 0)
p                1401 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	u8 *p;
p                1410 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	p = r8712_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen,
p                1412 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	if (p && ht_ielen > 0) {
p                1414 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 		pht_capie = (struct ieee80211_ht_cap *)(p + 2);
p                1870 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	struct iw_point *p = &wrqu->data;
p                1880 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	if ((!p->length) || (!p->pointer))
p                1883 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	bset = (u8)(p->flags & 0xFFFF);
p                1884 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	len = p->length;
p                1885 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	pparmbuf = memdup_user(p->pointer, len);
p                1929 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 		if (copy_to_user(p->pointer, pparmbuf, len))
p                2146 drivers/staging/rtl8712/rtl871x_ioctl_linux.c static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
p                2152 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	if (p->length < sizeof(struct ieee_param) || !p->pointer)
p                2154 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	param = memdup_user(p->pointer, p->length);
p                2167 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 		ret = wpa_set_encryption(dev, param, p->length);
p                2177 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	if (ret == 0 && copy_to_user(p->pointer, param, p->length))
p                1645 drivers/staging/rtl8712/rtl871x_mlme.c 	unsigned char *p;
p                1653 drivers/staging/rtl8712/rtl871x_mlme.c 	p = r8712_get_ie(in_ie + 12, _HT_CAPABILITY_IE_, &ielen, in_len - 12);
p                1654 drivers/staging/rtl8712/rtl871x_mlme.c 	if (p && (ielen > 0)) {
p                1682 drivers/staging/rtl8712/rtl871x_mlme.c 	u8 *p, max_ampdu_sz;
p                1701 drivers/staging/rtl8712/rtl871x_mlme.c 	p = r8712_get_ie(pie + sizeof(struct NDIS_802_11_FIXED_IEs),
p                1705 drivers/staging/rtl8712/rtl871x_mlme.c 	if (p && len > 0) {
p                1706 drivers/staging/rtl8712/rtl871x_mlme.c 		pht_capie = (struct ieee80211_ht_cap *)(p + 2);
p                1736 drivers/staging/rtl8712/rtl871x_mlme.c 	p = r8712_get_ie(pie + sizeof(struct NDIS_802_11_FIXED_IEs),
p                 116 drivers/staging/rtl8712/rtl871x_security.c 	u8 *p = (u8 *)&c, *p1;
p                 127 drivers/staging/rtl8712/rtl871x_security.c 		p1[0] = crc32_reverseBit(p[3]);
p                 128 drivers/staging/rtl8712/rtl871x_security.c 		p1[1] = crc32_reverseBit(p[2]);
p                 129 drivers/staging/rtl8712/rtl871x_security.c 		p1[2] = crc32_reverseBit(p[1]);
p                 130 drivers/staging/rtl8712/rtl871x_security.c 		p1[3] = crc32_reverseBit(p[0]);
p                 137 drivers/staging/rtl8712/rtl871x_security.c 	u8 *p;
p                 143 drivers/staging/rtl8712/rtl871x_security.c 	for (p = buf; len > 0; ++p, --len)
p                 144 drivers/staging/rtl8712/rtl871x_security.c 		crc = crc32_table[(crc ^ *p) & 0xff] ^ (crc >> 8);
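The rtl871x_security.c lines above build a CRC-32 lookup table by bit-reversing the output of a forward CRC and then consume it with a right-shifting per-byte update. The conventional construction with the reflected polynomial 0xEDB88320 should produce the same table and drives the identical update shown above; a portable sketch, with the final inversion left as the standard one rather than whatever framing the WEP/TKIP callers apply.

	#include <stddef.h>
	#include <stdint.h>

	static uint32_t crc32_table[256];

	static void crc32_init_table(void)
	{
		uint32_t i, j, c;

		for (i = 0; i < 256; i++) {
			c = i;
			for (j = 0; j < 8; j++)
				c = (c & 1) ? (c >> 1) ^ 0xEDB88320u : c >> 1;
			crc32_table[i] = c;
		}
	}

	static uint32_t crc32_buf(const uint8_t *buf, size_t len)
	{
		uint32_t crc = 0xFFFFFFFFu;

		/* same table-driven update as the driver's inner loop */
		while (len--)
			crc = crc32_table[(crc ^ *buf++) & 0xff] ^ (crc >> 8);
		return ~crc;	/* standard final inversion */
	}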
p                 245 drivers/staging/rtl8712/rtl871x_security.c static u32 secmicgetuint32(u8 *p)
p                 252 drivers/staging/rtl8712/rtl871x_security.c 		res |= ((u32)(*p++)) << (8 * i);
p                 256 drivers/staging/rtl8712/rtl871x_security.c static void secmicputuint32(u8 *p, u32 val)
p                 262 drivers/staging/rtl8712/rtl871x_security.c 		*p++ = (u8) (val & 0xff);
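secmicgetuint32/secmicputuint32 above serialize a 32-bit word as little-endian bytes one byte at a time, which keeps the MIC code in these files independent of host endianness and alignment. Minimal portable equivalents of the two helpers:

	#include <stdint.h>

	static uint32_t get_le32(const uint8_t *p)
	{
		uint32_t res = 0;
		int i;

		for (i = 0; i < 4; i++)
			res |= (uint32_t)p[i] << (8 * i);	/* byte 0 is least significant */
		return res;
	}

	static void put_le32(uint8_t *p, uint32_t val)
	{
		int i;

		for (i = 0; i < 4; i++) {
			p[i] = (uint8_t)(val & 0xff);
			val >>= 8;
		}
	}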
p                  70 drivers/staging/rtl8723bs/core/rtw_ap.c 		u8 *p, *dst_ie, *premainder_ie = NULL, *pbackup_remainder_ie = NULL;
p                  76 drivers/staging/rtl8723bs/core/rtw_ap.c 		p = rtw_get_ie(
p                  82 drivers/staging/rtl8723bs/core/rtw_ap.c 		if (p != NULL && tim_ielen > 0) {
p                  85 drivers/staging/rtl8723bs/core/rtw_ap.c 			premainder_ie = p + tim_ielen;
p                  87 drivers/staging/rtl8723bs/core/rtw_ap.c 			tim_ie_offset = (sint)(p - pie);
p                  92 drivers/staging/rtl8723bs/core/rtw_ap.c 			dst_ie = p;
p                 100 drivers/staging/rtl8723bs/core/rtw_ap.c 			p = rtw_get_ie(
p                 106 drivers/staging/rtl8723bs/core/rtw_ap.c 			if (p != NULL)
p                 110 drivers/staging/rtl8723bs/core/rtw_ap.c 			p = rtw_get_ie(
p                 115 drivers/staging/rtl8723bs/core/rtw_ap.c 			if (p !=  NULL)
p                 758 drivers/staging/rtl8723bs/core/rtw_ap.c 	u8 *p;
p                 856 drivers/staging/rtl8723bs/core/rtw_ap.c 	p = rtw_get_ie(
p                 862 drivers/staging/rtl8723bs/core/rtw_ap.c 	if (p && ie_len) {
p                 863 drivers/staging/rtl8723bs/core/rtw_ap.c 		pht_info = (struct HT_info_element *)(p + 2);
p                 944 drivers/staging/rtl8723bs/core/rtw_ap.c 	u8 *p;
p                 995 drivers/staging/rtl8723bs/core/rtw_ap.c 	p = rtw_get_beacon_interval_from_ie(ie);/* ie + 8;	8: TimeStamp, 2: Beacon Interval 2:Capability */
p                 997 drivers/staging/rtl8723bs/core/rtw_ap.c 	pbss_network->Configuration.BeaconPeriod = RTW_GET_LE16(p);
p                1005 drivers/staging/rtl8723bs/core/rtw_ap.c 	p = rtw_get_ie(
p                1011 drivers/staging/rtl8723bs/core/rtw_ap.c 	if (p && ie_len > 0) {
p                1013 drivers/staging/rtl8723bs/core/rtw_ap.c 		memcpy(pbss_network->Ssid.Ssid, (p + 2), ie_len);
p                1020 drivers/staging/rtl8723bs/core/rtw_ap.c 	p = rtw_get_ie(
p                1025 drivers/staging/rtl8723bs/core/rtw_ap.c 	if (p && ie_len > 0)
p                1026 drivers/staging/rtl8723bs/core/rtw_ap.c 		channel = *(p + 2);
p                1032 drivers/staging/rtl8723bs/core/rtw_ap.c 	p = rtw_get_ie(
p                1038 drivers/staging/rtl8723bs/core/rtw_ap.c 	if (p !=  NULL) {
p                1039 drivers/staging/rtl8723bs/core/rtw_ap.c 		memcpy(supportRate, p + 2, ie_len);
p                1044 drivers/staging/rtl8723bs/core/rtw_ap.c 	p = rtw_get_ie(
p                1050 drivers/staging/rtl8723bs/core/rtw_ap.c 	if (p !=  NULL) {
p                1051 drivers/staging/rtl8723bs/core/rtw_ap.c 		memcpy(supportRate + supportRateNum, p + 2, ie_len);
p                1060 drivers/staging/rtl8723bs/core/rtw_ap.c 	p = rtw_get_ie(
p                1066 drivers/staging/rtl8723bs/core/rtw_ap.c 	if (p && ie_len > 0)
p                1067 drivers/staging/rtl8723bs/core/rtw_ap.c 		ERP_IE_handler(padapter, (struct ndis_80211_var_ie *)p);
p                1081 drivers/staging/rtl8723bs/core/rtw_ap.c 	p = rtw_get_ie(
p                1087 drivers/staging/rtl8723bs/core/rtw_ap.c 	if (p && ie_len > 0) {
p                1089 drivers/staging/rtl8723bs/core/rtw_ap.c 			p,
p                1110 drivers/staging/rtl8723bs/core/rtw_ap.c 	for (p = ie + _BEACON_IE_OFFSET_; ; p += (ie_len + 2)) {
p                1111 drivers/staging/rtl8723bs/core/rtw_ap.c 		p = rtw_get_ie(
p                1112 drivers/staging/rtl8723bs/core/rtw_ap.c 			p,
p                1117 drivers/staging/rtl8723bs/core/rtw_ap.c 		if ((p) && (!memcmp(p + 2, OUI1, 4))) {
p                1119 drivers/staging/rtl8723bs/core/rtw_ap.c 				p,
p                1138 drivers/staging/rtl8723bs/core/rtw_ap.c 		if ((p == NULL) || (ie_len == 0))
p                1146 drivers/staging/rtl8723bs/core/rtw_ap.c 		for (p = ie + _BEACON_IE_OFFSET_; ; p += (ie_len + 2)) {
p                1147 drivers/staging/rtl8723bs/core/rtw_ap.c 			p = rtw_get_ie(
p                1148 drivers/staging/rtl8723bs/core/rtw_ap.c 				p,
p                1153 drivers/staging/rtl8723bs/core/rtw_ap.c 			if ((p) && !memcmp(p + 2, WMM_PARA_IE, 6)) {
p                1156 drivers/staging/rtl8723bs/core/rtw_ap.c 				*(p + 8) |= BIT(7);/* QoS Info, support U-APSD */
p                1159 drivers/staging/rtl8723bs/core/rtw_ap.c 				*(p + 10) &= ~BIT(4); /* BE */
p                1160 drivers/staging/rtl8723bs/core/rtw_ap.c 				*(p + 14) &= ~BIT(4); /* BK */
p                1161 drivers/staging/rtl8723bs/core/rtw_ap.c 				*(p + 18) &= ~BIT(4); /* VI */
p                1162 drivers/staging/rtl8723bs/core/rtw_ap.c 				*(p + 22) &= ~BIT(4); /* VO */
p                1167 drivers/staging/rtl8723bs/core/rtw_ap.c 			if ((p == NULL) || (ie_len == 0))
p                1173 drivers/staging/rtl8723bs/core/rtw_ap.c 	p = rtw_get_ie(
p                1179 drivers/staging/rtl8723bs/core/rtw_ap.c 	if (p && ie_len > 0) {
p                1182 drivers/staging/rtl8723bs/core/rtw_ap.c 		struct rtw_ieee80211_ht_cap *pht_cap = (struct rtw_ieee80211_ht_cap *)(p + 2);
p                1184 drivers/staging/rtl8723bs/core/rtw_ap.c 		pHT_caps_ie = p;
p                1232 drivers/staging/rtl8723bs/core/rtw_ap.c 		memcpy(&pmlmepriv->htpriv.ht_cap, p + 2, ie_len);
p                1236 drivers/staging/rtl8723bs/core/rtw_ap.c 	p = rtw_get_ie(
p                1242 drivers/staging/rtl8723bs/core/rtw_ap.c 	if (p && ie_len > 0)
p                1243 drivers/staging/rtl8723bs/core/rtw_ap.c 		pHT_info_ie = p;
p                1587 drivers/staging/rtl8723bs/core/rtw_ap.c 	unsigned char *p, *ie = pnetwork->IEs;
p                1596 drivers/staging/rtl8723bs/core/rtw_ap.c 	p = rtw_get_ie(
p                1602 drivers/staging/rtl8723bs/core/rtw_ap.c 	if (p && len > 0) {
p                1603 drivers/staging/rtl8723bs/core/rtw_ap.c 		struct ndis_80211_var_ie *pIE = (struct ndis_80211_var_ie *)p;
p                 148 drivers/staging/rtl8723bs/core/rtw_ieee80211.c 	u8 *p;
p                 153 drivers/staging/rtl8723bs/core/rtw_ieee80211.c 	p = pbuf;
p                 157 drivers/staging/rtl8723bs/core/rtw_ieee80211.c 		if (*p == index) {
p                 158 drivers/staging/rtl8723bs/core/rtw_ieee80211.c 			*len = *(p + 1);
p                 159 drivers/staging/rtl8723bs/core/rtw_ieee80211.c 			return p;
p                 161 drivers/staging/rtl8723bs/core/rtw_ieee80211.c 			tmp = *(p + 1);
p                 162 drivers/staging/rtl8723bs/core/rtw_ieee80211.c 			p += (tmp + 2);
p                1181 drivers/staging/rtl8723bs/core/rtw_ieee80211.c 	unsigned char 	*p;
p                1214 drivers/staging/rtl8723bs/core/rtw_ieee80211.c 	p = rtw_get_ie(pnetwork->network.IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pnetwork->network.IELength - _FIXED_IE_LENGTH_);
p                1215 drivers/staging/rtl8723bs/core/rtw_ieee80211.c 	if (p && len > 0) {
p                1216 drivers/staging/rtl8723bs/core/rtw_ieee80211.c 			pht_cap = (struct ieee80211_ht_cap *)(p + 2);
p                1222 drivers/staging/rtl8723bs/core/rtw_ieee80211.c 	p = rtw_get_ie(pnetwork->network.IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, pnetwork->network.IELength - _FIXED_IE_LENGTH_);
p                1223 drivers/staging/rtl8723bs/core/rtw_ieee80211.c 	if (p && len > 0) {
p                1224 drivers/staging/rtl8723bs/core/rtw_ieee80211.c 			pht_info = (struct HT_info_element *)(p + 2);
p                 749 drivers/staging/rtl8723bs/core/rtw_mlme.c 		u8 *p = NULL;
p                 756 drivers/staging/rtl8723bs/core/rtw_mlme.c 			p = rtw_get_ie(pnetwork->network.IEs + _BEACON_IE_OFFSET_, _RSN_IE_2_, &ie_len, (pnetwork->network.IELength - _BEACON_IE_OFFSET_));
p                 757 drivers/staging/rtl8723bs/core/rtw_mlme.c 			if (p && ie_len > 0) {
p                2662 drivers/staging/rtl8723bs/core/rtw_mlme.c 	unsigned char *p, *pframe;
p                2692 drivers/staging/rtl8723bs/core/rtw_mlme.c 		p = rtw_get_ie(in_ie, _HT_ADD_INFO_IE_, &ielen, in_len);
p                2693 drivers/staging/rtl8723bs/core/rtw_mlme.c 		if (p && (ielen == sizeof(struct ieee80211_ht_addt_info))) {
p                2694 drivers/staging/rtl8723bs/core/rtw_mlme.c 			struct HT_info_element *pht_info = (struct HT_info_element *)(p+2);
p                2798 drivers/staging/rtl8723bs/core/rtw_mlme.c 		p = rtw_get_ie(in_ie, _HT_ADD_INFO_IE_, &ielen, in_len);
p                2799 drivers/staging/rtl8723bs/core/rtw_mlme.c 		if (p && (ielen == sizeof(struct ieee80211_ht_addt_info))) {
p                2801 drivers/staging/rtl8723bs/core/rtw_mlme.c 			pframe = rtw_set_ie(out_ie+out_len, _HT_ADD_INFO_IE_, ielen, p+2, pout_len);
p                2812 drivers/staging/rtl8723bs/core/rtw_mlme.c 	u8 *p, max_ampdu_sz;
p                2853 drivers/staging/rtl8723bs/core/rtw_mlme.c 	p = rtw_get_ie(pie+sizeof(struct ndis_802_11_fix_ie), _HT_CAPABILITY_IE_, &len, ie_len-sizeof(struct ndis_802_11_fix_ie));
p                2854 drivers/staging/rtl8723bs/core/rtw_mlme.c 	if (p && len > 0) {
p                2855 drivers/staging/rtl8723bs/core/rtw_mlme.c 		pht_capie = (struct rtw_ieee80211_ht_cap *)(p+2);
p                2866 drivers/staging/rtl8723bs/core/rtw_mlme.c 	p = rtw_get_ie(pie+sizeof(struct ndis_802_11_fix_ie), _HT_ADD_INFO_IE_, &len, ie_len-sizeof(struct ndis_802_11_fix_ie));
p                2867 drivers/staging/rtl8723bs/core/rtw_mlme.c 	if (p && len > 0) {
p                2868 drivers/staging/rtl8723bs/core/rtw_mlme.c 		pht_addtinfo = (struct ieee80211_ht_addt_info *)(p+2);
p                 601 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	unsigned char *p;
p                 630 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _VENDOR_SPECIFIC_IE_, (int *)&ielen,
p                 633 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		if (!p || ielen != 14)
p                 636 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		if (memcmp(p+2, RC_OUI, sizeof(RC_OUI)))
p                 639 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		if (memcmp(p+6, get_sa(pframe), ETH_ALEN)) {
p                 641 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 				MAC_ARG(get_sa(pframe)), MAC_ARG(p+6));
p                 730 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _SSID_IE_, (int *)&ielen,
p                 735 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	if (p != NULL) {
p                 739 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		if ((ielen != 0 && false == !memcmp((void *)(p+2), (void *)cur->Ssid.Ssid, cur->Ssid.SsidLength))
p                 782 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	u8 *p = NULL;
p                 785 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	p = rtw_get_ie(pframe + sizeof(struct ieee80211_hdr_3addr) + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_, &ielen, precv_frame->u.hdr.len - sizeof(struct ieee80211_hdr_3addr) - _BEACON_IE_OFFSET_);
p                 786 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	if ((p != NULL) && (ielen > 0)) {
p                 787 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		if ((*(p + 1 + ielen) == 0x2D) && (*(p + 2 + ielen) != 0x2D)) {
p                 790 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 			*(p + 1) = ielen - 1;
p                 887 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	unsigned char *sa, *p;
p                1030 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 			p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, (int *)&ie_len,
p                1033 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 			if ((p == NULL) || (ie_len <= 0)) {
p                1039 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 			if (!memcmp((void *)(p + 2), pstat->chg_txt, 128)) {
p                1088 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	unsigned char *p;
p                1127 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 			p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, (int *)&len,
p                1130 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 			if (p == NULL) {
p                1135 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 			memcpy((void *)(pmlmeinfo->chg_txt), (void *)(p + 2), len);
p                1176 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	unsigned char 	reassoc, *p, *pos, *wpa_ie;
p                1256 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _SSID_IE_, &ie_len,
p                1259 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	if (!p || ie_len == 0) {
p                1265 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		if (memcmp((void *)(p+2), cur->Ssid.Ssid, cur->Ssid.SsidLength))
p                1276 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _SUPPORTEDRATES_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
p                1277 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	if (p == NULL) {
p                1286 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		memcpy(supportRate, p+2, ie_len);
p                1289 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _EXT_SUPPORTEDRATES_IE_, &ie_len,
p                1291 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		if (p !=  NULL) {
p                1294 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 				memcpy(supportRate+supportRateNum, p+2, ie_len);
p                1448 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		p = pframe + WLAN_HDR_A3_LEN + ie_offset; ie_len = 0;
p                1450 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 			p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
p                1451 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 			if (p != NULL) {
p                1452 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 				if (!memcmp(p+2, WMM_IE, 6)) {
p                1457 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 					pstat->qos_info = *(p+8);
p                1494 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 			p = p + ie_len + 2;
p                4117 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 			u8 *p;
p                4129 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 			p = rtw_get_ie(pbss_network->IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pbss_network->IELength - _FIXED_IE_LENGTH_);
p                4130 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 			if ((p == NULL) || (len == 0)) {/* non-HT */
p                4415 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	u8 *p;
p                4465 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	p = rtw_get_ie(bssid->IEs + ie_offset, _SSID_IE_, &len, bssid->IELength - ie_offset);
p                4466 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	if (p == NULL) {
p                4471 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	if (*(p + 1)) {
p                4476 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
p                4477 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		bssid->Ssid.SsidLength = *(p + 1);
p                4485 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	p = rtw_get_ie(bssid->IEs + ie_offset, _SUPPORTEDRATES_IE_, &len, bssid->IELength - ie_offset);
p                4486 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	if (p != NULL) {
p                4491 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		memcpy(bssid->SupportedRates, (p + 2), len);
p                4495 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	p = rtw_get_ie(bssid->IEs + ie_offset, _EXT_SUPPORTEDRATES_IE_, &len, bssid->IELength - ie_offset);
p                4496 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	if (p != NULL) {
p                4501 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		memcpy(bssid->SupportedRates + i, (p + 2), len);
p                4510 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	p = rtw_get_ie(bssid->IEs + ie_offset, _DSSET_IE_, &len, bssid->IELength - ie_offset);
p                4515 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	if (p) {
p                4516 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		bssid->Configuration.DSConfig = *(p + 2);
p                4520 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		p = rtw_get_ie(bssid->IEs + ie_offset, _HT_ADD_INFO_IE_, &len, bssid->IELength - ie_offset);
p                4521 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		if (p) {
p                4522 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 			struct HT_info_element *HT_info = (struct HT_info_element *)(p + 2);
p                4553 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		p = rtw_get_ie(bssid->IEs + ie_offset, _HT_CAPABILITY_IE_, &len, bssid->IELength - ie_offset);
p                4554 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		if (p && len > 0) {
p                4556 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 			pHT_caps = (struct HT_caps_element	*)(p + 2);
p                4777 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		u8 *ie, *p;
p                4793 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		p = ie;
p                4797 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		memcpy(country, p, 3);
p                4798 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		p += 3;
p                4803 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		while ((ie - p) >= 3) {
p                4804 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 			fcn = *(p++);
p                4805 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 			noc = *(p++);
p                4806 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 			p++;
p                6860 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	struct RunInThread_param *p;
p                6865 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	p = (struct RunInThread_param *)pbuf;
p                6867 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	if (p->func)
p                6868 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		p->func(p->context);
p                 172 drivers/staging/rtl8723bs/core/rtw_security.c 		u8 *p = (u8 *)&c, *p1;
p                 184 drivers/staging/rtl8723bs/core/rtw_security.c 			p1[0] = crc32_reverseBit(p[3]);
p                 185 drivers/staging/rtl8723bs/core/rtw_security.c 			p1[1] = crc32_reverseBit(p[2]);
p                 186 drivers/staging/rtl8723bs/core/rtw_security.c 			p1[2] = crc32_reverseBit(p[1]);
p                 187 drivers/staging/rtl8723bs/core/rtw_security.c 			p1[3] = crc32_reverseBit(p[0]);
p                 195 drivers/staging/rtl8723bs/core/rtw_security.c 	u8 *p;
p                 203 drivers/staging/rtl8723bs/core/rtw_security.c 	for (p = buf; len > 0; ++p, --len) {
p                 204 drivers/staging/rtl8723bs/core/rtw_security.c 		crc = crc32_table[(crc ^ *p) & 0xff] ^ (crc >> 8);
p                 317 drivers/staging/rtl8723bs/core/rtw_security.c static u32 secmicgetuint32(u8 *p)
p                 324 drivers/staging/rtl8723bs/core/rtw_security.c 		res |= ((u32)(*p++)) << (8*i);
p                 330 drivers/staging/rtl8723bs/core/rtw_security.c static void secmicputuint32(u8 *p, u32 val)
p                 336 drivers/staging/rtl8723bs/core/rtw_security.c 		*p++ = (u8) (val & 0xff);
p                1942 drivers/staging/rtl8723bs/core/rtw_security.c 	u8 *BIP_AAD, *p;
p                1965 drivers/staging/rtl8723bs/core/rtw_security.c 	p = rtw_get_ie(BIP_AAD+BIP_AAD_SIZE, _MME_IE_, &len, pattrib->pkt_len-WLAN_HDR_A3_LEN);
p                1967 drivers/staging/rtl8723bs/core/rtw_security.c 	if (p) {
p                1971 drivers/staging/rtl8723bs/core/rtw_security.c 		memcpy(&le_tmp64, p+4, 6);
p                1979 drivers/staging/rtl8723bs/core/rtw_security.c 		memcpy(&le_tmp, p+2, 2);
p                1986 drivers/staging/rtl8723bs/core/rtw_security.c 		memset(p+2+len-8, 0, 8);
p                1278 drivers/staging/rtl8723bs/core/rtw_wlan_util.c 	unsigned char 	*p;
p                1337 drivers/staging/rtl8723bs/core/rtw_wlan_util.c 	p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
p                1338 drivers/staging/rtl8723bs/core/rtw_wlan_util.c 	if (p && len > 0) {
p                1339 drivers/staging/rtl8723bs/core/rtw_wlan_util.c 			pht_cap = (struct rtw_ieee80211_ht_cap *)(p + 2);
p                1345 drivers/staging/rtl8723bs/core/rtw_wlan_util.c 	p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
p                1346 drivers/staging/rtl8723bs/core/rtw_wlan_util.c 	if (p && len > 0) {
p                1347 drivers/staging/rtl8723bs/core/rtw_wlan_util.c 			pht_info = (struct HT_info_element *)(p + 2);
p                1369 drivers/staging/rtl8723bs/core/rtw_wlan_util.c 	p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _DSSET_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
p                1370 drivers/staging/rtl8723bs/core/rtw_wlan_util.c 	if (p) {
p                1371 drivers/staging/rtl8723bs/core/rtw_wlan_util.c 			bcn_channel = *(p + 2);
p                1388 drivers/staging/rtl8723bs/core/rtw_wlan_util.c 	p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
p                1389 drivers/staging/rtl8723bs/core/rtw_wlan_util.c 	if (!p) {
p                1396 drivers/staging/rtl8723bs/core/rtw_wlan_util.c 	if ((NULL != p) && (false == hidden_ssid && (*(p + 1)))) {
p                1397 drivers/staging/rtl8723bs/core/rtw_wlan_util.c 		memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
p                1398 drivers/staging/rtl8723bs/core/rtw_wlan_util.c 		bssid->Ssid.SsidLength = *(p + 1);
p                  42 drivers/staging/rtl8723bs/hal/HalPhyRf.c 	u8 p = 0;
p                  48 drivers/staging/rtl8723bs/hal/HalPhyRf.c 	for (p = ODM_RF_PATH_A; p < MAX_RF_PATH; ++p) {
p                  49 drivers/staging/rtl8723bs/hal/HalPhyRf.c 		pDM_Odm->BbSwingIdxOfdmBase[p] = pDM_Odm->DefaultOfdmIndex;
p                  50 drivers/staging/rtl8723bs/hal/HalPhyRf.c 		pDM_Odm->BbSwingIdxOfdm[p] = pDM_Odm->DefaultOfdmIndex;
p                  51 drivers/staging/rtl8723bs/hal/HalPhyRf.c 		pDM_Odm->RFCalibrateInfo.OFDM_index[p] = pDM_Odm->DefaultOfdmIndex;
p                  53 drivers/staging/rtl8723bs/hal/HalPhyRf.c 		pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p] = 0;
p                  54 drivers/staging/rtl8723bs/hal/HalPhyRf.c 		pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[p] = 0;
p                  55 drivers/staging/rtl8723bs/hal/HalPhyRf.c 		pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[p] = 0;
p                  56 drivers/staging/rtl8723bs/hal/HalPhyRf.c 		pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p] = 0;
p                  59 drivers/staging/rtl8723bs/hal/HalPhyRf.c 		pDM_Odm->Absolute_OFDMSwingIdx[p] = 0;
p                  60 drivers/staging/rtl8723bs/hal/HalPhyRf.c 		pDM_Odm->Remnant_OFDMSwingIdx[p] = 0;
p                  79 drivers/staging/rtl8723bs/hal/HalPhyRf.c 	u8 ThermalValue = 0, delta, delta_LCK, delta_IQK, p = 0, i = 0;
p                 350 drivers/staging/rtl8723bs/hal/HalPhyRf.c 		for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++) {
p                 357 drivers/staging/rtl8723bs/hal/HalPhyRf.c 					(p == ODM_RF_PATH_A ? 'A' : 'B')
p                 362 drivers/staging/rtl8723bs/hal/HalPhyRf.c 				pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[p] ==
p                 363 drivers/staging/rtl8723bs/hal/HalPhyRf.c 				pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[p]
p                 365 drivers/staging/rtl8723bs/hal/HalPhyRf.c 				pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p] = 0;
p                 367 drivers/staging/rtl8723bs/hal/HalPhyRf.c 				pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p] = pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[p] - pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[p];      /*  Power Index Diff between 2 times Power Tracking */
p                 376 drivers/staging/rtl8723bs/hal/HalPhyRf.c 						p == ODM_RF_PATH_A ? 'A' : 'B'),
p                 377 drivers/staging/rtl8723bs/hal/HalPhyRf.c 						pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p],
p                 378 drivers/staging/rtl8723bs/hal/HalPhyRf.c 						pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[p],
p                 379 drivers/staging/rtl8723bs/hal/HalPhyRf.c 						pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[p]
p                 383 drivers/staging/rtl8723bs/hal/HalPhyRf.c 			pDM_Odm->RFCalibrateInfo.OFDM_index[p] =
p                 384 drivers/staging/rtl8723bs/hal/HalPhyRf.c 				pDM_Odm->BbSwingIdxOfdmBase[p] +
p                 385 drivers/staging/rtl8723bs/hal/HalPhyRf.c 				pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p];
p                 389 drivers/staging/rtl8723bs/hal/HalPhyRf.c 				pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p];
p                 394 drivers/staging/rtl8723bs/hal/HalPhyRf.c 			pDM_Odm->BbSwingIdxOfdm[p] =
p                 395 drivers/staging/rtl8723bs/hal/HalPhyRf.c 				pDM_Odm->RFCalibrateInfo.OFDM_index[p];
p                 406 drivers/staging/rtl8723bs/hal/HalPhyRf.c 					pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p]
p                 415 drivers/staging/rtl8723bs/hal/HalPhyRf.c 					pDM_Odm->BbSwingIdxOfdm[p],
p                 416 drivers/staging/rtl8723bs/hal/HalPhyRf.c 					(p == ODM_RF_PATH_A ? 'A' : 'B'),
p                 417 drivers/staging/rtl8723bs/hal/HalPhyRf.c 					pDM_Odm->BbSwingIdxOfdmBase[p],
p                 418 drivers/staging/rtl8723bs/hal/HalPhyRf.c 					pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p]
p                 423 drivers/staging/rtl8723bs/hal/HalPhyRf.c 			if (pDM_Odm->RFCalibrateInfo.OFDM_index[p] > c.SwingTableSize_OFDM-1)
p                 424 drivers/staging/rtl8723bs/hal/HalPhyRf.c 				pDM_Odm->RFCalibrateInfo.OFDM_index[p] = c.SwingTableSize_OFDM-1;
p                 425 drivers/staging/rtl8723bs/hal/HalPhyRf.c 			else if (pDM_Odm->RFCalibrateInfo.OFDM_index[p] < OFDM_min_index)
p                 426 drivers/staging/rtl8723bs/hal/HalPhyRf.c 				pDM_Odm->RFCalibrateInfo.OFDM_index[p] = OFDM_min_index;
p                 451 drivers/staging/rtl8723bs/hal/HalPhyRf.c 			for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++)
p                 452 drivers/staging/rtl8723bs/hal/HalPhyRf.c 				pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p] = 0;
p                 466 drivers/staging/rtl8723bs/hal/HalPhyRf.c 	for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++) {
p                 473 drivers/staging/rtl8723bs/hal/HalPhyRf.c 				pDM_Odm->RFCalibrateInfo.OFDM_index[p],
p                 474 drivers/staging/rtl8723bs/hal/HalPhyRf.c 				(p == ODM_RF_PATH_A ? 'A' : 'B'),
p                 475 drivers/staging/rtl8723bs/hal/HalPhyRf.c 				pDM_Odm->BbSwingIdxOfdmBase[p]
p                 573 drivers/staging/rtl8723bs/hal/HalPhyRf.c 			for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++)
p                 574 drivers/staging/rtl8723bs/hal/HalPhyRf.c 					(*c.ODM_TxPwrTrackSetPwr)(pDM_Odm, MIX_MODE, p, 0);
p                 593 drivers/staging/rtl8723bs/hal/HalPhyRf.c 			for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++)
p                 594 drivers/staging/rtl8723bs/hal/HalPhyRf.c 				(*c.ODM_TxPwrTrackSetPwr)(pDM_Odm, MIX_MODE, p, Indexforchannel);
p                 599 drivers/staging/rtl8723bs/hal/HalPhyRf.c 		for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++)
p                 600 drivers/staging/rtl8723bs/hal/HalPhyRf.c 			pDM_Odm->BbSwingIdxOfdmBase[p] = pDM_Odm->BbSwingIdxOfdm[p];
p                1348 drivers/staging/rtl8723bs/hal/odm.c 	u8 p = 0;
p                1379 drivers/staging/rtl8723bs/hal/odm.c 	for (p = ODM_RF_PATH_A; p < MAX_RF_PATH; ++p) {
p                1380 drivers/staging/rtl8723bs/hal/odm.c 		pDM_Odm->BbSwingIdxOfdmBase[p] = pDM_Odm->DefaultOfdmIndex;
p                1381 drivers/staging/rtl8723bs/hal/odm.c 		pDM_Odm->RFCalibrateInfo.OFDM_index[p] = pDM_Odm->DefaultOfdmIndex;
p                1382 drivers/staging/rtl8723bs/hal/odm.c 		pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[p] = 0;
p                1383 drivers/staging/rtl8723bs/hal/odm.c 		pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[p] = 0;
p                1384 drivers/staging/rtl8723bs/hal/odm.c 		pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p] = 0;
p                 451 drivers/staging/rtl8723bs/include/rtw_xmit.h #define rtw_alloc_cmdxmitframe(p) __rtw_alloc_cmdxmitframe(p, CMDBUF_RSVD)
p                 452 drivers/staging/rtl8723bs/include/rtw_xmit.h #define rtw_alloc_bcnxmitframe(p) __rtw_alloc_cmdxmitframe(p, CMDBUF_BEACON)
p                  88 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	char *p;
p                 112 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		p = rtw_get_ie(&pnetwork->network.IEs[0], _HT_CAPABILITY_IE_, &ht_ielen, pnetwork->network.IELength);
p                 114 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		p = rtw_get_ie(&pnetwork->network.IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pnetwork->network.IELength-12);
p                 116 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	if (p && ht_ielen>0) {
p                 119 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		pht_capie = (struct rtw_ieee80211_ht_cap *)(p+2);
p                 199 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	p = custom;
p                 200 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
p                 205 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
p                 233 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		u8 *p;
p                 242 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 			p =buf;
p                 243 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 			p += sprintf(p, "wpa_ie =");
p                 245 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 				p += sprintf(p, "%02x", wpa_ie[i]);
p                 266 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 			p = buf;
p                 268 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 			p += sprintf(p, "rsn_ie =");
p                 270 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 				p += sprintf(p, "%02x", rsn_ie[i]);
p                 368 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		u8 *p, *pos;
p                 373 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		p = buf;
p                 375 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		p += sprintf(p, "fm =%02X%02X", pos[1], pos[0]);
p                 764 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	char *p;
p                 774 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		p = rtw_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->IELength-12);
p                 775 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		if (p && ht_ielen>0)
p                2230 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	struct iw_point *p;
p                2241 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	p = &wrqu->data;
p                2242 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	len = p->length;
p                2250 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	if (copy_from_user(ptmp, p->pointer, len)) {
p                3375 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
p                3382 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	if (!p->pointer || p->length != sizeof(struct ieee_param)) {
p                3387 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	param = rtw_malloc(p->length);
p                3393 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	if (copy_from_user(param, p->pointer, p->length)) {
p                3411 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		ret = wpa_set_encryption(dev, param, p->length);
p                3425 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	if (ret == 0 && copy_to_user(p->pointer, param, p->length))
p                4196 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
p                4216 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	if (!p->pointer || p->length != sizeof(*param)) {
p                4221 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	param = rtw_malloc(p->length);
p                4227 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	if (copy_from_user(param, p->pointer, p->length)) {
p                4256 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		ret = rtw_set_beacon(dev, param, p->length);
p                4262 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		ret = rtw_set_encryption(dev, param, p->length);
p                4274 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		ret = rtw_set_wps_beacon(dev, param, p->length);
p                4280 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		ret = rtw_set_wps_probe_resp(dev, param, p->length);
p                4286 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		ret = rtw_set_wps_assoc_resp(dev, param, p->length);
p                4292 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		ret = rtw_set_hidden_ssid(dev, param, p->length);
p                4298 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		ret = rtw_ioctl_get_sta_data(dev, param, p->length);
p                4304 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		ret = rtw_ioctl_set_macaddr_acl(dev, param, p->length);
p                4310 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		ret = rtw_ioctl_acl_add_sta(dev, param, p->length);
p                4316 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		ret = rtw_ioctl_acl_remove_sta(dev, param, p->length);
p                4327 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	if (ret == 0 && copy_to_user(p->pointer, param, p->length))
p                 144 drivers/staging/rtl8723bs/os_dep/mlme_linux.c 	u8 *buff, *p, i;
p                 159 drivers/staging/rtl8723bs/os_dep/mlme_linux.c 		p = buff;
p                 161 drivers/staging/rtl8723bs/os_dep/mlme_linux.c 		p += sprintf(p, "ASSOCINFO(ReqIEs =");
p                 167 drivers/staging/rtl8723bs/os_dep/mlme_linux.c 			p += sprintf(p, "%02x", sec_ie[i]);
p                 170 drivers/staging/rtl8723bs/os_dep/mlme_linux.c 		p += sprintf(p, ")");
p                 174 drivers/staging/rtl8723bs/os_dep/mlme_linux.c 		wrqu.data.length = p - buff;
p                 331 drivers/staging/rtl8723bs/os_dep/os_intfs.c static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p)
p                 334 drivers/staging/rtl8723bs/os_dep/os_intfs.c 	struct sockaddr *addr = p;
p                 221 drivers/staging/speakup/synth.c 	unsigned char buf[160], *p;
p                 230 drivers/staging/speakup/synth.c 	p = buf;
p                 232 drivers/staging/speakup/synth.c 		synth_buffer_add(*p++);
p                 252 drivers/staging/speakup/synth.c 	const u16 *p;
p                 254 drivers/staging/speakup/synth.c 	for (p = buf; *p; p++)
p                 255 drivers/staging/speakup/synth.c 		synth_buffer_add(*p);
p                 318 drivers/staging/speakup/varhandlers.c 	char *p;
p                 323 drivers/staging/speakup/varhandlers.c 	for (p = s; *p; p++)
p                 324 drivers/staging/speakup/varhandlers.c 		*p = tolower(*p);
p                 233 drivers/staging/unisys/visorhba/visorhba_main.c static unsigned int simple_idr_get(struct idr *idrtable, void *p,
p                 241 drivers/staging/unisys/visorhba/visorhba_main.c 	id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
p                 409 drivers/staging/uwb/i1480/dfu/usb.c #define i1480_USB_DEV(v, p)				\
p                 415 drivers/staging/uwb/i1480/dfu/usb.c 	.idProduct = (p),				\
p                 113 drivers/staging/uwb/reset.c 	struct uwb_rc_cmd_done_params *p = (struct uwb_rc_cmd_done_params *)arg;
p                 116 drivers/staging/uwb/reset.c 		if (p->reply)
p                 117 drivers/staging/uwb/reset.c 			reply_size = min(p->reply_size, reply_size);
p                 119 drivers/staging/uwb/reset.c 			p->reply = kmalloc(reply_size, GFP_ATOMIC);
p                 121 drivers/staging/uwb/reset.c 		if (p->reply)
p                 122 drivers/staging/uwb/reset.c 			memcpy(p->reply, reply, reply_size);
p                 126 drivers/staging/uwb/reset.c 	p->reply_size = reply_size;
p                 127 drivers/staging/uwb/reset.c 	complete(&p->completion);
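The uwb/reset.c callback above copies the command reply into a caller-owned params struct and then signals a completion; the submitting side (not part of this listing) waits on that completion to turn the asynchronous command into a blocking call. A skeletal sketch of that handshake with invented names; a production version would add a timeout and locking around the shared fields.

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	struct cmd_done_params {
		struct completion completion;
		void *reply;
		size_t reply_size;
	};

	/* notification path: duplicate the reply and wake whoever is waiting */
	static void cmd_done_cb(void *arg, const void *reply, size_t reply_size)
	{
		struct cmd_done_params *p = arg;

		p->reply = kmemdup(reply, reply_size, GFP_ATOMIC);
		p->reply_size = p->reply ? reply_size : 0;
		complete(&p->completion);
	}

	/* caller: prime the params, fire the asynchronous command, block until
	 * the callback has run */
	static int cmd_and_wait(void (*submit)(void (*cb)(void *, const void *, size_t),
						void *arg))
	{
		struct cmd_done_params params;
		int ret;

		init_completion(&params.completion);
		params.reply = NULL;
		params.reply_size = 0;

		submit(cmd_done_cb, &params);
		wait_for_completion(&params.completion);

		ret = params.reply ? 0 : -ENOMEM;
		kfree(params.reply);
		return ret;
	}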
p                 194 drivers/staging/uwb/uwb-debug.c static int reservations_show(struct seq_file *s, void *p)
p                 230 drivers/staging/uwb/uwb-debug.c static int drp_avail_show(struct seq_file *s, void *p)
p                 109 drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c 		.tlv = {.p = snd_bcm2835_db_scale}
p                 247 drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c 		.tlv = {.p = snd_bcm2835_db_scale}
p                 281 drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c 		.tlv = {.p = snd_bcm2835_db_scale}
p                 744 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
p                 747 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c 	p->type = port->type;
p                 748 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c 	p->index = port->index;
p                 749 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c 	p->index_all = 0;
p                 750 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c 	p->is_enabled = port->enabled;
p                 751 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c 	p->buffer_num_min = port->minimum_buffer.num;
p                 752 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c 	p->buffer_size_min = port->minimum_buffer.size;
p                 753 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c 	p->buffer_alignment_min = port->minimum_buffer.alignment;
p                 754 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c 	p->buffer_num_recommended = port->recommended_buffer.num;
p                 755 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c 	p->buffer_size_recommended = port->recommended_buffer.size;
p                 758 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c 	p->buffer_num = port->current_buffer.num;
p                 759 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c 	p->buffer_size = port->current_buffer.size;
p                 760 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c 	p->userdata = (u32)(unsigned long)port;
p                  75 drivers/staging/vt6655/device.h #define	AVAIL_TD(p, q)	((p)->opts.tx_descs[(q)] - ((p)->iTDUsed[(q)]))
p                 120 drivers/staging/wlan-ng/hfa384x.h #define		HFA384x_ADDR_AUX_MKFLAT(p, o)	\
p                 121 drivers/staging/wlan-ng/hfa384x.h 		((((u32)(((u16)(p)) & HFA384x_ADDR_AUX_PAGE_MASK)) << 7) | \
p                 457 drivers/staging/wusbcore/devconnect.c 	int p;
p                 470 drivers/staging/wusbcore/devconnect.c 	for (p = 0; p < wusbhc->ports_max; p++) {
p                 471 drivers/staging/wusbcore/devconnect.c 		struct wusb_dev *wusb_dev = wusb_port_by_idx(wusbhc, p)->wusb_dev;
p                  20 drivers/staging/wusbcore/host/whci/asl.c 	struct list_head *n, *p;
p                  27 drivers/staging/wusbcore/host/whci/asl.c 	p = qset->list_node.prev;
p                  28 drivers/staging/wusbcore/host/whci/asl.c 	if (p == &whc->async_list)
p                  29 drivers/staging/wusbcore/host/whci/asl.c 		p = p->prev;
p                  32 drivers/staging/wusbcore/host/whci/asl.c 	*prev = container_of(p, struct whc_qset, list_node);
p                  75 drivers/staging/wusbcore/host/whci/debug.c static int di_show(struct seq_file *s, void *p)
p                  96 drivers/staging/wusbcore/host/whci/debug.c static int asl_show(struct seq_file *s, void *p)
p                 109 drivers/staging/wusbcore/host/whci/debug.c static int pzl_show(struct seq_file *s, void *p)
p                 351 drivers/staging/wusbcore/host/whci/qset.c 	int p;
p                 373 drivers/staging/wusbcore/host/whci/qset.c 	for (p = 0; p < std->num_pointers; p++) {
p                 374 drivers/staging/wusbcore/host/whci/qset.c 		std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
p                 434 drivers/staging/wusbcore/host/whci/qset.c 	int p = 0;
p                 477 drivers/staging/wusbcore/host/whci/qset.c 				p = 0;
p                 511 drivers/staging/wusbcore/host/whci/qset.c 			for (;p < std->num_pointers; p++) {
p                 512 drivers/staging/wusbcore/host/whci/qset.c 				std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
p                 154 drivers/staging/wusbcore/host/whci/whci-hc.h #define QH_INFO2_DBP(p)          ((p) << 5)  /* data burst policy (see [WUSB] table 5-7) */
p                 163 drivers/staging/wusbcore/host/whci/whci-hc.h #define QH_INFO3_TX_PWR(p)       ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */
p                 313 drivers/staging/wusbcore/host/whci/whci-hc.h #  define WHCSPARAMS_TO_N_MMC_IES(p) (((p) >> 16) & 0xff)
p                 314 drivers/staging/wusbcore/host/whci/whci-hc.h #  define WHCSPARAMS_TO_N_KEYS(p)    (((p) >> 8) & 0xff)
p                 315 drivers/staging/wusbcore/host/whci/whci-hc.h #  define WHCSPARAMS_TO_N_DEVICES(p) (((p) >> 0) & 0x7f)
p                  82 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);
p                  84 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	if (p) {
p                  87 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		p->cnp = cnp;
p                  88 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		p->stid = stid;
p                  90 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		p->next = cdev->np_hash_tab[bucket];
p                  91 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		cdev->np_hash_tab[bucket] = p;
p                  95 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	return p;
p                 102 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct np_info *p;
p                 105 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
p                 106 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		if (p->cnp == cnp) {
p                 107 drivers/target/iscsi/cxgbit/cxgbit_cm.c 			stid = p->stid;
p                 119 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct np_info *p, **prev = &cdev->np_hash_tab[bucket];
p                 122 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	for (p = *prev; p; prev = &p->next, p = p->next) {
p                 123 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		if (p->cnp == cnp) {
p                 124 drivers/target/iscsi/cxgbit/cxgbit_cm.c 			stid = p->stid;
p                 125 drivers/target/iscsi/cxgbit/cxgbit_cm.c 			*prev = p->next;
p                 126 drivers/target/iscsi/cxgbit/cxgbit_cm.c 			kfree(p);
p                1833 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		struct sk_buff *p = cxgbit_sock_peek_wr(csk);
p                1836 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		if (unlikely(!p)) {
p                1843 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		csum = (__force u32)p->csum;
p                1849 drivers/target/iscsi/cxgbit/cxgbit_cm.c 			p->csum = (__force __wsum)(csum - credits);
p                1855 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		kfree_skb(p);
p                 176 drivers/target/iscsi/iscsi_target_parameters.h #define IS_USE_LEADING_ONLY(p)		((p)->use & USE_LEADING_ONLY)
p                 177 drivers/target/iscsi/iscsi_target_parameters.h #define IS_USE_INITIAL_ONLY(p)		((p)->use & USE_INITIAL_ONLY)
p                 178 drivers/target/iscsi/iscsi_target_parameters.h #define IS_USE_ALL(p)			((p)->use & USE_ALL)
p                 180 drivers/target/iscsi/iscsi_target_parameters.h #define SET_USE_INITIAL_ONLY(p)		((p)->use |= USE_INITIAL_ONLY)
p                 191 drivers/target/iscsi/iscsi_target_parameters.h #define IS_SENDER_INITIATOR(p)		((p)->sender & SENDER_INITIATOR)
p                 192 drivers/target/iscsi/iscsi_target_parameters.h #define IS_SENDER_TARGET(p)		((p)->sender & SENDER_TARGET)
p                 193 drivers/target/iscsi/iscsi_target_parameters.h #define IS_SENDER_BOTH(p)		((p)->sender & SENDER_BOTH)
p                 201 drivers/target/iscsi/iscsi_target_parameters.h #define IS_SCOPE_CONNECTION_ONLY(p)	((p)->scope & SCOPE_CONNECTION_ONLY)
p                 202 drivers/target/iscsi/iscsi_target_parameters.h #define IS_SCOPE_SESSION_WIDE(p)	((p)->scope & SCOPE_SESSION_WIDE)
p                 212 drivers/target/iscsi/iscsi_target_parameters.h #define IS_PHASE_SECURITY(p)		((p)->phase & PHASE_SECURITY)
p                 213 drivers/target/iscsi/iscsi_target_parameters.h #define IS_PHASE_OPERATIONAL(p)		((p)->phase & PHASE_OPERATIONAL)
p                 214 drivers/target/iscsi/iscsi_target_parameters.h #define IS_PHASE_DECLARATIVE(p)		((p)->phase & PHASE_DECLARATIVE)
p                 215 drivers/target/iscsi/iscsi_target_parameters.h #define IS_PHASE_FFP0(p)		((p)->phase & PHASE_FFP0)
p                 227 drivers/target/iscsi/iscsi_target_parameters.h #define IS_TYPE_BOOL_AND(p)		((p)->type & TYPE_BOOL_AND)
p                 228 drivers/target/iscsi/iscsi_target_parameters.h #define IS_TYPE_BOOL_OR(p)		((p)->type & TYPE_BOOL_OR)
p                 229 drivers/target/iscsi/iscsi_target_parameters.h #define IS_TYPE_NUMBER(p)		((p)->type & TYPE_NUMBER)
p                 230 drivers/target/iscsi/iscsi_target_parameters.h #define IS_TYPE_NUMBER_RANGE(p)		((p)->type & TYPE_NUMBER_RANGE)
p                 231 drivers/target/iscsi/iscsi_target_parameters.h #define IS_TYPE_STRING(p)		((p)->type & TYPE_STRING)
p                 232 drivers/target/iscsi/iscsi_target_parameters.h #define IS_TYPE_VALUE_LIST(p)		((p)->type & TYPE_VALUE_LIST)
p                 253 drivers/target/iscsi/iscsi_target_parameters.h #define IS_TYPERANGE_0_TO_2(p)		((p)->type_range & TYPERANGE_0_TO_2)
p                 254 drivers/target/iscsi/iscsi_target_parameters.h #define IS_TYPERANGE_0_TO_3600(p)	((p)->type_range & TYPERANGE_0_TO_3600)
p                 255 drivers/target/iscsi/iscsi_target_parameters.h #define IS_TYPERANGE_0_TO_32767(p)	((p)->type_range & TYPERANGE_0_TO_32767)
p                 256 drivers/target/iscsi/iscsi_target_parameters.h #define IS_TYPERANGE_0_TO_65535(p)	((p)->type_range & TYPERANGE_0_TO_65535)
p                 257 drivers/target/iscsi/iscsi_target_parameters.h #define IS_TYPERANGE_1_TO_65535(p)	((p)->type_range & TYPERANGE_1_TO_65535)
p                 258 drivers/target/iscsi/iscsi_target_parameters.h #define IS_TYPERANGE_2_TO_3600(p)	((p)->type_range & TYPERANGE_2_TO_3600)
p                 259 drivers/target/iscsi/iscsi_target_parameters.h #define IS_TYPERANGE_512_TO_16777215(p)	((p)->type_range & \
p                 261 drivers/target/iscsi/iscsi_target_parameters.h #define IS_TYPERANGE_AUTH_PARAM(p)	((p)->type_range & TYPERANGE_AUTH)
p                 262 drivers/target/iscsi/iscsi_target_parameters.h #define IS_TYPERANGE_DIGEST_PARAM(p)	((p)->type_range & TYPERANGE_DIGEST)
p                 263 drivers/target/iscsi/iscsi_target_parameters.h #define IS_TYPERANGE_SESSIONTYPE(p)	((p)->type_range & \
p                 278 drivers/target/iscsi/iscsi_target_parameters.h #define IS_PSTATE_ACCEPTOR(p)		((p)->state & PSTATE_ACCEPTOR)
p                 279 drivers/target/iscsi/iscsi_target_parameters.h #define IS_PSTATE_NEGOTIATE(p)		((p)->state & PSTATE_NEGOTIATE)
p                 280 drivers/target/iscsi/iscsi_target_parameters.h #define IS_PSTATE_PROPOSER(p)		((p)->state & PSTATE_PROPOSER)
p                 281 drivers/target/iscsi/iscsi_target_parameters.h #define IS_PSTATE_IRRELEVANT(p)		((p)->state & PSTATE_IRRELEVANT)
p                 282 drivers/target/iscsi/iscsi_target_parameters.h #define IS_PSTATE_REJECT(p)		((p)->state & PSTATE_REJECT)
p                 283 drivers/target/iscsi/iscsi_target_parameters.h #define IS_PSTATE_REPLY_OPTIONAL(p)	((p)->state & PSTATE_REPLY_OPTIONAL)
p                 284 drivers/target/iscsi/iscsi_target_parameters.h #define IS_PSTATE_RESPONSE_GOT(p)	((p)->state & PSTATE_RESPONSE_GOT)
p                 285 drivers/target/iscsi/iscsi_target_parameters.h #define IS_PSTATE_RESPONSE_SENT(p)	((p)->state & PSTATE_RESPONSE_SENT)
p                 287 drivers/target/iscsi/iscsi_target_parameters.h #define SET_PSTATE_ACCEPTOR(p)		((p)->state |= PSTATE_ACCEPTOR)
p                 288 drivers/target/iscsi/iscsi_target_parameters.h #define SET_PSTATE_NEGOTIATE(p)		((p)->state |= PSTATE_NEGOTIATE)
p                 289 drivers/target/iscsi/iscsi_target_parameters.h #define SET_PSTATE_PROPOSER(p)		((p)->state |= PSTATE_PROPOSER)
p                 290 drivers/target/iscsi/iscsi_target_parameters.h #define SET_PSTATE_IRRELEVANT(p)	((p)->state |= PSTATE_IRRELEVANT)
p                 291 drivers/target/iscsi/iscsi_target_parameters.h #define SET_PSTATE_REJECT(p)		((p)->state |= PSTATE_REJECT)
p                 292 drivers/target/iscsi/iscsi_target_parameters.h #define SET_PSTATE_REPLY_OPTIONAL(p)	((p)->state |= PSTATE_REPLY_OPTIONAL)
p                 293 drivers/target/iscsi/iscsi_target_parameters.h #define SET_PSTATE_RESPONSE_GOT(p)	((p)->state |= PSTATE_RESPONSE_GOT)
p                 294 drivers/target/iscsi/iscsi_target_parameters.h #define SET_PSTATE_RESPONSE_SENT(p)	((p)->state |= PSTATE_RESPONSE_SENT)
p                 717 drivers/target/loopback/tcm_loop.c 				  struct se_session *se_sess, void *p)
p                 722 drivers/target/loopback/tcm_loop.c 	tl_tpg->tl_nexus = p;
p                2782 drivers/target/target_core_configfs.c 		struct config_item *item, char *p)			\
p                2785 drivers/target/target_core_configfs.c 	return sprintf(p, "%d\n",					\
p                2790 drivers/target/target_core_configfs.c 		struct config_item *item, const char *p, size_t c)	\
p                2803 drivers/target/target_core_configfs.c 	ret = kstrtoul(p, 0, &tmp);					\
p                2805 drivers/target/target_core_configfs.c 		pr_err("Invalid value '%s', must be '0' or '1'\n", p);	\
p                 862 drivers/target/target_core_device.c static int target_devices_idr_iter(int id, void *p, void *data)
p                 866 drivers/target/target_core_device.c 	struct se_device *dev = p;
p                 105 drivers/target/target_core_fabric_lib.c 	const char *p;
p                 109 drivers/target/target_core_fabric_lib.c 	p = nacl->initiatorname;
p                 110 drivers/target/target_core_fabric_lib.c 	if (strncasecmp(p, "0x", 2) == 0)
p                 111 drivers/target/target_core_fabric_lib.c 		p += 2;
p                 112 drivers/target/target_core_fabric_lib.c 	len = strlen(p);
p                 119 drivers/target/target_core_fabric_lib.c 	rc = hex2bin(buf + 8 + leading_zero_bytes, p, count);
p                 121 drivers/target/target_core_fabric_lib.c 		pr_debug("hex2bin failed for %s: %d\n", p, rc);
p                 267 drivers/target/target_core_fabric_lib.c 	char *p;
p                 319 drivers/target/target_core_fabric_lib.c 		p = strstr(&buf[4], ",i,0x");
p                 320 drivers/target/target_core_fabric_lib.c 		if (!p) {
p                 326 drivers/target/target_core_fabric_lib.c 		*p = '\0'; /* Terminate iSCSI Name */
p                 327 drivers/target/target_core_fabric_lib.c 		p += 5; /* Skip over ",i,0x" separator */
p                 329 drivers/target/target_core_fabric_lib.c 		*port_nexus_ptr = p;
p                 337 drivers/target/target_core_fabric_lib.c 			if (isdigit(*p)) {
p                 338 drivers/target/target_core_fabric_lib.c 				p++;
p                 341 drivers/target/target_core_fabric_lib.c 			*p = tolower(*p);
p                 342 drivers/target/target_core_fabric_lib.c 			p++;
p                 217 drivers/target/target_core_file.c static void fd_dev_call_rcu(struct rcu_head *p)
p                 219 drivers/target/target_core_file.c 	struct se_device *dev = container_of(p, struct se_device, rcu_head);
p                 169 drivers/target/target_core_iblock.c static void iblock_dev_call_rcu(struct rcu_head *p)
p                 171 drivers/target/target_core_iblock.c 	struct se_device *dev = container_of(p, struct se_device, rcu_head);
p                 546 drivers/target/target_core_pscsi.c static void pscsi_dev_call_rcu(struct rcu_head *p)
p                 548 drivers/target/target_core_pscsi.c 	struct se_device *dev = container_of(p, struct se_device, rcu_head);
p                 120 drivers/target/target_core_rd.c 	unsigned char *p;
p                 162 drivers/target/target_core_rd.c 			p = kmap(pg);
p                 163 drivers/target/target_core_rd.c 			memset(p, init_payload, PAGE_SIZE);
p                 319 drivers/target/target_core_rd.c static void rd_dev_call_rcu(struct rcu_head *p)
p                 321 drivers/target/target_core_rd.c 	struct se_device *dev = container_of(p, struct se_device, rcu_head);
p                 135 drivers/target/target_core_spc.c 	unsigned char *p = &dev->t10_wwn.unit_serial[0];
p                 147 drivers/target/target_core_spc.c 	for (cnt = 0; *p && cnt < 13; p++) {
p                 148 drivers/target/target_core_spc.c 		int val = hex_to_bin(*p);
p                 684 drivers/target/target_core_spc.c 	int p;
p                 693 drivers/target/target_core_spc.c 		for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
p                 694 drivers/target/target_core_spc.c 			buf[p + 4] = evpd_handlers[p].page;
p                 709 drivers/target/target_core_spc.c 	int p;
p                 736 drivers/target/target_core_spc.c 	for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
p                 737 drivers/target/target_core_spc.c 		if (cdb[2] == evpd_handlers[p].page) {
p                 739 drivers/target/target_core_spc.c 			ret = evpd_handlers[p].emulate(cmd, buf);
p                 761 drivers/target/target_core_spc.c static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
p                 763 drivers/target/target_core_spc.c 	p[0] = 0x01;
p                 764 drivers/target/target_core_spc.c 	p[1] = 0x0a;
p                 774 drivers/target/target_core_spc.c static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
p                 779 drivers/target/target_core_spc.c 	p[0] = 0x0a;
p                 780 drivers/target/target_core_spc.c 	p[1] = 0x0a;
p                 787 drivers/target/target_core_spc.c 	p[2] = (1 << 1);
p                 790 drivers/target/target_core_spc.c 		p[2] |= (1 << 2);
p                 820 drivers/target/target_core_spc.c 	p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
p                 850 drivers/target/target_core_spc.c 	p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
p                 864 drivers/target/target_core_spc.c 	p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
p                 877 drivers/target/target_core_spc.c 			p[5] |= 0x80;
p                 880 drivers/target/target_core_spc.c 	p[8] = 0xff;
p                 881 drivers/target/target_core_spc.c 	p[9] = 0xff;
p                 882 drivers/target/target_core_spc.c 	p[11] = 30;
p                 888 drivers/target/target_core_spc.c static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
p                 892 drivers/target/target_core_spc.c 	p[0] = 0x08;
p                 893 drivers/target/target_core_spc.c 	p[1] = 0x12;
p                 900 drivers/target/target_core_spc.c 		p[2] = 0x04; /* Write Cache Enable */
p                 901 drivers/target/target_core_spc.c 	p[12] = 0x20; /* Disabled Read Ahead */
p                 907 drivers/target/target_core_spc.c static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
p                 909 drivers/target/target_core_spc.c 	p[0] = 0x1c;
p                 910 drivers/target/target_core_spc.c 	p[1] = 0x0a;
p                1282 drivers/target/target_core_user.c static int tcmu_check_expired_cmd(int id, void *p, void *data)
p                1284 drivers/target/target_core_user.c 	struct tcmu_cmd *cmd = p;
p                1599 drivers/target/target_core_user.c static void tcmu_dev_call_rcu(struct rcu_head *p)
p                1601 drivers/target/target_core_user.c 	struct se_device *dev = container_of(p, struct se_device, rcu_head);
p                 106 drivers/target/target_core_xcopy.c 				unsigned char *p, unsigned short cscd_index)
p                 108 drivers/target/target_core_xcopy.c 	unsigned char *desc = p;
p                 192 drivers/target/target_core_xcopy.c 				struct xcopy_op *xop, unsigned char *p,
p                 196 drivers/target/target_core_xcopy.c 	unsigned char *desc = p;
p                 285 drivers/target/target_core_xcopy.c 					unsigned char *p)
p                 287 drivers/target/target_core_xcopy.c 	unsigned char *desc = p;
p                 327 drivers/target/target_core_xcopy.c 				struct xcopy_op *xop, unsigned char *p,
p                 330 drivers/target/target_core_xcopy.c 	unsigned char *desc = p;
p                 834 drivers/target/target_core_xcopy.c 	unsigned char *p = NULL, *seg_desc;
p                 840 drivers/target/target_core_xcopy.c 	p = transport_kmap_data_sg(se_cmd);
p                 841 drivers/target/target_core_xcopy.c 	if (!p) {
p                 846 drivers/target/target_core_xcopy.c 	list_id = p[0];
p                 847 drivers/target/target_core_xcopy.c 	list_id_usage = (p[1] & 0x18) >> 3;
p                 852 drivers/target/target_core_xcopy.c 	tdll = get_unaligned_be16(&p[2]);
p                 853 drivers/target/target_core_xcopy.c 	sdll = get_unaligned_be32(&p[8]);
p                 861 drivers/target/target_core_xcopy.c 	inline_dl = get_unaligned_be32(&p[12]);
p                 883 drivers/target/target_core_xcopy.c 	seg_desc = &p[16] + tdll;
p                 893 drivers/target/target_core_xcopy.c 	rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
p                 914 drivers/target/target_core_xcopy.c 	if (p)
p                 964 drivers/target/target_core_xcopy.c 	unsigned char *p;
p                 966 drivers/target/target_core_xcopy.c 	p = transport_kmap_data_sg(se_cmd);
p                 967 drivers/target/target_core_xcopy.c 	if (!p) {
p                 982 drivers/target/target_core_xcopy.c 	p[4] = 0x1;
p                 986 drivers/target/target_core_xcopy.c 	put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]);
p                 990 drivers/target/target_core_xcopy.c 	put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]);
p                 994 drivers/target/target_core_xcopy.c 	put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]);
p                 998 drivers/target/target_core_xcopy.c 	put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]);
p                1002 drivers/target/target_core_xcopy.c 	put_unaligned_be32(0x0, &p[20]);
p                1006 drivers/target/target_core_xcopy.c 	put_unaligned_be32(0x0, &p[24]);
p                1010 drivers/target/target_core_xcopy.c 	put_unaligned_be32(0x0, &p[28]);
p                1014 drivers/target/target_core_xcopy.c 	put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]);
p                1018 drivers/target/target_core_xcopy.c 	p[36] = RCR_OP_MAX_CONCURR_COPIES;
p                1022 drivers/target/target_core_xcopy.c 	p[37] = RCR_OP_DATA_SEG_GRAN_LOG2;
p                1026 drivers/target/target_core_xcopy.c 	p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2;
p                1030 drivers/target/target_core_xcopy.c 	p[39] = RCR_OP_HELD_DATA_GRAN_LOG2;
p                1034 drivers/target/target_core_xcopy.c 	p[43] = 0x2;
p                1038 drivers/target/target_core_xcopy.c 	p[44] = 0x02; /* Copy Block to Block device */
p                1039 drivers/target/target_core_xcopy.c 	p[45] = 0xe4; /* Identification descriptor target descriptor */
p                1044 drivers/target/target_core_xcopy.c 	put_unaligned_be32(42, &p[0]);
p                 190 drivers/target/tcm_fc/tfc_sess.c 			    struct se_session *se_sess, void *p)
p                 192 drivers/target/tcm_fc/tfc_sess.c 	struct ft_sess *sess = p;
p                 530 drivers/tee/optee/call.c static bool is_normal_memory(pgprot_t p)
p                 533 drivers/tee/optee/call.c 	return (pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC;
p                 535 drivers/tee/optee/call.c 	return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
p                  45 drivers/tee/optee/core.c 		struct tee_param *p = params + n;
p                  51 drivers/tee/optee/core.c 			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
p                  52 drivers/tee/optee/core.c 			memset(&p->u, 0, sizeof(p->u));
p                  57 drivers/tee/optee/core.c 			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
p                  59 drivers/tee/optee/core.c 			p->u.value.a = mp->u.value.a;
p                  60 drivers/tee/optee/core.c 			p->u.value.b = mp->u.value.b;
p                  61 drivers/tee/optee/core.c 			p->u.value.c = mp->u.value.c;
p                  66 drivers/tee/optee/core.c 			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
p                  68 drivers/tee/optee/core.c 			p->u.memref.size = mp->u.tmem.size;
p                  72 drivers/tee/optee/core.c 				p->u.memref.shm_offs = 0;
p                  73 drivers/tee/optee/core.c 				p->u.memref.shm = NULL;
p                  79 drivers/tee/optee/core.c 			p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
p                  80 drivers/tee/optee/core.c 			p->u.memref.shm = shm;
p                  83 drivers/tee/optee/core.c 			if (p->u.memref.size) {
p                  84 drivers/tee/optee/core.c 				size_t o = p->u.memref.shm_offs +
p                  85 drivers/tee/optee/core.c 					   p->u.memref.size - 1;
p                  95 drivers/tee/optee/core.c 			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
p                  97 drivers/tee/optee/core.c 			p->u.memref.size = mp->u.rmem.size;
p                 102 drivers/tee/optee/core.c 				p->u.memref.shm_offs = 0;
p                 103 drivers/tee/optee/core.c 				p->u.memref.shm = NULL;
p                 106 drivers/tee/optee/core.c 			p->u.memref.shm_offs = mp->u.rmem.offs;
p                 107 drivers/tee/optee/core.c 			p->u.memref.shm = shm;
p                 119 drivers/tee/optee/core.c 				const struct tee_param *p)
p                 124 drivers/tee/optee/core.c 	mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
p                 127 drivers/tee/optee/core.c 	mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
p                 128 drivers/tee/optee/core.c 	mp->u.tmem.size = p->u.memref.size;
p                 130 drivers/tee/optee/core.c 	if (!p->u.memref.shm) {
p                 135 drivers/tee/optee/core.c 	rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
p                 147 drivers/tee/optee/core.c 				const struct tee_param *p)
p                 149 drivers/tee/optee/core.c 	mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
p                 152 drivers/tee/optee/core.c 	mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
p                 153 drivers/tee/optee/core.c 	mp->u.rmem.size = p->u.memref.size;
p                 154 drivers/tee/optee/core.c 	mp->u.rmem.offs = p->u.memref.shm_offs;
p                 172 drivers/tee/optee/core.c 		const struct tee_param *p = params + n;
p                 175 drivers/tee/optee/core.c 		switch (p->attr) {
p                 183 drivers/tee/optee/core.c 			mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
p                 185 drivers/tee/optee/core.c 			mp->u.value.a = p->u.value.a;
p                 186 drivers/tee/optee/core.c 			mp->u.value.b = p->u.value.b;
p                 187 drivers/tee/optee/core.c 			mp->u.value.c = p->u.value.c;
p                 192 drivers/tee/optee/core.c 			if (tee_shm_is_registered(p->u.memref.shm))
p                 193 drivers/tee/optee/core.c 				rc = to_msg_param_reg_mem(mp, p);
p                 195 drivers/tee/optee/core.c 				rc = to_msg_param_tmp_mem(mp, p);
p                 359 drivers/tee/optee/supp.c 		struct tee_param *p = req->param + n;
p                 361 drivers/tee/optee/supp.c 		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
p                 364 drivers/tee/optee/supp.c 			p->u.value.a = param[n + num_meta].u.value.a;
p                 365 drivers/tee/optee/supp.c 			p->u.value.b = param[n + num_meta].u.value.b;
p                 366 drivers/tee/optee/supp.c 			p->u.value.c = param[n + num_meta].u.value.c;
p                 370 drivers/tee/optee/supp.c 			p->u.memref.size = param[n + num_meta].u.memref.size;
p                 276 drivers/tee/tee_core.c 		struct tee_param *p = params + n;
p                 278 drivers/tee/tee_core.c 		switch (p->attr) {
p                 281 drivers/tee/tee_core.c 			if (put_user(p->u.value.a, &up->a) ||
p                 282 drivers/tee/tee_core.c 			    put_user(p->u.value.b, &up->b) ||
p                 283 drivers/tee/tee_core.c 			    put_user(p->u.value.c, &up->c))
p                 288 drivers/tee/tee_core.c 			if (put_user((u64)p->u.memref.size, &up->b))
p                 468 drivers/tee/tee_core.c 		struct tee_param *p = params + n;
p                 470 drivers/tee/tee_core.c 		ip.attr = p->attr;
p                 471 drivers/tee/tee_core.c 		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
p                 474 drivers/tee/tee_core.c 			ip.a = p->u.value.a;
p                 475 drivers/tee/tee_core.c 			ip.b = p->u.value.b;
p                 476 drivers/tee/tee_core.c 			ip.c = p->u.value.c;
p                 481 drivers/tee/tee_core.c 			ip.b = p->u.memref.size;
p                 482 drivers/tee/tee_core.c 			if (!p->u.memref.shm) {
p                 487 drivers/tee/tee_core.c 			ip.a = p->u.memref.shm_offs;
p                 488 drivers/tee/tee_core.c 			ip.c = p->u.memref.shm->id;
p                 561 drivers/tee/tee_core.c 		struct tee_param *p = params + n;
p                 571 drivers/tee/tee_core.c 		p->attr = ip.attr;
p                 576 drivers/tee/tee_core.c 			p->u.value.a = ip.a;
p                 577 drivers/tee/tee_core.c 			p->u.value.b = ip.b;
p                 578 drivers/tee/tee_core.c 			p->u.value.c = ip.c;
p                 589 drivers/tee/tee_core.c 			p->u.memref.shm = NULL;
p                 590 drivers/tee/tee_core.c 			p->u.memref.shm_offs = 0;
p                 591 drivers/tee/tee_core.c 			p->u.memref.size = ip.b;
p                 594 drivers/tee/tee_core.c 			memset(&p->u, 0, sizeof(p->u));
p                  75 drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c 	union acpi_object *p;
p                  84 drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c 	p = buffer.pointer;
p                  85 drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c 	if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
p                  91 drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c 	*trt_count = p->package.count;
p                 104 drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c 		status = acpi_extract_package(&(p->package.elements[i]),
p                 152 drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c 	union acpi_object *p;
p                 162 drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c 	p = buffer.pointer;
p                 163 drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c 	if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
p                 170 drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c 	*art_count = p->package.count - 1;
p                 183 drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c 		status = acpi_extract_package(&(p->package.elements[i + 1]),
p                 172 drivers/thermal/intel/int340x_thermal/int3403_thermal.c 	union acpi_object *p;
p                 182 drivers/thermal/intel/int340x_thermal/int3403_thermal.c 	p = buf.pointer;
p                 183 drivers/thermal/intel/int340x_thermal/int3403_thermal.c 	if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
p                 190 drivers/thermal/intel/int340x_thermal/int3403_thermal.c 	obj->max_state = p->package.count - 1;
p                 281 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	union acpi_object *p;
p                 289 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	p = buf.pointer;
p                 290 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
p                 296 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	if (!p->package.count) {
p                 302 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	for (i = 0; i < min((int)p->package.count - 1, 2); ++i) {
p                 303 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 		elements = &(p->package.elements[i+1]);
p                 196 drivers/thermal/power_allocator.c 	s64 p, i, d, power_range;
p                 216 drivers/thermal/power_allocator.c 	p = mul_frac(err < 0 ? tz->tzp->k_po : tz->tzp->k_pu, err);
p                 246 drivers/thermal/power_allocator.c 	power_range = p + i + d;
p                 255 drivers/thermal/power_allocator.c 					  frac_to_int(p), frac_to_int(i),
p                  81 drivers/thermal/qoriq_thermal.c static void tmu_write(struct qoriq_tmu_data *p, u32 val, void __iomem *addr)
p                  83 drivers/thermal/qoriq_thermal.c 	if (p->little_endian)
p                  89 drivers/thermal/qoriq_thermal.c static u32 tmu_read(struct qoriq_tmu_data *p, void __iomem *addr)
p                  91 drivers/thermal/qoriq_thermal.c 	if (p->little_endian)
p                  97 drivers/thermal/qoriq_thermal.c static int tmu_get_temp(void *p, int *temp)
p                  99 drivers/thermal/qoriq_thermal.c 	struct qoriq_sensor *qsensor = p;
p                 171 drivers/thermal/rcar_thermal.c #define rcar_thermal_read(p, r) _rcar_thermal_read(p, REG_ ##r)
p                 177 drivers/thermal/rcar_thermal.c #define rcar_thermal_write(p, r, d) _rcar_thermal_write(p, REG_ ##r, d)
p                 184 drivers/thermal/rcar_thermal.c #define rcar_thermal_bset(p, r, m, d) _rcar_thermal_bset(p, REG_ ##r, m, d)
p                 374 drivers/thermal/rcar_thermal.c #define rcar_thermal_irq_enable(p)	_rcar_thermal_irq_ctrl(p, 1)
p                 375 drivers/thermal/rcar_thermal.c #define rcar_thermal_irq_disable(p)	_rcar_thermal_irq_ctrl(p, 0)
p                 108 drivers/thermal/rockchip_thermal.c 			   void __iomem *reg, enum tshut_polarity p);
p                 650 drivers/thermal/samsung/exynos_tmu.c static int exynos_get_temp(void *p, int *temp)
p                 652 drivers/thermal/samsung/exynos_tmu.c 	struct exynos_tmu_data *data = p;
p                 117 drivers/thermal/ti-soc-thermal/ti-thermal-common.c static int __ti_thermal_get_trend(void *p, int trip, enum thermal_trend *trend)
p                 119 drivers/thermal/ti-soc-thermal/ti-thermal-common.c 	struct ti_thermal_data *data = p;
p                 108 drivers/thunderbolt/path.c 	struct tb_port *p;
p                 125 drivers/thunderbolt/path.c 	p = src;
p                 129 drivers/thunderbolt/path.c 	for (i = 0; p && i < TB_PATH_MAX_HOPS; i++) {
p                 130 drivers/thunderbolt/path.c 		sw = p->sw;
p                 132 drivers/thunderbolt/path.c 		ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
p                 134 drivers/thunderbolt/path.c 			tb_port_warn(p, "failed to read path at %d\n", h);
p                 147 drivers/thunderbolt/path.c 		p = out_port->remote;
p                 166 drivers/thunderbolt/path.c 	p = src;
p                 172 drivers/thunderbolt/path.c 		sw = p->sw;
p                 174 drivers/thunderbolt/path.c 		ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
p                 176 drivers/thunderbolt/path.c 			tb_port_warn(p, "failed to read path at %d\n", h);
p                 180 drivers/thunderbolt/path.c 		if (tb_port_alloc_in_hopid(p, h, h) < 0)
p                 187 drivers/thunderbolt/path.c 			tb_port_release_in_hopid(p, h);
p                 191 drivers/thunderbolt/path.c 		path->hops[i].in_port = p;
p                 198 drivers/thunderbolt/path.c 		p = out_port->remote;
p                 760 drivers/thunderbolt/xdomain.c 	const struct tb_property *p = data;
p                 767 drivers/thunderbolt/xdomain.c 	return !strcmp(svc->key, p->key);
p                 774 drivers/thunderbolt/xdomain.c 	struct tb_property *p;
p                 777 drivers/thunderbolt/xdomain.c 	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
p                 778 drivers/thunderbolt/xdomain.c 	if (p)
p                 779 drivers/thunderbolt/xdomain.c 		svc->prtcid = p->value.immediate;
p                 780 drivers/thunderbolt/xdomain.c 	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
p                 781 drivers/thunderbolt/xdomain.c 	if (p)
p                 782 drivers/thunderbolt/xdomain.c 		svc->prtcvers = p->value.immediate;
p                 783 drivers/thunderbolt/xdomain.c 	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
p                 784 drivers/thunderbolt/xdomain.c 	if (p)
p                 785 drivers/thunderbolt/xdomain.c 		svc->prtcrevs = p->value.immediate;
p                 786 drivers/thunderbolt/xdomain.c 	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
p                 787 drivers/thunderbolt/xdomain.c 	if (p)
p                 788 drivers/thunderbolt/xdomain.c 		svc->prtcstns = p->value.immediate;
p                 800 drivers/thunderbolt/xdomain.c 	struct tb_property *p;
p                 811 drivers/thunderbolt/xdomain.c 	tb_property_for_each(xd->properties, p) {
p                 812 drivers/thunderbolt/xdomain.c 		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
p                 816 drivers/thunderbolt/xdomain.c 		dev = device_find_child(&xd->dev, p, find_service);
p                 826 drivers/thunderbolt/xdomain.c 		if (populate_service(svc, p)) {
p                 852 drivers/thunderbolt/xdomain.c 	const struct tb_property *p;
p                 855 drivers/thunderbolt/xdomain.c 	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
p                 856 drivers/thunderbolt/xdomain.c 	if (!p)
p                 858 drivers/thunderbolt/xdomain.c 	xd->device = p->value.immediate;
p                 860 drivers/thunderbolt/xdomain.c 	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
p                 861 drivers/thunderbolt/xdomain.c 	if (!p)
p                 863 drivers/thunderbolt/xdomain.c 	xd->vendor = p->value.immediate;
p                 871 drivers/thunderbolt/xdomain.c 	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
p                 872 drivers/thunderbolt/xdomain.c 	if (p)
p                 873 drivers/thunderbolt/xdomain.c 		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
p                 874 drivers/thunderbolt/xdomain.c 	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
p                 875 drivers/thunderbolt/xdomain.c 	if (p)
p                 876 drivers/thunderbolt/xdomain.c 		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);
p                1616 drivers/thunderbolt/xdomain.c 	struct tb_property *p;
p                1618 drivers/thunderbolt/xdomain.c 	p = tb_property_find(xdomain_property_dir, key,
p                1620 drivers/thunderbolt/xdomain.c 	if (p && p->value.dir == dir) {
p                1621 drivers/thunderbolt/xdomain.c 		tb_property_remove(p);
p                 141 drivers/tty/ehv_bytechan.c 					       const char *p)
p                 147 drivers/tty/ehv_bytechan.c 		memcpy(buffer, p, c);
p                 149 drivers/tty/ehv_bytechan.c 		p = buffer;
p                 151 drivers/tty/ehv_bytechan.c 	return ev_byte_channel_send(handle, count, p);
p                  78 drivers/tty/hvc/hvc_dcc.c 	struct hvc_struct *p;
p                  83 drivers/tty/hvc/hvc_dcc.c 	p = hvc_alloc(0, 0, &hvc_dcc_get_put_ops, 128);
p                  85 drivers/tty/hvc/hvc_dcc.c 	return PTR_ERR_OR_ZERO(p);
p                1347 drivers/tty/hvc/hvc_iucv.c #define param_check_vmidfilter(name, p) __param_check(name, p, void)
p                 684 drivers/tty/hvc/hvcs.c static void hvcs_destruct_port(struct tty_port *p)
p                 686 drivers/tty/hvc/hvcs.c 	struct hvcs_struct *hvcsd = container_of(p, struct hvcs_struct, port);
p                 312 drivers/tty/moxa.c 		struct moxa_port *p;
p                 316 drivers/tty/moxa.c 			p = moxa_boards[i].ports;
p                 317 drivers/tty/moxa.c 			for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) {
p                 321 drivers/tty/moxa.c 					tmp.inq = MoxaPortRxQueue(p);
p                 322 drivers/tty/moxa.c 					tmp.outq = MoxaPortTxQueue(p);
p                 341 drivers/tty/moxa.c 		struct moxa_port *p;
p                 345 drivers/tty/moxa.c 			p = moxa_boards[i].ports;
p                 346 drivers/tty/moxa.c 			for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) {
p                 355 drivers/tty/moxa.c 				status = MoxaPortLineStatus(p);
p                 365 drivers/tty/moxa.c 				ttyp = tty_port_tty_get(&p->port);
p                 367 drivers/tty/moxa.c 					tmp.cflag = p->cflag;
p                 824 drivers/tty/moxa.c 	struct moxa_port *p;
p                 836 drivers/tty/moxa.c 	for (i = 0, p = brd->ports; i < MAX_PORTS_PER_BOARD; i++, p++) {
p                 837 drivers/tty/moxa.c 		tty_port_init(&p->port);
p                 838 drivers/tty/moxa.c 		p->port.ops = &moxa_port_ops;
p                 839 drivers/tty/moxa.c 		p->type = PORT_16550A;
p                 840 drivers/tty/moxa.c 		p->cflag = B9600 | CS8 | CREAD | CLOCAL | HUPCL;
p                1349 drivers/tty/moxa.c static void moxa_new_dcdstate(struct moxa_port *p, u8 dcd)
p                1354 drivers/tty/moxa.c 	spin_lock_irqsave(&p->port.lock, flags);
p                1355 drivers/tty/moxa.c 	if (dcd != p->DCDState) {
p                1356 drivers/tty/moxa.c         	p->DCDState = dcd;
p                1357 drivers/tty/moxa.c         	spin_unlock_irqrestore(&p->port.lock, flags);
p                1359 drivers/tty/moxa.c 			tty_port_tty_hangup(&p->port, true);
p                1362 drivers/tty/moxa.c 		spin_unlock_irqrestore(&p->port.lock, flags);
p                1365 drivers/tty/moxa.c static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
p                1368 drivers/tty/moxa.c 	struct tty_struct *tty = tty_port_tty_get(&p->port);
p                1370 drivers/tty/moxa.c 	unsigned int inited = tty_port_initialized(&p->port);
p                1374 drivers/tty/moxa.c 		if (test_bit(EMPTYWAIT, &p->statusflags) &&
p                1375 drivers/tty/moxa.c 				MoxaPortTxQueue(p) == 0) {
p                1376 drivers/tty/moxa.c 			clear_bit(EMPTYWAIT, &p->statusflags);
p                1379 drivers/tty/moxa.c 		if (test_bit(LOWWAIT, &p->statusflags) && !tty->stopped &&
p                1380 drivers/tty/moxa.c 				MoxaPortTxQueue(p) <= WAKEUP_CHARS) {
p                1381 drivers/tty/moxa.c 			clear_bit(LOWWAIT, &p->statusflags);
p                1386 drivers/tty/moxa.c 				MoxaPortRxQueue(p) > 0) { /* RX */
p                1387 drivers/tty/moxa.c 			MoxaPortReadData(p);
p                1388 drivers/tty/moxa.c 			tty_schedule_flip(&p->port);
p                1391 drivers/tty/moxa.c 		clear_bit(EMPTYWAIT, &p->statusflags);
p                1392 drivers/tty/moxa.c 		MoxaPortFlushData(p, 0); /* flush RX */
p                1403 drivers/tty/moxa.c 	ofsAddr = p->tableAddr;
p                1412 drivers/tty/moxa.c 		tty_insert_flip_char(&p->port, 0, TTY_BREAK);
p                1413 drivers/tty/moxa.c 		tty_schedule_flip(&p->port);
p                1417 drivers/tty/moxa.c 		moxa_new_dcdstate(p, readb(ofsAddr + FlagStat) & DCD_state);
p                1449 drivers/tty/moxa.c 			struct moxa_port *p = brd->ports;
p                1450 drivers/tty/moxa.c 			for (port = 0; port < brd->numPorts; port++, p++)
p                1451 drivers/tty/moxa.c 				if (p->lowChkFlag) {
p                1452 drivers/tty/moxa.c 					p->lowChkFlag = 0;
p                1453 drivers/tty/moxa.c 					moxa_low_water_check(p->tableAddr);
p                1562 drivers/tty/mxser.c 		unsigned int cflag, iflag, p;
p                1569 drivers/tty/mxser.c 		for (i = 0, p = 0; i < MXSER_BOARDS; i++) {
p                1570 drivers/tty/mxser.c 			for (j = 0; j < MXSER_PORTS_PER_BOARD; j++, p++) {
p                1571 drivers/tty/mxser.c 				if (p >= ARRAY_SIZE(me->rx_cnt)) {
p                1585 drivers/tty/mxser.c 				status = mxser_get_msr(ip->ioaddr, 0, p);
p                1597 drivers/tty/mxser.c 				me->rx_cnt[p] = ip->mon_data.rxcnt;
p                1598 drivers/tty/mxser.c 				me->tx_cnt[p] = ip->mon_data.txcnt;
p                1599 drivers/tty/mxser.c 				me->up_rxcnt[p] = ip->mon_data.up_rxcnt;
p                1600 drivers/tty/mxser.c 				me->up_txcnt[p] = ip->mon_data.up_txcnt;
p                1601 drivers/tty/mxser.c 				me->modem_status[p] =
p                1610 drivers/tty/mxser.c 					me->baudrate[p] = tty_termios_baud_rate(&ip->normal_termios);
p                1614 drivers/tty/mxser.c 					me->baudrate[p] = tty_get_baud_rate(tty);
p                1618 drivers/tty/mxser.c 				me->databits[p] = cflag & CSIZE;
p                1619 drivers/tty/mxser.c 				me->stopbits[p] = cflag & CSTOPB;
p                1620 drivers/tty/mxser.c 				me->parity[p] = cflag & (PARENB | PARODD |
p                1624 drivers/tty/mxser.c 					me->flowctrl[p] |= 0x03;
p                1627 drivers/tty/mxser.c 					me->flowctrl[p] |= 0x0C;
p                1630 drivers/tty/mxser.c 					me->fifo[p] = 1;
p                1633 drivers/tty/mxser.c 					opmode = inb(ip->opmode_ioaddr)>>((p % 4) * 2);
p                1638 drivers/tty/mxser.c 				me->iftype[p] = opmode;
p                1686 drivers/tty/mxser.c 		int p;
p                1695 drivers/tty/mxser.c 		p = tty->index % 4;
p                1704 drivers/tty/mxser.c 			mask = ModeMask[p];
p                1705 drivers/tty/mxser.c 			shiftbit = p * 2;
p                1713 drivers/tty/mxser.c 			shiftbit = p * 2;
p                 191 drivers/tty/n_hdlc.c #define bset(p,b)	((p)[(b) >> 5] |= (1 << ((b) & 0x1f)))
p                1270 drivers/tty/n_r3964.c 	const unsigned char *p;
p                1274 drivers/tty/n_r3964.c 	for (i = count, p = cp, f = fp; i; i--, p++) {
p                1278 drivers/tty/n_r3964.c 			receive_char(pInfo, *p);
p                 840 drivers/tty/nozomi.c 	char *p = buf;
p                 842 drivers/tty/nozomi.c 	interrupt & MDM_DL1 ? p += snprintf(p, TMP_BUF_MAX, "MDM_DL1 ") : NULL;
p                 843 drivers/tty/nozomi.c 	interrupt & MDM_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
p                 846 drivers/tty/nozomi.c 	interrupt & MDM_UL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
p                 848 drivers/tty/nozomi.c 	interrupt & MDM_UL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
p                 851 drivers/tty/nozomi.c 	interrupt & DIAG_DL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
p                 853 drivers/tty/nozomi.c 	interrupt & DIAG_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
p                 856 drivers/tty/nozomi.c 	interrupt & DIAG_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
p                 859 drivers/tty/nozomi.c 	interrupt & APP1_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
p                 861 drivers/tty/nozomi.c 	interrupt & APP2_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
p                 864 drivers/tty/nozomi.c 	interrupt & APP1_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
p                 866 drivers/tty/nozomi.c 	interrupt & APP2_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
p                 869 drivers/tty/nozomi.c 	interrupt & CTRL_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
p                 871 drivers/tty/nozomi.c 	interrupt & CTRL_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
p                 874 drivers/tty/nozomi.c 	interrupt & RESET ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
p                  57 drivers/tty/rocket_int.h static inline void out32(unsigned short port, Byte_t *p)
p                  59 drivers/tty/rocket_int.h 	u32 value = get_unaligned_le32(p);
p                  17 drivers/tty/serial/8250/8250.h 	int (*tx_dma)(struct uart_8250_port *p);
p                  18 drivers/tty/serial/8250/8250.h 	int (*rx_dma)(struct uart_8250_port *p);
p                 121 drivers/tty/serial/8250/8250.h void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p);
p                 153 drivers/tty/serial/8250/8250.h void serial8250_rpm_get(struct uart_8250_port *p);
p                 154 drivers/tty/serial/8250/8250.h void serial8250_rpm_put(struct uart_8250_port *p);
p                 156 drivers/tty/serial/8250/8250.h void serial8250_rpm_get_tx(struct uart_8250_port *p);
p                 157 drivers/tty/serial/8250/8250.h void serial8250_rpm_put_tx(struct uart_8250_port *p);
p                 159 drivers/tty/serial/8250/8250.h int serial8250_em485_init(struct uart_8250_port *p);
p                 160 drivers/tty/serial/8250/8250.h void serial8250_em485_destroy(struct uart_8250_port *p);
p                 309 drivers/tty/serial/8250/8250.h static inline int serial8250_tx_dma(struct uart_8250_port *p)
p                 313 drivers/tty/serial/8250/8250.h static inline int serial8250_rx_dma(struct uart_8250_port *p)
p                 317 drivers/tty/serial/8250/8250.h static inline void serial8250_rx_dma_flush(struct uart_8250_port *p) { }
p                 318 drivers/tty/serial/8250/8250.h static inline int serial8250_request_dma(struct uart_8250_port *p)
p                 322 drivers/tty/serial/8250/8250.h static inline void serial8250_release_dma(struct uart_8250_port *p) { }
p                 708 drivers/tty/serial/8250/8250_core.c 	struct uart_port *p;
p                 714 drivers/tty/serial/8250/8250_core.c 	p = &serial8250_ports[port->line].port;
p                 715 drivers/tty/serial/8250/8250_core.c 	p->iobase       = port->iobase;
p                 716 drivers/tty/serial/8250/8250_core.c 	p->membase      = port->membase;
p                 717 drivers/tty/serial/8250/8250_core.c 	p->irq          = port->irq;
p                 718 drivers/tty/serial/8250/8250_core.c 	p->irqflags     = port->irqflags;
p                 719 drivers/tty/serial/8250/8250_core.c 	p->uartclk      = port->uartclk;
p                 720 drivers/tty/serial/8250/8250_core.c 	p->fifosize     = port->fifosize;
p                 721 drivers/tty/serial/8250/8250_core.c 	p->regshift     = port->regshift;
p                 722 drivers/tty/serial/8250/8250_core.c 	p->iotype       = port->iotype;
p                 723 drivers/tty/serial/8250/8250_core.c 	p->flags        = port->flags;
p                 724 drivers/tty/serial/8250/8250_core.c 	p->mapbase      = port->mapbase;
p                 725 drivers/tty/serial/8250/8250_core.c 	p->mapsize      = port->mapsize;
p                 726 drivers/tty/serial/8250/8250_core.c 	p->private_data = port->private_data;
p                 727 drivers/tty/serial/8250/8250_core.c 	p->type		= port->type;
p                 728 drivers/tty/serial/8250/8250_core.c 	p->line		= port->line;
p                 730 drivers/tty/serial/8250/8250_core.c 	serial8250_set_defaults(up_to_u8250p(p));
p                 733 drivers/tty/serial/8250/8250_core.c 		p->serial_in = port->serial_in;
p                 735 drivers/tty/serial/8250/8250_core.c 		p->serial_out = port->serial_out;
p                 737 drivers/tty/serial/8250/8250_core.c 		p->handle_irq = port->handle_irq;
p                 798 drivers/tty/serial/8250/8250_core.c 	struct plat_serial8250_port *p = dev_get_platdata(&dev->dev);
p                 807 drivers/tty/serial/8250/8250_core.c 	for (i = 0; p && p->flags != 0; p++, i++) {
p                 808 drivers/tty/serial/8250/8250_core.c 		uart.port.iobase	= p->iobase;
p                 809 drivers/tty/serial/8250/8250_core.c 		uart.port.membase	= p->membase;
p                 810 drivers/tty/serial/8250/8250_core.c 		uart.port.irq		= p->irq;
p                 811 drivers/tty/serial/8250/8250_core.c 		uart.port.irqflags	= p->irqflags;
p                 812 drivers/tty/serial/8250/8250_core.c 		uart.port.uartclk	= p->uartclk;
p                 813 drivers/tty/serial/8250/8250_core.c 		uart.port.regshift	= p->regshift;
p                 814 drivers/tty/serial/8250/8250_core.c 		uart.port.iotype	= p->iotype;
p                 815 drivers/tty/serial/8250/8250_core.c 		uart.port.flags		= p->flags;
p                 816 drivers/tty/serial/8250/8250_core.c 		uart.port.mapbase	= p->mapbase;
p                 817 drivers/tty/serial/8250/8250_core.c 		uart.port.hub6		= p->hub6;
p                 818 drivers/tty/serial/8250/8250_core.c 		uart.port.private_data	= p->private_data;
p                 819 drivers/tty/serial/8250/8250_core.c 		uart.port.type		= p->type;
p                 820 drivers/tty/serial/8250/8250_core.c 		uart.port.serial_in	= p->serial_in;
p                 821 drivers/tty/serial/8250/8250_core.c 		uart.port.serial_out	= p->serial_out;
p                 822 drivers/tty/serial/8250/8250_core.c 		uart.port.handle_irq	= p->handle_irq;
p                 823 drivers/tty/serial/8250/8250_core.c 		uart.port.handle_break	= p->handle_break;
p                 824 drivers/tty/serial/8250/8250_core.c 		uart.port.set_termios	= p->set_termios;
p                 825 drivers/tty/serial/8250/8250_core.c 		uart.port.set_ldisc	= p->set_ldisc;
p                 826 drivers/tty/serial/8250/8250_core.c 		uart.port.get_mctrl	= p->get_mctrl;
p                 827 drivers/tty/serial/8250/8250_core.c 		uart.port.pm		= p->pm;
p                 834 drivers/tty/serial/8250/8250_core.c 				p->iobase, (unsigned long long)p->mapbase,
p                 835 drivers/tty/serial/8250/8250_core.c 				p->irq, ret);
p                  16 drivers/tty/serial/8250/8250_dma.c 	struct uart_8250_port	*p = param;
p                  17 drivers/tty/serial/8250/8250_dma.c 	struct uart_8250_dma	*dma = p->dma;
p                  18 drivers/tty/serial/8250/8250_dma.c 	struct circ_buf		*xmit = &p->port.state->xmit;
p                  25 drivers/tty/serial/8250/8250_dma.c 	spin_lock_irqsave(&p->port.lock, flags);
p                  31 drivers/tty/serial/8250/8250_dma.c 	p->port.icount.tx += dma->tx_size;
p                  34 drivers/tty/serial/8250/8250_dma.c 		uart_write_wakeup(&p->port);
p                  36 drivers/tty/serial/8250/8250_dma.c 	ret = serial8250_tx_dma(p);
p                  38 drivers/tty/serial/8250/8250_dma.c 		serial8250_set_THRI(p);
p                  40 drivers/tty/serial/8250/8250_dma.c 	spin_unlock_irqrestore(&p->port.lock, flags);
p                  45 drivers/tty/serial/8250/8250_dma.c 	struct uart_8250_port	*p = param;
p                  46 drivers/tty/serial/8250/8250_dma.c 	struct uart_8250_dma	*dma = p->dma;
p                  47 drivers/tty/serial/8250/8250_dma.c 	struct tty_port		*tty_port = &p->port.state->port;
p                  57 drivers/tty/serial/8250/8250_dma.c 	p->port.icount.rx += count;
p                  62 drivers/tty/serial/8250/8250_dma.c int serial8250_tx_dma(struct uart_8250_port *p)
p                  64 drivers/tty/serial/8250/8250_dma.c 	struct uart_8250_dma		*dma = p->dma;
p                  65 drivers/tty/serial/8250/8250_dma.c 	struct circ_buf			*xmit = &p->port.state->xmit;
p                  72 drivers/tty/serial/8250/8250_dma.c 	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
p                  74 drivers/tty/serial/8250/8250_dma.c 		serial8250_rpm_put_tx(p);
p                  91 drivers/tty/serial/8250/8250_dma.c 	desc->callback_param = p;
p                 101 drivers/tty/serial/8250/8250_dma.c 		serial8250_clear_THRI(p);
p                 109 drivers/tty/serial/8250/8250_dma.c int serial8250_rx_dma(struct uart_8250_port *p)
p                 111 drivers/tty/serial/8250/8250_dma.c 	struct uart_8250_dma		*dma = p->dma;
p                 125 drivers/tty/serial/8250/8250_dma.c 	desc->callback_param = p;
p                 134 drivers/tty/serial/8250/8250_dma.c void serial8250_rx_dma_flush(struct uart_8250_port *p)
p                 136 drivers/tty/serial/8250/8250_dma.c 	struct uart_8250_dma *dma = p->dma;
p                 140 drivers/tty/serial/8250/8250_dma.c 		__dma_rx_complete(p);
p                 146 drivers/tty/serial/8250/8250_dma.c int serial8250_request_dma(struct uart_8250_port *p)
p                 148 drivers/tty/serial/8250/8250_dma.c 	struct uart_8250_dma	*dma = p->dma;
p                 150 drivers/tty/serial/8250/8250_dma.c 				  dma->rx_dma_addr : p->port.mapbase;
p                 152 drivers/tty/serial/8250/8250_dma.c 				  dma->tx_dma_addr : p->port.mapbase;
p                 172 drivers/tty/serial/8250/8250_dma.c 						       p->port.dev, "rx");
p                 191 drivers/tty/serial/8250/8250_dma.c 						       p->port.dev, "tx");
p                 221 drivers/tty/serial/8250/8250_dma.c 					p->port.state->xmit.buf,
p                 231 drivers/tty/serial/8250/8250_dma.c 	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");
p                 242 drivers/tty/serial/8250/8250_dma.c void serial8250_release_dma(struct uart_8250_port *p)
p                 244 drivers/tty/serial/8250/8250_dma.c 	struct uart_8250_dma *dma = p->dma;
p                 264 drivers/tty/serial/8250/8250_dma.c 	dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
p                  57 drivers/tty/serial/8250/8250_dw.c static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
p                  59 drivers/tty/serial/8250/8250_dw.c 	struct dw8250_data *d = to_dw8250_data(p->private_data);
p                  70 drivers/tty/serial/8250/8250_dw.c static void dw8250_force_idle(struct uart_port *p)
p                  72 drivers/tty/serial/8250/8250_dw.c 	struct uart_8250_port *up = up_to_u8250p(p);
p                  75 drivers/tty/serial/8250/8250_dw.c 	(void)p->serial_in(p, UART_RX);
p                  78 drivers/tty/serial/8250/8250_dw.c static void dw8250_check_lcr(struct uart_port *p, int value)
p                  80 drivers/tty/serial/8250/8250_dw.c 	void __iomem *offset = p->membase + (UART_LCR << p->regshift);
p                  85 drivers/tty/serial/8250/8250_dw.c 		unsigned int lcr = p->serial_in(p, UART_LCR);
p                  90 drivers/tty/serial/8250/8250_dw.c 		dw8250_force_idle(p);
p                  93 drivers/tty/serial/8250/8250_dw.c 		if (p->type == PORT_OCTEON)
p                  97 drivers/tty/serial/8250/8250_dw.c 		if (p->iotype == UPIO_MEM32)
p                  99 drivers/tty/serial/8250/8250_dw.c 		else if (p->iotype == UPIO_MEM32BE)
p                 111 drivers/tty/serial/8250/8250_dw.c static void dw8250_tx_wait_empty(struct uart_port *p)
p                 118 drivers/tty/serial/8250/8250_dw.c 		lsr = readb (p->membase + (UART_LSR << p->regshift));
p                 131 drivers/tty/serial/8250/8250_dw.c static void dw8250_serial_out38x(struct uart_port *p, int offset, int value)
p                 133 drivers/tty/serial/8250/8250_dw.c 	struct dw8250_data *d = to_dw8250_data(p->private_data);
p                 137 drivers/tty/serial/8250/8250_dw.c 		dw8250_tx_wait_empty(p);
p                 139 drivers/tty/serial/8250/8250_dw.c 	writeb(value, p->membase + (offset << p->regshift));
p                 142 drivers/tty/serial/8250/8250_dw.c 		dw8250_check_lcr(p, value);
p                 146 drivers/tty/serial/8250/8250_dw.c static void dw8250_serial_out(struct uart_port *p, int offset, int value)
p                 148 drivers/tty/serial/8250/8250_dw.c 	struct dw8250_data *d = to_dw8250_data(p->private_data);
p                 150 drivers/tty/serial/8250/8250_dw.c 	writeb(value, p->membase + (offset << p->regshift));
p                 153 drivers/tty/serial/8250/8250_dw.c 		dw8250_check_lcr(p, value);
p                 156 drivers/tty/serial/8250/8250_dw.c static unsigned int dw8250_serial_in(struct uart_port *p, int offset)
p                 158 drivers/tty/serial/8250/8250_dw.c 	unsigned int value = readb(p->membase + (offset << p->regshift));
p                 160 drivers/tty/serial/8250/8250_dw.c 	return dw8250_modify_msr(p, offset, value);
p                 164 drivers/tty/serial/8250/8250_dw.c static unsigned int dw8250_serial_inq(struct uart_port *p, int offset)
p                 168 drivers/tty/serial/8250/8250_dw.c 	value = (u8)__raw_readq(p->membase + (offset << p->regshift));
p                 170 drivers/tty/serial/8250/8250_dw.c 	return dw8250_modify_msr(p, offset, value);
p                 173 drivers/tty/serial/8250/8250_dw.c static void dw8250_serial_outq(struct uart_port *p, int offset, int value)
p                 175 drivers/tty/serial/8250/8250_dw.c 	struct dw8250_data *d = to_dw8250_data(p->private_data);
p                 178 drivers/tty/serial/8250/8250_dw.c 	__raw_writeq(value, p->membase + (offset << p->regshift));
p                 180 drivers/tty/serial/8250/8250_dw.c 	__raw_readq(p->membase + (UART_LCR << p->regshift));
p                 183 drivers/tty/serial/8250/8250_dw.c 		dw8250_check_lcr(p, value);
p                 187 drivers/tty/serial/8250/8250_dw.c static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
p                 189 drivers/tty/serial/8250/8250_dw.c 	struct dw8250_data *d = to_dw8250_data(p->private_data);
p                 191 drivers/tty/serial/8250/8250_dw.c 	writel(value, p->membase + (offset << p->regshift));
p                 194 drivers/tty/serial/8250/8250_dw.c 		dw8250_check_lcr(p, value);
p                 197 drivers/tty/serial/8250/8250_dw.c static unsigned int dw8250_serial_in32(struct uart_port *p, int offset)
p                 199 drivers/tty/serial/8250/8250_dw.c 	unsigned int value = readl(p->membase + (offset << p->regshift));
p                 201 drivers/tty/serial/8250/8250_dw.c 	return dw8250_modify_msr(p, offset, value);
p                 204 drivers/tty/serial/8250/8250_dw.c static void dw8250_serial_out32be(struct uart_port *p, int offset, int value)
p                 206 drivers/tty/serial/8250/8250_dw.c 	struct dw8250_data *d = to_dw8250_data(p->private_data);
p                 208 drivers/tty/serial/8250/8250_dw.c 	iowrite32be(value, p->membase + (offset << p->regshift));
p                 211 drivers/tty/serial/8250/8250_dw.c 		dw8250_check_lcr(p, value);
p                 214 drivers/tty/serial/8250/8250_dw.c static unsigned int dw8250_serial_in32be(struct uart_port *p, int offset)
p                 216 drivers/tty/serial/8250/8250_dw.c 	unsigned int value = ioread32be(p->membase + (offset << p->regshift));
p                 218 drivers/tty/serial/8250/8250_dw.c 	return dw8250_modify_msr(p, offset, value);
p                 222 drivers/tty/serial/8250/8250_dw.c static int dw8250_handle_irq(struct uart_port *p)
p                 224 drivers/tty/serial/8250/8250_dw.c 	struct uart_8250_port *up = up_to_u8250p(p);
p                 225 drivers/tty/serial/8250/8250_dw.c 	struct dw8250_data *d = to_dw8250_data(p->private_data);
p                 226 drivers/tty/serial/8250/8250_dw.c 	unsigned int iir = p->serial_in(p, UART_IIR);
p                 241 drivers/tty/serial/8250/8250_dw.c 		spin_lock_irqsave(&p->lock, flags);
p                 242 drivers/tty/serial/8250/8250_dw.c 		status = p->serial_in(p, UART_LSR);
p                 245 drivers/tty/serial/8250/8250_dw.c 			(void) p->serial_in(p, UART_RX);
p                 247 drivers/tty/serial/8250/8250_dw.c 		spin_unlock_irqrestore(&p->lock, flags);
p                 250 drivers/tty/serial/8250/8250_dw.c 	if (serial8250_handle_irq(p, iir))
p                 255 drivers/tty/serial/8250/8250_dw.c 		(void)p->serial_in(p, d->usr_reg);
p                 275 drivers/tty/serial/8250/8250_dw.c static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
p                 279 drivers/tty/serial/8250/8250_dw.c 	struct dw8250_data *d = to_dw8250_data(p->private_data);
p                 297 drivers/tty/serial/8250/8250_dw.c 		p->uartclk = rate;
p                 300 drivers/tty/serial/8250/8250_dw.c 	p->status &= ~UPSTAT_AUTOCTS;
p                 302 drivers/tty/serial/8250/8250_dw.c 		p->status |= UPSTAT_AUTOCTS;
p                 304 drivers/tty/serial/8250/8250_dw.c 	serial8250_do_set_termios(p, termios, old);
p                 307 drivers/tty/serial/8250/8250_dw.c static void dw8250_set_ldisc(struct uart_port *p, struct ktermios *termios)
p                 309 drivers/tty/serial/8250/8250_dw.c 	struct uart_8250_port *up = up_to_u8250p(p);
p                 310 drivers/tty/serial/8250/8250_dw.c 	unsigned int mcr = p->serial_in(p, UART_MCR);
p                 318 drivers/tty/serial/8250/8250_dw.c 		p->serial_out(p, UART_MCR, mcr);
p                 320 drivers/tty/serial/8250/8250_dw.c 	serial8250_do_set_ldisc(p, termios);
p                 341 drivers/tty/serial/8250/8250_dw.c static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
p                 343 drivers/tty/serial/8250/8250_dw.c 	if (p->dev->of_node) {
p                 344 drivers/tty/serial/8250/8250_dw.c 		struct device_node *np = p->dev->of_node;
p                 350 drivers/tty/serial/8250/8250_dw.c 			p->line = id;
p                 353 drivers/tty/serial/8250/8250_dw.c 			p->serial_in = dw8250_serial_inq;
p                 354 drivers/tty/serial/8250/8250_dw.c 			p->serial_out = dw8250_serial_outq;
p                 355 drivers/tty/serial/8250/8250_dw.c 			p->flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
p                 356 drivers/tty/serial/8250/8250_dw.c 			p->type = PORT_OCTEON;
p                 361 drivers/tty/serial/8250/8250_dw.c 		if (of_device_is_big_endian(p->dev->of_node)) {
p                 362 drivers/tty/serial/8250/8250_dw.c 			p->iotype = UPIO_MEM32BE;
p                 363 drivers/tty/serial/8250/8250_dw.c 			p->serial_in = dw8250_serial_in32be;
p                 364 drivers/tty/serial/8250/8250_dw.c 			p->serial_out = dw8250_serial_out32be;
p                 367 drivers/tty/serial/8250/8250_dw.c 			p->serial_out = dw8250_serial_out38x;
p                 370 drivers/tty/serial/8250/8250_dw.c 		p->iotype = UPIO_MEM32;
p                 371 drivers/tty/serial/8250/8250_dw.c 		p->regshift = 2;
p                 372 drivers/tty/serial/8250/8250_dw.c 		p->serial_in = dw8250_serial_in32;
p                 377 drivers/tty/serial/8250/8250_dw.c 	if (platform_get_resource_byname(to_platform_device(p->dev),
p                 379 drivers/tty/serial/8250/8250_dw.c 		data->data.dma.rx_param = p->dev->parent;
p                 380 drivers/tty/serial/8250/8250_dw.c 		data->data.dma.tx_param = p->dev->parent;
p                 389 drivers/tty/serial/8250/8250_dw.c 	struct uart_port *p = &up->port;
p                 405 drivers/tty/serial/8250/8250_dw.c 	spin_lock_init(&p->lock);
p                 406 drivers/tty/serial/8250/8250_dw.c 	p->mapbase	= regs->start;
p                 407 drivers/tty/serial/8250/8250_dw.c 	p->irq		= irq;
p                 408 drivers/tty/serial/8250/8250_dw.c 	p->handle_irq	= dw8250_handle_irq;
p                 409 drivers/tty/serial/8250/8250_dw.c 	p->pm		= dw8250_do_pm;
p                 410 drivers/tty/serial/8250/8250_dw.c 	p->type		= PORT_8250;
p                 411 drivers/tty/serial/8250/8250_dw.c 	p->flags	= UPF_SHARE_IRQ | UPF_FIXED_PORT;
p                 412 drivers/tty/serial/8250/8250_dw.c 	p->dev		= dev;
p                 413 drivers/tty/serial/8250/8250_dw.c 	p->iotype	= UPIO_MEM;
p                 414 drivers/tty/serial/8250/8250_dw.c 	p->serial_in	= dw8250_serial_in;
p                 415 drivers/tty/serial/8250/8250_dw.c 	p->serial_out	= dw8250_serial_out;
p                 416 drivers/tty/serial/8250/8250_dw.c 	p->set_ldisc	= dw8250_set_ldisc;
p                 417 drivers/tty/serial/8250/8250_dw.c 	p->set_termios	= dw8250_set_termios;
p                 419 drivers/tty/serial/8250/8250_dw.c 	p->membase = devm_ioremap(dev, regs->start, resource_size(regs));
p                 420 drivers/tty/serial/8250/8250_dw.c 	if (!p->membase)
p                 429 drivers/tty/serial/8250/8250_dw.c 	p->private_data = &data->data;
p                 436 drivers/tty/serial/8250/8250_dw.c 		p->regshift = val;
p                 440 drivers/tty/serial/8250/8250_dw.c 		p->iotype = UPIO_MEM32;
p                 441 drivers/tty/serial/8250/8250_dw.c 		p->serial_in = dw8250_serial_in32;
p                 442 drivers/tty/serial/8250/8250_dw.c 		p->serial_out = dw8250_serial_out32;
p                 470 drivers/tty/serial/8250/8250_dw.c 	device_property_read_u32(dev, "clock-frequency", &p->uartclk);
p                 484 drivers/tty/serial/8250/8250_dw.c 			p->uartclk = clk_get_rate(data->clk);
p                 488 drivers/tty/serial/8250/8250_dw.c 	if (!p->uartclk) {
p                 514 drivers/tty/serial/8250/8250_dw.c 	dw8250_quirks(p, data);
p                 518 drivers/tty/serial/8250/8250_dw.c 		p->handle_irq = NULL;
p                 521 drivers/tty/serial/8250/8250_dw.c 		dw8250_setup_port(p);
p                 524 drivers/tty/serial/8250/8250_dw.c 	if (p->fifosize) {
p                 525 drivers/tty/serial/8250/8250_dw.c 		data->data.dma.rxconf.src_maxburst = p->fifosize / 4;
p                 526 drivers/tty/serial/8250/8250_dw.c 		data->data.dma.txconf.dst_maxburst = p->fifosize / 4;
p                  35 drivers/tty/serial/8250/8250_dwlib.c static inline u32 dw8250_readl_ext(struct uart_port *p, int offset)
p                  37 drivers/tty/serial/8250/8250_dwlib.c 	if (p->iotype == UPIO_MEM32BE)
p                  38 drivers/tty/serial/8250/8250_dwlib.c 		return ioread32be(p->membase + offset);
p                  39 drivers/tty/serial/8250/8250_dwlib.c 	return readl(p->membase + offset);
p                  42 drivers/tty/serial/8250/8250_dwlib.c static inline void dw8250_writel_ext(struct uart_port *p, int offset, u32 reg)
p                  44 drivers/tty/serial/8250/8250_dwlib.c 	if (p->iotype == UPIO_MEM32BE)
p                  45 drivers/tty/serial/8250/8250_dwlib.c 		iowrite32be(reg, p->membase + offset);
p                  47 drivers/tty/serial/8250/8250_dwlib.c 		writel(reg, p->membase + offset);
p                  60 drivers/tty/serial/8250/8250_dwlib.c static unsigned int dw8250_get_divisor(struct uart_port *p, unsigned int baud,
p                  64 drivers/tty/serial/8250/8250_dwlib.c 	struct dw8250_port_data *d = p->private_data;
p                  66 drivers/tty/serial/8250/8250_dwlib.c 	quot = p->uartclk / base_baud;
p                  67 drivers/tty/serial/8250/8250_dwlib.c 	rem = p->uartclk % base_baud;
p                  73 drivers/tty/serial/8250/8250_dwlib.c static void dw8250_set_divisor(struct uart_port *p, unsigned int baud,
p                  76 drivers/tty/serial/8250/8250_dwlib.c 	dw8250_writel_ext(p, DW_UART_DLF, quot_frac);
p                  77 drivers/tty/serial/8250/8250_dwlib.c 	serial8250_do_set_divisor(p, baud, quot, quot_frac);
p                  80 drivers/tty/serial/8250/8250_dwlib.c void dw8250_setup_port(struct uart_port *p)
p                  82 drivers/tty/serial/8250/8250_dwlib.c 	struct uart_8250_port *up = up_to_u8250p(p);
p                  89 drivers/tty/serial/8250/8250_dwlib.c 	reg = dw8250_readl_ext(p, DW_UART_UCV);
p                  93 drivers/tty/serial/8250/8250_dwlib.c 	dev_dbg(p->dev, "Designware UART version %c.%c%c\n",
p                  96 drivers/tty/serial/8250/8250_dwlib.c 	dw8250_writel_ext(p, DW_UART_DLF, ~0U);
p                  97 drivers/tty/serial/8250/8250_dwlib.c 	reg = dw8250_readl_ext(p, DW_UART_DLF);
p                  98 drivers/tty/serial/8250/8250_dwlib.c 	dw8250_writel_ext(p, DW_UART_DLF, 0);
p                 101 drivers/tty/serial/8250/8250_dwlib.c 		struct dw8250_port_data *d = p->private_data;
p                 104 drivers/tty/serial/8250/8250_dwlib.c 		p->get_divisor = dw8250_get_divisor;
p                 105 drivers/tty/serial/8250/8250_dwlib.c 		p->set_divisor = dw8250_set_divisor;
p                 108 drivers/tty/serial/8250/8250_dwlib.c 	reg = dw8250_readl_ext(p, DW_UART_CPR);
p                 114 drivers/tty/serial/8250/8250_dwlib.c 		p->type = PORT_16550A;
p                 115 drivers/tty/serial/8250/8250_dwlib.c 		p->flags |= UPF_FIXED_TYPE;
p                 116 drivers/tty/serial/8250/8250_dwlib.c 		p->fifosize = DW_UART_CPR_FIFO_SIZE(reg);
p                  19 drivers/tty/serial/8250/8250_dwlib.h void dw8250_setup_port(struct uart_port *p);
p                 184 drivers/tty/serial/8250/8250_early.c unsigned int au_serial_in(struct uart_port *p, int offset);
p                 185 drivers/tty/serial/8250/8250_early.c void au_serial_out(struct uart_port *p, int offset, int value);
p                  28 drivers/tty/serial/8250/8250_em.c static void serial8250_em_serial_out(struct uart_port *p, int offset, int value)
p                  32 drivers/tty/serial/8250/8250_em.c 		writeb(value, p->membase);
p                  38 drivers/tty/serial/8250/8250_em.c 		writel(value, p->membase + ((offset + 1) << 2));
p                  45 drivers/tty/serial/8250/8250_em.c 		writel(value, p->membase + (offset << 2));
p                  49 drivers/tty/serial/8250/8250_em.c static unsigned int serial8250_em_serial_in(struct uart_port *p, int offset)
p                  53 drivers/tty/serial/8250/8250_em.c 		return readb(p->membase);
p                  58 drivers/tty/serial/8250/8250_em.c 		return readl(p->membase + ((offset + 1) << 2));
p                  63 drivers/tty/serial/8250/8250_em.c 		return readl(p->membase + (offset << 2));
p                 156 drivers/tty/serial/8250/8250_exar.c static unsigned int xr17v35x_get_divisor(struct uart_port *p, unsigned int baud,
p                 161 drivers/tty/serial/8250/8250_exar.c 	quot_16 = DIV_ROUND_CLOSEST(p->uartclk, baud);
p                 167 drivers/tty/serial/8250/8250_exar.c static void xr17v35x_set_divisor(struct uart_port *p, unsigned int baud,
p                 170 drivers/tty/serial/8250/8250_exar.c 	serial8250_do_set_divisor(p, baud, quot, quot_frac);
p                 173 drivers/tty/serial/8250/8250_exar.c 	quot_frac |= serial_port_in(p, 0x2) & 0xf0;
p                 174 drivers/tty/serial/8250/8250_exar.c 	serial_port_out(p, 0x2, quot_frac);
p                 239 drivers/tty/serial/8250/8250_exar.c 	u8 __iomem *p;
p                 248 drivers/tty/serial/8250/8250_exar.c 	p = port->port.membase;
p                 250 drivers/tty/serial/8250/8250_exar.c 	writeb(0x00, p + UART_EXAR_8XMODE);
p                 251 drivers/tty/serial/8250/8250_exar.c 	writeb(UART_FCTR_EXAR_TRGD, p + UART_EXAR_FCTR);
p                 252 drivers/tty/serial/8250/8250_exar.c 	writeb(32, p + UART_EXAR_TXTRG);
p                 253 drivers/tty/serial/8250/8250_exar.c 	writeb(32, p + UART_EXAR_RXTRG);
p                 262 drivers/tty/serial/8250/8250_exar.c 			writeb(0x78, p + UART_EXAR_MPIOLVL_7_0);
p                 263 drivers/tty/serial/8250/8250_exar.c 			writeb(0x00, p + UART_EXAR_MPIOINV_7_0);
p                 264 drivers/tty/serial/8250/8250_exar.c 			writeb(0x00, p + UART_EXAR_MPIOSEL_7_0);
p                 268 drivers/tty/serial/8250/8250_exar.c 			writeb(0x00, p + UART_EXAR_MPIOLVL_7_0);
p                 269 drivers/tty/serial/8250/8250_exar.c 			writeb(0xc0, p + UART_EXAR_MPIOINV_7_0);
p                 270 drivers/tty/serial/8250/8250_exar.c 			writeb(0xc0, p + UART_EXAR_MPIOSEL_7_0);
p                 273 drivers/tty/serial/8250/8250_exar.c 		writeb(0x00, p + UART_EXAR_MPIOINT_7_0);
p                 274 drivers/tty/serial/8250/8250_exar.c 		writeb(0x00, p + UART_EXAR_MPIO3T_7_0);
p                 275 drivers/tty/serial/8250/8250_exar.c 		writeb(0x00, p + UART_EXAR_MPIOOD_7_0);
p                 303 drivers/tty/serial/8250/8250_exar.c static void setup_gpio(struct pci_dev *pcidev, u8 __iomem *p)
p                 312 drivers/tty/serial/8250/8250_exar.c 	writeb(0x00, p + UART_EXAR_MPIOINT_7_0);
p                 313 drivers/tty/serial/8250/8250_exar.c 	writeb(0x00, p + UART_EXAR_MPIOLVL_7_0);
p                 314 drivers/tty/serial/8250/8250_exar.c 	writeb(0x00, p + UART_EXAR_MPIO3T_7_0);
p                 315 drivers/tty/serial/8250/8250_exar.c 	writeb(0x00, p + UART_EXAR_MPIOINV_7_0);
p                 316 drivers/tty/serial/8250/8250_exar.c 	writeb(dir,  p + UART_EXAR_MPIOSEL_7_0);
p                 317 drivers/tty/serial/8250/8250_exar.c 	writeb(0x00, p + UART_EXAR_MPIOOD_7_0);
p                 318 drivers/tty/serial/8250/8250_exar.c 	writeb(0x00, p + UART_EXAR_MPIOINT_15_8);
p                 319 drivers/tty/serial/8250/8250_exar.c 	writeb(0x00, p + UART_EXAR_MPIOLVL_15_8);
p                 320 drivers/tty/serial/8250/8250_exar.c 	writeb(0x00, p + UART_EXAR_MPIO3T_15_8);
p                 321 drivers/tty/serial/8250/8250_exar.c 	writeb(0x00, p + UART_EXAR_MPIOINV_15_8);
p                 322 drivers/tty/serial/8250/8250_exar.c 	writeb(dir,  p + UART_EXAR_MPIOSEL_15_8);
p                 323 drivers/tty/serial/8250/8250_exar.c 	writeb(0x00, p + UART_EXAR_MPIOOD_15_8);
p                 368 drivers/tty/serial/8250/8250_exar.c 	u8 __iomem *p = port->membase;
p                 371 drivers/tty/serial/8250/8250_exar.c 	value = readb(p + UART_EXAR_FCTR);
p                 377 drivers/tty/serial/8250/8250_exar.c 	writeb(value, p + UART_EXAR_FCTR);
p                 380 drivers/tty/serial/8250/8250_exar.c 		writeb(UART_EXAR_RS485_DLY(4), p + UART_MSR);
p                 396 drivers/tty/serial/8250/8250_exar.c 	u8 __iomem *p = port->membase;
p                 417 drivers/tty/serial/8250/8250_exar.c 	value = readb(p + UART_EXAR_MPIOLVL_7_0);
p                 420 drivers/tty/serial/8250/8250_exar.c 	writeb(value, p + UART_EXAR_MPIOLVL_7_0);
p                 434 drivers/tty/serial/8250/8250_exar.c 	u8 __iomem *p = port->port.membase;
p                 436 drivers/tty/serial/8250/8250_exar.c 	writeb(IOT2040_UARTS_DEFAULT_MODE, p + UART_EXAR_MPIOLVL_7_0);
p                 437 drivers/tty/serial/8250/8250_exar.c 	writeb(IOT2040_UARTS_GPIO_LO_MODE, p + UART_EXAR_MPIOSEL_7_0);
p                 438 drivers/tty/serial/8250/8250_exar.c 	writeb(IOT2040_UARTS_ENABLE, p + UART_EXAR_MPIOLVL_15_8);
p                 439 drivers/tty/serial/8250/8250_exar.c 	writeb(IOT2040_UARTS_GPIO_HI_MODE, p + UART_EXAR_MPIOSEL_15_8);
p                 475 drivers/tty/serial/8250/8250_exar.c 	u8 __iomem *p;
p                 498 drivers/tty/serial/8250/8250_exar.c 	p = port->port.membase;
p                 500 drivers/tty/serial/8250/8250_exar.c 	writeb(0x00, p + UART_EXAR_8XMODE);
p                 501 drivers/tty/serial/8250/8250_exar.c 	writeb(UART_FCTR_EXAR_TRGD, p + UART_EXAR_FCTR);
p                 502 drivers/tty/serial/8250/8250_exar.c 	writeb(128, p + UART_EXAR_TXTRG);
p                 503 drivers/tty/serial/8250/8250_exar.c 	writeb(128, p + UART_EXAR_RXTRG);
p                 507 drivers/tty/serial/8250/8250_exar.c 		setup_gpio(pcidev, p);
p                 147 drivers/tty/serial/8250/8250_ingenic.c static void ingenic_uart_serial_out(struct uart_port *p, int offset, int value)
p                 170 drivers/tty/serial/8250/8250_ingenic.c 		ier = p->serial_in(p, UART_IER);
p                 182 drivers/tty/serial/8250/8250_ingenic.c 	writeb(value, p->membase + (offset << p->regshift));
p                 185 drivers/tty/serial/8250/8250_ingenic.c static unsigned int ingenic_uart_serial_in(struct uart_port *p, int offset)
p                 189 drivers/tty/serial/8250/8250_ingenic.c 	value = readb(p->membase + (offset << p->regshift));
p                  87 drivers/tty/serial/8250/8250_lpc18xx.c static void lpc18xx_uart_serial_out(struct uart_port *p, int offset, int value)
p                  97 drivers/tty/serial/8250/8250_lpc18xx.c 	offset = offset << p->regshift;
p                  98 drivers/tty/serial/8250/8250_lpc18xx.c 	writel(value, p->membase + offset);
p                  53 drivers/tty/serial/8250/8250_lpss.c 	int (*setup)(struct lpss8250 *, struct uart_port *p);
p                  72 drivers/tty/serial/8250/8250_lpss.c static void byt_set_termios(struct uart_port *p, struct ktermios *termios,
p                  76 drivers/tty/serial/8250/8250_lpss.c 	struct lpss8250 *lpss = to_lpss8250(p->private_data);
p                  95 drivers/tty/serial/8250/8250_lpss.c 	p->uartclk = fuart;
p                  99 drivers/tty/serial/8250/8250_lpss.c 	writel(reg, p->membase + BYT_PRV_CLK);
p                 101 drivers/tty/serial/8250/8250_lpss.c 	writel(reg, p->membase + BYT_PRV_CLK);
p                 103 drivers/tty/serial/8250/8250_lpss.c 	p->status &= ~UPSTAT_AUTOCTS;
p                 105 drivers/tty/serial/8250/8250_lpss.c 		p->status |= UPSTAT_AUTOCTS;
p                 107 drivers/tty/serial/8250/8250_lpss.c 	serial8250_do_set_termios(p, termios, old);
p                  38 drivers/tty/serial/8250/8250_mid.c 	int (*setup)(struct mid8250 *, struct uart_port *p);
p                  53 drivers/tty/serial/8250/8250_mid.c static int pnw_setup(struct mid8250 *mid, struct uart_port *p)
p                  55 drivers/tty/serial/8250/8250_mid.c 	struct pci_dev *pdev = to_pci_dev(p->dev);
p                  76 drivers/tty/serial/8250/8250_mid.c static int tng_handle_irq(struct uart_port *p)
p                  78 drivers/tty/serial/8250/8250_mid.c 	struct mid8250 *mid = p->private_data;
p                  79 drivers/tty/serial/8250/8250_mid.c 	struct uart_8250_port *up = up_to_u8250p(p);
p                 103 drivers/tty/serial/8250/8250_mid.c 	ret |= serial8250_handle_irq(p, serial_port_in(p, UART_IIR));
p                 107 drivers/tty/serial/8250/8250_mid.c static int tng_setup(struct mid8250 *mid, struct uart_port *p)
p                 109 drivers/tty/serial/8250/8250_mid.c 	struct pci_dev *pdev = to_pci_dev(p->dev);
p                 123 drivers/tty/serial/8250/8250_mid.c 	p->handle_irq = tng_handle_irq;
p                 127 drivers/tty/serial/8250/8250_mid.c static int dnv_handle_irq(struct uart_port *p)
p                 129 drivers/tty/serial/8250/8250_mid.c 	struct mid8250 *mid = p->private_data;
p                 130 drivers/tty/serial/8250/8250_mid.c 	struct uart_8250_port *up = up_to_u8250p(p);
p                 131 drivers/tty/serial/8250/8250_mid.c 	unsigned int fisr = serial_port_in(p, INTEL_MID_UART_FISR);
p                 152 drivers/tty/serial/8250/8250_mid.c 		ret |= serial8250_handle_irq(p, serial_port_in(p, UART_IIR));
p                 158 drivers/tty/serial/8250/8250_mid.c static int dnv_setup(struct mid8250 *mid, struct uart_port *p)
p                 161 drivers/tty/serial/8250/8250_mid.c 	struct pci_dev *pdev = to_pci_dev(p->dev);
p                 171 drivers/tty/serial/8250/8250_mid.c 	p->irq = pci_irq_vector(pdev, 0);
p                 175 drivers/tty/serial/8250/8250_mid.c 	chip->regs = p->membase;
p                 186 drivers/tty/serial/8250/8250_mid.c 	p->handle_irq = dnv_handle_irq;
p                 199 drivers/tty/serial/8250/8250_mid.c static void mid8250_set_termios(struct uart_port *p,
p                 204 drivers/tty/serial/8250/8250_mid.c 	struct mid8250 *mid = p->private_data;
p                 226 drivers/tty/serial/8250/8250_mid.c 	p->uartclk = fuart * 16 / ps;		/* core uses ps = 16 always */
p                 228 drivers/tty/serial/8250/8250_mid.c 	writel(ps, p->membase + INTEL_MID_UART_PS);		/* set PS */
p                 229 drivers/tty/serial/8250/8250_mid.c 	writel(mul, p->membase + INTEL_MID_UART_MUL);		/* set MUL */
p                 230 drivers/tty/serial/8250/8250_mid.c 	writel(div, p->membase + INTEL_MID_UART_DIV);
p                 232 drivers/tty/serial/8250/8250_mid.c 	serial8250_do_set_termios(p, termios, old);
p                 437 drivers/tty/serial/8250/8250_mtk.c static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p,
p                  30 drivers/tty/serial/8250/8250_of.c static void tegra_serial_handle_break(struct uart_port *p)
p                  35 drivers/tty/serial/8250/8250_of.c 		status = p->serial_in(p, UART_LSR);
p                  37 drivers/tty/serial/8250/8250_of.c 			status = p->serial_in(p, UART_RX);
p                 126 drivers/tty/serial/8250/8250_omap.c static void omap_8250_rx_dma_flush(struct uart_8250_port *p);
p                 128 drivers/tty/serial/8250/8250_omap.c static inline void omap_8250_rx_dma_flush(struct uart_8250_port *p) { }
p                 772 drivers/tty/serial/8250/8250_omap.c static int omap_8250_rx_dma(struct uart_8250_port *p);
p                 774 drivers/tty/serial/8250/8250_omap.c static void __dma_rx_do_complete(struct uart_8250_port *p)
p                 776 drivers/tty/serial/8250/8250_omap.c 	struct omap8250_priv	*priv = p->port.private_data;
p                 777 drivers/tty/serial/8250/8250_omap.c 	struct uart_8250_dma    *dma = p->dma;
p                 778 drivers/tty/serial/8250/8250_omap.c 	struct tty_port         *tty_port = &p->port.state->port;
p                 796 drivers/tty/serial/8250/8250_omap.c 	p->port.icount.rx += ret;
p                 797 drivers/tty/serial/8250/8250_omap.c 	p->port.icount.buf_overrun += count - ret;
p                 806 drivers/tty/serial/8250/8250_omap.c 	struct uart_8250_port *p = param;
p                 807 drivers/tty/serial/8250/8250_omap.c 	struct omap8250_priv *priv = p->port.private_data;
p                 808 drivers/tty/serial/8250/8250_omap.c 	struct uart_8250_dma *dma = p->dma;
p                 812 drivers/tty/serial/8250/8250_omap.c 	spin_lock_irqsave(&p->port.lock, flags);
p                 821 drivers/tty/serial/8250/8250_omap.c 		spin_unlock_irqrestore(&p->port.lock, flags);
p                 824 drivers/tty/serial/8250/8250_omap.c 	__dma_rx_do_complete(p);
p                 826 drivers/tty/serial/8250/8250_omap.c 		omap_8250_rx_dma(p);
p                 828 drivers/tty/serial/8250/8250_omap.c 	spin_unlock_irqrestore(&p->port.lock, flags);
p                 831 drivers/tty/serial/8250/8250_omap.c static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
p                 833 drivers/tty/serial/8250/8250_omap.c 	struct omap8250_priv	*priv = p->port.private_data;
p                 834 drivers/tty/serial/8250/8250_omap.c 	struct uart_8250_dma	*dma = p->dma;
p                 854 drivers/tty/serial/8250/8250_omap.c 	__dma_rx_do_complete(p);
p                 858 drivers/tty/serial/8250/8250_omap.c static int omap_8250_rx_dma(struct uart_8250_port *p)
p                 860 drivers/tty/serial/8250/8250_omap.c 	struct omap8250_priv		*priv = p->port.private_data;
p                 861 drivers/tty/serial/8250/8250_omap.c 	struct uart_8250_dma            *dma = p->dma;
p                 884 drivers/tty/serial/8250/8250_omap.c 	desc->callback_param = p;
p                 894 drivers/tty/serial/8250/8250_omap.c static int omap_8250_tx_dma(struct uart_8250_port *p);
p                 898 drivers/tty/serial/8250/8250_omap.c 	struct uart_8250_port	*p = param;
p                 899 drivers/tty/serial/8250/8250_omap.c 	struct uart_8250_dma	*dma = p->dma;
p                 900 drivers/tty/serial/8250/8250_omap.c 	struct circ_buf		*xmit = &p->port.state->xmit;
p                 903 drivers/tty/serial/8250/8250_omap.c 	struct omap8250_priv	*priv = p->port.private_data;
p                 908 drivers/tty/serial/8250/8250_omap.c 	spin_lock_irqsave(&p->port.lock, flags);
p                 914 drivers/tty/serial/8250/8250_omap.c 	p->port.icount.tx += dma->tx_size;
p                 918 drivers/tty/serial/8250/8250_omap.c 		omap8250_restore_regs(p);
p                 922 drivers/tty/serial/8250/8250_omap.c 		uart_write_wakeup(&p->port);
p                 924 drivers/tty/serial/8250/8250_omap.c 	if (!uart_circ_empty(xmit) && !uart_tx_stopped(&p->port)) {
p                 927 drivers/tty/serial/8250/8250_omap.c 		ret = omap_8250_tx_dma(p);
p                 930 drivers/tty/serial/8250/8250_omap.c 	} else if (p->capabilities & UART_CAP_RPM) {
p                 936 drivers/tty/serial/8250/8250_omap.c 		serial8250_set_THRI(p);
p                 939 drivers/tty/serial/8250/8250_omap.c 	spin_unlock_irqrestore(&p->port.lock, flags);
p                 942 drivers/tty/serial/8250/8250_omap.c static int omap_8250_tx_dma(struct uart_8250_port *p)
p                 944 drivers/tty/serial/8250/8250_omap.c 	struct uart_8250_dma		*dma = p->dma;
p                 945 drivers/tty/serial/8250/8250_omap.c 	struct omap8250_priv		*priv = p->port.private_data;
p                 946 drivers/tty/serial/8250/8250_omap.c 	struct circ_buf			*xmit = &p->port.state->xmit;
p                 953 drivers/tty/serial/8250/8250_omap.c 	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
p                 960 drivers/tty/serial/8250/8250_omap.c 		if (dma->tx_err || p->capabilities & UART_CAP_RPM) {
p                 964 drivers/tty/serial/8250/8250_omap.c 		serial8250_clear_THRI(p);
p                 987 drivers/tty/serial/8250/8250_omap.c 		tx_lvl = serial_in(p, UART_OMAP_TX_LVL);
p                 988 drivers/tty/serial/8250/8250_omap.c 		if (tx_lvl == p->tx_loadsz) {
p                1011 drivers/tty/serial/8250/8250_omap.c 	desc->callback_param = p;
p                1022 drivers/tty/serial/8250/8250_omap.c 	serial8250_clear_THRI(p);
p                1024 drivers/tty/serial/8250/8250_omap.c 		serial_out(p, UART_TX, xmit->buf[xmit->tail]);
p                1101 drivers/tty/serial/8250/8250_omap.c static inline int omap_8250_rx_dma(struct uart_8250_port *p)
p                 254 drivers/tty/serial/8250/8250_pci.c 	void __iomem *p;
p                 280 drivers/tty/serial/8250/8250_pci.c 	p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
p                 281 drivers/tty/serial/8250/8250_pci.c 	if (p == NULL)
p                 283 drivers/tty/serial/8250/8250_pci.c 	writel(irq_config, p + 0x4c);
p                 288 drivers/tty/serial/8250/8250_pci.c 	readl(p + 0x4c);
p                 289 drivers/tty/serial/8250/8250_pci.c 	iounmap(p);
p                 296 drivers/tty/serial/8250/8250_pci.c 	u8 __iomem *p;
p                 304 drivers/tty/serial/8250/8250_pci.c 	p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
p                 305 drivers/tty/serial/8250/8250_pci.c 	if (p != NULL) {
p                 306 drivers/tty/serial/8250/8250_pci.c 		writel(0, p + 0x4c);
p                 311 drivers/tty/serial/8250/8250_pci.c 		readl(p + 0x4c);
p                 312 drivers/tty/serial/8250/8250_pci.c 		iounmap(p);
p                 321 drivers/tty/serial/8250/8250_pci.c 	void __iomem *p;
p                 329 drivers/tty/serial/8250/8250_pci.c 	p = pci_ioremap_bar(dev, bar);
p                 330 drivers/tty/serial/8250/8250_pci.c 	if (p == NULL)
p                 334 drivers/tty/serial/8250/8250_pci.c 	writel(readl(p + NI8420_INT_ENABLE_REG) & ~(NI8420_INT_ENABLE_BIT),
p                 335 drivers/tty/serial/8250/8250_pci.c 	       p + NI8420_INT_ENABLE_REG);
p                 336 drivers/tty/serial/8250/8250_pci.c 	iounmap(p);
p                 350 drivers/tty/serial/8250/8250_pci.c 	void __iomem *p;
p                 358 drivers/tty/serial/8250/8250_pci.c 	p = pci_ioremap_bar(dev, bar);
p                 359 drivers/tty/serial/8250/8250_pci.c 	if (p == NULL)
p                 363 drivers/tty/serial/8250/8250_pci.c 	writel(MITE_LCIMR2_CLR_CPU_IE, p + MITE_LCIMR2);
p                 364 drivers/tty/serial/8250/8250_pci.c 	iounmap(p);
p                 400 drivers/tty/serial/8250/8250_pci.c 	u8 __iomem *p;
p                 402 drivers/tty/serial/8250/8250_pci.c 	p = pci_ioremap_bar(dev, 0);
p                 404 drivers/tty/serial/8250/8250_pci.c 	if (p == NULL)
p                 407 drivers/tty/serial/8250/8250_pci.c 	writeb(0x10, p + OCT_REG_CR_OFF);
p                 409 drivers/tty/serial/8250/8250_pci.c 	writeb(0x0, p + OCT_REG_CR_OFF);
p                 412 drivers/tty/serial/8250/8250_pci.c 	writeb(0x4, p + OCT_REG_CR_OFF);
p                 413 drivers/tty/serial/8250/8250_pci.c 	iounmap(p);
p                 424 drivers/tty/serial/8250/8250_pci.c 	u8 __iomem *p;
p                 426 drivers/tty/serial/8250/8250_pci.c 	p = pci_ioremap_bar(dev, 0);
p                 428 drivers/tty/serial/8250/8250_pci.c 	if (p != NULL)
p                 429 drivers/tty/serial/8250/8250_pci.c 		writeb(0, p + OCT_REG_CR_OFF);
p                 430 drivers/tty/serial/8250/8250_pci.c 	iounmap(p);
p                 466 drivers/tty/serial/8250/8250_pci.c 	void __iomem *p;
p                 480 drivers/tty/serial/8250/8250_pci.c 	p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
p                 481 drivers/tty/serial/8250/8250_pci.c 	if (p == NULL)
p                 484 drivers/tty/serial/8250/8250_pci.c 	writew(readw(p + 0x28) & data, p + 0x28);
p                 485 drivers/tty/serial/8250/8250_pci.c 	readw(p + 0x28);
p                 486 drivers/tty/serial/8250/8250_pci.c 	iounmap(p);
p                 680 drivers/tty/serial/8250/8250_pci.c 	void __iomem *p;
p                 688 drivers/tty/serial/8250/8250_pci.c 	p = pci_ioremap_bar(dev, bar);
p                 689 drivers/tty/serial/8250/8250_pci.c 	if (p == NULL)
p                 693 drivers/tty/serial/8250/8250_pci.c 	writel(readl(p + NI8420_INT_ENABLE_REG) | NI8420_INT_ENABLE_BIT,
p                 694 drivers/tty/serial/8250/8250_pci.c 	       p + NI8420_INT_ENABLE_REG);
p                 696 drivers/tty/serial/8250/8250_pci.c 	iounmap(p);
p                 709 drivers/tty/serial/8250/8250_pci.c 	void __iomem *p;
p                 719 drivers/tty/serial/8250/8250_pci.c 	p = pci_ioremap_bar(dev, bar);
p                 720 drivers/tty/serial/8250/8250_pci.c 	if (p == NULL)
p                 731 drivers/tty/serial/8250/8250_pci.c 	writel(device_window, p + MITE_IOWBSR1);
p                 734 drivers/tty/serial/8250/8250_pci.c 	writel((readl(p + MITE_IOWCR1) & MITE_IOWCR1_RAMSEL_MASK),
p                 735 drivers/tty/serial/8250/8250_pci.c 	       p + MITE_IOWCR1);
p                 738 drivers/tty/serial/8250/8250_pci.c 	writel(MITE_LCIMR1_IO_IE_0, p + MITE_LCIMR1);
p                 741 drivers/tty/serial/8250/8250_pci.c 	writel(MITE_LCIMR2_SET_CPU_IE, p + MITE_LCIMR2);
p                 743 drivers/tty/serial/8250/8250_pci.c 	iounmap(p);
p                 757 drivers/tty/serial/8250/8250_pci.c 	void __iomem *p;
p                 766 drivers/tty/serial/8250/8250_pci.c 	p = pci_ioremap_bar(dev, bar);
p                 767 drivers/tty/serial/8250/8250_pci.c 	if (!p)
p                 771 drivers/tty/serial/8250/8250_pci.c 	writeb(readb(p + offset + NI8430_PORTCON) | NI8430_PORTCON_TXVR_ENABLE,
p                 772 drivers/tty/serial/8250/8250_pci.c 	       p + offset + NI8430_PORTCON);
p                 774 drivers/tty/serial/8250/8250_pci.c 	iounmap(p);
p                1010 drivers/tty/serial/8250/8250_pci.c 	u8 __iomem *p;
p                1019 drivers/tty/serial/8250/8250_pci.c 	p = pci_iomap(dev, 0, 5);
p                1020 drivers/tty/serial/8250/8250_pci.c 	if (p == NULL)
p                1023 drivers/tty/serial/8250/8250_pci.c 	deviceID = ioread32(p);
p                1026 drivers/tty/serial/8250/8250_pci.c 		number_uarts = ioread8(p + 4);
p                1031 drivers/tty/serial/8250/8250_pci.c 	pci_iounmap(dev, p);
p                1042 drivers/tty/serial/8250/8250_pci.c 	u8 __iomem *p;
p                1051 drivers/tty/serial/8250/8250_pci.c 	p = pci_iomap(dev, 0, 5);
p                1052 drivers/tty/serial/8250/8250_pci.c 	if (p == NULL)
p                1055 drivers/tty/serial/8250/8250_pci.c 	deviceID = ioread32(p);
p                1058 drivers/tty/serial/8250/8250_pci.c 		number_uarts = ioread8(p + 4);
p                1063 drivers/tty/serial/8250/8250_pci.c 	pci_iounmap(dev, p);
p                1614 drivers/tty/serial/8250/8250_pci.c static void f815xxa_mem_serial_out(struct uart_port *p, int offset, int value)
p                1616 drivers/tty/serial/8250/8250_pci.c 	struct f815xxa_data *data = p->private_data;
p                1620 drivers/tty/serial/8250/8250_pci.c 	writeb(value, p->membase + offset);
p                1621 drivers/tty/serial/8250/8250_pci.c 	readb(p->membase + UART_SCR); /* Dummy read for flush pcie tx queue */
p                1698 drivers/tty/serial/8250/8250_pci.c static void kt_handle_break(struct uart_port *p)
p                1700 drivers/tty/serial/8250/8250_pci.c 	struct uart_8250_port *up = up_to_u8250p(p);
p                1709 drivers/tty/serial/8250/8250_pci.c static unsigned int kt_serial_in(struct uart_port *p, int offset)
p                1711 drivers/tty/serial/8250/8250_pci.c 	struct uart_8250_port *up = up_to_u8250p(p);
p                1724 drivers/tty/serial/8250/8250_pci.c 	val = inb(p->iobase + offset);
p                 352 drivers/tty/serial/8250/8250_port.c unsigned int au_serial_in(struct uart_port *p, int offset)
p                 359 drivers/tty/serial/8250/8250_port.c 	return __raw_readl(p->membase + (offset << p->regshift));
p                 362 drivers/tty/serial/8250/8250_port.c void au_serial_out(struct uart_port *p, int offset, int value)
p                 369 drivers/tty/serial/8250/8250_port.c 	__raw_writel(value, p->membase + (offset << p->regshift));
p                 385 drivers/tty/serial/8250/8250_port.c static unsigned int hub6_serial_in(struct uart_port *p, int offset)
p                 387 drivers/tty/serial/8250/8250_port.c 	offset = offset << p->regshift;
p                 388 drivers/tty/serial/8250/8250_port.c 	outb(p->hub6 - 1 + offset, p->iobase);
p                 389 drivers/tty/serial/8250/8250_port.c 	return inb(p->iobase + 1);
p                 392 drivers/tty/serial/8250/8250_port.c static void hub6_serial_out(struct uart_port *p, int offset, int value)
p                 394 drivers/tty/serial/8250/8250_port.c 	offset = offset << p->regshift;
p                 395 drivers/tty/serial/8250/8250_port.c 	outb(p->hub6 - 1 + offset, p->iobase);
p                 396 drivers/tty/serial/8250/8250_port.c 	outb(value, p->iobase + 1);
p                 399 drivers/tty/serial/8250/8250_port.c static unsigned int mem_serial_in(struct uart_port *p, int offset)
p                 401 drivers/tty/serial/8250/8250_port.c 	offset = offset << p->regshift;
p                 402 drivers/tty/serial/8250/8250_port.c 	return readb(p->membase + offset);
p                 405 drivers/tty/serial/8250/8250_port.c static void mem_serial_out(struct uart_port *p, int offset, int value)
p                 407 drivers/tty/serial/8250/8250_port.c 	offset = offset << p->regshift;
p                 408 drivers/tty/serial/8250/8250_port.c 	writeb(value, p->membase + offset);
p                 411 drivers/tty/serial/8250/8250_port.c static void mem16_serial_out(struct uart_port *p, int offset, int value)
p                 413 drivers/tty/serial/8250/8250_port.c 	offset = offset << p->regshift;
p                 414 drivers/tty/serial/8250/8250_port.c 	writew(value, p->membase + offset);
p                 417 drivers/tty/serial/8250/8250_port.c static unsigned int mem16_serial_in(struct uart_port *p, int offset)
p                 419 drivers/tty/serial/8250/8250_port.c 	offset = offset << p->regshift;
p                 420 drivers/tty/serial/8250/8250_port.c 	return readw(p->membase + offset);
p                 423 drivers/tty/serial/8250/8250_port.c static void mem32_serial_out(struct uart_port *p, int offset, int value)
p                 425 drivers/tty/serial/8250/8250_port.c 	offset = offset << p->regshift;
p                 426 drivers/tty/serial/8250/8250_port.c 	writel(value, p->membase + offset);
p                 429 drivers/tty/serial/8250/8250_port.c static unsigned int mem32_serial_in(struct uart_port *p, int offset)
p                 431 drivers/tty/serial/8250/8250_port.c 	offset = offset << p->regshift;
p                 432 drivers/tty/serial/8250/8250_port.c 	return readl(p->membase + offset);
p                 435 drivers/tty/serial/8250/8250_port.c static void mem32be_serial_out(struct uart_port *p, int offset, int value)
p                 437 drivers/tty/serial/8250/8250_port.c 	offset = offset << p->regshift;
p                 438 drivers/tty/serial/8250/8250_port.c 	iowrite32be(value, p->membase + offset);
p                 441 drivers/tty/serial/8250/8250_port.c static unsigned int mem32be_serial_in(struct uart_port *p, int offset)
p                 443 drivers/tty/serial/8250/8250_port.c 	offset = offset << p->regshift;
p                 444 drivers/tty/serial/8250/8250_port.c 	return ioread32be(p->membase + offset);
p                 447 drivers/tty/serial/8250/8250_port.c static unsigned int io_serial_in(struct uart_port *p, int offset)
p                 449 drivers/tty/serial/8250/8250_port.c 	offset = offset << p->regshift;
p                 450 drivers/tty/serial/8250/8250_port.c 	return inb(p->iobase + offset);
p                 453 drivers/tty/serial/8250/8250_port.c static void io_serial_out(struct uart_port *p, int offset, int value)
p                 455 drivers/tty/serial/8250/8250_port.c 	offset = offset << p->regshift;
p                 456 drivers/tty/serial/8250/8250_port.c 	outb(value, p->iobase + offset);
p                 461 drivers/tty/serial/8250/8250_port.c static void set_io_from_upio(struct uart_port *p)
p                 463 drivers/tty/serial/8250/8250_port.c 	struct uart_8250_port *up = up_to_u8250p(p);
p                 468 drivers/tty/serial/8250/8250_port.c 	switch (p->iotype) {
p                 470 drivers/tty/serial/8250/8250_port.c 		p->serial_in = hub6_serial_in;
p                 471 drivers/tty/serial/8250/8250_port.c 		p->serial_out = hub6_serial_out;
p                 475 drivers/tty/serial/8250/8250_port.c 		p->serial_in = mem_serial_in;
p                 476 drivers/tty/serial/8250/8250_port.c 		p->serial_out = mem_serial_out;
p                 480 drivers/tty/serial/8250/8250_port.c 		p->serial_in = mem16_serial_in;
p                 481 drivers/tty/serial/8250/8250_port.c 		p->serial_out = mem16_serial_out;
p                 485 drivers/tty/serial/8250/8250_port.c 		p->serial_in = mem32_serial_in;
p                 486 drivers/tty/serial/8250/8250_port.c 		p->serial_out = mem32_serial_out;
p                 490 drivers/tty/serial/8250/8250_port.c 		p->serial_in = mem32be_serial_in;
p                 491 drivers/tty/serial/8250/8250_port.c 		p->serial_out = mem32be_serial_out;
p                 496 drivers/tty/serial/8250/8250_port.c 		p->serial_in = au_serial_in;
p                 497 drivers/tty/serial/8250/8250_port.c 		p->serial_out = au_serial_out;
p                 504 drivers/tty/serial/8250/8250_port.c 		p->serial_in = io_serial_in;
p                 505 drivers/tty/serial/8250/8250_port.c 		p->serial_out = io_serial_out;
p                 509 drivers/tty/serial/8250/8250_port.c 	up->cur_iotype = p->iotype;
p                 510 drivers/tty/serial/8250/8250_port.c 	p->handle_irq = serial8250_default_handle_irq;
p                 514 drivers/tty/serial/8250/8250_port.c serial_port_out_sync(struct uart_port *p, int offset, int value)
p                 516 drivers/tty/serial/8250/8250_port.c 	switch (p->iotype) {
p                 522 drivers/tty/serial/8250/8250_port.c 		p->serial_out(p, offset, value);
p                 523 drivers/tty/serial/8250/8250_port.c 		p->serial_in(p, UART_LCR);	/* safe, no side-effects */
p                 526 drivers/tty/serial/8250/8250_port.c 		p->serial_out(p, offset, value);
p                 554 drivers/tty/serial/8250/8250_port.c static void serial8250_clear_fifos(struct uart_8250_port *p)
p                 556 drivers/tty/serial/8250/8250_port.c 	if (p->capabilities & UART_CAP_FIFO) {
p                 557 drivers/tty/serial/8250/8250_port.c 		serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO);
p                 558 drivers/tty/serial/8250/8250_port.c 		serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO |
p                 560 drivers/tty/serial/8250/8250_port.c 		serial_out(p, UART_FCR, 0);
p                 564 drivers/tty/serial/8250/8250_port.c static inline void serial8250_em485_rts_after_send(struct uart_8250_port *p)
p                 566 drivers/tty/serial/8250/8250_port.c 	unsigned char mcr = serial8250_in_MCR(p);
p                 568 drivers/tty/serial/8250/8250_port.c 	if (p->port.rs485.flags & SER_RS485_RTS_AFTER_SEND)
p                 572 drivers/tty/serial/8250/8250_port.c 	serial8250_out_MCR(p, mcr);
p                 578 drivers/tty/serial/8250/8250_port.c void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p)
p                 580 drivers/tty/serial/8250/8250_port.c 	serial8250_clear_fifos(p);
p                 581 drivers/tty/serial/8250/8250_port.c 	serial_out(p, UART_FCR, p->fcr);
p                 585 drivers/tty/serial/8250/8250_port.c void serial8250_rpm_get(struct uart_8250_port *p)
p                 587 drivers/tty/serial/8250/8250_port.c 	if (!(p->capabilities & UART_CAP_RPM))
p                 589 drivers/tty/serial/8250/8250_port.c 	pm_runtime_get_sync(p->port.dev);
p                 593 drivers/tty/serial/8250/8250_port.c void serial8250_rpm_put(struct uart_8250_port *p)
p                 595 drivers/tty/serial/8250/8250_port.c 	if (!(p->capabilities & UART_CAP_RPM))
p                 597 drivers/tty/serial/8250/8250_port.c 	pm_runtime_mark_last_busy(p->port.dev);
p                 598 drivers/tty/serial/8250/8250_port.c 	pm_runtime_put_autosuspend(p->port.dev);
p                 622 drivers/tty/serial/8250/8250_port.c int serial8250_em485_init(struct uart_8250_port *p)
p                 624 drivers/tty/serial/8250/8250_port.c 	if (p->em485)
p                 627 drivers/tty/serial/8250/8250_port.c 	p->em485 = kmalloc(sizeof(struct uart_8250_em485), GFP_ATOMIC);
p                 628 drivers/tty/serial/8250/8250_port.c 	if (!p->em485)
p                 631 drivers/tty/serial/8250/8250_port.c 	hrtimer_init(&p->em485->stop_tx_timer, CLOCK_MONOTONIC,
p                 633 drivers/tty/serial/8250/8250_port.c 	hrtimer_init(&p->em485->start_tx_timer, CLOCK_MONOTONIC,
p                 635 drivers/tty/serial/8250/8250_port.c 	p->em485->stop_tx_timer.function = &serial8250_em485_handle_stop_tx;
p                 636 drivers/tty/serial/8250/8250_port.c 	p->em485->start_tx_timer.function = &serial8250_em485_handle_start_tx;
p                 637 drivers/tty/serial/8250/8250_port.c 	p->em485->port = p;
p                 638 drivers/tty/serial/8250/8250_port.c 	p->em485->active_timer = NULL;
p                 639 drivers/tty/serial/8250/8250_port.c 	serial8250_em485_rts_after_send(p);
p                 658 drivers/tty/serial/8250/8250_port.c void serial8250_em485_destroy(struct uart_8250_port *p)
p                 660 drivers/tty/serial/8250/8250_port.c 	if (!p->em485)
p                 663 drivers/tty/serial/8250/8250_port.c 	hrtimer_cancel(&p->em485->start_tx_timer);
p                 664 drivers/tty/serial/8250/8250_port.c 	hrtimer_cancel(&p->em485->stop_tx_timer);
p                 666 drivers/tty/serial/8250/8250_port.c 	kfree(p->em485);
p                 667 drivers/tty/serial/8250/8250_port.c 	p->em485 = NULL;
p                 676 drivers/tty/serial/8250/8250_port.c void serial8250_rpm_get_tx(struct uart_8250_port *p)
p                 680 drivers/tty/serial/8250/8250_port.c 	if (!(p->capabilities & UART_CAP_RPM))
p                 683 drivers/tty/serial/8250/8250_port.c 	rpm_active = xchg(&p->rpm_tx_active, 1);
p                 686 drivers/tty/serial/8250/8250_port.c 	pm_runtime_get_sync(p->port.dev);
p                 690 drivers/tty/serial/8250/8250_port.c void serial8250_rpm_put_tx(struct uart_8250_port *p)
p                 694 drivers/tty/serial/8250/8250_port.c 	if (!(p->capabilities & UART_CAP_RPM))
p                 697 drivers/tty/serial/8250/8250_port.c 	rpm_active = xchg(&p->rpm_tx_active, 0);
p                 700 drivers/tty/serial/8250/8250_port.c 	pm_runtime_mark_last_busy(p->port.dev);
p                 701 drivers/tty/serial/8250/8250_port.c 	pm_runtime_put_autosuspend(p->port.dev);
p                 710 drivers/tty/serial/8250/8250_port.c static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
p                 714 drivers/tty/serial/8250/8250_port.c 	serial8250_rpm_get(p);
p                 716 drivers/tty/serial/8250/8250_port.c 	if (p->capabilities & UART_CAP_SLEEP) {
p                 717 drivers/tty/serial/8250/8250_port.c 		if (p->capabilities & UART_CAP_EFR) {
p                 718 drivers/tty/serial/8250/8250_port.c 			lcr = serial_in(p, UART_LCR);
p                 719 drivers/tty/serial/8250/8250_port.c 			efr = serial_in(p, UART_EFR);
p                 720 drivers/tty/serial/8250/8250_port.c 			serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
p                 721 drivers/tty/serial/8250/8250_port.c 			serial_out(p, UART_EFR, UART_EFR_ECB);
p                 722 drivers/tty/serial/8250/8250_port.c 			serial_out(p, UART_LCR, 0);
p                 724 drivers/tty/serial/8250/8250_port.c 		serial_out(p, UART_IER, sleep ? UART_IERX_SLEEP : 0);
p                 725 drivers/tty/serial/8250/8250_port.c 		if (p->capabilities & UART_CAP_EFR) {
p                 726 drivers/tty/serial/8250/8250_port.c 			serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
p                 727 drivers/tty/serial/8250/8250_port.c 			serial_out(p, UART_EFR, efr);
p                 728 drivers/tty/serial/8250/8250_port.c 			serial_out(p, UART_LCR, lcr);
p                 732 drivers/tty/serial/8250/8250_port.c 	serial8250_rpm_put(p);
p                 845 drivers/tty/serial/8250/8250_port.c static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
p                 850 drivers/tty/serial/8250/8250_port.c 	old_lcr = serial_in(p, UART_LCR);
p                 851 drivers/tty/serial/8250/8250_port.c 	serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
p                 852 drivers/tty/serial/8250/8250_port.c 	old_dl = serial_dl_read(p);
p                 853 drivers/tty/serial/8250/8250_port.c 	serial_dl_write(p, 0);
p                 854 drivers/tty/serial/8250/8250_port.c 	id = serial_dl_read(p);
p                 855 drivers/tty/serial/8250/8250_port.c 	serial_dl_write(p, old_dl);
p                 857 drivers/tty/serial/8250/8250_port.c 	serial_out(p, UART_LCR, old_lcr);
p                1398 drivers/tty/serial/8250/8250_port.c static void __do_stop_tx_rs485(struct uart_8250_port *p)
p                1400 drivers/tty/serial/8250/8250_port.c 	serial8250_em485_rts_after_send(p);
p                1407 drivers/tty/serial/8250/8250_port.c 	if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
p                1408 drivers/tty/serial/8250/8250_port.c 		serial8250_clear_and_reinit_fifos(p);
p                1410 drivers/tty/serial/8250/8250_port.c 		p->ier |= UART_IER_RLSI | UART_IER_RDI;
p                1411 drivers/tty/serial/8250/8250_port.c 		serial_port_out(&p->port, UART_IER, p->ier);
p                1417 drivers/tty/serial/8250/8250_port.c 	struct uart_8250_port *p;
p                1421 drivers/tty/serial/8250/8250_port.c 	p = em485->port;
p                1423 drivers/tty/serial/8250/8250_port.c 	serial8250_rpm_get(p);
p                1424 drivers/tty/serial/8250/8250_port.c 	spin_lock_irqsave(&p->port.lock, flags);
p                1426 drivers/tty/serial/8250/8250_port.c 		__do_stop_tx_rs485(p);
p                1429 drivers/tty/serial/8250/8250_port.c 	spin_unlock_irqrestore(&p->port.lock, flags);
p                1430 drivers/tty/serial/8250/8250_port.c 	serial8250_rpm_put(p);
p                1443 drivers/tty/serial/8250/8250_port.c static void __stop_tx_rs485(struct uart_8250_port *p)
p                1445 drivers/tty/serial/8250/8250_port.c 	struct uart_8250_em485 *em485 = p->em485;
p                1451 drivers/tty/serial/8250/8250_port.c 	if (p->port.rs485.delay_rts_after_send > 0) {
p                1454 drivers/tty/serial/8250/8250_port.c 				   p->port.rs485.delay_rts_after_send);
p                1456 drivers/tty/serial/8250/8250_port.c 		__do_stop_tx_rs485(p);
p                1460 drivers/tty/serial/8250/8250_port.c static inline void __do_stop_tx(struct uart_8250_port *p)
p                1462 drivers/tty/serial/8250/8250_port.c 	if (serial8250_clear_THRI(p))
p                1463 drivers/tty/serial/8250/8250_port.c 		serial8250_rpm_put_tx(p);
p                1466 drivers/tty/serial/8250/8250_port.c static inline void __stop_tx(struct uart_8250_port *p)
p                1468 drivers/tty/serial/8250/8250_port.c 	struct uart_8250_em485 *em485 = p->em485;
p                1471 drivers/tty/serial/8250/8250_port.c 		unsigned char lsr = serial_in(p, UART_LSR);
p                1483 drivers/tty/serial/8250/8250_port.c 		__stop_tx_rs485(p);
p                1485 drivers/tty/serial/8250/8250_port.c 	__do_stop_tx(p);
p                1566 drivers/tty/serial/8250/8250_port.c 	struct uart_8250_port *p;
p                1570 drivers/tty/serial/8250/8250_port.c 	p = em485->port;
p                1572 drivers/tty/serial/8250/8250_port.c 	spin_lock_irqsave(&p->port.lock, flags);
p                1574 drivers/tty/serial/8250/8250_port.c 		__start_tx(&p->port);
p                1577 drivers/tty/serial/8250/8250_port.c 	spin_unlock_irqrestore(&p->port.lock, flags);
p                2731 drivers/tty/serial/8250/8250_port.c 	struct uart_8250_port *p = up_to_u8250p(port);
p                2733 drivers/tty/serial/8250/8250_port.c 	serial8250_set_sleep(p, state != 0);
p                  66 drivers/tty/serial/8250/8250_uniphier.c static unsigned int uniphier_serial_in(struct uart_port *p, int offset)
p                  92 drivers/tty/serial/8250/8250_uniphier.c 	return (readl(p->membase + offset) >> valshift) & 0xff;
p                  95 drivers/tty/serial/8250/8250_uniphier.c static void uniphier_serial_out(struct uart_port *p, int offset, int value)
p                 123 drivers/tty/serial/8250/8250_uniphier.c 		writel(value, p->membase + offset);
p                 130 drivers/tty/serial/8250/8250_uniphier.c 		struct uniphier8250_priv *priv = p->private_data;
p                 135 drivers/tty/serial/8250/8250_uniphier.c 		tmp = readl(p->membase + offset);
p                 138 drivers/tty/serial/8250/8250_uniphier.c 		writel(tmp, p->membase + offset);
p                 680 drivers/tty/serial/cpm_uart/cpm_uart_core.c 	u8 *p;
p                 691 drivers/tty/serial/cpm_uart/cpm_uart_core.c 		p = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
p                 693 drivers/tty/serial/cpm_uart/cpm_uart_core.c 		*p++ = port->x_char;
p                 720 drivers/tty/serial/cpm_uart/cpm_uart_core.c 		p = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
p                 722 drivers/tty/serial/cpm_uart/cpm_uart_core.c 			*p++ = xmit->buf[xmit->tail];
p                 145 drivers/tty/serial/kgdboc.c 	struct tty_driver *p;
p                 168 drivers/tty/serial/kgdboc.c 	p = tty_find_polling_driver(cptr, &tty_line);
p                 169 drivers/tty/serial/kgdboc.c 	if (!p)
p                 175 drivers/tty/serial/kgdboc.c 		if (cons->device && cons->device(cons, &idx) == p &&
p                 183 drivers/tty/serial/kgdboc.c 	kgdb_tty_driver = p;
p                 298 drivers/tty/serial/lantiq.c static irqreturn_t lqasc_irq(int irq, void *p)
p                 302 drivers/tty/serial/lantiq.c 	struct uart_port *port = p;
p                 312 drivers/tty/serial/lantiq.c 		lqasc_tx_int(irq, p);
p                 315 drivers/tty/serial/lantiq.c 		lqasc_rx_int(irq, p);
p                 318 drivers/tty/serial/lantiq.c 		lqasc_err_int(irq, p);
p                 637 drivers/tty/serial/lpc32xx_hs.c 	struct lpc32xx_hsuart_port *p = &lpc32xx_hs_ports[uarts_registered];
p                 648 drivers/tty/serial/lpc32xx_hs.c 	memset(p, 0, sizeof(*p));
p                 657 drivers/tty/serial/lpc32xx_hs.c 	p->port.mapbase = res->start;
p                 658 drivers/tty/serial/lpc32xx_hs.c 	p->port.membase = NULL;
p                 663 drivers/tty/serial/lpc32xx_hs.c 	p->port.irq = ret;
p                 665 drivers/tty/serial/lpc32xx_hs.c 	p->port.iotype = UPIO_MEM32;
p                 666 drivers/tty/serial/lpc32xx_hs.c 	p->port.uartclk = LPC32XX_MAIN_OSC_FREQ;
p                 667 drivers/tty/serial/lpc32xx_hs.c 	p->port.regshift = 2;
p                 668 drivers/tty/serial/lpc32xx_hs.c 	p->port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_IOREMAP;
p                 669 drivers/tty/serial/lpc32xx_hs.c 	p->port.dev = &pdev->dev;
p                 670 drivers/tty/serial/lpc32xx_hs.c 	p->port.ops = &serial_lpc32xx_pops;
p                 671 drivers/tty/serial/lpc32xx_hs.c 	p->port.line = uarts_registered++;
p                 672 drivers/tty/serial/lpc32xx_hs.c 	spin_lock_init(&p->port.lock);
p                 675 drivers/tty/serial/lpc32xx_hs.c 	lpc32xx_loopback_set(p->port.mapbase, 1);
p                 677 drivers/tty/serial/lpc32xx_hs.c 	ret = uart_add_one_port(&lpc32xx_hs_reg, &p->port);
p                 679 drivers/tty/serial/lpc32xx_hs.c 	platform_set_drvdata(pdev, p);
p                 689 drivers/tty/serial/lpc32xx_hs.c 	struct lpc32xx_hsuart_port *p = platform_get_drvdata(pdev);
p                 691 drivers/tty/serial/lpc32xx_hs.c 	uart_remove_one_port(&lpc32xx_hs_reg, &p->port);
p                 701 drivers/tty/serial/lpc32xx_hs.c 	struct lpc32xx_hsuart_port *p = platform_get_drvdata(pdev);
p                 703 drivers/tty/serial/lpc32xx_hs.c 	uart_suspend_port(&lpc32xx_hs_reg, &p->port);
p                 710 drivers/tty/serial/lpc32xx_hs.c 	struct lpc32xx_hsuart_port *p = platform_get_drvdata(pdev);
p                 712 drivers/tty/serial/lpc32xx_hs.c 	uart_resume_port(&lpc32xx_hs_reg, &p->port);
p                 276 drivers/tty/serial/max310x.c 	struct max310x_one	p[0];
p                 799 drivers/tty/serial/max310x.c 	struct uart_port *port = &s->p[portno].port;
p                1164 drivers/tty/serial/max310x.c 		uart_suspend_port(&max310x_uart, &s->p[i].port);
p                1165 drivers/tty/serial/max310x.c 		s->devtype->power(&s->p[i].port, 0);
p                1177 drivers/tty/serial/max310x.c 		s->devtype->power(&s->p[i].port, 1);
p                1178 drivers/tty/serial/max310x.c 		uart_resume_port(&max310x_uart, &s->p[i].port);
p                1191 drivers/tty/serial/max310x.c 	struct uart_port *port = &s->p[offset / 4].port;
p                1201 drivers/tty/serial/max310x.c 	struct uart_port *port = &s->p[offset / 4].port;
p                1210 drivers/tty/serial/max310x.c 	struct uart_port *port = &s->p[offset / 4].port;
p                1221 drivers/tty/serial/max310x.c 	struct uart_port *port = &s->p[offset / 4].port;
p                1235 drivers/tty/serial/max310x.c 	struct uart_port *port = &s->p[offset / 4].port;
p                1265 drivers/tty/serial/max310x.c 	s = devm_kzalloc(dev, struct_size(s, p, devtype->nr), GFP_KERNEL);
p                1342 drivers/tty/serial/max310x.c 		s->p[i].port.line	= line;
p                1343 drivers/tty/serial/max310x.c 		s->p[i].port.dev	= dev;
p                1344 drivers/tty/serial/max310x.c 		s->p[i].port.irq	= irq;
p                1345 drivers/tty/serial/max310x.c 		s->p[i].port.type	= PORT_MAX310X;
p                1346 drivers/tty/serial/max310x.c 		s->p[i].port.fifosize	= MAX310X_FIFO_SIZE;
p                1347 drivers/tty/serial/max310x.c 		s->p[i].port.flags	= UPF_FIXED_TYPE | UPF_LOW_LATENCY;
p                1348 drivers/tty/serial/max310x.c 		s->p[i].port.iotype	= UPIO_PORT;
p                1349 drivers/tty/serial/max310x.c 		s->p[i].port.iobase	= i * 0x20;
p                1350 drivers/tty/serial/max310x.c 		s->p[i].port.membase	= (void __iomem *)~0;
p                1351 drivers/tty/serial/max310x.c 		s->p[i].port.uartclk	= uartclk;
p                1352 drivers/tty/serial/max310x.c 		s->p[i].port.rs485_config = max310x_rs485_config;
p                1353 drivers/tty/serial/max310x.c 		s->p[i].port.ops	= &max310x_ops;
p                1355 drivers/tty/serial/max310x.c 		max310x_port_write(&s->p[i].port, MAX310X_IRQEN_REG, 0);
p                1357 drivers/tty/serial/max310x.c 		max310x_port_read(&s->p[i].port, MAX310X_IRQSTS_REG);
p                1359 drivers/tty/serial/max310x.c 		INIT_WORK(&s->p[i].tx_work, max310x_tx_proc);
p                1361 drivers/tty/serial/max310x.c 		INIT_WORK(&s->p[i].md_work, max310x_md_proc);
p                1363 drivers/tty/serial/max310x.c 		INIT_WORK(&s->p[i].rs_work, max310x_rs_proc);
p                1365 drivers/tty/serial/max310x.c 		s->p[i].wr_header = (s->p[i].port.iobase + MAX310X_THR_REG) |
p                1367 drivers/tty/serial/max310x.c 		s->p[i].rd_header = (s->p[i].port.iobase + MAX310X_RHR_REG);
p                1370 drivers/tty/serial/max310x.c 		ret = uart_add_one_port(&max310x_uart, &s->p[i].port);
p                1372 drivers/tty/serial/max310x.c 			s->p[i].port.dev = NULL;
p                1378 drivers/tty/serial/max310x.c 		devtype->power(&s->p[i].port, 0);
p                1409 drivers/tty/serial/max310x.c 		if (s->p[i].port.dev) {
p                1410 drivers/tty/serial/max310x.c 			uart_remove_one_port(&max310x_uart, &s->p[i].port);
p                1411 drivers/tty/serial/max310x.c 			clear_bit(s->p[i].port.line, max310x_lines);
p                1427 drivers/tty/serial/max310x.c 		cancel_work_sync(&s->p[i].tx_work);
p                1428 drivers/tty/serial/max310x.c 		cancel_work_sync(&s->p[i].md_work);
p                1429 drivers/tty/serial/max310x.c 		cancel_work_sync(&s->p[i].rs_work);
p                1430 drivers/tty/serial/max310x.c 		uart_remove_one_port(&max310x_uart, &s->p[i].port);
p                1431 drivers/tty/serial/max310x.c 		clear_bit(s->p[i].port.line, max310x_lines);
p                1432 drivers/tty/serial/max310x.c 		s->devtype->power(&s->p[i].port, 0);
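
The max310x.c entries above (and the sc16is7xx.c entries further on) illustrate the trailing per-port array idiom: the driver state ends in an array p[] of per-port structures, and probe allocates the whole thing in one call using struct_size(). Below is a plain-C sketch of that allocation pattern, written with a standard flexible array member instead of the older p[0] declaration; the struct and field names are made up for illustration.

#include <stdio.h>
#include <stdlib.h>

struct port {
	int line;
};

struct chip {
	int nr;
	struct port p[];	/* flexible array member, sized at allocation time */
};

int main(void)
{
	int nr = 4;
	/* Rough equivalent of devm_kzalloc(dev, struct_size(s, p, nr), GFP_KERNEL):
	 * one zeroed allocation covering the header plus nr trailing ports. */
	struct chip *s = calloc(1, sizeof(*s) + nr * sizeof(s->p[0]));

	if (!s)
		return 1;
	s->nr = nr;
	for (int i = 0; i < nr; i++)
		s->p[i].line = i;
	printf("port 3 line=%d\n", s->p[3].line);
	free(s);
	return 0;
}
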
p                  38 drivers/tty/serial/mcf.c #define	mcf_getppdcd(p)		(1)
p                  41 drivers/tty/serial/mcf.c #define	mcf_getppdtr(p)		(1)
p                  44 drivers/tty/serial/mcf.c #define	mcf_setppdtr(p, v)	do { } while (0)
p                  67 drivers/tty/serial/mux.c #define UART_PUT_CHAR(p, c) __raw_writel((c), (p)->membase + IO_DATA_REG_OFFSET)
p                  68 drivers/tty/serial/mux.c #define UART_GET_FIFO_CNT(p) __raw_readl((p)->membase + IO_DCOUNT_REG_OFFSET)
p                 171 drivers/tty/serial/omap-serial.c #define to_uart_omap_port(p) ((container_of((p), struct uart_omap_port, port)))
p                  70 drivers/tty/serial/pmac_zilog.h #define to_pmz(p) ((struct uart_pmac_port *)(p))
p                 683 drivers/tty/serial/rp2.c 		struct uart_port *p;
p                 692 drivers/tty/serial/rp2.c 		p = &rp->port;
p                 693 drivers/tty/serial/rp2.c 		p->line = card->minor_start + i;
p                 694 drivers/tty/serial/rp2.c 		p->dev = &card->pdev->dev;
p                 695 drivers/tty/serial/rp2.c 		p->type = PORT_RP2;
p                 696 drivers/tty/serial/rp2.c 		p->iotype = UPIO_MEM32;
p                 697 drivers/tty/serial/rp2.c 		p->uartclk = UART_CLOCK;
p                 698 drivers/tty/serial/rp2.c 		p->regshift = 2;
p                 699 drivers/tty/serial/rp2.c 		p->fifosize = FIFO_SIZE;
p                 700 drivers/tty/serial/rp2.c 		p->ops = &rp2_uart_ops;
p                 701 drivers/tty/serial/rp2.c 		p->irq = card->pdev->irq;
p                 702 drivers/tty/serial/rp2.c 		p->membase = rp->base;
p                 703 drivers/tty/serial/rp2.c 		p->mapbase = phys_base + RP2_PORT_BASE + j*RP2_PORT_SPACING;
p                 709 drivers/tty/serial/rp2.c 			p->mapbase += RP2_ASIC_SPACING;
p                 713 drivers/tty/serial/rp2.c 		rc = uart_add_one_port(&rp2_uart_driver, p);
p                 856 drivers/tty/serial/samsung.c static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
p                 858 drivers/tty/serial/samsung.c 	struct s3c24xx_uart_dma	*dma = p->dma;
p                 866 drivers/tty/serial/samsung.c 	dma->rx_conf.src_addr		= p->port.mapbase + S3C2410_URXH;
p                 871 drivers/tty/serial/samsung.c 	dma->tx_conf.dst_addr		= p->port.mapbase + S3C2410_UTXH;
p                 874 drivers/tty/serial/samsung.c 	dma->rx_chan = dma_request_chan(p->port.dev, "rx");
p                 892 drivers/tty/serial/samsung.c 	dma->tx_chan = dma_request_chan(p->port.dev, "tx");
p                 918 drivers/tty/serial/samsung.c 	dma->rx_addr = dma_map_single(p->port.dev, dma->rx_buf,
p                 920 drivers/tty/serial/samsung.c 	if (dma_mapping_error(p->port.dev, dma->rx_addr)) {
p                 927 drivers/tty/serial/samsung.c 	dma->tx_addr = dma_map_single(p->port.dev, p->port.state->xmit.buf,
p                 929 drivers/tty/serial/samsung.c 	if (dma_mapping_error(p->port.dev, dma->tx_addr)) {
p                 938 drivers/tty/serial/samsung.c 	dma_unmap_single(p->port.dev, dma->rx_addr, dma->rx_size,
p                 948 drivers/tty/serial/samsung.c 		dev_warn(p->port.dev, "%s, DMA will not be used\n", reason);
p                 952 drivers/tty/serial/samsung.c static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
p                 954 drivers/tty/serial/samsung.c 	struct s3c24xx_uart_dma	*dma = p->dma;
p                 958 drivers/tty/serial/samsung.c 		dma_unmap_single(p->port.dev, dma->rx_addr,
p                 967 drivers/tty/serial/samsung.c 		dma_unmap_single(p->port.dev, dma->tx_addr,
p                 332 drivers/tty/serial/sc16is7xx.c 	struct sc16is7xx_one		p[0];
p                 343 drivers/tty/serial/sc16is7xx.c #define to_sc16is7xx_port(p,e)	((container_of((p), struct sc16is7xx_port, e)))
p                 344 drivers/tty/serial/sc16is7xx.c #define to_sc16is7xx_one(p,e)	((container_of((p), struct sc16is7xx_one, e)))
p                 680 drivers/tty/serial/sc16is7xx.c 	struct uart_port *port = &s->p[portno].port;
p                1131 drivers/tty/serial/sc16is7xx.c 	struct uart_port *port = &s->p[0].port;
p                1141 drivers/tty/serial/sc16is7xx.c 	struct uart_port *port = &s->p[0].port;
p                1151 drivers/tty/serial/sc16is7xx.c 	struct uart_port *port = &s->p[0].port;
p                1162 drivers/tty/serial/sc16is7xx.c 	struct uart_port *port = &s->p[0].port;
p                1191 drivers/tty/serial/sc16is7xx.c 	s = devm_kzalloc(dev, struct_size(s, p, devtype->nr_uart), GFP_KERNEL);
p                1257 drivers/tty/serial/sc16is7xx.c 		s->p[i].line		= i;
p                1259 drivers/tty/serial/sc16is7xx.c 		s->p[i].port.dev	= dev;
p                1260 drivers/tty/serial/sc16is7xx.c 		s->p[i].port.irq	= irq;
p                1261 drivers/tty/serial/sc16is7xx.c 		s->p[i].port.type	= PORT_SC16IS7XX;
p                1262 drivers/tty/serial/sc16is7xx.c 		s->p[i].port.fifosize	= SC16IS7XX_FIFO_SIZE;
p                1263 drivers/tty/serial/sc16is7xx.c 		s->p[i].port.flags	= UPF_FIXED_TYPE | UPF_LOW_LATENCY;
p                1264 drivers/tty/serial/sc16is7xx.c 		s->p[i].port.iotype	= UPIO_PORT;
p                1265 drivers/tty/serial/sc16is7xx.c 		s->p[i].port.uartclk	= freq;
p                1266 drivers/tty/serial/sc16is7xx.c 		s->p[i].port.rs485_config = sc16is7xx_config_rs485;
p                1267 drivers/tty/serial/sc16is7xx.c 		s->p[i].port.ops	= &sc16is7xx_ops;
p                1268 drivers/tty/serial/sc16is7xx.c 		s->p[i].port.line	= sc16is7xx_alloc_line();
p                1269 drivers/tty/serial/sc16is7xx.c 		if (s->p[i].port.line >= SC16IS7XX_MAX_DEVS) {
p                1275 drivers/tty/serial/sc16is7xx.c 		sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_IER_REG, 0);
p                1277 drivers/tty/serial/sc16is7xx.c 		sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_EFCR_REG,
p                1281 drivers/tty/serial/sc16is7xx.c 		kthread_init_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
p                1282 drivers/tty/serial/sc16is7xx.c 		kthread_init_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
p                1284 drivers/tty/serial/sc16is7xx.c 		uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
p                1287 drivers/tty/serial/sc16is7xx.c 		sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_LCR_REG,
p                1293 drivers/tty/serial/sc16is7xx.c 		sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_EFR_REG,
p                1299 drivers/tty/serial/sc16is7xx.c 		sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_LCR_REG, 0x00);
p                1302 drivers/tty/serial/sc16is7xx.c 		sc16is7xx_power(&s->p[i].port, 0);
p                1313 drivers/tty/serial/sc16is7xx.c 		uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
p                1314 drivers/tty/serial/sc16is7xx.c 		clear_bit(s->p[i].port.line, &sc16is7xx_lines);
p                1343 drivers/tty/serial/sc16is7xx.c 		uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
p                1344 drivers/tty/serial/sc16is7xx.c 		clear_bit(s->p[i].port.line, &sc16is7xx_lines);
p                1345 drivers/tty/serial/sc16is7xx.c 		sc16is7xx_power(&s->p[i].port, 0);
p                1975 drivers/tty/serial/serial_core.c int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
p                1978 drivers/tty/serial/serial_core.c 	if (strncmp(p, "mmio,", 5) == 0) {
p                1980 drivers/tty/serial/serial_core.c 		p += 5;
p                1981 drivers/tty/serial/serial_core.c 	} else if (strncmp(p, "mmio16,", 7) == 0) {
p                1983 drivers/tty/serial/serial_core.c 		p += 7;
p                1984 drivers/tty/serial/serial_core.c 	} else if (strncmp(p, "mmio32,", 7) == 0) {
p                1986 drivers/tty/serial/serial_core.c 		p += 7;
p                1987 drivers/tty/serial/serial_core.c 	} else if (strncmp(p, "mmio32be,", 9) == 0) {
p                1989 drivers/tty/serial/serial_core.c 		p += 9;
p                1990 drivers/tty/serial/serial_core.c 	} else if (strncmp(p, "mmio32native,", 13) == 0) {
p                1993 drivers/tty/serial/serial_core.c 		p += 13;
p                1994 drivers/tty/serial/serial_core.c 	} else if (strncmp(p, "io,", 3) == 0) {
p                1996 drivers/tty/serial/serial_core.c 		p += 3;
p                1997 drivers/tty/serial/serial_core.c 	} else if (strncmp(p, "0x", 2) == 0) {
p                2007 drivers/tty/serial/serial_core.c 	*addr = simple_strtoull(p, NULL, 0);
p                2008 drivers/tty/serial/serial_core.c 	p = strchr(p, ',');
p                2009 drivers/tty/serial/serial_core.c 	if (p)
p                2010 drivers/tty/serial/serial_core.c 		p++;
p                2012 drivers/tty/serial/serial_core.c 	*options = p;
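
The serial_core.c entries above walk uart_parse_earlycon(): the earlycon option string starts with an access-type prefix ("mmio,", "mmio32,", "io,", ...) that selects the iotype, the address follows, and whatever comes after the next comma is handed back to the caller as the remaining options. A minimal, self-contained sketch of that prefix handling, with hypothetical names (parse_earlycon_sketch and the MY_UPIO_* constants are stand-ins, not kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for the kernel's iotype constants. */
enum iotype { MY_UPIO_PORT, MY_UPIO_MEM, MY_UPIO_MEM16, MY_UPIO_MEM32, MY_UPIO_MEM32BE };

/* Match a known "<access>," prefix, remember the I/O type, parse the
 * address that follows, and hand back anything after the next ','. */
static int parse_earlycon_sketch(char *p, enum iotype *iotype,
				 unsigned long long *addr, char **options)
{
	if (strncmp(p, "mmio,", 5) == 0) {
		*iotype = MY_UPIO_MEM;
		p += 5;
	} else if (strncmp(p, "mmio32,", 7) == 0) {
		*iotype = MY_UPIO_MEM32;
		p += 7;
	} else if (strncmp(p, "io,", 3) == 0) {
		*iotype = MY_UPIO_PORT;
		p += 3;
	} else if (strncmp(p, "0x", 2) != 0) {
		return -1;	/* unknown access-type prefix */
	}

	*addr = strtoull(p, NULL, 0);
	p = strchr(p, ',');
	if (p)
		p++;
	*options = p;
	return 0;
}

int main(void)
{
	enum iotype t;
	unsigned long long addr;
	char buf[] = "mmio32,0xfe215040,115200n8", *opts;

	if (!parse_earlycon_sketch(buf, &t, &addr, &opts))
		printf("iotype=%d addr=0x%llx options=%s\n", t, addr, opts);
	return 0;
}
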
p                2588 drivers/tty/serial/serial_core.c 	struct tty_driver *p = drv->tty_driver;
p                2591 drivers/tty/serial/serial_core.c 	tty_unregister_driver(p);
p                2592 drivers/tty/serial/serial_core.c 	put_tty_driver(p);
p                2602 drivers/tty/serial/serial_core.c 	struct uart_driver *p = co->data;
p                2604 drivers/tty/serial/serial_core.c 	return p->tty_driver;
p                1084 drivers/tty/serial/serial_txx9.c 	struct uart_port *p = dev_get_platdata(&dev->dev);
p                1089 drivers/tty/serial/serial_txx9.c 	for (i = 0; p && p->uartclk != 0; p++, i++) {
p                1090 drivers/tty/serial/serial_txx9.c 		port.iobase	= p->iobase;
p                1091 drivers/tty/serial/serial_txx9.c 		port.membase	= p->membase;
p                1092 drivers/tty/serial/serial_txx9.c 		port.irq	= p->irq;
p                1093 drivers/tty/serial/serial_txx9.c 		port.uartclk	= p->uartclk;
p                1094 drivers/tty/serial/serial_txx9.c 		port.iotype	= p->iotype;
p                1095 drivers/tty/serial/serial_txx9.c 		port.flags	= p->flags;
p                1096 drivers/tty/serial/serial_txx9.c 		port.mapbase	= p->mapbase;
p                1102 drivers/tty/serial/serial_txx9.c 				p->iobase, (unsigned long long)p->mapbase,
p                1103 drivers/tty/serial/serial_txx9.c 				p->irq, ret);
p                 506 drivers/tty/serial/sh-sci.c static unsigned int sci_serial_in(struct uart_port *p, int offset)
p                 508 drivers/tty/serial/sh-sci.c 	const struct plat_sci_reg *reg = sci_getreg(p, offset);
p                 511 drivers/tty/serial/sh-sci.c 		return ioread8(p->membase + (reg->offset << p->regshift));
p                 513 drivers/tty/serial/sh-sci.c 		return ioread16(p->membase + (reg->offset << p->regshift));
p                 520 drivers/tty/serial/sh-sci.c static void sci_serial_out(struct uart_port *p, int offset, int value)
p                 522 drivers/tty/serial/sh-sci.c 	const struct plat_sci_reg *reg = sci_getreg(p, offset);
p                 525 drivers/tty/serial/sh-sci.c 		iowrite8(value, p->membase + (reg->offset << p->regshift));
p                 527 drivers/tty/serial/sh-sci.c 		iowrite16(value, p->membase + (reg->offset << p->regshift));
p                2884 drivers/tty/serial/sh-sci.c 			   const struct plat_sci_port *p, bool early)
p                2891 drivers/tty/serial/sh-sci.c 	sci_port->cfg	= p;
p                2925 drivers/tty/serial/sh-sci.c 	sci_port->params = sci_probe_regmap(p);
p                2929 drivers/tty/serial/sh-sci.c 	switch (p->type) {
p                2940 drivers/tty/serial/sh-sci.c 		if (p->regtype == SCIx_SH7705_SCIF_REGTYPE)
p                2958 drivers/tty/serial/sh-sci.c 	sci_port->sampling_rate_mask = p->sampling_rate
p                2959 drivers/tty/serial/sh-sci.c 				     ? SCI_SR(p->sampling_rate)
p                2972 drivers/tty/serial/sh-sci.c 	port->type		= p->type;
p                2973 drivers/tty/serial/sh-sci.c 	port->flags		= UPF_FIXED_PORT | UPF_BOOT_AUTOCONF | p->flags;
p                3224 drivers/tty/serial/sh-sci.c 	struct plat_sci_port *p;
p                3234 drivers/tty/serial/sh-sci.c 	p = devm_kzalloc(&pdev->dev, sizeof(struct plat_sci_port), GFP_KERNEL);
p                3235 drivers/tty/serial/sh-sci.c 	if (!p)
p                3254 drivers/tty/serial/sh-sci.c 	p->type = SCI_OF_TYPE(data);
p                3255 drivers/tty/serial/sh-sci.c 	p->regtype = SCI_OF_REGTYPE(data);
p                3259 drivers/tty/serial/sh-sci.c 	return p;
p                3264 drivers/tty/serial/sh-sci.c 				      struct plat_sci_port *p,
p                3290 drivers/tty/serial/sh-sci.c 	ret = sci_init_single(dev, sciport, index, p, false);
p                3318 drivers/tty/serial/sh-sci.c 	struct plat_sci_port *p;
p                3332 drivers/tty/serial/sh-sci.c 		p = sci_parse_dt(dev, &dev_id);
p                3333 drivers/tty/serial/sh-sci.c 		if (p == NULL)
p                3336 drivers/tty/serial/sh-sci.c 		p = dev->dev.platform_data;
p                3337 drivers/tty/serial/sh-sci.c 		if (p == NULL) {
p                3348 drivers/tty/serial/sh-sci.c 	ret = sci_probe_single(dev, dev_id, p, sp);
p                 171 drivers/tty/serial/sifive.c #define port_to_sifive_serial_port(p) (container_of((p), \
p                 421 drivers/tty/serial/sunhv.c 	char *p = con_write_page;
p                 428 drivers/tty/serial/sunhv.c 			*p++ = '\r';
p                 432 drivers/tty/serial/sunhv.c 		*p++ = *s++;
p                 435 drivers/tty/serial/sunhv.c 	*page_bytes = p - con_write_page;
p                 258 drivers/tty/serial/sunsu.c static inline void __stop_tx(struct uart_sunsu_port *p)
p                 260 drivers/tty/serial/sunsu.c 	if (p->ier & UART_IER_THRI) {
p                 261 drivers/tty/serial/sunsu.c 		p->ier &= ~UART_IER_THRI;
p                 262 drivers/tty/serial/sunsu.c 		serial_out(p, UART_IER, p->ier);
p                 330 drivers/tty/serial/ucc_uart.c 	unsigned char *p;
p                 342 drivers/tty/serial/ucc_uart.c 		p = qe2cpu_addr(bdp->buf, qe_port);
p                 344 drivers/tty/serial/ucc_uart.c 		*p++ = port->x_char;
p                 370 drivers/tty/serial/ucc_uart.c 		p = qe2cpu_addr(bdp->buf, qe_port);
p                 372 drivers/tty/serial/ucc_uart.c 			*p++ = xmit->buf[xmit->tail];
p                1824 drivers/tty/synclink_gt.c 	unsigned char *p;
p                1836 drivers/tty/synclink_gt.c 		p     = bufs[end].buf + info->rbuf_index;
p                1839 drivers/tty/synclink_gt.c 		DBGDATA(info, p, count, "rx");
p                1841 drivers/tty/synclink_gt.c 		for(i=0 ; i < count; i+=2, p+=2) {
p                1842 drivers/tty/synclink_gt.c 			ch = *p;
p                1847 drivers/tty/synclink_gt.c 			status = *(p + 1) & (BIT1 + BIT0);
p                4708 drivers/tty/synclink_gt.c 			unsigned char *p = info->tmp_rbuf;
p                4715 drivers/tty/synclink_gt.c 				memcpy(p, info->rbufs[i].buf, partial_count);
p                4716 drivers/tty/synclink_gt.c 				p += partial_count;
p                4723 drivers/tty/synclink_gt.c 				*p = (status & BIT1) ? RX_CRC_ERROR : RX_OK;
p                 335 drivers/tty/sysrq.c 	struct task_struct *p;
p                 338 drivers/tty/sysrq.c 	for_each_process(p) {
p                 339 drivers/tty/sysrq.c 		if (p->flags & PF_KTHREAD)
p                 341 drivers/tty/sysrq.c 		if (is_global_init(p))
p                 344 drivers/tty/sysrq.c 		do_send_sig_info(sig, SEND_SIG_PRIV, p, PIDTYPE_MAX);
p                 716 drivers/tty/sysrq.c 	const __be32 *p;
p                 727 drivers/tty/sysrq.c 	of_property_for_each_u32(np, "keyset", prop, p, key) {
p                1017 drivers/tty/sysrq.c #define param_check_sysrq_reset_seq(name, p)	\
p                1018 drivers/tty/sysrq.c 	__param_check(name, p, unsigned short)
p                  98 drivers/tty/tty_buffer.c static void tty_buffer_reset(struct tty_buffer *p, size_t size)
p                 100 drivers/tty/tty_buffer.c 	p->used = 0;
p                 101 drivers/tty/tty_buffer.c 	p->size = size;
p                 102 drivers/tty/tty_buffer.c 	p->next = NULL;
p                 103 drivers/tty/tty_buffer.c 	p->commit = 0;
p                 104 drivers/tty/tty_buffer.c 	p->read = 0;
p                 105 drivers/tty/tty_buffer.c 	p->flags = 0;
p                 119 drivers/tty/tty_buffer.c 	struct tty_buffer *p, *next;
p                 124 drivers/tty/tty_buffer.c 	while ((p = buf->head) != NULL) {
p                 125 drivers/tty/tty_buffer.c 		buf->head = p->next;
p                 126 drivers/tty/tty_buffer.c 		freed += p->size;
p                 127 drivers/tty/tty_buffer.c 		if (p->size > 0)
p                 128 drivers/tty/tty_buffer.c 			kfree(p);
p                 131 drivers/tty/tty_buffer.c 	llist_for_each_entry_safe(p, next, llist, free)
p                 132 drivers/tty/tty_buffer.c 		kfree(p);
p                 158 drivers/tty/tty_buffer.c 	struct tty_buffer *p;
p                 166 drivers/tty/tty_buffer.c 			p = llist_entry(free, struct tty_buffer, free);
p                 175 drivers/tty/tty_buffer.c 	p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
p                 176 drivers/tty/tty_buffer.c 	if (p == NULL)
p                 180 drivers/tty/tty_buffer.c 	tty_buffer_reset(p, size);
p                 182 drivers/tty/tty_buffer.c 	return p;
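
The tty_buffer.c entries above show the allocate-or-reuse pattern behind tty_buffer_alloc(): take a buffer off the free list when one is large enough, otherwise kmalloc() a fresh one, and in either case reset the bookkeeping fields before use. A single-threaded userspace sketch of the same idea, with simplified types (the kernel version uses a lock-free llist and GFP_ATOMIC):

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct tty_buffer: a header, data follows it. */
struct buf {
	struct buf *next;	/* free-list link */
	size_t size;		/* usable bytes */
	size_t used;
};

static struct buf *free_list;	/* single-threaded sketch only */

/* Reuse a free buffer that is big enough, otherwise allocate a new one,
 * then reset the bookkeeping fields before returning it. */
static struct buf *buf_alloc(size_t size)
{
	struct buf **pp, *p;

	for (pp = &free_list; (p = *pp) != NULL; pp = &p->next) {
		if (p->size >= size) {
			*pp = p->next;	/* unlink from the free list */
			goto found;
		}
	}

	p = malloc(sizeof(*p) + size);
	if (!p)
		return NULL;
	p->size = size;
found:
	p->next = NULL;
	p->used = 0;
	return p;
}

static void buf_free(struct buf *p)
{
	p->next = free_list;
	free_list = p;
}

int main(void)
{
	struct buf *a = buf_alloc(256);

	buf_free(a);
	printf("reused: %d\n", buf_alloc(64) == a);	/* 256 >= 64, so a is reused */
	return 0;
}
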
p                 457 drivers/tty/tty_buffer.c int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
p                 461 drivers/tty/tty_buffer.c 		count = ld->ops->receive_buf2(ld->tty, p, f, count);
p                 465 drivers/tty/tty_buffer.c 			ld->ops->receive_buf(ld->tty, p, f, count);
p                 474 drivers/tty/tty_buffer.c 	unsigned char *p = char_buf_ptr(head, head->read);
p                 481 drivers/tty/tty_buffer.c 	n = port->client_ops->receive_buf(port, p, f, count);
p                 483 drivers/tty/tty_buffer.c 		memset(p, 0, n);
p                 284 drivers/tty/tty_io.c 	struct list_head *p;
p                 288 drivers/tty/tty_io.c 	list_for_each(p, &tty->tty_files) {
p                 320 drivers/tty/tty_io.c 	struct tty_driver *p;
p                 322 drivers/tty/tty_io.c 	list_for_each_entry(p, &tty_drivers, tty_drivers) {
p                 323 drivers/tty/tty_io.c 		dev_t base = MKDEV(p->major, p->minor_start);
p                 324 drivers/tty/tty_io.c 		if (device < base || device >= base + p->num)
p                 327 drivers/tty/tty_io.c 		return tty_driver_kref_get(p);
p                 347 drivers/tty/tty_io.c 	struct tty_driver *p;
p                 365 drivers/tty/tty_io.c 	list_for_each_entry(p, &tty_drivers, tty_drivers)
p                 366 drivers/tty/tty_io.c 		if (prefix_length == strlen(p->name) && strncmp(name,
p                 367 drivers/tty/tty_io.c 					p->name, prefix_length) == 0) {
p                 368 drivers/tty/tty_io.c 			if (index < p->num) {
p                 369 drivers/tty/tty_io.c 				*number = MKDEV(p->major, p->minor_start + index);
p                 395 drivers/tty/tty_io.c 	struct tty_driver *p, *res = NULL;
p                 411 drivers/tty/tty_io.c 	list_for_each_entry(p, &tty_drivers, tty_drivers) {
p                 412 drivers/tty/tty_io.c 		if (!len || strncmp(name, p->name, len) != 0)
p                 420 drivers/tty/tty_io.c 		if (tty_line >= 0 && tty_line < p->num && p->ops &&
p                 421 drivers/tty/tty_io.c 		    p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
p                 422 drivers/tty/tty_io.c 			res = tty_driver_kref_get(p);
p                1053 drivers/tty/tty_io.c 	struct file *p = NULL;
p                1057 drivers/tty/tty_io.c 		p = get_file(redirect);
p                1060 drivers/tty/tty_io.c 	if (p) {
p                1062 drivers/tty/tty_io.c 		res = vfs_write(p, buf, count, &p->f_pos);
p                1063 drivers/tty/tty_io.c 		fput(p);
p                1115 drivers/tty/tty_io.c static void pty_line_name(struct tty_driver *driver, int index, char *p)
p                1119 drivers/tty/tty_io.c 	sprintf(p, "%s%c%x",
p                1135 drivers/tty/tty_io.c static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
p                1138 drivers/tty/tty_io.c 		return sprintf(p, "%s", driver->name);
p                1140 drivers/tty/tty_io.c 		return sprintf(p, "%s%d", driver->name,
p                2183 drivers/tty/tty_io.c static int tiocsti(struct tty_struct *tty, char __user *p)
p                2190 drivers/tty/tty_io.c 	if (get_user(ch, p))
p                2326 drivers/tty/tty_io.c static int tiocsetd(struct tty_struct *tty, int __user *p)
p                2331 drivers/tty/tty_io.c 	if (get_user(disc, p))
p                2350 drivers/tty/tty_io.c static int tiocgetd(struct tty_struct *tty, int __user *p)
p                2358 drivers/tty/tty_io.c 	ret = put_user(ld->ops->num, p);
p                2415 drivers/tty/tty_io.c static int tty_tiocmget(struct tty_struct *tty, int __user *p)
p                2423 drivers/tty/tty_io.c 			retval = put_user(retval, p);
p                2441 drivers/tty/tty_io.c 	     unsigned __user *p)
p                2449 drivers/tty/tty_io.c 	retval = get_user(val, p);
p                2539 drivers/tty/tty_io.c 	void __user *p = (void __user *)arg;
p                2573 drivers/tty/tty_io.c 		return tiocsti(tty, p);
p                2575 drivers/tty/tty_io.c 		return tiocgwinsz(real_tty, p);
p                2577 drivers/tty/tty_io.c 		return tiocswinsz(real_tty, p);
p                2589 drivers/tty/tty_io.c 		return put_user(excl, (int __user *)p);
p                2592 drivers/tty/tty_io.c 		return tiocgetd(tty, p);
p                2594 drivers/tty/tty_io.c 		return tiocsetd(tty, p);
p                2603 drivers/tty/tty_io.c 		return put_user(ret, (unsigned int __user *)p);
p                2628 drivers/tty/tty_io.c 		return tty_tiocmget(tty, p);
p                2632 drivers/tty/tty_io.c 		return tty_tiocmset(tty, cmd, p);
p                2634 drivers/tty/tty_io.c 		return tty_tiocgicount(tty, p);
p                2645 drivers/tty/tty_io.c 		return tty_tiocsserial(tty, p);
p                2647 drivers/tty/tty_io.c 		return tty_tiocgserial(tty, p);
p                2894 drivers/tty/tty_io.c 	struct task_struct *g, *p;
p                2908 drivers/tty/tty_io.c 	do_each_pid_task(session, PIDTYPE_SID, p) {
p                2910 drivers/tty/tty_io.c 			   task_pid_nr(p), p->comm);
p                2911 drivers/tty/tty_io.c 		group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
p                2912 drivers/tty/tty_io.c 	} while_each_pid_task(session, PIDTYPE_SID, p);
p                2915 drivers/tty/tty_io.c 	do_each_thread(g, p) {
p                2916 drivers/tty/tty_io.c 		if (p->signal->tty == tty) {
p                2918 drivers/tty/tty_io.c 				   task_pid_nr(p), p->comm);
p                2919 drivers/tty/tty_io.c 			group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
p                2922 drivers/tty/tty_io.c 		task_lock(p);
p                2923 drivers/tty/tty_io.c 		i = iterate_fd(p->files, 0, this_tty, tty);
p                2926 drivers/tty/tty_io.c 				   task_pid_nr(p), p->comm, i - 1);
p                2927 drivers/tty/tty_io.c 			group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
p                2929 drivers/tty/tty_io.c 		task_unlock(p);
p                2930 drivers/tty/tty_io.c 	} while_each_thread(g, p);
p                 712 drivers/tty/tty_ioctl.c 	void __user *p = (void __user *)arg;
p                 734 drivers/tty/tty_ioctl.c 		return get_tchars(real_tty, p);
p                 736 drivers/tty/tty_ioctl.c 		return set_tchars(real_tty, p);
p                 740 drivers/tty/tty_ioctl.c 		return get_ltchars(real_tty, p);
p                 742 drivers/tty/tty_ioctl.c 		return set_ltchars(real_tty, p);
p                 745 drivers/tty/tty_ioctl.c 		return set_termios(real_tty, p,  TERMIOS_FLUSH | TERMIOS_WAIT | TERMIOS_OLD);
p                 747 drivers/tty/tty_ioctl.c 		return set_termios(real_tty, p, TERMIOS_WAIT | TERMIOS_OLD);
p                 749 drivers/tty/tty_ioctl.c 		return set_termios(real_tty, p, TERMIOS_OLD);
p                 768 drivers/tty/tty_ioctl.c 		return set_termios(real_tty, p,  TERMIOS_FLUSH | TERMIOS_WAIT);
p                 770 drivers/tty/tty_ioctl.c 		return set_termios(real_tty, p, TERMIOS_WAIT);
p                 772 drivers/tty/tty_ioctl.c 		return set_termios(real_tty, p, 0);
p                 775 drivers/tty/tty_ioctl.c 		return get_termio(real_tty, p);
p                 777 drivers/tty/tty_ioctl.c 		return set_termios(real_tty, p, TERMIOS_FLUSH | TERMIOS_WAIT | TERMIOS_TERMIO);
p                 779 drivers/tty/tty_ioctl.c 		return set_termios(real_tty, p, TERMIOS_WAIT | TERMIOS_TERMIO);
p                 781 drivers/tty/tty_ioctl.c 		return set_termios(real_tty, p, TERMIOS_TERMIO);
p                 825 drivers/tty/tty_ioctl.c 		if (copy_to_user(p, &ktermx, sizeof(struct termiox)))
p                 830 drivers/tty/tty_ioctl.c 		return set_termiox(real_tty, p, 0);
p                 832 drivers/tty/tty_ioctl.c 		return set_termiox(real_tty, p, TERMIOS_WAIT);
p                 834 drivers/tty/tty_ioctl.c 		return set_termiox(real_tty, p, TERMIOS_FLUSH);
p                  73 drivers/tty/tty_jobctrl.c void proc_clear_tty(struct task_struct *p)
p                  77 drivers/tty/tty_jobctrl.c 	spin_lock_irqsave(&p->sighand->siglock, flags);
p                  78 drivers/tty/tty_jobctrl.c 	tty = p->signal->tty;
p                  79 drivers/tty/tty_jobctrl.c 	p->signal->tty = NULL;
p                  80 drivers/tty/tty_jobctrl.c 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
p                 173 drivers/tty/tty_jobctrl.c 	struct task_struct *p;
p                 174 drivers/tty/tty_jobctrl.c 	do_each_pid_task(session, PIDTYPE_SID, p) {
p                 175 drivers/tty/tty_jobctrl.c 		proc_clear_tty(p);
p                 176 drivers/tty/tty_jobctrl.c 	} while_each_pid_task(session, PIDTYPE_SID, p);
p                 193 drivers/tty/tty_jobctrl.c 	struct task_struct *p;
p                 199 drivers/tty/tty_jobctrl.c 		do_each_pid_task(tty->session, PIDTYPE_SID, p) {
p                 200 drivers/tty/tty_jobctrl.c 			spin_lock_irq(&p->sighand->siglock);
p                 201 drivers/tty/tty_jobctrl.c 			if (p->signal->tty == tty) {
p                 202 drivers/tty/tty_jobctrl.c 				p->signal->tty = NULL;
p                 207 drivers/tty/tty_jobctrl.c 			if (!p->signal->leader) {
p                 208 drivers/tty/tty_jobctrl.c 				spin_unlock_irq(&p->sighand->siglock);
p                 211 drivers/tty/tty_jobctrl.c 			__group_send_sig_info(SIGHUP, SEND_SIG_PRIV, p);
p                 212 drivers/tty/tty_jobctrl.c 			__group_send_sig_info(SIGCONT, SEND_SIG_PRIV, p);
p                 213 drivers/tty/tty_jobctrl.c 			put_pid(p->signal->tty_old_pgrp);  /* A noop */
p                 217 drivers/tty/tty_jobctrl.c 				p->signal->tty_old_pgrp = get_pid(tty->pgrp);
p                 219 drivers/tty/tty_jobctrl.c 			spin_unlock_irq(&p->sighand->siglock);
p                 220 drivers/tty/tty_jobctrl.c 		} while_each_pid_task(tty->session, PIDTYPE_SID, p);
p                 420 drivers/tty/tty_jobctrl.c 	struct task_struct *p;
p                 423 drivers/tty/tty_jobctrl.c 	p = pid_task(pgrp, PIDTYPE_PGID);
p                 424 drivers/tty/tty_jobctrl.c 	if (p == NULL)
p                 425 drivers/tty/tty_jobctrl.c 		p = pid_task(pgrp, PIDTYPE_PID);
p                 426 drivers/tty/tty_jobctrl.c 	if (p != NULL)
p                 427 drivers/tty/tty_jobctrl.c 		sid = task_session(p);
p                 443 drivers/tty/tty_jobctrl.c static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
p                 454 drivers/tty/tty_jobctrl.c 	ret =  put_user(pid_vnr(pid), p);
p                 470 drivers/tty/tty_jobctrl.c static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
p                 484 drivers/tty/tty_jobctrl.c 	if (get_user(pgrp_nr, p))
p                 517 drivers/tty/tty_jobctrl.c static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
p                 527 drivers/tty/tty_jobctrl.c 	return put_user(pid_vnr(real_tty->session), p);
p                 537 drivers/tty/tty_jobctrl.c 	void __user *p = (void __user *)arg;
p                 548 drivers/tty/tty_jobctrl.c 		return tiocgpgrp(tty, real_tty, p);
p                 550 drivers/tty/tty_jobctrl.c 		return tiocspgrp(tty, real_tty, p);
p                 552 drivers/tty/tty_jobctrl.c 		return tiocgsid(tty, real_tty, p);
p                  23 drivers/tty/tty_port.c 					const unsigned char *p,
p                  38 drivers/tty/tty_port.c 	ret = tty_ldisc_receive_buf(disc, p, (char *)f, count);
p                 199 drivers/tty/vt/consolemap.c static void set_inverse_transl(struct vc_data *conp, struct uni_pagedir *p, int i)
p                 205 drivers/tty/vt/consolemap.c 	if (!p) return;
p                 206 drivers/tty/vt/consolemap.c 	q = p->inverse_translations[i];
p                 209 drivers/tty/vt/consolemap.c 		q = p->inverse_translations[i] = kmalloc(MAX_GLYPH, GFP_KERNEL);
p                 224 drivers/tty/vt/consolemap.c 				      struct uni_pagedir *p)
p                 230 drivers/tty/vt/consolemap.c 	if (!p) return;
p                 231 drivers/tty/vt/consolemap.c 	q = p->inverse_trans_unicode;
p                 233 drivers/tty/vt/consolemap.c 		q = p->inverse_trans_unicode =
p                 241 drivers/tty/vt/consolemap.c 		p1 = p->uni_pgdir[i];
p                 273 drivers/tty/vt/consolemap.c 	struct uni_pagedir *p;
p                 278 drivers/tty/vt/consolemap.c 		p = *conp->vc_uni_pagedir_loc;
p                 279 drivers/tty/vt/consolemap.c 		if (!p)
p                 282 drivers/tty/vt/consolemap.c 			if (!p->inverse_trans_unicode)
p                 285 drivers/tty/vt/consolemap.c 				return p->inverse_trans_unicode[glyph];
p                 288 drivers/tty/vt/consolemap.c 			if (!p->inverse_translations[m])
p                 291 drivers/tty/vt/consolemap.c 				return p->inverse_translations[m][glyph];
p                 300 drivers/tty/vt/consolemap.c 	struct uni_pagedir *p, *q = NULL;
p                 305 drivers/tty/vt/consolemap.c 		p = *vc_cons[i].d->vc_uni_pagedir_loc;
p                 306 drivers/tty/vt/consolemap.c 		if (p && p != q) {
p                 307 drivers/tty/vt/consolemap.c 			set_inverse_transl(vc_cons[i].d, p, USER_MAP);
p                 308 drivers/tty/vt/consolemap.c 			set_inverse_trans_unicode(vc_cons[i].d, p);
p                 309 drivers/tty/vt/consolemap.c 			q = p;
p                 344 drivers/tty/vt/consolemap.c 	unsigned short *p = translations[USER_MAP];
p                 350 drivers/tty/vt/consolemap.c 		ch = conv_uni_to_pc(vc_cons[fg_console].d, p[i]);
p                 396 drivers/tty/vt/consolemap.c static void con_release_unimap(struct uni_pagedir *p)
p                 401 drivers/tty/vt/consolemap.c 	if (p == dflt) dflt = NULL;  
p                 403 drivers/tty/vt/consolemap.c 		p1 = p->uni_pgdir[i];
p                 409 drivers/tty/vt/consolemap.c 		p->uni_pgdir[i] = NULL;
p                 412 drivers/tty/vt/consolemap.c 		kfree(p->inverse_translations[i]);
p                 413 drivers/tty/vt/consolemap.c 		p->inverse_translations[i] = NULL;
p                 415 drivers/tty/vt/consolemap.c 	kfree(p->inverse_trans_unicode);
p                 416 drivers/tty/vt/consolemap.c 	p->inverse_trans_unicode = NULL;
p                 422 drivers/tty/vt/consolemap.c 	struct uni_pagedir *p;
p                 424 drivers/tty/vt/consolemap.c 	p = *vc->vc_uni_pagedir_loc;
p                 425 drivers/tty/vt/consolemap.c 	if (!p)
p                 428 drivers/tty/vt/consolemap.c 	if (--p->refcount)
p                 430 drivers/tty/vt/consolemap.c 	con_release_unimap(p);
p                 431 drivers/tty/vt/consolemap.c 	kfree(p);
p                 434 drivers/tty/vt/consolemap.c static int con_unify_unimap(struct vc_data *conp, struct uni_pagedir *p)
p                 443 drivers/tty/vt/consolemap.c 		if (!q || q == p || q->sum != p->sum)
p                 447 drivers/tty/vt/consolemap.c 			p1 = p->uni_pgdir[j]; q1 = q->uni_pgdir[j];
p                 466 drivers/tty/vt/consolemap.c 			con_release_unimap(p);
p                 467 drivers/tty/vt/consolemap.c 			kfree(p);
p                 475 drivers/tty/vt/consolemap.c con_insert_unipair(struct uni_pagedir *p, u_short unicode, u_short fontpos)
p                 480 drivers/tty/vt/consolemap.c 	p1 = p->uni_pgdir[n = unicode >> 11];
p                 482 drivers/tty/vt/consolemap.c 		p1 = p->uni_pgdir[n] = kmalloc_array(32, sizeof(u16 *),
p                 498 drivers/tty/vt/consolemap.c 	p->sum += (fontpos << 20) + unicode;
p                 506 drivers/tty/vt/consolemap.c 	struct uni_pagedir *p, *q;
p                 508 drivers/tty/vt/consolemap.c 	p = *vc->vc_uni_pagedir_loc;
p                 509 drivers/tty/vt/consolemap.c 	if (!p || --p->refcount) {
p                 510 drivers/tty/vt/consolemap.c 		q = kzalloc(sizeof(*p), GFP_KERNEL);
p                 512 drivers/tty/vt/consolemap.c 			if (p)
p                 513 drivers/tty/vt/consolemap.c 				p->refcount++;
p                 519 drivers/tty/vt/consolemap.c 		if (p == dflt) dflt = NULL;
p                 520 drivers/tty/vt/consolemap.c 		p->refcount++;
p                 521 drivers/tty/vt/consolemap.c 		p->sum = 0;
p                 522 drivers/tty/vt/consolemap.c 		con_release_unimap(p);
p                 539 drivers/tty/vt/consolemap.c 	struct uni_pagedir *p, *q;
p                 552 drivers/tty/vt/consolemap.c 	p = *vc->vc_uni_pagedir_loc;
p                 554 drivers/tty/vt/consolemap.c 	if (!p) {
p                 560 drivers/tty/vt/consolemap.c 	if (p->refcount > 1) {
p                 584 drivers/tty/vt/consolemap.c 		p1 = p->uni_pgdir[i];
p                 597 drivers/tty/vt/consolemap.c 						p->refcount++;
p                 598 drivers/tty/vt/consolemap.c 						*vc->vc_uni_pagedir_loc = p;
p                 618 drivers/tty/vt/consolemap.c 		p = q;
p                 619 drivers/tty/vt/consolemap.c 	} else if (p == dflt) {
p                 627 drivers/tty/vt/consolemap.c 		err1 = con_insert_unipair(p, plist->unicode, plist->fontpos);
p                 635 drivers/tty/vt/consolemap.c 	if (con_unify_unimap(vc, p))
p                 639 drivers/tty/vt/consolemap.c 		set_inverse_transl(vc, p, i); /* Update inverse translations */
p                 640 drivers/tty/vt/consolemap.c 	set_inverse_trans_unicode(vc, p);
p                 663 drivers/tty/vt/consolemap.c 	struct uni_pagedir *p;
p                 666 drivers/tty/vt/consolemap.c 		p = *vc->vc_uni_pagedir_loc;
p                 667 drivers/tty/vt/consolemap.c 		if (p == dflt)
p                 672 drivers/tty/vt/consolemap.c 		if (p && !--p->refcount) {
p                 673 drivers/tty/vt/consolemap.c 			con_release_unimap(p);
p                 674 drivers/tty/vt/consolemap.c 			kfree(p);
p                 685 drivers/tty/vt/consolemap.c 	p = *vc->vc_uni_pagedir_loc;
p                 690 drivers/tty/vt/consolemap.c 			err1 = con_insert_unipair(p, *(q++), i);
p                 695 drivers/tty/vt/consolemap.c 	if (con_unify_unimap(vc, p)) {
p                 701 drivers/tty/vt/consolemap.c 		set_inverse_transl(vc, p, i);	/* Update all inverse translations */
p                 702 drivers/tty/vt/consolemap.c 	set_inverse_trans_unicode(vc, p);
p                 703 drivers/tty/vt/consolemap.c 	dflt = p;
p                 743 drivers/tty/vt/consolemap.c 	struct uni_pagedir *p;
p                 754 drivers/tty/vt/consolemap.c 		p = *vc->vc_uni_pagedir_loc;
p                 756 drivers/tty/vt/consolemap.c 		p1 = p->uni_pgdir[i];
p                 814 drivers/tty/vt/consolemap.c 	struct uni_pagedir *p;
p                 834 drivers/tty/vt/consolemap.c 	p = *conp->vc_uni_pagedir_loc;
p                 835 drivers/tty/vt/consolemap.c 	if ((p1 = p->uni_pgdir[ucs >> 11]) &&
p                1997 drivers/tty/vt/keyboard.c 	char *p;
p                2029 drivers/tty/vt/keyboard.c 		p = func_table[i];
p                2030 drivers/tty/vt/keyboard.c 		if(p)
p                2031 drivers/tty/vt/keyboard.c 			for ( ; *p && sz; p++, sz--)
p                2032 drivers/tty/vt/keyboard.c 				if (put_user(*p, up++)) {
p                2041 drivers/tty/vt/keyboard.c 		return ((p && *p) ? -EOVERFLOW : 0);
p                 119 drivers/tty/vt/selection.c int sel_loadlut(char __user *p)
p                 122 drivers/tty/vt/selection.c 	if (copy_from_user(tmplut, (u32 __user *)(p+4), sizeof(inwordLut)))
p                 129 drivers/tty/vt/selection.c static inline int atedge(const int p, int size_row)
p                 131 drivers/tty/vt/selection.c 	return (!(p % size_row)	|| !((p + 2) % size_row));
p                 135 drivers/tty/vt/selection.c static int store_utf8(u32 c, char *p)
p                 139 drivers/tty/vt/selection.c 		p[0] = c;
p                 143 drivers/tty/vt/selection.c 		p[0] = 0xc0 | (c >> 6);
p                 144 drivers/tty/vt/selection.c 		p[1] = 0x80 | (c & 0x3f);
p                 148 drivers/tty/vt/selection.c 		p[0] = 0xe0 | (c >> 12);
p                 149 drivers/tty/vt/selection.c 		p[1] = 0x80 | ((c >> 6) & 0x3f);
p                 150 drivers/tty/vt/selection.c 		p[2] = 0x80 | (c & 0x3f);
p                 154 drivers/tty/vt/selection.c 		p[0] = 0xf0 | (c >> 18);
p                 155 drivers/tty/vt/selection.c 		p[1] = 0x80 | ((c >> 12) & 0x3f);
p                 156 drivers/tty/vt/selection.c 		p[2] = 0x80 | ((c >> 6) & 0x3f);
p                 157 drivers/tty/vt/selection.c 		p[3] = 0x80 | (c & 0x3f);
p                 161 drivers/tty/vt/selection.c 		p[0] = 0xef;
p                 162 drivers/tty/vt/selection.c 		p[1] = 0xbf;
p                 163 drivers/tty/vt/selection.c 		p[2] = 0xbd;
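
The selection.c entries above trace store_utf8(), which expands one selected code point into 1 to 4 UTF-8 bytes and falls back to U+FFFD for values it cannot represent. A small self-contained encoder along the same lines (store_utf8_sketch is an illustrative name, not the kernel function):

#include <stdio.h>
#include <stdint.h>

/* Encode one code point into p[] as UTF-8 and return the byte count.
 * Code points above U+10FFFF are replaced by U+FFFD (0xef 0xbf 0xbd). */
static int store_utf8_sketch(uint32_t c, unsigned char *p)
{
	if (c < 0x80) {
		p[0] = c;
		return 1;
	} else if (c < 0x800) {
		p[0] = 0xc0 | (c >> 6);
		p[1] = 0x80 | (c & 0x3f);
		return 2;
	} else if (c < 0x10000) {
		p[0] = 0xe0 | (c >> 12);
		p[1] = 0x80 | ((c >> 6) & 0x3f);
		p[2] = 0x80 | (c & 0x3f);
		return 3;
	} else if (c < 0x110000) {
		p[0] = 0xf0 | (c >> 18);
		p[1] = 0x80 | ((c >> 12) & 0x3f);
		p[2] = 0x80 | ((c >> 6) & 0x3f);
		p[3] = 0x80 | (c & 0x3f);
		return 4;
	}
	/* invalid code point: emit U+FFFD REPLACEMENT CHARACTER */
	p[0] = 0xef;
	p[1] = 0xbf;
	p[2] = 0xbd;
	return 3;
}

int main(void)
{
	unsigned char buf[4];
	int n = store_utf8_sketch(0x20ac, buf);	/* EURO SIGN -> e2 82 ac */

	for (int i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}
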
p                 289 drivers/tty/vt/vc_screen.c 		long p = pos;
p                 325 drivers/tty/vt/vc_screen.c 			p /= 4;
p                 326 drivers/tty/vt/vc_screen.c 			row = p / vc->vc_cols;
p                 327 drivers/tty/vt/vc_screen.c 			col = p % maxcol;
p                 341 drivers/tty/vt/vc_screen.c 			org = screen_pos(vc, p, viewed);
p                 342 drivers/tty/vt/vc_screen.c 			col = p % maxcol;
p                 343 drivers/tty/vt/vc_screen.c 			p += maxcol - col;
p                 347 drivers/tty/vt/vc_screen.c 					org = screen_pos(vc, p, viewed);
p                 349 drivers/tty/vt/vc_screen.c 					p += maxcol;
p                 353 drivers/tty/vt/vc_screen.c 			if (p < HEADER_SIZE) {
p                 361 drivers/tty/vt/vc_screen.c 				con_buf_start += p;
p                 362 drivers/tty/vt/vc_screen.c 				this_round += p;
p                 365 drivers/tty/vt/vc_screen.c 					orig_count = this_round - p;
p                 374 drivers/tty/vt/vc_screen.c 				p = HEADER_SIZE;
p                 377 drivers/tty/vt/vc_screen.c 			} else if (p & 1) {
p                 391 drivers/tty/vt/vc_screen.c 				p -= HEADER_SIZE;
p                 392 drivers/tty/vt/vc_screen.c 				p /= 2;
p                 393 drivers/tty/vt/vc_screen.c 				col = p % maxcol;
p                 395 drivers/tty/vt/vc_screen.c 				org = screen_pos(vc, p, viewed);
p                 396 drivers/tty/vt/vc_screen.c 				p += maxcol - col;
p                 408 drivers/tty/vt/vc_screen.c 						org = screen_pos(vc, p, viewed);
p                 410 drivers/tty/vt/vc_screen.c 						p += maxcol;
p                 489 drivers/tty/vt/vc_screen.c 		long p;
p                 537 drivers/tty/vt/vc_screen.c 		p = pos;
p                 539 drivers/tty/vt/vc_screen.c 			org0 = org = screen_pos(vc, p, viewed);
p                 540 drivers/tty/vt/vc_screen.c 			col = p % maxcol;
p                 541 drivers/tty/vt/vc_screen.c 			p += maxcol - col;
p                 551 drivers/tty/vt/vc_screen.c 					org = screen_pos(vc, p, viewed);
p                 553 drivers/tty/vt/vc_screen.c 					p += maxcol;
p                 557 drivers/tty/vt/vc_screen.c 			if (p < HEADER_SIZE) {
p                 561 drivers/tty/vt/vc_screen.c 				while (p < HEADER_SIZE && this_round > 0) {
p                 563 drivers/tty/vt/vc_screen.c 					header[p++] = *con_buf0++;
p                 568 drivers/tty/vt/vc_screen.c 			p -= HEADER_SIZE;
p                 569 drivers/tty/vt/vc_screen.c 			col = (p/2) % maxcol;
p                 571 drivers/tty/vt/vc_screen.c 				org0 = org = screen_pos(vc, p/2, viewed);
p                 572 drivers/tty/vt/vc_screen.c 				if ((p & 1) && this_round > 0) {
p                 585 drivers/tty/vt/vc_screen.c 					p++;
p                 587 drivers/tty/vt/vc_screen.c 						org = screen_pos(vc, p/2, viewed);
p                 591 drivers/tty/vt/vc_screen.c 				p /= 2;
p                 592 drivers/tty/vt/vc_screen.c 				p += maxcol - col;
p                 602 drivers/tty/vt/vc_screen.c 					org = screen_pos(vc, p, viewed);
p                 604 drivers/tty/vt/vc_screen.c 					p += maxcol;
p                 158 drivers/tty/vt/vt.c static int set_vesa_blanking(char __user *p);
p                 296 drivers/tty/vt/vt.c 	unsigned short *p;
p                 299 drivers/tty/vt/vt.c 		p = (unsigned short *)(vc->vc_origin + offset);
p                 301 drivers/tty/vt/vt.c 		p = (unsigned short *)(vc->vc_visible_origin + offset);
p                 303 drivers/tty/vt/vt.c 		p = vc->vc_sw->con_screen_pos(vc, offset);
p                 304 drivers/tty/vt/vt.c 	return p;
p                 348 drivers/tty/vt/vt.c 	void *p;
p                 354 drivers/tty/vt/vt.c 	p = vmalloc(memsize);
p                 355 drivers/tty/vt/vt.c 	if (!p)
p                 359 drivers/tty/vt/vt.c 	uniscr = p;
p                 360 drivers/tty/vt/vt.c 	p = uniscr->lines + rows;
p                 362 drivers/tty/vt/vt.c 		uniscr->lines[i] = p;
p                 363 drivers/tty/vt/vt.c 		p += cols * sizeof(char32_t);
p                 511 drivers/tty/vt/vt.c 	unsigned short *p;
p                 535 drivers/tty/vt/vt.c 	p = (unsigned short *)vc->vc_origin;
p                 540 drivers/tty/vt/vt.c 			u16 glyph = scr_readw(p++) & mask;
p                 580 drivers/tty/vt/vt.c 		u16 *p = (u16 *)pos;
p                 584 drivers/tty/vt/vt.c 			u16 glyph = scr_readw(p++) & mask;
p                 594 drivers/tty/vt/vt.c 	unsigned short *p;
p                 606 drivers/tty/vt/vt.c 	p = (unsigned short *)vc->vc_origin;
p                 611 drivers/tty/vt/vt.c 			u16 glyph = scr_readw(p++) & mask;
p                 654 drivers/tty/vt/vt.c 	u16 *p;
p                 656 drivers/tty/vt/vt.c 	p = (u16 *) start;
p                 667 drivers/tty/vt/vt.c 		u16 attrib = scr_readw(p) & 0xff00;
p                 669 drivers/tty/vt/vt.c 		u16 *q = p;
p                 671 drivers/tty/vt/vt.c 			if (attrib != (scr_readw(p) & 0xff00)) {
p                 672 drivers/tty/vt/vt.c 				if (p > q)
p                 673 drivers/tty/vt/vt.c 					vc->vc_sw->con_putcs(vc, q, p-q, yy, startx);
p                 675 drivers/tty/vt/vt.c 				q = p;
p                 676 drivers/tty/vt/vt.c 				attrib = scr_readw(p) & 0xff00;
p                 678 drivers/tty/vt/vt.c 			p++;
p                 682 drivers/tty/vt/vt.c 		if (p > q)
p                 683 drivers/tty/vt/vt.c 			vc->vc_sw->con_putcs(vc, q, p-q, yy, startx);
p                 689 drivers/tty/vt/vt.c 			p = (u16 *)start;
p                 762 drivers/tty/vt/vt.c 	unsigned short *p;
p                 767 drivers/tty/vt/vt.c 	p = screenpos(vc, offset, viewed);
p                 769 drivers/tty/vt/vt.c 		vc->vc_sw->con_invert_region(vc, p, count);
p                 771 drivers/tty/vt/vt.c 		u16 *q = p;
p                 800 drivers/tty/vt/vt.c 		do_update_region(vc, (unsigned long) p, count);
p                 826 drivers/tty/vt/vt.c 		unsigned short *p;
p                 827 drivers/tty/vt/vt.c 		p = screenpos(vc, offset, 1);
p                 828 drivers/tty/vt/vt.c 		old = scr_readw(p);
p                 830 drivers/tty/vt/vt.c 		scr_writew(new, p);
p                 842 drivers/tty/vt/vt.c 	unsigned short *p = (unsigned short *) vc->vc_pos;
p                 845 drivers/tty/vt/vt.c 	scr_memmovew(p + nr, p, (vc->vc_cols - vc->vc_x - nr) * 2);
p                 846 drivers/tty/vt/vt.c 	scr_memsetw(p, vc->vc_video_erase_char, nr * 2);
p                 849 drivers/tty/vt/vt.c 		do_update_region(vc, (unsigned long) p,
p                 855 drivers/tty/vt/vt.c 	unsigned short *p = (unsigned short *) vc->vc_pos;
p                 858 drivers/tty/vt/vt.c 	scr_memcpyw(p, p + nr, (vc->vc_cols - vc->vc_x - nr) * 2);
p                 859 drivers/tty/vt/vt.c 	scr_memsetw(p + vc->vc_cols - vc->vc_x - nr, vc->vc_video_erase_char,
p                 863 drivers/tty/vt/vt.c 		do_update_region(vc, (unsigned long) p,
p                 969 drivers/tty/vt/vt.c 	unsigned short *p = (unsigned short *)vc->vc_origin;
p                 973 drivers/tty/vt/vt.c 	for (; count > 0; count--, p++) {
p                 974 drivers/tty/vt/vt.c 		scr_writew((scr_readw(p)&mask) | (vc->vc_video_erase_char & ~mask), p);
p                1809 drivers/tty/vt/vt.c static void respond_string(const char *p, struct tty_port *port)
p                1811 drivers/tty/vt/vt.c 	while (*p) {
p                1812 drivers/tty/vt/vt.c 		tty_insert_flip_char(port, *p, 0);
p                1813 drivers/tty/vt/vt.c 		p++;
p                3055 drivers/tty/vt/vt.c 	char __user *p = (char __user *)arg;
p                3061 drivers/tty/vt/vt.c 	if (get_user(type, p))
p                3069 drivers/tty/vt/vt.c 						 __user *)(p+1), tty);
p                3081 drivers/tty/vt/vt.c 			ret = sel_loadlut(p);
p                3093 drivers/tty/vt/vt.c 			ret = put_user(data, p);
p                3099 drivers/tty/vt/vt.c 			ret = put_user(data, p);
p                3103 drivers/tty/vt/vt.c 			ret = set_vesa_blanking(p);
p                3108 drivers/tty/vt/vt.c 			ret = put_user(data, p);
p                3114 drivers/tty/vt/vt.c 				if (get_user(data, p+1))
p                3127 drivers/tty/vt/vt.c 			if (get_user(lines, (s32 __user *)(p+4))) {
p                4210 drivers/tty/vt/vt.c static int set_vesa_blanking(char __user *p)
p                4214 drivers/tty/vt/vt.c 	if (get_user(mode, p + 1))
p                4684 drivers/tty/vt/vt.c void getconsxy(struct vc_data *vc, unsigned char *p)
p                4687 drivers/tty/vt/vt.c 	p[0] = min(vc->vc_x, 0xFFu);
p                4688 drivers/tty/vt/vt.c 	p[1] = min(vc->vc_y, 0xFFu);
p                4691 drivers/tty/vt/vt.c void putconsxy(struct vc_data *vc, unsigned char *p)
p                4694 drivers/tty/vt/vt.c 	gotoxy(vc, p[0], p[1]);
p                  98 drivers/uio/uio_pruss.c 	struct uio_info *p = gdev->info;
p                 100 drivers/uio/uio_pruss.c 	for (cnt = 0; cnt < MAX_PRUSS_EVT; cnt++, p++) {
p                 101 drivers/uio/uio_pruss.c 		uio_unregister_device(p);
p                 102 drivers/uio/uio_pruss.c 		kfree(p->name);
p                 121 drivers/uio/uio_pruss.c 	struct uio_info *p;
p                 132 drivers/uio/uio_pruss.c 	gdev->info = kcalloc(MAX_PRUSS_EVT, sizeof(*p), GFP_KERNEL);
p                 196 drivers/uio/uio_pruss.c 	for (cnt = 0, p = gdev->info; cnt < MAX_PRUSS_EVT; cnt++, p++) {
p                 197 drivers/uio/uio_pruss.c 		p->mem[0].addr = regs_prussio->start;
p                 198 drivers/uio/uio_pruss.c 		p->mem[0].size = resource_size(regs_prussio);
p                 199 drivers/uio/uio_pruss.c 		p->mem[0].memtype = UIO_MEM_PHYS;
p                 201 drivers/uio/uio_pruss.c 		p->mem[1].addr = gdev->sram_paddr;
p                 202 drivers/uio/uio_pruss.c 		p->mem[1].size = sram_pool_sz;
p                 203 drivers/uio/uio_pruss.c 		p->mem[1].memtype = UIO_MEM_PHYS;
p                 205 drivers/uio/uio_pruss.c 		p->mem[2].addr = gdev->ddr_paddr;
p                 206 drivers/uio/uio_pruss.c 		p->mem[2].size = extram_pool_sz;
p                 207 drivers/uio/uio_pruss.c 		p->mem[2].memtype = UIO_MEM_PHYS;
p                 209 drivers/uio/uio_pruss.c 		p->name = kasprintf(GFP_KERNEL, "pruss_evt%d", cnt);
p                 210 drivers/uio/uio_pruss.c 		p->version = DRV_VERSION;
p                 213 drivers/uio/uio_pruss.c 		p->irq = gdev->hostirq_start + cnt;
p                 214 drivers/uio/uio_pruss.c 		p->handler = pruss_handler;
p                 215 drivers/uio/uio_pruss.c 		p->priv = gdev;
p                 217 drivers/uio/uio_pruss.c 		ret = uio_register_device(dev, p);
p                 219 drivers/uio/uio_pruss.c 			kfree(p->name);
p                 228 drivers/uio/uio_pruss.c 	for (i = 0, p = gdev->info; i < cnt; i++, p++) {
p                 229 drivers/uio/uio_pruss.c 		uio_unregister_device(p);
p                 230 drivers/uio/uio_pruss.c 		kfree(p->name);
p                 263 drivers/usb/atm/ueagle-atm.c #define FW_GET_BYTE(p) (*((__u8 *) (p)))
p                 727 drivers/usb/atm/ueagle-atm.c 	unsigned int i, j, p, pp;
p                 730 drivers/usb/atm/ueagle-atm.c 	p = 1;
p                 733 drivers/usb/atm/ueagle-atm.c 	if (p + 4 * pagecount > len)
p                 738 drivers/usb/atm/ueagle-atm.c 		pageoffset = get_unaligned_le32(dsp + p);
p                 739 drivers/usb/atm/ueagle-atm.c 		p += 4;
p                 776 drivers/usb/atm/ueagle-atm.c 	struct l1_code *p = (struct l1_code *) dsp;
p                 777 drivers/usb/atm/ueagle-atm.c 	unsigned int sum = p->code - dsp;
p                 782 drivers/usb/atm/ueagle-atm.c 	if (strcmp("STRATIPHY ANEXA", p->string_header) != 0 &&
p                 783 drivers/usb/atm/ueagle-atm.c 	    strcmp("STRATIPHY ANEXB", p->string_header) != 0)
p                 788 drivers/usb/atm/ueagle-atm.c 		u8 blockno = p->page_number_to_block_index[i];
p                 798 drivers/usb/atm/ueagle-atm.c 			blockidx = &p->page_header[blockno++];
p                 905 drivers/usb/atm/ueagle-atm.c 	const u8 *p;
p                 920 drivers/usb/atm/ueagle-atm.c 	p = sc->dsp_firm->data;
p                 921 drivers/usb/atm/ueagle-atm.c 	pagecount = FW_GET_BYTE(p);
p                 922 drivers/usb/atm/ueagle-atm.c 	p += 1;
p                 927 drivers/usb/atm/ueagle-atm.c 	p += 4 * pageno;
p                 928 drivers/usb/atm/ueagle-atm.c 	pageoffset = get_unaligned_le32(p);
p                 933 drivers/usb/atm/ueagle-atm.c 	p = sc->dsp_firm->data + pageoffset;
p                 934 drivers/usb/atm/ueagle-atm.c 	blockcount = FW_GET_BYTE(p);
p                 935 drivers/usb/atm/ueagle-atm.c 	p += 1;
p                 945 drivers/usb/atm/ueagle-atm.c 		blockaddr = get_unaligned_le16(p);
p                 946 drivers/usb/atm/ueagle-atm.c 		p += 2;
p                 948 drivers/usb/atm/ueagle-atm.c 		blocksize = get_unaligned_le16(p);
p                 949 drivers/usb/atm/ueagle-atm.c 		p += 2;
p                 960 drivers/usb/atm/ueagle-atm.c 		if (uea_idma_write(sc, p, blocksize))
p                 963 drivers/usb/atm/ueagle-atm.c 		p += blocksize;
p                 979 drivers/usb/atm/ueagle-atm.c 	struct l1_code *p = (struct l1_code *) sc->dsp_firm->data;
p                 980 drivers/usb/atm/ueagle-atm.c 	u8 blockno = p->page_number_to_block_index[pageno];
p                 991 drivers/usb/atm/ueagle-atm.c 		blockidx = &p->page_header[blockno];
p                1029 drivers/usb/atm/ueagle-atm.c 	struct l1_code *p;
p                1042 drivers/usb/atm/ueagle-atm.c 	p = (struct l1_code *) sc->dsp_firm->data;
p                1043 drivers/usb/atm/ueagle-atm.c 	if (pageno >= le16_to_cpu(p->page_header[0].PageNumber)) {
p                1055 drivers/usb/atm/ueagle-atm.c 	       "sending Main DSP page %u\n", p->page_header[0].PageNumber);
p                1057 drivers/usb/atm/ueagle-atm.c 	for (i = 0; i < le16_to_cpu(p->page_header[0].PageNumber); i++) {
p                1058 drivers/usb/atm/ueagle-atm.c 		if (E4_IS_BOOT_PAGE(p->page_header[i].PageSize))
p                1068 drivers/usb/atm/ueagle-atm.c 	bi.dwSize = cpu_to_be32(E4_PAGE_BYTES(p->page_header[0].PageSize));
p                1069 drivers/usb/atm/ueagle-atm.c 	bi.dwAddress = cpu_to_be32(le32_to_cpu(p->page_header[0].PageAddress));
p                  73 drivers/usb/cdns3/drd.h #define CDNS_RID(p)			((p) & GENMASK(15, 0))
p                  76 drivers/usb/cdns3/drd.h #define CDNS_DID(p)			((p) & GENMASK(31, 0))
p                 124 drivers/usb/cdns3/drd.h #define OTGSTS_OTG_NRDY(p)		((p) & OTGSTS_OTG_NRDY_MASK)
p                 131 drivers/usb/cdns3/drd.h #define OTGSTS_STRAP(p)			(((p) & GENMASK(14, 12)) >> 12)
p                 146 drivers/usb/cdns3/drd.h #define OTGSTATE_HOST_STATE(p)		(((p) & OTGSTATE_HOST_STATE_MASK) >> 3)
p                 196 drivers/usb/cdns3/gadget.h #define USB_STS_CFGSTS(p)	((p) & USB_STS_CFGSTS_MASK)
p                 203 drivers/usb/cdns3/gadget.h #define USB_STS_OV(p)		((p) & USB_STS_OV_MASK)
p                 210 drivers/usb/cdns3/gadget.h #define USB_STS_USB3CONS(p)	((p) & USB_STS_USB3CONS_MASK)
p                 218 drivers/usb/cdns3/gadget.h #define USB_STS_DTRANS(p)	((p) & USB_STS_DTRANS_MASK)
p                 228 drivers/usb/cdns3/gadget.h #define USB_STS_USBSPEED(p)	(((p) & USB_STS_USBSPEED_MASK) >> 4)
p                 233 drivers/usb/cdns3/gadget.h #define DEV_UNDEFSPEED(p)	(((p) & USB_STS_USBSPEED_MASK) == (0x0 << 4))
p                 234 drivers/usb/cdns3/gadget.h #define DEV_LOWSPEED(p)		(((p) & USB_STS_USBSPEED_MASK) == USB_STS_LS)
p                 235 drivers/usb/cdns3/gadget.h #define DEV_FULLSPEED(p)	(((p) & USB_STS_USBSPEED_MASK) == USB_STS_FS)
p                 236 drivers/usb/cdns3/gadget.h #define DEV_HIGHSPEED(p)	(((p) & USB_STS_USBSPEED_MASK) == USB_STS_HS)
p                 237 drivers/usb/cdns3/gadget.h #define DEV_SUPERSPEED(p)	(((p) & USB_STS_USBSPEED_MASK) == USB_STS_SS)
p                 244 drivers/usb/cdns3/gadget.h #define USB_STS_ENDIAN(p)	((p) & USB_STS_ENDIAN_MASK)
p                 252 drivers/usb/cdns3/gadget.h #define USB_STS_CLK2OFF(p)	((p) & USB_STS_CLK2OFF_MASK)
p                 260 drivers/usb/cdns3/gadget.h #define USB_STS_CLK3OFF(p)	((p) & USB_STS_CLK3OFF_MASK)
p                 267 drivers/usb/cdns3/gadget.h #define USB_STS_IN_RST(p)	((p) & USB_STS_IN_RST_MASK)
p                 281 drivers/usb/cdns3/gadget.h #define USB_STS_DEVS(p)		((p) & USB_STS_DEVS_MASK)
p                 288 drivers/usb/cdns3/gadget.h #define USB_STS_ADDRESSED(p)	((p) & USB_STS_ADDRESSED_MASK)
p                 295 drivers/usb/cdns3/gadget.h #define USB_STS_L1ENS(p)	((p) & USB_STS_L1ENS_MASK)
p                 302 drivers/usb/cdns3/gadget.h #define USB_STS_VBUSS(p)	((p) & USB_STS_VBUSS_MASK)
p                 311 drivers/usb/cdns3/gadget.h #define DEV_L0_STATE(p)		(((p) & USB_STS_LPMST_MASK) == (0x0 << 18))
p                 312 drivers/usb/cdns3/gadget.h #define DEV_L1_STATE(p)		(((p) & USB_STS_LPMST_MASK) == (0x1 << 18))
p                 313 drivers/usb/cdns3/gadget.h #define DEV_L2_STATE(p)		(((p) & USB_STS_LPMST_MASK) == (0x2 << 18))
p                 314 drivers/usb/cdns3/gadget.h #define DEV_L3_STATE(p)		(((p) & USB_STS_LPMST_MASK) == (0x3 << 18))
p                 321 drivers/usb/cdns3/gadget.h #define USB_STS_USB2CONS(p)	((p) & USB_STS_USB2CONS_MASK)
p                 328 drivers/usb/cdns3/gadget.h #define USB_STS_DISABLE_HS(p)	((p) & USB_STS_DISABLE_HS_MASK)
p                 335 drivers/usb/cdns3/gadget.h #define USB_STS_U1ENS(p)	((p) & USB_STS_U1ENS_MASK)
p                 342 drivers/usb/cdns3/gadget.h #define USB_STS_U2ENS(p)	((p) & USB_STS_U2ENS_MASK)
p                 348 drivers/usb/cdns3/gadget.h #define DEV_LST_U0		(((p) & USB_STS_LST_MASK) == (0x0 << 26))
p                 349 drivers/usb/cdns3/gadget.h #define DEV_LST_U1		(((p) & USB_STS_LST_MASK) == (0x1 << 26))
p                 350 drivers/usb/cdns3/gadget.h #define DEV_LST_U2		(((p) & USB_STS_LST_MASK) == (0x2 << 26))
p                 351 drivers/usb/cdns3/gadget.h #define DEV_LST_U3		(((p) & USB_STS_LST_MASK) == (0x3 << 26))
p                 352 drivers/usb/cdns3/gadget.h #define DEV_LST_DISABLED	(((p) & USB_STS_LST_MASK) == (0x4 << 26))
p                 353 drivers/usb/cdns3/gadget.h #define DEV_LST_RXDETECT	(((p) & USB_STS_LST_MASK) == (0x5 << 26))
p                 354 drivers/usb/cdns3/gadget.h #define DEV_LST_INACTIVE	(((p) & USB_STS_LST_MASK) == (0x6 << 26))
p                 355 drivers/usb/cdns3/gadget.h #define DEV_LST_POLLING		(((p) & USB_STS_LST_MASK) == (0x7 << 26))
p                 356 drivers/usb/cdns3/gadget.h #define DEV_LST_RECOVERY	(((p) & USB_STS_LST_MASK) == (0x8 << 26))
p                 357 drivers/usb/cdns3/gadget.h #define DEV_LST_HOT_RESET	(((p) & USB_STS_LST_MASK) == (0x9 << 26))
p                 358 drivers/usb/cdns3/gadget.h #define DEV_LST_COMP_MODE	(((p) & USB_STS_LST_MASK) == (0xa << 26))
p                 359 drivers/usb/cdns3/gadget.h #define DEV_LST_LB_STATE	(((p) & USB_STS_LST_MASK) == (0xb << 26))
p                 366 drivers/usb/cdns3/gadget.h #define USB_STS_DMAOFF(p)	((p) & USB_STS_DMAOFF_MASK)
p                 373 drivers/usb/cdns3/gadget.h #define USB_STS_ENDIAN2(p)	((p) & USB_STS_ENDIAN2_MASK)
p                 386 drivers/usb/cdns3/gadget.h #define USB_CMD_FADDR(p)	(((p) << 1) & USB_CMD_FADDR_MASK)
p                 393 drivers/usb/cdns3/gadget.h #define USB_STS_TMODE_SEL(p)	(((p) << 10) & USB_STS_TMODE_SEL_MASK)
p                 403 drivers/usb/cdns3/gadget.h #define USB_STS_DNFW_INT(p)	(((p) << 16) & USB_CMD_DNFW_INT_MASK)
p                 409 drivers/usb/cdns3/gadget.h #define USB_STS_DNLTM_BELT(p)	(((p) << 16) & USB_CMD_DNLTM_BELT_MASK)
p                 418 drivers/usb/cdns3/gadget.h #define USB_ITPN(p)		((p) & USB_ITPN_MASK)
p                 423 drivers/usb/cdns3/gadget.h #define USB_LPM_HIRD(p)		((p) & USB_LPM_HIRD_MASK)
p                 531 drivers/usb/cdns3/gadget.h #define EP_SEL_EPNO(p)		((p) & EP_SEL_EPNO_MASK)
p                 535 drivers/usb/cdns3/gadget.h #define select_ep_in(nr)	(EP_SEL_EPNO(p) | EP_SEL_DIR)
p                 536 drivers/usb/cdns3/gadget.h #define select_ep_out		(EP_SEL_EPNO(p))
p                 540 drivers/usb/cdns3/gadget.h #define EP_TRADDR_TRADDR(p)	((p))
p                 552 drivers/usb/cdns3/gadget.h #define EP_CFG_EPTYPE(p)	(((p) << 1)  & EP_CFG_EPTYPE_MASK)
p                 563 drivers/usb/cdns3/gadget.h #define EP_CFG_MAXBURST(p)	(((p) << 8) & EP_CFG_MAXBURST_MASK)
p                 566 drivers/usb/cdns3/gadget.h #define EP_CFG_MULT(p)		(((p) << 14) & EP_CFG_MULT_MASK)
p                 569 drivers/usb/cdns3/gadget.h #define EP_CFG_MAXPKTSIZE(p)	(((p) << 16) & EP_CFG_MAXPKTSIZE_MASK)
p                 572 drivers/usb/cdns3/gadget.h #define EP_CFG_BUFFERING(p)	(((p) << 27) & EP_CFG_BUFFERING_MASK)
p                 600 drivers/usb/cdns3/gadget.h #define EP_CMD_TDL_SET(p)	(((p) << 9) & EP_CMD_TDL_MASK)
p                 601 drivers/usb/cdns3/gadget.h #define EP_CMD_TDL_GET(p)	(((p) & EP_CMD_TDL_MASK) >> 9)
p                 605 drivers/usb/cdns3/gadget.h #define EP_CMD_ERDY_SID(p)	(((p) << 16) & EP_CMD_ERDY_SID_MASK)
p                 611 drivers/usb/cdns3/gadget.h #define EP_STS_STALL(p)		((p) & BIT(1))
p                 629 drivers/usb/cdns3/gadget.h #define EP_STS_BUFFEMPTY(p)	((p) & BIT(10))
p                 631 drivers/usb/cdns3/gadget.h #define EP_STS_CCS(p)		((p) & BIT(11))
p                 641 drivers/usb/cdns3/gadget.h #define EP_STS_HOSTPP(p)	((p) & BIT(16))
p                 644 drivers/usb/cdns3/gadget.h #define EP_STS_SPSMST_DISABLED(p)	(((p) & EP_STS_SPSMST_MASK) >> 17)
p                 645 drivers/usb/cdns3/gadget.h #define EP_STS_SPSMST_IDLE(p)		(((p) & EP_STS_SPSMST_MASK) >> 17)
p                 646 drivers/usb/cdns3/gadget.h #define EP_STS_SPSMST_START_STREAM(p)	(((p) & EP_STS_SPSMST_MASK) >> 17)
p                 647 drivers/usb/cdns3/gadget.h #define EP_STS_SPSMST_MOVE_DATA(p)	(((p) & EP_STS_SPSMST_MASK) >> 17)
p                 652 drivers/usb/cdns3/gadget.h #define EP_STS_OUTQ_NO(p)	(((p) & EP_STS_OUTQ_NO_MASK) >> 24)
p                 655 drivers/usb/cdns3/gadget.h #define EP_STS_OUTQ_VAL(p)	((p) & EP_STS_OUTQ_VAL_MASK)
p                 662 drivers/usb/cdns3/gadget.h #define EP_STS_SID(p)		((p) & EP_STS_SID_MASK)
p                 751 drivers/usb/cdns3/gadget.h #define DEV_SFR_TYPE_OCP(p)	(((p) & USB_CAP1_SFR_TYPE_MASK) == 0x0)
p                 752 drivers/usb/cdns3/gadget.h #define DEV_SFR_TYPE_AHB(p)	(((p) & USB_CAP1_SFR_TYPE_MASK) == 0x1)
p                 753 drivers/usb/cdns3/gadget.h #define DEV_SFR_TYPE_PLB(p)	(((p) & USB_CAP1_SFR_TYPE_MASK) == 0x2)
p                 754 drivers/usb/cdns3/gadget.h #define DEV_SFR_TYPE_AXI(p)	(((p) & USB_CAP1_SFR_TYPE_MASK) == 0x3)
p                 765 drivers/usb/cdns3/gadget.h #define DEV_SFR_WIDTH_8(p)	(((p) & USB_CAP1_SFR_WIDTH_MASK) == (0x0 << 4))
p                 766 drivers/usb/cdns3/gadget.h #define DEV_SFR_WIDTH_16(p)	(((p) & USB_CAP1_SFR_WIDTH_MASK) == (0x1 << 4))
p                 767 drivers/usb/cdns3/gadget.h #define DEV_SFR_WIDTH_32(p)	(((p) & USB_CAP1_SFR_WIDTH_MASK) == (0x2 << 4))
p                 768 drivers/usb/cdns3/gadget.h #define DEV_SFR_WIDTH_64(p)	(((p) & USB_CAP1_SFR_WIDTH_MASK) == (0x3 << 4))
p                 779 drivers/usb/cdns3/gadget.h #define DEV_DMA_TYPE_OCP(p)	(((p) & USB_CAP1_DMA_TYPE_MASK) == (0x0 << 8))
p                 780 drivers/usb/cdns3/gadget.h #define DEV_DMA_TYPE_AHB(p)	(((p) & USB_CAP1_DMA_TYPE_MASK) == (0x1 << 8))
p                 781 drivers/usb/cdns3/gadget.h #define DEV_DMA_TYPE_PLB(p)	(((p) & USB_CAP1_DMA_TYPE_MASK) == (0x2 << 8))
p                 782 drivers/usb/cdns3/gadget.h #define DEV_DMA_TYPE_AXI(p)	(((p) & USB_CAP1_DMA_TYPE_MASK) == (0x3 << 8))
p                 793 drivers/usb/cdns3/gadget.h #define DEV_DMA_WIDTH_32(p)	(((p) & USB_CAP1_DMA_WIDTH_MASK) == (0x2 << 12))
p                 794 drivers/usb/cdns3/gadget.h #define DEV_DMA_WIDTH_64(p)	(((p) & USB_CAP1_DMA_WIDTH_MASK) == (0x3 << 12))
p                 803 drivers/usb/cdns3/gadget.h #define DEV_U3PHY_PIPE(p) (((p) & USB_CAP1_U3PHY_TYPE_MASK) == (0x0 << 16))
p                 804 drivers/usb/cdns3/gadget.h #define DEV_U3PHY_RMMI(p) (((p) & USB_CAP1_U3PHY_TYPE_MASK) == (0x1 << 16))
p                 817 drivers/usb/cdns3/gadget.h #define DEV_U3PHY_WIDTH_8(p) \
p                 818 drivers/usb/cdns3/gadget.h 	(((p) & USB_CAP1_U3PHY_WIDTH_MASK) == (0x0 << 20))
p                 819 drivers/usb/cdns3/gadget.h #define DEV_U3PHY_WIDTH_16(p) \
p                 820 drivers/usb/cdns3/gadget.h 	(((p) & USB_CAP1_U3PHY_WIDTH_MASK) == (0x1 << 16))
p                 821 drivers/usb/cdns3/gadget.h #define DEV_U3PHY_WIDTH_32(p) \
p                 822 drivers/usb/cdns3/gadget.h 	(((p) & USB_CAP1_U3PHY_WIDTH_MASK) == (0x2 << 20))
p                 823 drivers/usb/cdns3/gadget.h #define DEV_U3PHY_WIDTH_64(p) \
p                 824 drivers/usb/cdns3/gadget.h 	(((p) & USB_CAP1_U3PHY_WIDTH_MASK) == (0x3 << 16))
p                 832 drivers/usb/cdns3/gadget.h #define USB_CAP1_U2PHY_EN(p)	((p) & BIT(24))
p                 839 drivers/usb/cdns3/gadget.h #define DEV_U2PHY_ULPI(p)	((p) & BIT(25))
p                 847 drivers/usb/cdns3/gadget.h #define DEV_U2PHY_WIDTH_16(p)	((p) & BIT(26))
p                 853 drivers/usb/cdns3/gadget.h #define USB_CAP1_OTG_READY(p)	((p) & BIT(27))
p                 860 drivers/usb/cdns3/gadget.h #define USB_CAP1_TDL_FROM_TRB(p)	((p) & BIT(28))
p                 868 drivers/usb/cdns3/gadget.h #define USB_CAP2_ACTUAL_MEM_SIZE(p) ((p) & GENMASK(7, 0))
p                 883 drivers/usb/cdns3/gadget.h #define USB_CAP2_MAX_MEM_SIZE(p) ((p) & GENMASK(11, 8))
p                 896 drivers/usb/cdns3/gadget.h #define GET_DEV_BASE_VERSION(p) ((p) & GENMASK(23, 0))
p                 898 drivers/usb/cdns3/gadget.h #define GET_DEV_CUSTOM_VERSION(p) ((p) & GENMASK(31, 24))
p                 910 drivers/usb/cdns3/gadget.h #define DBG_LINK1_LFPS_MIN_DET_U1_EXIT(p)	((p) & GENMASK(7, 0))
p                 916 drivers/usb/cdns3/gadget.h #define DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(p)	(((p) << 8) & GENMASK(15, 8))
p                 927 drivers/usb/cdns3/gadget.h #define DBG_LINK1_LFPS_GEN_PING(p)		(((p) << 17) & GENMASK(21, 17))
p                 955 drivers/usb/cdns3/gadget.h #define DMA_AXI_CTRL_MARPROT(p) ((p) & GENMASK(2, 0))
p                 957 drivers/usb/cdns3/gadget.h #define DMA_AXI_CTRL_MAWPROT(p) (((p) & GENMASK(2, 0)) << 16)
p                1008 drivers/usb/cdns3/gadget.h #define TRB_TYPE(p)		((p) << 10)
p                1009 drivers/usb/cdns3/gadget.h #define TRB_FIELD_TO_TYPE(p)	(((p) & TRB_TYPE_BITMASK) >> 10)
p                1045 drivers/usb/cdns3/gadget.h #define TRB_STREAM_ID(p)		((p) << 16)
p                1046 drivers/usb/cdns3/gadget.h #define TRB_FIELD_TO_STREAMID(p)	(((p) & TRB_STREAM_ID_BITMASK) >> 16)
p                1049 drivers/usb/cdns3/gadget.h #define TRB_TDL_HS_SIZE(p)	(((p) << 16) & GENMASK(31, 16))
p                1050 drivers/usb/cdns3/gadget.h #define TRB_TDL_HS_SIZE_GET(p)	(((p) & GENMASK(31, 16)) >> 16)
p                1053 drivers/usb/cdns3/gadget.h #define TRB_LEN(p)		((p) & GENMASK(16, 0))
p                1056 drivers/usb/cdns3/gadget.h #define TRB_TDL_SS_SIZE(p)	(((p) << 17) & GENMASK(23, 17))
p                1057 drivers/usb/cdns3/gadget.h #define TRB_TDL_SS_SIZE_GET(p)	(((p) & GENMASK(23, 17)) >> 17)
p                1060 drivers/usb/cdns3/gadget.h #define TRB_BURST_LEN(p)	(((p) << 24) & GENMASK(31, 24))
p                1061 drivers/usb/cdns3/gadget.h #define TRB_BURST_LEN_GET(p)	(((p) & GENMASK(31, 24)) >> 24)
p                1064 drivers/usb/cdns3/gadget.h #define TRB_BUFFER(p)		((p) & GENMASK(31, 0))
p                 799 drivers/usb/chipidea/core.c 		struct pinctrl_state *p;
p                 801 drivers/usb/chipidea/core.c 		p = pinctrl_lookup_state(platdata->pctl, "default");
p                 802 drivers/usb/chipidea/core.c 		if (!IS_ERR(p))
p                 803 drivers/usb/chipidea/core.c 			platdata->pins_default = p;
p                 805 drivers/usb/chipidea/core.c 		p = pinctrl_lookup_state(platdata->pctl, "host");
p                 806 drivers/usb/chipidea/core.c 		if (!IS_ERR(p))
p                 807 drivers/usb/chipidea/core.c 			platdata->pins_host = p;
p                 809 drivers/usb/chipidea/core.c 		p = pinctrl_lookup_state(platdata->pctl, "device");
p                 810 drivers/usb/chipidea/core.c 		if (!IS_ERR(p))
p                 811 drivers/usb/chipidea/core.c 			platdata->pins_device = p;
p                 171 drivers/usb/class/usblp.c 	int p;
p                 179 drivers/usb/class/usblp.c 	for (p = USBLP_FIRST_PROTOCOL; p <= USBLP_LAST_PROTOCOL; p++) {
p                 180 drivers/usb/class/usblp.c 		dev_dbg(dev, "protocol[%d].alt_setting=%d\n", p,
p                 181 drivers/usb/class/usblp.c 			usblp->protocol[p].alt_setting);
p                 182 drivers/usb/class/usblp.c 		dev_dbg(dev, "protocol[%d].epwrite=%p\n", p,
p                 183 drivers/usb/class/usblp.c 			usblp->protocol[p].epwrite);
p                 184 drivers/usb/class/usblp.c 		dev_dbg(dev, "protocol[%d].epread=%p\n", p,
p                 185 drivers/usb/class/usblp.c 			usblp->protocol[p].epread);
p                1236 drivers/usb/class/usblp.c 	int p, i;
p                1241 drivers/usb/class/usblp.c 	for (p = 0; p < USBLP_MAX_PROTOCOLS; p++)
p                1242 drivers/usb/class/usblp.c 		usblp->protocol[p].alt_setting = -1;
p                 665 drivers/usb/core/devio.c 	struct list_head *p, *q, hitlist;
p                 670 drivers/usb/core/devio.c 	list_for_each_safe(p, q, &ps->async_pending)
p                 671 drivers/usb/core/devio.c 		if (ifnum == list_entry(p, struct async, asynclist)->ifnum)
p                 672 drivers/usb/core/devio.c 			list_move_tail(p, &hitlist);
p                2025 drivers/usb/core/devio.c 	struct usbdevfs_ctrltransfer __user *p;
p                2027 drivers/usb/core/devio.c 	p = compat_alloc_user_space(sizeof(*p));
p                2028 drivers/usb/core/devio.c 	if (copy_in_user(p, p32, (sizeof(*p32) - sizeof(compat_caddr_t))) ||
p                2030 drivers/usb/core/devio.c 	    put_user(compat_ptr(udata), &p->data))
p                2032 drivers/usb/core/devio.c 	return proc_control(ps, p);
p                2038 drivers/usb/core/devio.c 	struct usbdevfs_bulktransfer __user *p;
p                2042 drivers/usb/core/devio.c 	p = compat_alloc_user_space(sizeof(*p));
p                2044 drivers/usb/core/devio.c 	if (get_user(n, &p32->ep) || put_user(n, &p->ep) ||
p                2045 drivers/usb/core/devio.c 	    get_user(n, &p32->len) || put_user(n, &p->len) ||
p                2046 drivers/usb/core/devio.c 	    get_user(n, &p32->timeout) || put_user(n, &p->timeout) ||
p                2047 drivers/usb/core/devio.c 	    get_user(addr, &p32->data) || put_user(compat_ptr(addr), &p->data))
p                2050 drivers/usb/core/devio.c 	return proc_bulk(ps, p);
p                2487 drivers/usb/core/devio.c 				void __user *p)
p                2503 drivers/usb/core/devio.c 		ret = proc_reapurb(ps, p);
p                2508 drivers/usb/core/devio.c 		ret = proc_reapurbnonblock(ps, p);
p                2514 drivers/usb/core/devio.c 		ret = proc_reapurb_compat(ps, p);
p                2519 drivers/usb/core/devio.c 		ret = proc_reapurbnonblock_compat(ps, p);
p                2532 drivers/usb/core/devio.c 		ret = proc_control(ps, p);
p                2539 drivers/usb/core/devio.c 		ret = proc_bulk(ps, p);
p                2546 drivers/usb/core/devio.c 		ret = proc_resetep(ps, p);
p                2558 drivers/usb/core/devio.c 		ret = proc_clearhalt(ps, p);
p                2565 drivers/usb/core/devio.c 		ret = proc_getdriver(ps, p);
p                2570 drivers/usb/core/devio.c 		ret = proc_connectinfo(ps, p);
p                2575 drivers/usb/core/devio.c 		ret = proc_setintf(ps, p);
p                2580 drivers/usb/core/devio.c 		ret = proc_setconfig(ps, p);
p                2585 drivers/usb/core/devio.c 		ret = proc_submiturb(ps, p);
p                2593 drivers/usb/core/devio.c 		ret = proc_control_compat(ps, p);
p                2600 drivers/usb/core/devio.c 		ret = proc_bulk_compat(ps, p);
p                2607 drivers/usb/core/devio.c 		ret = proc_disconnectsignal_compat(ps, p);
p                2612 drivers/usb/core/devio.c 		ret = proc_submiturb_compat(ps, p);
p                2619 drivers/usb/core/devio.c 		ret = proc_ioctl_compat(ps, ptr_to_compat(p));
p                2624 drivers/usb/core/devio.c 		snoop(&dev->dev, "%s: DISCARDURB %pK\n", __func__, p);
p                2625 drivers/usb/core/devio.c 		ret = proc_unlinkurb(ps, p);
p                2630 drivers/usb/core/devio.c 		ret = proc_disconnectsignal(ps, p);
p                2635 drivers/usb/core/devio.c 		ret = proc_claiminterface(ps, p);
p                2640 drivers/usb/core/devio.c 		ret = proc_releaseinterface(ps, p);
p                2645 drivers/usb/core/devio.c 		ret = proc_ioctl_default(ps, p);
p                2650 drivers/usb/core/devio.c 		ret = proc_claim_port(ps, p);
p                2655 drivers/usb/core/devio.c 		ret = proc_release_port(ps, p);
p                2658 drivers/usb/core/devio.c 		ret = proc_get_capabilities(ps, p);
p                2661 drivers/usb/core/devio.c 		ret = proc_disconnect_claim(ps, p);
p                2664 drivers/usb/core/devio.c 		ret = proc_alloc_streams(ps, p);
p                2667 drivers/usb/core/devio.c 		ret = proc_free_streams(ps, p);
p                2670 drivers/usb/core/devio.c 		ret = proc_drop_privileges(ps, p);
p                2689 drivers/usb/core/devio.c 		ret = proc_conninfo_ex(ps, p, _IOC_SIZE(cmd));
p                  88 drivers/usb/core/port.c 	const char *p;
p                  92 drivers/usb/core/port.c 			p = "u1_u2";
p                  94 drivers/usb/core/port.c 			p = "u1";
p                  97 drivers/usb/core/port.c 			p = "u2";
p                  99 drivers/usb/core/port.c 			p = "0";
p                 102 drivers/usb/core/port.c 	return sprintf(buf, "%s\n", p);
p                 445 drivers/usb/core/port.c static int match_location(struct usb_device *peer_hdev, void *p)
p                 449 drivers/usb/core/port.c 	struct usb_port *port_dev = p, *peer;
p                  30 drivers/usb/core/quirks.c 	char *p, *field;
p                  66 drivers/usb/core/quirks.c 	for (i = 0, p = (char *)val; p && *p;) {
p                  68 drivers/usb/core/quirks.c 		field = strsep(&p, ":");
p                  75 drivers/usb/core/quirks.c 		field = strsep(&p, ":");
p                  82 drivers/usb/core/quirks.c 		field = strsep(&p, ",");
p                 460 drivers/usb/core/sysfs.c 	const char *p = auto_string;
p                 464 drivers/usb/core/sysfs.c 		p = on_string;
p                 465 drivers/usb/core/sysfs.c 	return sprintf(buf, "%s\n", p);
p                 506 drivers/usb/core/sysfs.c 	const char *p;
p                 509 drivers/usb/core/sysfs.c 		p = "enabled";
p                 511 drivers/usb/core/sysfs.c 		p = "disabled";
p                 513 drivers/usb/core/sysfs.c 	return sprintf(buf, "%s\n", p);
p                 598 drivers/usb/core/sysfs.c 	const char *p;
p                 606 drivers/usb/core/sysfs.c 		p = "enabled";
p                 608 drivers/usb/core/sysfs.c 		p = "disabled";
p                 612 drivers/usb/core/sysfs.c 	return sprintf(buf, "%s\n", p);
p                 620 drivers/usb/core/sysfs.c 	const char *p;
p                 628 drivers/usb/core/sysfs.c 		p = "enabled";
p                 630 drivers/usb/core/sysfs.c 		p = "disabled";
p                 634 drivers/usb/core/sysfs.c 	return sprintf(buf, "%s\n", p);
p                 669 drivers/usb/dwc2/debugfs.c 	struct dwc2_core_params *p = &hsotg->params;
p                 672 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, otg_cap);
p                 673 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, dma_desc_enable);
p                 674 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, dma_desc_fs_enable);
p                 675 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, speed);
p                 676 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, enable_dynamic_fifo);
p                 677 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, en_multiple_tx_fifo);
p                 678 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, host_rx_fifo_size);
p                 679 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, host_nperio_tx_fifo_size);
p                 680 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, host_perio_tx_fifo_size);
p                 681 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, max_transfer_size);
p                 682 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, max_packet_count);
p                 683 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, host_channels);
p                 684 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, phy_type);
p                 685 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, phy_utmi_width);
p                 686 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, phy_ulpi_ddr);
p                 687 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, phy_ulpi_ext_vbus);
p                 688 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, i2c_enable);
p                 689 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, ipg_isoc_en);
p                 690 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, ulpi_fs_ls);
p                 691 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, host_support_fs_ls_low_power);
p                 692 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, host_ls_low_power_phy_clk);
p                 693 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, ts_dline);
p                 694 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, reload_ctl);
p                 695 drivers/usb/dwc2/debugfs.c 	print_param_hex(seq, p, ahbcfg);
p                 696 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, uframe_sched);
p                 697 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, external_id_pin_ctl);
p                 698 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, power_down);
p                 699 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, lpm);
p                 700 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, lpm_clock_gating);
p                 701 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, besl);
p                 702 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, hird_threshold_en);
p                 703 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, hird_threshold);
p                 704 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, service_interval);
p                 705 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, host_dma);
p                 706 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, g_dma);
p                 707 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, g_dma_desc);
p                 708 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, g_rx_fifo_size);
p                 709 drivers/usb/dwc2/debugfs.c 	print_param(seq, p, g_np_tx_fifo_size);
p                 715 drivers/usb/dwc2/debugfs.c 		seq_printf(seq, "%-30s: %d\n", str, p->g_tx_fifo_size[i]);
p                3966 drivers/usb/dwc2/hcd.c 	struct wrapper_priv_data *p;
p                3968 drivers/usb/dwc2/hcd.c 	p = (struct wrapper_priv_data *)&hcd->hcd_priv;
p                3969 drivers/usb/dwc2/hcd.c 	return p->hsotg;
p                  44 drivers/usb/dwc2/params.c 	struct dwc2_core_params *p = &hsotg->params;
p                  46 drivers/usb/dwc2/params.c 	p->host_rx_fifo_size = 774;
p                  47 drivers/usb/dwc2/params.c 	p->max_transfer_size = 65535;
p                  48 drivers/usb/dwc2/params.c 	p->max_packet_count = 511;
p                  49 drivers/usb/dwc2/params.c 	p->ahbcfg = 0x10;
p                  54 drivers/usb/dwc2/params.c 	struct dwc2_core_params *p = &hsotg->params;
p                  56 drivers/usb/dwc2/params.c 	p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
p                  57 drivers/usb/dwc2/params.c 	p->speed = DWC2_SPEED_PARAM_HIGH;
p                  58 drivers/usb/dwc2/params.c 	p->host_rx_fifo_size = 512;
p                  59 drivers/usb/dwc2/params.c 	p->host_nperio_tx_fifo_size = 512;
p                  60 drivers/usb/dwc2/params.c 	p->host_perio_tx_fifo_size = 512;
p                  61 drivers/usb/dwc2/params.c 	p->max_transfer_size = 65535;
p                  62 drivers/usb/dwc2/params.c 	p->max_packet_count = 511;
p                  63 drivers/usb/dwc2/params.c 	p->host_channels = 16;
p                  64 drivers/usb/dwc2/params.c 	p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
p                  65 drivers/usb/dwc2/params.c 	p->phy_utmi_width = 8;
p                  66 drivers/usb/dwc2/params.c 	p->i2c_enable = false;
p                  67 drivers/usb/dwc2/params.c 	p->reload_ctl = false;
p                  68 drivers/usb/dwc2/params.c 	p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
p                  70 drivers/usb/dwc2/params.c 	p->change_speed_quirk = true;
p                  71 drivers/usb/dwc2/params.c 	p->power_down = false;
p                  76 drivers/usb/dwc2/params.c 	struct dwc2_core_params *p = &hsotg->params;
p                  78 drivers/usb/dwc2/params.c 	p->power_down = 0;
p                  79 drivers/usb/dwc2/params.c 	p->phy_utmi_width = 8;
p                  84 drivers/usb/dwc2/params.c 	struct dwc2_core_params *p = &hsotg->params;
p                  86 drivers/usb/dwc2/params.c 	p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
p                  87 drivers/usb/dwc2/params.c 	p->host_rx_fifo_size = 525;
p                  88 drivers/usb/dwc2/params.c 	p->host_nperio_tx_fifo_size = 128;
p                  89 drivers/usb/dwc2/params.c 	p->host_perio_tx_fifo_size = 256;
p                  90 drivers/usb/dwc2/params.c 	p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
p                  92 drivers/usb/dwc2/params.c 	p->power_down = 0;
p                  97 drivers/usb/dwc2/params.c 	struct dwc2_core_params *p = &hsotg->params;
p                  99 drivers/usb/dwc2/params.c 	p->otg_cap = 2;
p                 100 drivers/usb/dwc2/params.c 	p->host_rx_fifo_size = 288;
p                 101 drivers/usb/dwc2/params.c 	p->host_nperio_tx_fifo_size = 128;
p                 102 drivers/usb/dwc2/params.c 	p->host_perio_tx_fifo_size = 96;
p                 103 drivers/usb/dwc2/params.c 	p->max_transfer_size = 65535;
p                 104 drivers/usb/dwc2/params.c 	p->max_packet_count = 511;
p                 105 drivers/usb/dwc2/params.c 	p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
p                 111 drivers/usb/dwc2/params.c 	struct dwc2_core_params *p = &hsotg->params;
p                 113 drivers/usb/dwc2/params.c 	p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
p                 114 drivers/usb/dwc2/params.c 	p->speed = DWC2_SPEED_PARAM_HIGH;
p                 115 drivers/usb/dwc2/params.c 	p->host_rx_fifo_size = 512;
p                 116 drivers/usb/dwc2/params.c 	p->host_nperio_tx_fifo_size = 500;
p                 117 drivers/usb/dwc2/params.c 	p->host_perio_tx_fifo_size = 500;
p                 118 drivers/usb/dwc2/params.c 	p->host_channels = 16;
p                 119 drivers/usb/dwc2/params.c 	p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
p                 120 drivers/usb/dwc2/params.c 	p->ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
p                 122 drivers/usb/dwc2/params.c 	p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
p                 127 drivers/usb/dwc2/params.c 	struct dwc2_core_params *p = &hsotg->params;
p                 129 drivers/usb/dwc2/params.c 	p->lpm = false;
p                 130 drivers/usb/dwc2/params.c 	p->lpm_clock_gating = false;
p                 131 drivers/usb/dwc2/params.c 	p->besl = false;
p                 132 drivers/usb/dwc2/params.c 	p->hird_threshold_en = false;
p                 137 drivers/usb/dwc2/params.c 	struct dwc2_core_params *p = &hsotg->params;
p                 139 drivers/usb/dwc2/params.c 	p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT;
p                 144 drivers/usb/dwc2/params.c 	struct dwc2_core_params *p = &hsotg->params;
p                 146 drivers/usb/dwc2/params.c 	p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
p                 147 drivers/usb/dwc2/params.c 	p->speed = DWC2_SPEED_PARAM_FULL;
p                 148 drivers/usb/dwc2/params.c 	p->host_rx_fifo_size = 128;
p                 149 drivers/usb/dwc2/params.c 	p->host_nperio_tx_fifo_size = 96;
p                 150 drivers/usb/dwc2/params.c 	p->host_perio_tx_fifo_size = 96;
p                 151 drivers/usb/dwc2/params.c 	p->max_packet_count = 256;
p                 152 drivers/usb/dwc2/params.c 	p->phy_type = DWC2_PHY_TYPE_PARAM_FS;
p                 153 drivers/usb/dwc2/params.c 	p->i2c_enable = false;
p                 154 drivers/usb/dwc2/params.c 	p->activate_stm_fs_transceiver = true;
p                 159 drivers/usb/dwc2/params.c 	struct dwc2_core_params *p = &hsotg->params;
p                 161 drivers/usb/dwc2/params.c 	p->host_rx_fifo_size = 622;
p                 162 drivers/usb/dwc2/params.c 	p->host_nperio_tx_fifo_size = 128;
p                 163 drivers/usb/dwc2/params.c 	p->host_perio_tx_fifo_size = 256;
p                 271 drivers/usb/dwc2/params.c 	struct dwc2_core_params *p = &hsotg->params;
p                 278 drivers/usb/dwc2/params.c 	memset(p->g_tx_fifo_size, 0, sizeof(p->g_tx_fifo_size));
p                 281 drivers/usb/dwc2/params.c 		p->g_tx_fifo_size[i] = depth_average;
p                 300 drivers/usb/dwc2/params.c 	struct dwc2_core_params *p = &hsotg->params;
p                 302 drivers/usb/dwc2/params.c 	p->lpm = hsotg->hw_params.lpm_mode;
p                 303 drivers/usb/dwc2/params.c 	if (p->lpm) {
p                 304 drivers/usb/dwc2/params.c 		p->lpm_clock_gating = true;
p                 305 drivers/usb/dwc2/params.c 		p->besl = true;
p                 306 drivers/usb/dwc2/params.c 		p->hird_threshold_en = true;
p                 307 drivers/usb/dwc2/params.c 		p->hird_threshold = 4;
p                 309 drivers/usb/dwc2/params.c 		p->lpm_clock_gating = false;
p                 310 drivers/usb/dwc2/params.c 		p->besl = false;
p                 311 drivers/usb/dwc2/params.c 		p->hird_threshold_en = false;
p                 325 drivers/usb/dwc2/params.c 	struct dwc2_core_params *p = &hsotg->params;
p                 334 drivers/usb/dwc2/params.c 	p->phy_ulpi_ddr = false;
p                 335 drivers/usb/dwc2/params.c 	p->phy_ulpi_ext_vbus = false;
p                 337 drivers/usb/dwc2/params.c 	p->enable_dynamic_fifo = hw->enable_dynamic_fifo;
p                 338 drivers/usb/dwc2/params.c 	p->en_multiple_tx_fifo = hw->en_multiple_tx_fifo;
p                 339 drivers/usb/dwc2/params.c 	p->i2c_enable = hw->i2c_enable;
p                 340 drivers/usb/dwc2/params.c 	p->acg_enable = hw->acg_enable;
p                 341 drivers/usb/dwc2/params.c 	p->ulpi_fs_ls = false;
p                 342 drivers/usb/dwc2/params.c 	p->ts_dline = false;
p                 343 drivers/usb/dwc2/params.c 	p->reload_ctl = (hw->snpsid >= DWC2_CORE_REV_2_92a);
p                 344 drivers/usb/dwc2/params.c 	p->uframe_sched = true;
p                 345 drivers/usb/dwc2/params.c 	p->external_id_pin_ctl = false;
p                 346 drivers/usb/dwc2/params.c 	p->ipg_isoc_en = false;
p                 347 drivers/usb/dwc2/params.c 	p->service_interval = false;
p                 348 drivers/usb/dwc2/params.c 	p->max_packet_count = hw->max_packet_count;
p                 349 drivers/usb/dwc2/params.c 	p->max_transfer_size = hw->max_transfer_size;
p                 350 drivers/usb/dwc2/params.c 	p->ahbcfg = GAHBCFG_HBSTLEN_INCR << GAHBCFG_HBSTLEN_SHIFT;
p                 351 drivers/usb/dwc2/params.c 	p->ref_clk_per = 33333;
p                 352 drivers/usb/dwc2/params.c 	p->sof_cnt_wkup_alert = 100;
p                 356 drivers/usb/dwc2/params.c 		p->host_dma = dma_capable;
p                 357 drivers/usb/dwc2/params.c 		p->dma_desc_enable = false;
p                 358 drivers/usb/dwc2/params.c 		p->dma_desc_fs_enable = false;
p                 359 drivers/usb/dwc2/params.c 		p->host_support_fs_ls_low_power = false;
p                 360 drivers/usb/dwc2/params.c 		p->host_ls_low_power_phy_clk = false;
p                 361 drivers/usb/dwc2/params.c 		p->host_channels = hw->host_channels;
p                 362 drivers/usb/dwc2/params.c 		p->host_rx_fifo_size = hw->rx_fifo_size;
p                 363 drivers/usb/dwc2/params.c 		p->host_nperio_tx_fifo_size = hw->host_nperio_tx_fifo_size;
p                 364 drivers/usb/dwc2/params.c 		p->host_perio_tx_fifo_size = hw->host_perio_tx_fifo_size;
p                 369 drivers/usb/dwc2/params.c 		p->g_dma = dma_capable;
p                 370 drivers/usb/dwc2/params.c 		p->g_dma_desc = hw->dma_desc_enable;
p                 381 drivers/usb/dwc2/params.c 		p->g_rx_fifo_size = 2048;
p                 382 drivers/usb/dwc2/params.c 		p->g_np_tx_fifo_size = 1024;
p                 396 drivers/usb/dwc2/params.c 	struct dwc2_core_params *p = &hsotg->params;
p                 402 drivers/usb/dwc2/params.c 					 &p->g_rx_fifo_size);
p                 405 drivers/usb/dwc2/params.c 					 &p->g_np_tx_fifo_size);
p                 410 drivers/usb/dwc2/params.c 			memset(p->g_tx_fifo_size, 0,
p                 411 drivers/usb/dwc2/params.c 			       sizeof(p->g_tx_fifo_size));
p                 414 drivers/usb/dwc2/params.c 						       &p->g_tx_fifo_size[1],
p                 420 drivers/usb/dwc2/params.c 		p->oc_disable = true;
p                 619 drivers/usb/dwc2/params.c 	struct dwc2_core_params *p = &hsotg->params;
p                 651 drivers/usb/dwc2/params.c 		CHECK_BOOL(dma_desc_enable, p->host_dma);
p                 652 drivers/usb/dwc2/params.c 		CHECK_BOOL(dma_desc_fs_enable, p->dma_desc_enable);
p                 654 drivers/usb/dwc2/params.c 			   p->phy_type == DWC2_PHY_TYPE_PARAM_FS);
p                 672 drivers/usb/dwc2/params.c 		CHECK_BOOL(g_dma_desc, (p->g_dma && hw->dma_desc_enable));
p                 851 drivers/usb/dwc3/core.h #define DWC3_NUM_EPS(p)		(((p)->hwparams3 &		\
p                 853 drivers/usb/dwc3/core.h #define DWC3_NUM_IN_EPS(p)	(((p)->hwparams3 &		\
p                 219 drivers/usb/dwc3/dwc3-pci.c 	struct property_entry *p = (struct property_entry *)id->driver_data;
p                 262 drivers/usb/dwc3/dwc3-pci.c 	ret = platform_device_add_properties(dwc->dwc3, p);
p                 107 drivers/usb/early/ehci-dbgp.c #define EARLY_HC_LENGTH(p)	(0x00ff & (p)) /* bits 7 : 0 */
p                  35 drivers/usb/early/xhci-dbc.h #define DEBUG_MAX_BURST(p)	(((p) >> 16) & 0xff)
p                  44 drivers/usb/early/xhci-dbc.h #define DCST_DEBUG_PORT(p)	(((p) >> 24) & 0xff)
p                 217 drivers/usb/early/xhci-dbc.h #define DOOR_BELL_TARGET(p)	(((p) & 0xff) << 8)
p                  79 drivers/usb/gadget/function/f_acm.c static inline struct f_acm *port_to_acm(struct gserial *p)
p                  81 drivers/usb/gadget/function/f_acm.c 	return container_of(p, struct f_acm, port);
p                1185 drivers/usb/gadget/function/f_fs.c 	struct ffs_io_data io_data, *p = &io_data;
p                1191 drivers/usb/gadget/function/f_fs.c 		p = kzalloc(sizeof(io_data), GFP_KERNEL);
p                1192 drivers/usb/gadget/function/f_fs.c 		if (unlikely(!p))
p                1194 drivers/usb/gadget/function/f_fs.c 		p->aio = true;
p                1196 drivers/usb/gadget/function/f_fs.c 		memset(p, 0, sizeof(*p));
p                1197 drivers/usb/gadget/function/f_fs.c 		p->aio = false;
p                1200 drivers/usb/gadget/function/f_fs.c 	p->read = false;
p                1201 drivers/usb/gadget/function/f_fs.c 	p->kiocb = kiocb;
p                1202 drivers/usb/gadget/function/f_fs.c 	p->data = *from;
p                1203 drivers/usb/gadget/function/f_fs.c 	p->mm = current->mm;
p                1205 drivers/usb/gadget/function/f_fs.c 	kiocb->private = p;
p                1207 drivers/usb/gadget/function/f_fs.c 	if (p->aio)
p                1210 drivers/usb/gadget/function/f_fs.c 	res = ffs_epfile_io(kiocb->ki_filp, p);
p                1213 drivers/usb/gadget/function/f_fs.c 	if (p->aio)
p                1214 drivers/usb/gadget/function/f_fs.c 		kfree(p);
p                1216 drivers/usb/gadget/function/f_fs.c 		*from = p->data;
p                1222 drivers/usb/gadget/function/f_fs.c 	struct ffs_io_data io_data, *p = &io_data;
p                1228 drivers/usb/gadget/function/f_fs.c 		p = kzalloc(sizeof(io_data), GFP_KERNEL);
p                1229 drivers/usb/gadget/function/f_fs.c 		if (unlikely(!p))
p                1231 drivers/usb/gadget/function/f_fs.c 		p->aio = true;
p                1233 drivers/usb/gadget/function/f_fs.c 		memset(p, 0, sizeof(*p));
p                1234 drivers/usb/gadget/function/f_fs.c 		p->aio = false;
p                1237 drivers/usb/gadget/function/f_fs.c 	p->read = true;
p                1238 drivers/usb/gadget/function/f_fs.c 	p->kiocb = kiocb;
p                1239 drivers/usb/gadget/function/f_fs.c 	if (p->aio) {
p                1240 drivers/usb/gadget/function/f_fs.c 		p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
p                1241 drivers/usb/gadget/function/f_fs.c 		if (!p->to_free) {
p                1242 drivers/usb/gadget/function/f_fs.c 			kfree(p);
p                1246 drivers/usb/gadget/function/f_fs.c 		p->data = *to;
p                1247 drivers/usb/gadget/function/f_fs.c 		p->to_free = NULL;
p                1249 drivers/usb/gadget/function/f_fs.c 	p->mm = current->mm;
p                1251 drivers/usb/gadget/function/f_fs.c 	kiocb->private = p;
p                1253 drivers/usb/gadget/function/f_fs.c 	if (p->aio)
p                1256 drivers/usb/gadget/function/f_fs.c 	res = ffs_epfile_io(kiocb->ki_filp, p);
p                1260 drivers/usb/gadget/function/f_fs.c 	if (p->aio) {
p                1261 drivers/usb/gadget/function/f_fs.c 		kfree(p->to_free);
p                1262 drivers/usb/gadget/function/f_fs.c 		kfree(p);
p                1264 drivers/usb/gadget/function/f_fs.c 		*to = p->data;
p                2743 drivers/usb/gadget/function/f_mass_storage.c 	char *pathbuf, *p;
p                2796 drivers/usb/gadget/function/f_mass_storage.c 	p = "(no medium)";
p                2798 drivers/usb/gadget/function/f_mass_storage.c 		p = "(error)";
p                2800 drivers/usb/gadget/function/f_mass_storage.c 			p = file_path(lun->filp, pathbuf, PATH_MAX);
p                2801 drivers/usb/gadget/function/f_mass_storage.c 			if (IS_ERR(p))
p                2802 drivers/usb/gadget/function/f_mass_storage.c 				p = "(error)";
p                2809 drivers/usb/gadget/function/f_mass_storage.c 	      p);
p                 450 drivers/usb/gadget/function/f_midi.c 	uint8_t p[4] = { port->cable << 4, 0, 0, 0 };
p                 456 drivers/usb/gadget/function/f_midi.c 		p[0] |= 0x0f;
p                 457 drivers/usb/gadget/function/f_midi.c 		p[1] = b;
p                 466 drivers/usb/gadget/function/f_midi.c 			p[0] |= 0x05;
p                 467 drivers/usb/gadget/function/f_midi.c 			p[1] = 0xf7;
p                 471 drivers/usb/gadget/function/f_midi.c 			p[0] |= 0x06;
p                 472 drivers/usb/gadget/function/f_midi.c 			p[1] = port->data[0];
p                 473 drivers/usb/gadget/function/f_midi.c 			p[2] = 0xf7;
p                 477 drivers/usb/gadget/function/f_midi.c 			p[0] |= 0x07;
p                 478 drivers/usb/gadget/function/f_midi.c 			p[1] = port->data[0];
p                 479 drivers/usb/gadget/function/f_midi.c 			p[2] = port->data[1];
p                 480 drivers/usb/gadget/function/f_midi.c 			p[3] = 0xf7;
p                 514 drivers/usb/gadget/function/f_midi.c 			p[0] |= 0x05;
p                 515 drivers/usb/gadget/function/f_midi.c 			p[1] = 0xf6;
p                 540 drivers/usb/gadget/function/f_midi.c 				p[0] |= port->data[0] >> 4;
p                 542 drivers/usb/gadget/function/f_midi.c 				p[0] |= 0x02;
p                 544 drivers/usb/gadget/function/f_midi.c 			p[1] = port->data[0];
p                 545 drivers/usb/gadget/function/f_midi.c 			p[2] = b;
p                 555 drivers/usb/gadget/function/f_midi.c 				p[0] |= port->data[0] >> 4;
p                 557 drivers/usb/gadget/function/f_midi.c 				p[0] |= 0x03;
p                 559 drivers/usb/gadget/function/f_midi.c 			p[1] = port->data[0];
p                 560 drivers/usb/gadget/function/f_midi.c 			p[2] = port->data[1];
p                 561 drivers/usb/gadget/function/f_midi.c 			p[3] = b;
p                 574 drivers/usb/gadget/function/f_midi.c 			p[0] |= 0x04;
p                 575 drivers/usb/gadget/function/f_midi.c 			p[1] = port->data[0];
p                 576 drivers/usb/gadget/function/f_midi.c 			p[2] = port->data[1];
p                 577 drivers/usb/gadget/function/f_midi.c 			p[3] = b;
p                 594 drivers/usb/gadget/function/f_midi.c 		memcpy(buf, p, sizeof(p));
p                 595 drivers/usb/gadget/function/f_midi.c 		req->length = length + sizeof(p);
p                 487 drivers/usb/gadget/function/f_ncm.c static inline void put_ncm(__le16 **p, unsigned size, unsigned val)
p                 491 drivers/usb/gadget/function/f_ncm.c 		put_unaligned_le16((u16)val, *p);
p                 494 drivers/usb/gadget/function/f_ncm.c 		put_unaligned_le32((u32)val, *p);
p                 501 drivers/usb/gadget/function/f_ncm.c 	*p += size;
p                 504 drivers/usb/gadget/function/f_ncm.c static inline unsigned get_ncm(__le16 **p, unsigned size)
p                 510 drivers/usb/gadget/function/f_ncm.c 		tmp = get_unaligned_le16(*p);
p                 513 drivers/usb/gadget/function/f_ncm.c 		tmp = get_unaligned_le32(*p);
p                 519 drivers/usb/gadget/function/f_ncm.c 	*p += size;
p                  42 drivers/usb/gadget/function/f_obex.c static inline struct f_obex *port_to_obex(struct gserial *p)
p                  44 drivers/usb/gadget/function/f_obex.c 	return container_of(p, struct f_obex, port);
p                1555 drivers/usb/gadget/function/f_tcm.c 			      struct se_session *se_sess, void *p)
p                1560 drivers/usb/gadget/function/f_tcm.c 	tpg->tpg_nexus = p;
p                 474 drivers/usb/gadget/function/f_uac2.c #define USBDHDR(p) (struct usb_descriptor_header *)(p)
p                1120 drivers/usb/gadget/function/rndis.c 	rndis_params *p = PDE_DATA(file_inode(file));
p                1144 drivers/usb/gadget/function/rndis.c 			rndis_signal_connect(p);
p                1148 drivers/usb/gadget/function/rndis.c 			rndis_signal_disconnect(p);
p                1151 drivers/usb/gadget/function/rndis.c 			if (fl_speed) p->speed = speed;
p                 333 drivers/usb/gadget/function/storage_common.c 	char		*p;
p                 338 drivers/usb/gadget/function/storage_common.c 		p = file_path(curlun->filp, buf, PAGE_SIZE - 1);
p                 339 drivers/usb/gadget/function/storage_common.c 		if (IS_ERR(p))
p                 340 drivers/usb/gadget/function/storage_common.c 			rc = PTR_ERR(p);
p                 342 drivers/usb/gadget/function/storage_common.c 			rc = strlen(p);
p                 343 drivers/usb/gadget/function/storage_common.c 			memmove(buf, p, rc);
p                 141 drivers/usb/gadget/function/u_ether.c static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
p                 145 drivers/usb/gadget/function/u_ether.c 	strlcpy(p->driver, "g_ether", sizeof(p->driver));
p                 146 drivers/usb/gadget/function/u_ether.c 	strlcpy(p->version, UETH__VERSION, sizeof(p->version));
p                 147 drivers/usb/gadget/function/u_ether.c 	strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
p                 148 drivers/usb/gadget/function/u_ether.c 	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
p                 687 drivers/usb/gadget/function/u_serial.c static int gs_writes_finished(struct gs_port *p)
p                 692 drivers/usb/gadget/function/u_serial.c 	spin_lock_irq(&p->port_lock);
p                 693 drivers/usb/gadget/function/u_serial.c 	cond = (p->port_usb == NULL) || !kfifo_len(&p->port_write_buf);
p                 694 drivers/usb/gadget/function/u_serial.c 	spin_unlock_irq(&p->port_lock);
p                1079 drivers/usb/gadget/function/u_serial.c 	struct tty_driver **p = (struct tty_driver **)co->data;
p                1081 drivers/usb/gadget/function/u_serial.c 	if (!*p)
p                1085 drivers/usb/gadget/function/u_serial.c 	return *p;
p                  47 drivers/usb/gadget/function/u_serial.h 	void (*connect)(struct gserial *p);
p                  48 drivers/usb/gadget/function/u_serial.h 	void (*disconnect)(struct gserial *p);
p                  49 drivers/usb/gadget/function/u_serial.h 	int (*send_break)(struct gserial *p, int duration);
p                 432 drivers/usb/gadget/udc/aspeed-vhub/hub.c 	struct ast_vhub_port *p = &vhub->ports[port];
p                 436 drivers/usb/gadget/udc/aspeed-vhub/hub.c 	prev = p->status;
p                 437 drivers/usb/gadget/udc/aspeed-vhub/hub.c 	p->status = (prev & ~clr_flags) | set_flags;
p                 438 drivers/usb/gadget/udc/aspeed-vhub/hub.c 	DDBG(&p->dev, "port %d status %04x -> %04x (C=%d)\n",
p                 439 drivers/usb/gadget/udc/aspeed-vhub/hub.c 	     port + 1, prev, p->status, set_c);
p                 443 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		u16 chg = p->status ^ prev;
p                 457 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		if (p->status & USB_PORT_STAT_ENABLE)
p                 460 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		p->change = chg;
p                 508 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		struct ast_vhub_port *p = &vhub->ports[i];
p                 510 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		if (!(p->status & USB_PORT_STAT_SUSPEND))
p                 515 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		ast_vhub_dev_resume(&p->dev);
p                 533 drivers/usb/gadget/udc/aspeed-vhub/hub.c 	struct ast_vhub_port *p = &vhub->ports[port];
p                 543 drivers/usb/gadget/udc/aspeed-vhub/hub.c 	if (!p->dev.driver)
p                 550 drivers/usb/gadget/udc/aspeed-vhub/hub.c 	ast_vhub_dev_reset(&p->dev);
p                 553 drivers/usb/gadget/udc/aspeed-vhub/hub.c 	speed = p->dev.driver->max_speed;
p                 588 drivers/usb/gadget/udc/aspeed-vhub/hub.c 	struct ast_vhub_port *p;
p                 593 drivers/usb/gadget/udc/aspeed-vhub/hub.c 	p = &vhub->ports[port];
p                 597 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		if (!(p->status & USB_PORT_STAT_ENABLE))
p                 602 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		ast_vhub_dev_suspend(&p->dev);
p                 614 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		if (p->status & USB_PORT_STAT_CONNECTION) {
p                 615 drivers/usb/gadget/udc/aspeed-vhub/hub.c 			p->change |= USB_PORT_STAT_C_CONNECTION;
p                 631 drivers/usb/gadget/udc/aspeed-vhub/hub.c 	struct ast_vhub_port *p;
p                 636 drivers/usb/gadget/udc/aspeed-vhub/hub.c 	p = &vhub->ports[port];
p                 644 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		ast_vhub_dev_suspend(&p->dev);
p                 647 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		if (!(p->status & USB_PORT_STAT_SUSPEND))
p                 652 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		ast_vhub_dev_resume(&p->dev);
p                 666 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		p->change &= ~(1u << (feat - 16));
p                 761 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		struct ast_vhub_port *p = &vhub->ports[i];
p                 763 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		if (!(p->status & USB_PORT_STAT_SUSPEND))
p                 764 drivers/usb/gadget/udc/aspeed-vhub/hub.c 			ast_vhub_dev_suspend(&p->dev);
p                 784 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		struct ast_vhub_port *p = &vhub->ports[i];
p                 786 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		if (!(p->status & USB_PORT_STAT_SUSPEND))
p                 787 drivers/usb/gadget/udc/aspeed-vhub/hub.c 			ast_vhub_dev_resume(&p->dev);
p                 818 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		struct ast_vhub_port *p = &vhub->ports[i];
p                 821 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		p->status &= USB_PORT_STAT_CONNECTION;
p                 822 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		p->change = 0;
p                 825 drivers/usb/gadget/udc/aspeed-vhub/hub.c 		ast_vhub_dev_suspend(&p->dev);
p                2127 drivers/usb/gadget/udc/bcm63xx_udc.c static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
p                2166 drivers/usb/gadget/udc/bcm63xx_udc.c static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
p                  97 drivers/usb/gadget/udc/bdc/bdc.h #define NUM_NCS(p)	(p >> 28)
p                 101 drivers/usb/gadget/udc/bdc/bdc.h #define BDC_PGS(p)	(((p) & (0x7 << 8)) >> 8)
p                 102 drivers/usb/gadget/udc/bdc/bdc.h #define BDC_SPB(p)	(p & 0x7)
p                 115 drivers/usb/gadget/udc/bdc/bdc.h #define BDC_CMD_CST(p)		(((p) & (0xf << 6))>>6)
p                 116 drivers/usb/gadget/udc/bdc/bdc.h #define BDC_CMD_EPN(p)		((p & 0x1f) << 10)
p                 153 drivers/usb/gadget/udc/bdc/bdc.h #define BDC_PSP(p)	(((p) & (0x7 << 20))>>20)
p                 159 drivers/usb/gadget/udc/bdc/bdc.h #define BDC_PSP(p)	(((p) & (0x7 << 20))>>20)
p                 166 drivers/usb/gadget/udc/bdc/bdc.h #define BDC_PST(p)	(p & 0xf)
p                 174 drivers/usb/gadget/udc/bdc/bdc.h #define BDC_U1T(p)	((p) & 0xff)
p                 175 drivers/usb/gadget/udc/bdc/bdc.h #define BDC_U2T(p)	(((p) & 0xff) << 8)
p                 190 drivers/usb/gadget/udc/bdc/bdc.h #define BDC_CSTS(p)	(((p) & (0x7 << 20)) >> 20)
p                 210 drivers/usb/gadget/udc/bdc/bdc.h #define BD_INTR_TARGET(p)	(((p) & 0x1f) << 27)
p                 217 drivers/usb/gadget/udc/bdc/bdc.h #define BDC_SRR_EPI(p)	(((p) & (0xff << 24)) >> 24)
p                 218 drivers/usb/gadget/udc/bdc/bdc.h #define BDC_SRR_DPI(p) (((p) & (0xff << 16)) >> 16)
p                 231 drivers/usb/gadget/udc/bdc/bdc.h #define SR_BD_LEN(p)    (p & 0xffffff)
p                 240 drivers/usb/gadget/udc/bdc/bdc.h #define XSF_STS(p) (((p) >> 28) & 0xf)
p                 243 drivers/usb/gadget/udc/bdc/bdc.h #define BD_LEN(p) ((p) & 0x1ffff)
p                 747 drivers/usb/gadget/udc/fotg210-udc.c 	u8 *p = (u8 *)ctrl;
p                 750 drivers/usb/gadget/udc/fotg210-udc.c 	fotg210_rdsetupp(fotg210, p);
p                  90 drivers/usb/gadget/udc/fsl_udc_core.c static u32 _fsl_readl_be(const unsigned __iomem *p)
p                  92 drivers/usb/gadget/udc/fsl_udc_core.c 	return in_be32(p);
p                  95 drivers/usb/gadget/udc/fsl_udc_core.c static u32 _fsl_readl_le(const unsigned __iomem *p)
p                  97 drivers/usb/gadget/udc/fsl_udc_core.c 	return in_le32(p);
p                 100 drivers/usb/gadget/udc/fsl_udc_core.c static void _fsl_writel_be(u32 v, unsigned __iomem *p)
p                 102 drivers/usb/gadget/udc/fsl_udc_core.c 	out_be32(p, v);
p                 105 drivers/usb/gadget/udc/fsl_udc_core.c static void _fsl_writel_le(u32 v, unsigned __iomem *p)
p                 107 drivers/usb/gadget/udc/fsl_udc_core.c 	out_le32(p, v);
p                 110 drivers/usb/gadget/udc/fsl_udc_core.c static u32 (*_fsl_readl)(const unsigned __iomem *p);
p                 111 drivers/usb/gadget/udc/fsl_udc_core.c static void (*_fsl_writel)(u32 v, unsigned __iomem *p);
p                 113 drivers/usb/gadget/udc/fsl_udc_core.c #define fsl_readl(p)		(*_fsl_readl)((p))
p                 114 drivers/usb/gadget/udc/fsl_udc_core.c #define fsl_writel(v, p)	(*_fsl_writel)((v), (p))
p                1577 drivers/usb/gadget/udc/fsl_udc_core.c 			u32 *p = (u32 *)buffer_ptr;
p                1581 drivers/usb/gadget/udc/fsl_udc_core.c 			*p++ = le32_to_cpu(*s++);
p                1582 drivers/usb/gadget/udc/fsl_udc_core.c 			*p = le32_to_cpu(*s);
p                 522 drivers/usb/gadget/udc/fsl_usb2_udc.h 	char line[52], *p;
p                 530 drivers/usb/gadget/udc/fsl_usb2_udc.h 		p = line;
p                 533 drivers/usb/gadget/udc/fsl_usb2_udc.h 				*p++ = ' ';
p                 534 drivers/usb/gadget/udc/fsl_usb2_udc.h 			sprintf(p, " %02x", buf[i]);
p                 535 drivers/usb/gadget/udc/fsl_usb2_udc.h 			p += 3;
p                 537 drivers/usb/gadget/udc/fsl_usb2_udc.h 		*p = 0;
p                 822 drivers/usb/gadget/udc/fusb300_udc.c 	u8 *p = (u8 *)ctrl;
p                 826 drivers/usb/gadget/udc/fusb300_udc.c 	fusb300_rdcxf(fusb300, p, 8);
p                 313 drivers/usb/gadget/udc/lpc32xx_udc.c #define USBD_CMD_PHASE(p)	((p) << 8)
p                1086 drivers/usb/gadget/udc/m66592-udc.c 	u16 *p = (u16 *)ctrl;
p                1094 drivers/usb/gadget/udc/m66592-udc.c 		p[i] = m66592_read(m66592, offset + i*2);
p                 591 drivers/usb/gadget/udc/m66592-udc.h 			unsigned char *p = buf + len*2;
p                 594 drivers/usb/gadget/udc/m66592-udc.h 			iowrite8(*p, fifoaddr);
p                  90 drivers/usb/gadget/udc/pxa27x_udc.c static int state_dbg_show(struct seq_file *s, void *p)
p                 136 drivers/usb/gadget/udc/pxa27x_udc.c static int queues_dbg_show(struct seq_file *s, void *p)
p                 169 drivers/usb/gadget/udc/pxa27x_udc.c static int eps_dbg_show(struct seq_file *s, void *p)
p                1287 drivers/usb/gadget/udc/r8a66597-udc.c 	u16 *p = (u16 *)ctrl;
p                1295 drivers/usb/gadget/udc/r8a66597-udc.c 		p[i] = r8a66597_read(r8a66597, offset + i*2);
p                 122 drivers/usb/gadget/udc/s3c2410_udc.c static int s3c2410_udc_debugfs_show(struct seq_file *m, void *p)
p                 626 drivers/usb/host/ehci-dbg.c 	union ehci_shadow	p, *seen;
p                 652 drivers/usb/host/ehci-dbg.c 		p = ehci->pshadow[i];
p                 653 drivers/usb/host/ehci-dbg.c 		if (likely(!p.ptr))
p                 666 drivers/usb/host/ehci-dbg.c 				hw = p.qh->hw;
p                 668 drivers/usb/host/ehci-dbg.c 						p.qh->ps.period,
p                 673 drivers/usb/host/ehci-dbg.c 						p.qh);
p                 678 drivers/usb/host/ehci-dbg.c 					if (seen[temp].ptr != p.ptr)
p                 680 drivers/usb/host/ehci-dbg.c 					if (p.qh->qh_next.ptr) {
p                 691 drivers/usb/host/ehci-dbg.c 						hw, p.qh, size);
p                 694 drivers/usb/host/ehci-dbg.c 						seen[seen_count++].qh = p.qh;
p                 699 drivers/usb/host/ehci-dbg.c 				p = p.qh->qh_next;
p                 703 drivers/usb/host/ehci-dbg.c 					" fstn-%8x/%p", p.fstn->hw_prev,
p                 704 drivers/usb/host/ehci-dbg.c 					p.fstn);
p                 705 drivers/usb/host/ehci-dbg.c 				tag = Q_NEXT_TYPE(ehci, p.fstn->hw_next);
p                 706 drivers/usb/host/ehci-dbg.c 				p = p.fstn->fstn_next;
p                 710 drivers/usb/host/ehci-dbg.c 					" itd/%p", p.itd);
p                 711 drivers/usb/host/ehci-dbg.c 				tag = Q_NEXT_TYPE(ehci, p.itd->hw_next);
p                 712 drivers/usb/host/ehci-dbg.c 				p = p.itd->itd_next;
p                 717 drivers/usb/host/ehci-dbg.c 					p.sitd->stream->ps.period,
p                 718 drivers/usb/host/ehci-dbg.c 					hc32_to_cpup(ehci, &p.sitd->hw_uframe)
p                 720 drivers/usb/host/ehci-dbg.c 					p.sitd);
p                 721 drivers/usb/host/ehci-dbg.c 				tag = Q_NEXT_TYPE(ehci, p.sitd->hw_next);
p                 722 drivers/usb/host/ehci-dbg.c 				p = p.sitd->sitd_next;
p                 727 drivers/usb/host/ehci-dbg.c 		} while (p.ptr);
p                 488 drivers/usb/host/fhci.h static inline int cq_put(struct kfifo *kfifo, void *p)
p                 490 drivers/usb/host/fhci.h 	return kfifo_in(kfifo, (void *)&p, sizeof(p));
p                 496 drivers/usb/host/fhci.h 	void *p;
p                 498 drivers/usb/host/fhci.h 	sz = kfifo_out(kfifo, (void *)&p, sizeof(p));
p                 499 drivers/usb/host/fhci.h 	if (sz != sizeof(p))
p                 502 drivers/usb/host/fhci.h 	return p;
p                 519 drivers/usb/host/fotg210-hcd.c 	union fotg210_shadow p, *seen;
p                 545 drivers/usb/host/fotg210-hcd.c 		p = fotg210->pshadow[i];
p                 546 drivers/usb/host/fotg210-hcd.c 		if (likely(!p.ptr))
p                 560 drivers/usb/host/fotg210-hcd.c 				hw = p.qh->hw;
p                 562 drivers/usb/host/fotg210-hcd.c 						p.qh->period,
p                 567 drivers/usb/host/fotg210-hcd.c 						p.qh);
p                 572 drivers/usb/host/fotg210-hcd.c 					if (seen[temp].ptr != p.ptr)
p                 574 drivers/usb/host/fotg210-hcd.c 					if (p.qh->qh_next.ptr) {
p                 586 drivers/usb/host/fotg210-hcd.c 							p.qh, size);
p                 589 drivers/usb/host/fotg210-hcd.c 						seen[seen_count++].qh = p.qh;
p                 593 drivers/usb/host/fotg210-hcd.c 				p = p.qh->qh_next;
p                 598 drivers/usb/host/fotg210-hcd.c 						p.fstn->hw_prev, p.fstn);
p                 599 drivers/usb/host/fotg210-hcd.c 				tag = Q_NEXT_TYPE(fotg210, p.fstn->hw_next);
p                 600 drivers/usb/host/fotg210-hcd.c 				p = p.fstn->fstn_next;
p                 604 drivers/usb/host/fotg210-hcd.c 						" itd/%p", p.itd);
p                 605 drivers/usb/host/fotg210-hcd.c 				tag = Q_NEXT_TYPE(fotg210, p.itd->hw_next);
p                 606 drivers/usb/host/fotg210-hcd.c 				p = p.itd->itd_next;
p                 611 drivers/usb/host/fotg210-hcd.c 		} while (p.ptr);
p                 215 drivers/usb/host/fotg210.h #define HC_LENGTH(fotg210, p)	(0x00ff&((p) >> /* bits 7:0 / offset 00h */ \
p                 217 drivers/usb/host/fotg210.h #define HC_VERSION(fotg210, p)	(0xffff&((p) >> /* bits 31:16 / offset 02h */ \
p                 220 drivers/usb/host/fotg210.h #define HCS_N_PORTS(p)		(((p)>>0)&0xf)	/* bits 3:0, ports on HC */
p                 223 drivers/usb/host/fotg210.h #define HCC_CANPARK(p)		((p)&(1 << 2))  /* true: can park on async qh */
p                 224 drivers/usb/host/fotg210.h #define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1))  /* true: periodic_size changes*/
p                 236 drivers/usb/host/imx21-hcd.c 	u8 *p = src;
p                 242 drivers/usb/host/imx21-hcd.c 		word += (*p++ << (byte * 8));
p                 186 drivers/usb/host/isp116x.h #define PTD_GET_COUNT(p)	(((p)->count & PTD_COUNT_MSK) >> 0)
p                 188 drivers/usb/host/isp116x.h #define PTD_GET_TOGGLE(p)	(((p)->count & PTD_TOGGLE_MSK) >> 10)
p                 190 drivers/usb/host/isp116x.h #define PTD_GET_ACTIVE(p)	(((p)->count & PTD_ACTIVE_MSK) >> 11)
p                 192 drivers/usb/host/isp116x.h #define PTD_GET_CC(p)		(((p)->count & PTD_CC_MSK) >> 12)
p                 194 drivers/usb/host/isp116x.h #define PTD_GET_MPS(p)		(((p)->mps & PTD_MPS_MSK) >> 0)
p                 196 drivers/usb/host/isp116x.h #define PTD_GET_SPD(p)		(((p)->mps & PTD_SPD_MSK) >> 10)
p                 198 drivers/usb/host/isp116x.h #define PTD_GET_LAST(p)		(((p)->mps & PTD_LAST_MSK) >> 11)
p                 200 drivers/usb/host/isp116x.h #define PTD_GET_EP(p)		(((p)->mps & PTD_EP_MSK) >> 12)
p                 202 drivers/usb/host/isp116x.h #define PTD_GET_LEN(p)		(((p)->len & PTD_LEN_MSK) >> 0)
p                 204 drivers/usb/host/isp116x.h #define PTD_GET_DIR(p)		(((p)->len & PTD_DIR_MSK) >> 10)
p                 206 drivers/usb/host/isp116x.h #define PTD_GET_B5_5(p)		(((p)->len & PTD_B5_5_MSK) >> 13)
p                 208 drivers/usb/host/isp116x.h #define PTD_GET_FA(p)		(((p)->faddr & PTD_FA_MSK) >> 0)
p                 210 drivers/usb/host/isp116x.h #define PTD_GET_FMT(p)		(((p)->faddr & PTD_FMT_MSK) >> 7)
p                 348 drivers/usb/host/isp1362.h #define PTD_GET_COUNT(p)	(((p)->count & PTD_COUNT_MSK) >> 0)
p                 350 drivers/usb/host/isp1362.h #define PTD_GET_TOGGLE(p)	(((p)->count & PTD_TOGGLE_MSK) >> 10)
p                 352 drivers/usb/host/isp1362.h #define PTD_GET_ACTIVE(p)	(((p)->count & PTD_ACTIVE_MSK) >> 11)
p                 354 drivers/usb/host/isp1362.h #define PTD_GET_CC(p)		(((p)->count & PTD_CC_MSK) >> 12)
p                 356 drivers/usb/host/isp1362.h #define PTD_GET_MPS(p)		(((p)->mps & PTD_MPS_MSK) >> 0)
p                 358 drivers/usb/host/isp1362.h #define PTD_GET_SPD(p)		(((p)->mps & PTD_SPD_MSK) >> 10)
p                 360 drivers/usb/host/isp1362.h #define PTD_GET_LAST(p)		(((p)->mps & PTD_LAST_MSK) >> 11)
p                 362 drivers/usb/host/isp1362.h #define PTD_GET_EP(p)		(((p)->mps & PTD_EP_MSK) >> 12)
p                 364 drivers/usb/host/isp1362.h #define PTD_GET_LEN(p)		(((p)->len & PTD_LEN_MSK) >> 0)
p                 366 drivers/usb/host/isp1362.h #define PTD_GET_DIR(p)		(((p)->len & PTD_DIR_MSK) >> 10)
p                 368 drivers/usb/host/isp1362.h #define PTD_GET_FA(p)		(((p)->faddr & PTD_FA_MSK) >> 0)
p                 370 drivers/usb/host/isp1362.h #define PTD_GET_SF_INT(p)	(((p)->faddr & PTD_SF_INT_MSK) >> 8)
p                 372 drivers/usb/host/isp1362.h #define PTD_GET_SF_ISO(p)	(((p)->faddr & PTD_SF_ISO_MSK) >> 8)
p                 374 drivers/usb/host/isp1362.h #define PTD_GET_PR(p)		(((p)->faddr & PTD_PR_MSK) >> 13)
p                 511 drivers/usb/host/isp1362.h 	unsigned long p = (unsigned long)ptr;
p                 512 drivers/usb/host/isp1362.h 	if (!(p & 0xf))
p                 514 drivers/usb/host/isp1362.h 	else if (!(p & 0x7))
p                 516 drivers/usb/host/isp1362.h 	else if (!(p & 0x3))
p                 518 drivers/usb/host/isp1362.h 	else if (!(p & 0x1))
p                  98 drivers/usb/host/oxu210hp-hcd.c #define HC_LENGTH(p)		(((p)>>00)&0x00ff)	/* bits 7:0 */
p                  99 drivers/usb/host/oxu210hp-hcd.c #define HC_VERSION(p)		(((p)>>16)&0xffff)	/* bits 31:16 */
p                 101 drivers/usb/host/oxu210hp-hcd.c #define HCS_DEBUG_PORT(p)	(((p)>>20)&0xf)	/* bits 23:20, debug port? */
p                 102 drivers/usb/host/oxu210hp-hcd.c #define HCS_INDICATOR(p)	((p)&(1 << 16))	/* true: has port indicators */
p                 103 drivers/usb/host/oxu210hp-hcd.c #define HCS_N_CC(p)		(((p)>>12)&0xf)	/* bits 15:12, #companion HCs */
p                 104 drivers/usb/host/oxu210hp-hcd.c #define HCS_N_PCC(p)		(((p)>>8)&0xf)	/* bits 11:8, ports per CC */
p                 105 drivers/usb/host/oxu210hp-hcd.c #define HCS_PORTROUTED(p)	((p)&(1 << 7))	/* true: port routing */
p                 106 drivers/usb/host/oxu210hp-hcd.c #define HCS_PPC(p)		((p)&(1 << 4))	/* true: port power control */
p                 107 drivers/usb/host/oxu210hp-hcd.c #define HCS_N_PORTS(p)		(((p)>>0)&0xf)	/* bits 3:0, ports on HC */
p                 110 drivers/usb/host/oxu210hp-hcd.c #define HCC_EXT_CAPS(p)		(((p)>>8)&0xff)	/* for pci extended caps */
p                 111 drivers/usb/host/oxu210hp-hcd.c #define HCC_ISOC_CACHE(p)       ((p)&(1 << 7))  /* true: can cache isoc frame */
p                 112 drivers/usb/host/oxu210hp-hcd.c #define HCC_ISOC_THRES(p)       (((p)>>4)&0x7)  /* bits 6:4, uframes cached */
p                 113 drivers/usb/host/oxu210hp-hcd.c #define HCC_CANPARK(p)		((p)&(1 << 2))  /* true: can park on async qh */
p                 114 drivers/usb/host/oxu210hp-hcd.c #define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1))  /* true: periodic_size changes*/
p                 115 drivers/usb/host/oxu210hp-hcd.c #define HCC_64BIT_ADDR(p)       ((p)&(1))       /* true: can use 64-bit addr */
p                1070 drivers/usb/host/r8a66597-hcd.c 	__le16 *p = (__le16 *)td->urb->setup_packet;
p                1078 drivers/usb/host/r8a66597-hcd.c 		r8a66597_write(r8a66597, le16_to_cpu(p[i]), setup_addr);
p                  29 drivers/usb/host/uhci-debug.c 	char *p;
p                  33 drivers/usb/host/uhci-debug.c 		p = strchr(buf, '\n');
p                  34 drivers/usb/host/uhci-debug.c 		if (p)
p                  35 drivers/usb/host/uhci-debug.c 			*p = 0;
p                  37 drivers/usb/host/uhci-debug.c 		buf = p;
p                 663 drivers/usb/host/uhci-q.c 	char *p = "??";
p                 674 drivers/usb/host/uhci-q.c 		p = "INT";
p                 678 drivers/usb/host/uhci-q.c 		p = "ISO";
p                 685 drivers/usb/host/uhci-q.c 			qh->hep->desc.bEndpointAddress, p,
p                 696 drivers/usb/host/uhci-q.c 	char *p = "??";
p                 707 drivers/usb/host/uhci-q.c 		p = "INT";
p                 711 drivers/usb/host/uhci-q.c 		p = "ISO";
p                 718 drivers/usb/host/uhci-q.c 			qh->hep->desc.bEndpointAddress, p,
p                 877 drivers/usb/host/xhci-dbgcap.c 	const char		*p;
p                 886 drivers/usb/host/xhci-dbgcap.c 		p = "disabled";
p                 889 drivers/usb/host/xhci-dbgcap.c 		p = "initialized";
p                 892 drivers/usb/host/xhci-dbgcap.c 		p = "enabled";
p                 895 drivers/usb/host/xhci-dbgcap.c 		p = "connected";
p                 898 drivers/usb/host/xhci-dbgcap.c 		p = "configured";
p                 901 drivers/usb/host/xhci-dbgcap.c 		p = "stalled";
p                 904 drivers/usb/host/xhci-dbgcap.c 		p = "unknown";
p                 907 drivers/usb/host/xhci-dbgcap.c 	return sprintf(buf, "%s\n", p);
p                  46 drivers/usb/host/xhci-dbgcap.h #define DBC_CTRL_MAXBURST(p)		(((p) >> 16) & 0xff)
p                  47 drivers/usb/host/xhci-dbgcap.h #define DBC_DOOR_BELL_TARGET(p)		(((p) & 0xff) << 8)
p                 164 drivers/usb/host/xhci-dbgcap.h #define dbc_epctx_info2(t, p, b)	\
p                 165 drivers/usb/host/xhci-dbgcap.h 	cpu_to_le32(EP_TYPE(t) | MAX_PACKET(p) | MAX_BURST(b))
p                  18 drivers/usb/host/xhci-ext-caps.h #define XHCI_HCC_EXT_CAPS(p)	(((p)>>16)&0xffff)
p                  28 drivers/usb/host/xhci-ext-caps.h #define XHCI_HC_LENGTH(p)	(((p)>>00)&0x00ff)
p                  31 drivers/usb/host/xhci-ext-caps.h #define XHCI_EXT_CAPS_ID(p)	(((p)>>0)&0xff)
p                  32 drivers/usb/host/xhci-ext-caps.h #define XHCI_EXT_CAPS_NEXT(p)	(((p)>>8)&0xff)
p                  33 drivers/usb/host/xhci-ext-caps.h #define	XHCI_EXT_CAPS_VAL(p)	((p)>>16)
p                  29 drivers/usb/host/xhci-mtk-sch.c #define EP_BPKTS(p)	((p) & 0x7f)
p                  30 drivers/usb/host/xhci-mtk-sch.c #define EP_BCSCOUNT(p)	(((p) & 0x7) << 8)
p                  31 drivers/usb/host/xhci-mtk-sch.c #define EP_BBM(p)	((p) << 11)
p                  32 drivers/usb/host/xhci-mtk-sch.c #define EP_BOFFSET(p)	((p) & 0x3fff)
p                  33 drivers/usb/host/xhci-mtk-sch.c #define EP_BREPEAT(p)	(((p) & 0x7fff) << 16)
p                  43 drivers/usb/host/xhci-mtk.c #define CAP_U3_PORT_NUM(p)	((p) & 0xff)
p                  44 drivers/usb/host/xhci-mtk.c #define CAP_U2_PORT_NUM(p)	(((p) >> 8) & 0xff)
p                  64 drivers/usb/host/xhci.h #define HC_LENGTH(p)		XHCI_HC_LENGTH(p)
p                  66 drivers/usb/host/xhci.h #define HC_VERSION(p)		(((p) >> 16) & 0xffff)
p                  70 drivers/usb/host/xhci.h #define HCS_MAX_SLOTS(p)	(((p) >> 0) & 0xff)
p                  73 drivers/usb/host/xhci.h #define HCS_MAX_INTRS(p)	(((p) >> 8) & 0x7ff)
p                  75 drivers/usb/host/xhci.h #define HCS_MAX_PORTS(p)	(((p) >> 24) & 0x7f)
p                  80 drivers/usb/host/xhci.h #define HCS_IST(p)		(((p) >> 0) & 0xf)
p                  82 drivers/usb/host/xhci.h #define HCS_ERST_MAX(p)		(((p) >> 4) & 0xf)
p                  86 drivers/usb/host/xhci.h #define HCS_MAX_SCRATCHPAD(p)   ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
p                  90 drivers/usb/host/xhci.h #define HCS_U1_LATENCY(p)	(((p) >> 0) & 0xff)
p                  92 drivers/usb/host/xhci.h #define HCS_U2_LATENCY(p)	(((p) >> 16) & 0xffff)
p                  96 drivers/usb/host/xhci.h #define HCC_64BIT_ADDR(p)	((p) & (1 << 0))
p                  98 drivers/usb/host/xhci.h #define HCC_BANDWIDTH_NEG(p)	((p) & (1 << 1))
p                 102 drivers/usb/host/xhci.h #define HCC_64BYTE_CONTEXT(p)	((p) & (1 << 2))
p                 104 drivers/usb/host/xhci.h #define HCC_PPC(p)		((p) & (1 << 3))
p                 106 drivers/usb/host/xhci.h #define HCS_INDICATOR(p)	((p) & (1 << 4))
p                 108 drivers/usb/host/xhci.h #define HCC_LIGHT_RESET(p)	((p) & (1 << 5))
p                 110 drivers/usb/host/xhci.h #define HCC_LTC(p)		((p) & (1 << 6))
p                 112 drivers/usb/host/xhci.h #define HCC_NSS(p)		((p) & (1 << 7))
p                 114 drivers/usb/host/xhci.h #define HCC_SPC(p)		((p) & (1 << 9))
p                 116 drivers/usb/host/xhci.h #define HCC_CFC(p)		((p) & (1 << 11))
p                 118 drivers/usb/host/xhci.h #define HCC_MAX_PSA(p)		(1 << ((((p) >> 12) & 0xf) + 1))
p                 120 drivers/usb/host/xhci.h #define HCC_EXT_CAPS(p)		XHCI_HCC_EXT_CAPS(p)
p                 132 drivers/usb/host/xhci.h #define	HCC2_U3C(p)		((p) & (1 << 0))
p                 134 drivers/usb/host/xhci.h #define	HCC2_CMC(p)		((p) & (1 << 1))
p                 136 drivers/usb/host/xhci.h #define	HCC2_FSC(p)		((p) & (1 << 2))
p                 138 drivers/usb/host/xhci.h #define	HCC2_CTC(p)		((p) & (1 << 3))
p                 140 drivers/usb/host/xhci.h #define	HCC2_LEC(p)		((p) & (1 << 4))
p                 142 drivers/usb/host/xhci.h #define	HCC2_CIC(p)		((p) & (1 << 5))
p                 144 drivers/usb/host/xhci.h #define	HCC2_ETC(p)		((p) & (1 << 6))
p                 281 drivers/usb/host/xhci.h #define MAX_DEVS(p)	((p) & 0xff)
p                 333 drivers/usb/host/xhci.h #define DEV_UNDEFSPEED(p)	(((p) & DEV_SPEED_MASK) == (0x0<<10))
p                 334 drivers/usb/host/xhci.h #define DEV_FULLSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_FS)
p                 335 drivers/usb/host/xhci.h #define DEV_LOWSPEED(p)		(((p) & DEV_SPEED_MASK) == XDEV_LS)
p                 336 drivers/usb/host/xhci.h #define DEV_HIGHSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_HS)
p                 337 drivers/usb/host/xhci.h #define DEV_SUPERSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_SS)
p                 338 drivers/usb/host/xhci.h #define DEV_SUPERSPEEDPLUS(p)	(((p) & DEV_SPEED_MASK) == XDEV_SSP)
p                 339 drivers/usb/host/xhci.h #define DEV_SUPERSPEED_ANY(p)	(((p) & DEV_SPEED_MASK) >= XDEV_SS)
p                 340 drivers/usb/host/xhci.h #define DEV_PORT_SPEED(p)	(((p) >> 10) & 0x0f)
p                 413 drivers/usb/host/xhci.h #define PORT_U1_TIMEOUT(p)	((p) & 0xff)
p                 416 drivers/usb/host/xhci.h #define PORT_U2_TIMEOUT(p)	(((p) & 0xff) << 8)
p                 424 drivers/usb/host/xhci.h #define	PORT_HIRD(p)		(((p) & 0xf) << 4)
p                 427 drivers/usb/host/xhci.h #define	PORT_L1DS(p)		(((p) & 0xff) << 8)
p                 432 drivers/usb/host/xhci.h #define PORT_RX_LANES(p)	(((p) >> 16) & 0xf)
p                 433 drivers/usb/host/xhci.h #define PORT_TX_LANES(p)	(((p) >> 20) & 0xf)
p                 436 drivers/usb/host/xhci.h #define PORT_HIRDM(p)((p) & 3)
p                 437 drivers/usb/host/xhci.h #define PORT_L1_TIMEOUT(p)(((p) & 0xff) << 2)
p                 438 drivers/usb/host/xhci.h #define PORT_BESLD(p)(((p) & 0xf) << 10)
p                 490 drivers/usb/host/xhci.h #define	ER_IRQ_PENDING(p)	((p) & 0x1)
p                 493 drivers/usb/host/xhci.h #define	ER_IRQ_CLEAR(p)		((p) & 0xfffffffe)
p                 494 drivers/usb/host/xhci.h #define	ER_IRQ_ENABLE(p)	((ER_IRQ_CLEAR(p)) | 0x2)
p                 495 drivers/usb/host/xhci.h #define	ER_IRQ_DISABLE(p)	((ER_IRQ_CLEAR(p)) & ~(0x2))
p                 638 drivers/usb/host/xhci.h #define LAST_CTX(p)	((p) << 27)
p                 639 drivers/usb/host/xhci.h #define LAST_CTX_TO_EP_NUM(p)	(((p) >> 27) - 1)
p                 647 drivers/usb/host/xhci.h #define ROOT_HUB_PORT(p)	(((p) & 0xff) << 16)
p                 648 drivers/usb/host/xhci.h #define DEVINFO_TO_ROOT_HUB_PORT(p)	(((p) >> 16) & 0xff)
p                 650 drivers/usb/host/xhci.h #define XHCI_MAX_PORTS(p)	(((p) & 0xff) << 24)
p                 651 drivers/usb/host/xhci.h #define DEVINFO_TO_MAX_PORTS(p)	(((p) & (0xff << 24)) >> 24)
p                 665 drivers/usb/host/xhci.h #define TT_THINK_TIME(p)	(((p) & 0x3) << 16)
p                 666 drivers/usb/host/xhci.h #define GET_TT_THINK_TIME(p)	(((p) & (0x3 << 16)) >> 16)
p                 674 drivers/usb/host/xhci.h #define GET_SLOT_STATE(p)	(((p) & (0x1f << 27)) >> 27)
p                 728 drivers/usb/host/xhci.h #define EP_MULT(p)		(((p) & 0x3) << 8)
p                 729 drivers/usb/host/xhci.h #define CTX_TO_EP_MULT(p)	(((p) >> 8) & 0x3)
p                 733 drivers/usb/host/xhci.h #define EP_INTERVAL(p)			(((p) & 0xff) << 16)
p                 734 drivers/usb/host/xhci.h #define EP_INTERVAL_TO_UFRAMES(p)	(1 << (((p) >> 16) & 0xff))
p                 735 drivers/usb/host/xhci.h #define CTX_TO_EP_INTERVAL(p)		(((p) >> 16) & 0xff)
p                 737 drivers/usb/host/xhci.h #define EP_MAXPSTREAMS(p)		(((p) << 10) & EP_MAXPSTREAMS_MASK)
p                 738 drivers/usb/host/xhci.h #define CTX_TO_EP_MAXPSTREAMS(p)	(((p) & EP_MAXPSTREAMS_MASK) >> 10)
p                 742 drivers/usb/host/xhci.h #define CTX_TO_MAX_ESIT_PAYLOAD_HI(p)	(((p) >> 24) & 0xff)
p                 750 drivers/usb/host/xhci.h #define ERROR_COUNT(p)	(((p) & 0x3) << 1)
p                 751 drivers/usb/host/xhci.h #define CTX_TO_EP_TYPE(p)	(((p) >> 3) & 0x7)
p                 752 drivers/usb/host/xhci.h #define EP_TYPE(p)	((p) << 3)
p                 762 drivers/usb/host/xhci.h #define MAX_BURST(p)	(((p)&0xff) << 8)
p                 763 drivers/usb/host/xhci.h #define CTX_TO_MAX_BURST(p)	(((p) >> 8) & 0xff)
p                 764 drivers/usb/host/xhci.h #define MAX_PACKET(p)	(((p)&0xffff) << 16)
p                 766 drivers/usb/host/xhci.h #define MAX_PACKET_DECODED(p)	(((p) >> 16) & 0xffff)
p                 769 drivers/usb/host/xhci.h #define EP_AVG_TRB_LENGTH(p)		((p) & 0xffff)
p                 770 drivers/usb/host/xhci.h #define EP_MAX_ESIT_PAYLOAD_LO(p)	(((p) & 0xffff) << 16)
p                 771 drivers/usb/host/xhci.h #define EP_MAX_ESIT_PAYLOAD_HI(p)	((((p) >> 16) & 0xff) << 24)
p                 772 drivers/usb/host/xhci.h #define CTX_TO_MAX_ESIT_PAYLOAD(p)	(((p) >> 16) & 0xffff)
p                 827 drivers/usb/host/xhci.h #define	SCT_FOR_CTX(p)		(((p) & 0x7) << 1)
p                1080 drivers/usb/host/xhci.h #define	EVENT_TRB_LEN(p)		((p) & 0xffffff)
p                1083 drivers/usb/host/xhci.h #define	TRB_TO_EP_ID(p)	(((p) >> 16) & 0x1f)
p                1087 drivers/usb/host/xhci.h #define GET_COMP_CODE(p)	(((p) & COMP_CODE_MASK) >> 24)
p                1240 drivers/usb/host/xhci.h #define TRB_TO_VF_INTR_TARGET(p)	(((p) & (0x3ff << 22)) >> 22)
p                1241 drivers/usb/host/xhci.h #define TRB_TO_VF_ID(p)			(((p) & (0xff << 16)) >> 16)
p                1244 drivers/usb/host/xhci.h #define TRB_TO_BELT(p)			(((p) & (0xfff << 16)) >> 16)
p                1247 drivers/usb/host/xhci.h #define TRB_TO_DEV_SPEED(p)		(((p) & (0xf << 16)) >> 16)
p                1250 drivers/usb/host/xhci.h #define TRB_TO_PACKET_TYPE(p)		((p) & 0x1f)
p                1251 drivers/usb/host/xhci.h #define TRB_TO_ROOTHUB_PORT(p)		(((p) & (0xff << 24)) >> 24)
p                1260 drivers/usb/host/xhci.h #define TRB_TO_SLOT_ID(p)	(((p) & (0xff<<24)) >> 24)
p                1261 drivers/usb/host/xhci.h #define SLOT_ID_FOR_TRB(p)	(((p) & 0xff) << 24)
p                1264 drivers/usb/host/xhci.h #define TRB_TO_EP_INDEX(p)		((((p) & (0x1f << 16)) >> 16) - 1)
p                1265 drivers/usb/host/xhci.h #define	EP_ID_FOR_TRB(p)		((((p) + 1) & 0x1f) << 16)
p                1267 drivers/usb/host/xhci.h #define SUSPEND_PORT_FOR_TRB(p)		(((p) & 1) << 23)
p                1268 drivers/usb/host/xhci.h #define TRB_TO_SUSPEND_PORT(p)		(((p) & (1 << 23)) >> 23)
p                1272 drivers/usb/host/xhci.h #define TRB_TO_STREAM_ID(p)		((((p) & (0xffff << 16)) >> 16))
p                1273 drivers/usb/host/xhci.h #define STREAM_ID_FOR_TRB(p)		((((p)) & 0xffff) << 16)
p                1274 drivers/usb/host/xhci.h #define SCT_FOR_TRB(p)			(((p) << 1) & 0x7)
p                1281 drivers/usb/host/xhci.h #define GET_PORT_ID(p)		(((p) & (0xff << 24)) >> 24)
p                1287 drivers/usb/host/xhci.h #define	TRB_LEN(p)		((p) & 0x1ffff)
p                1289 drivers/usb/host/xhci.h #define TRB_TD_SIZE(p)          (min((p), (u32)31) << 17)
p                1290 drivers/usb/host/xhci.h #define GET_TD_SIZE(p)		(((p) & 0x3e0000) >> 17)
p                1292 drivers/usb/host/xhci.h #define TRB_TD_SIZE_TBC(p)      (min((p), (u32)31) << 17)
p                1294 drivers/usb/host/xhci.h #define TRB_INTR_TARGET(p)	(((p) & 0x3ff) << 22)
p                1295 drivers/usb/host/xhci.h #define GET_INTR_TARGET(p)	(((p) >> 22) & 0x3ff)
p                1297 drivers/usb/host/xhci.h #define TRB_TBC(p)		(((p) & 0x3) << 7)
p                1298 drivers/usb/host/xhci.h #define TRB_TLBPC(p)		(((p) & 0xf) << 16)
p                1325 drivers/usb/host/xhci.h #define	TRB_TX_TYPE(p)		((p) << 16)
p                1331 drivers/usb/host/xhci.h #define TRB_FRAME_ID(p)		(((p) & 0x7ff) << 20)
p                1346 drivers/usb/host/xhci.h #define TRB_TYPE(p)		((p) << 10)
p                1347 drivers/usb/host/xhci.h #define TRB_FIELD_TO_TYPE(p)	(((p) & TRB_TYPE_BITMASK) >> 10)
p                1501 drivers/usb/host/xhci.h #define NEC_FW_MINOR(p)		(((p) >> 0) & 0xff)
p                1502 drivers/usb/host/xhci.h #define NEC_FW_MAJOR(p)		(((p) >> 8) & 0xff)
p                  22 drivers/usb/isp1760/isp1760-regs.h #define HC_LENGTH(p)		(((p) >> 00) & 0x00ff)	/* bits 7:0 */
p                  23 drivers/usb/isp1760/isp1760-regs.h #define HC_VERSION(p)		(((p) >> 16) & 0xffff)	/* bits 31:16 */
p                  26 drivers/usb/isp1760/isp1760-regs.h #define HCS_INDICATOR(p)	((p) & (1 << 16))	/* true: has port indicators */
p                  27 drivers/usb/isp1760/isp1760-regs.h #define HCS_PPC(p)		((p) & (1 << 4))	/* true: port power control */
p                  28 drivers/usb/isp1760/isp1760-regs.h #define HCS_N_PORTS(p)		(((p) >> 0) & 0xf)	/* bits 3:0, ports on HC */
p                  31 drivers/usb/isp1760/isp1760-regs.h #define HCC_ISOC_CACHE(p)       ((p) & (1 << 7))	/* true: can cache isoc frame */
p                  32 drivers/usb/isp1760/isp1760-regs.h #define HCC_ISOC_THRES(p)       (((p) >> 4) & 0x7)	/* bits 6:4, uframes cached */
p                 672 drivers/usb/misc/ftdi-elan.c 			char *p = ++ftdi->bulk_in_last + ftdi->bulk_in_buffer;
p                 675 drivers/usb/misc/ftdi-elan.c 				d += sprintf(d, " %02X", 0x000000FF & *p);
p                 679 drivers/usb/misc/ftdi-elan.c 			if (copy_to_user(buffer++, p, 1)) {
p                  82 drivers/usb/misc/sisusbvga/sisusb.h #define SISUSB_CORRECT_ENDIANNESS_PACKET(p)		\
p                  84 drivers/usb/misc/sisusbvga/sisusb.h 		p->header  = cpu_to_le16(p->header);	\
p                  85 drivers/usb/misc/sisusbvga/sisusb.h 		p->address = cpu_to_le32(p->address);	\
p                  86 drivers/usb/misc/sisusbvga/sisusb.h 		p->data    = cpu_to_le32(p->data);	\
p                  89 drivers/usb/misc/sisusbvga/sisusb.h #define SISUSB_CORRECT_ENDIANNESS_PACKET(p)
p                 331 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_invert_region(struct vc_data *vc, u16 *p, int count)
p                 340 drivers/usb/misc/sisusbvga/sisusb_con.c 		u16 a = *p;
p                 342 drivers/usb/misc/sisusbvga/sisusb_con.c 		*p++ = ((a) & 0x88ff)        |
p                 383 drivers/usb/misc/usb251xb.c 	const __be32 *p;
p                 386 drivers/usb/misc/usb251xb.c 	of_property_for_each_u32(dev->of_node, prop_name, prop, p, port) {
p                  39 drivers/usb/mon/mon_main.c 	struct list_head *p;
p                  44 drivers/usb/mon/mon_main.c 			list_for_each (p, &mon_buses) {
p                  46 drivers/usb/mon/mon_main.c 				m1 = list_entry(p, struct mon_bus, bus_link);
p                 168 drivers/usb/mon/mon_main.c 	struct list_head *p;
p                 171 drivers/usb/mon/mon_main.c 		list_for_each (p, &mon_buses) {
p                 172 drivers/usb/mon/mon_main.c 			mbus = list_entry(p, struct mon_bus, bus_link);
p                 335 drivers/usb/mon/mon_main.c 	struct list_head *p;
p                 341 drivers/usb/mon/mon_main.c 	list_for_each (p, &mon_buses) {
p                 342 drivers/usb/mon/mon_main.c 		mbus = list_entry(p, struct mon_bus, bus_link);
p                 387 drivers/usb/mon/mon_main.c 	struct list_head *p;
p                 395 drivers/usb/mon/mon_main.c 		p = mon_buses.next;
p                 396 drivers/usb/mon/mon_main.c 		mbus = list_entry(p, struct mon_bus, bus_link);
p                 397 drivers/usb/mon/mon_main.c 		list_del(p);
p                 108 drivers/usb/mon/mon_text.c 	struct mon_text_ptr *p, const struct mon_event_text *ep);
p                 110 drivers/usb/mon/mon_text.c 	struct mon_text_ptr *p, const struct mon_event_text *ep);
p                 112 drivers/usb/mon/mon_text.c 	struct mon_text_ptr *p, const struct mon_event_text *ep);
p                 114 drivers/usb/mon/mon_text.c 	struct mon_text_ptr *p, const struct mon_event_text *ep);
p                 116 drivers/usb/mon/mon_text.c 	struct mon_text_ptr *p, const struct mon_event_text *ep);
p                 118 drivers/usb/mon/mon_text.c 	struct mon_text_ptr *p, const struct mon_event_text *ep);
p                 120 drivers/usb/mon/mon_text.c     struct mon_text_ptr *p, const struct mon_event_text *ep);
p                 307 drivers/usb/mon/mon_text.c 	struct list_head *p;
p                 315 drivers/usb/mon/mon_text.c 	p = rp->e_list.next;
p                 316 drivers/usb/mon/mon_text.c 	list_del(p);
p                 319 drivers/usb/mon/mon_text.c 	return list_entry(p, struct mon_event_text, e_link);
p                 512 drivers/usb/mon/mon_text.c 	struct mon_text_ptr *p, const struct mon_event_text *ep)
p                 523 drivers/usb/mon/mon_text.c 	p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
p                 530 drivers/usb/mon/mon_text.c 	struct mon_text_ptr *p, const struct mon_event_text *ep)
p                 541 drivers/usb/mon/mon_text.c 	p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
p                 548 drivers/usb/mon/mon_text.c 	struct mon_text_ptr *p, const struct mon_event_text *ep)
p                 552 drivers/usb/mon/mon_text.c 		p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
p                 560 drivers/usb/mon/mon_text.c 		p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
p                 563 drivers/usb/mon/mon_text.c 		p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
p                 569 drivers/usb/mon/mon_text.c 	struct mon_text_ptr *p, const struct mon_event_text *ep)
p                 571 drivers/usb/mon/mon_text.c 	p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
p                 576 drivers/usb/mon/mon_text.c 	struct mon_text_ptr *p, const struct mon_event_text *ep)
p                 579 drivers/usb/mon/mon_text.c 		p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
p                 582 drivers/usb/mon/mon_text.c 		p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
p                 589 drivers/usb/mon/mon_text.c 	struct mon_text_ptr *p, const struct mon_event_text *ep)
p                 595 drivers/usb/mon/mon_text.c 	p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
p                 604 drivers/usb/mon/mon_text.c 		p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
p                 611 drivers/usb/mon/mon_text.c     struct mon_text_ptr *p, const struct mon_event_text *ep)
p                 617 drivers/usb/mon/mon_text.c 			p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
p                 623 drivers/usb/mon/mon_text.c 					p->cnt += snprintf(p->pbuf + p->cnt,
p                 624 drivers/usb/mon/mon_text.c 					    p->limit - p->cnt,
p                 627 drivers/usb/mon/mon_text.c 				p->cnt += snprintf(p->pbuf + p->cnt,
p                 628 drivers/usb/mon/mon_text.c 				    p->limit - p->cnt,
p                 631 drivers/usb/mon/mon_text.c 			p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
p                 634 drivers/usb/mon/mon_text.c 			p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt,
p                 638 drivers/usb/mon/mon_text.c 		p->cnt += snprintf(p->pbuf + p->cnt, p->limit - p->cnt, "\n");
p                 647 drivers/usb/mon/mon_text.c 	struct list_head *p;
p                 669 drivers/usb/mon/mon_text.c 		p = rp->e_list.next;
p                 670 drivers/usb/mon/mon_text.c 		ep = list_entry(p, struct mon_event_text, e_link);
p                 671 drivers/usb/mon/mon_text.c 		list_del(p);
p                  51 drivers/usb/mtu3/mtu3.h #define SSUSB_U3_CTRL(p)	(U3D_SSUSB_U3_CTRL_0P + ((p) * 0x08))
p                  52 drivers/usb/mtu3/mtu3.h #define SSUSB_U2_CTRL(p)	(U3D_SSUSB_U2_CTRL_0P + ((p) * 0x08))
p                  73 drivers/usb/phy/phy-fsl-usb.c static u32 _fsl_readl_be(const unsigned __iomem *p)
p                  75 drivers/usb/phy/phy-fsl-usb.c 	return in_be32(p);
p                  78 drivers/usb/phy/phy-fsl-usb.c static u32 _fsl_readl_le(const unsigned __iomem *p)
p                  80 drivers/usb/phy/phy-fsl-usb.c 	return in_le32(p);
p                  83 drivers/usb/phy/phy-fsl-usb.c static void _fsl_writel_be(u32 v, unsigned __iomem *p)
p                  85 drivers/usb/phy/phy-fsl-usb.c 	out_be32(p, v);
p                  88 drivers/usb/phy/phy-fsl-usb.c static void _fsl_writel_le(u32 v, unsigned __iomem *p)
p                  90 drivers/usb/phy/phy-fsl-usb.c 	out_le32(p, v);
p                  93 drivers/usb/phy/phy-fsl-usb.c static u32 (*_fsl_readl)(const unsigned __iomem *p);
p                  94 drivers/usb/phy/phy-fsl-usb.c static void (*_fsl_writel)(u32 v, unsigned __iomem *p);
p                  96 drivers/usb/phy/phy-fsl-usb.c #define fsl_readl(p)		(*_fsl_readl)((p))
p                  97 drivers/usb/phy/phy-fsl-usb.c #define fsl_writel(v, p)	(*_fsl_writel)((v), (p))
p                  25 drivers/usb/phy/phy-isp1301.c #define phy_to_isp(p)		(container_of((p), struct isp1301, phy))
p                 120 drivers/usb/phy/phy-mxs-usb.c #define to_mxs_phy(p) container_of((p), struct mxs_phy, phy)
p                 294 drivers/usb/renesas_usbhs/common.h #define usbhs_lock(p, f) spin_lock_irqsave(usbhs_priv_to_lock(p), f)
p                 295 drivers/usb/renesas_usbhs/common.h #define usbhs_unlock(p, f) spin_unlock_irqrestore(usbhs_priv_to_lock(p), f)
p                  15 drivers/usb/renesas_usbhs/fifo.c #define usbhsf_get_cfifo(p)	(&((p)->fifo_info.cfifo))
p                 101 drivers/usb/renesas_usbhs/fifo.c #define usbhsf_dma_map(p)	__usbhsf_dma_map_ctrl(p, 1)
p                 102 drivers/usb/renesas_usbhs/fifo.c #define usbhsf_dma_unmap(p)	__usbhsf_dma_map_ctrl(p, 0)
p                 206 drivers/usb/renesas_usbhs/fifo.c #define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_bempsts, e)
p                 207 drivers/usb/renesas_usbhs/fifo.c #define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_brdysts, e)
p                 784 drivers/usb/renesas_usbhs/fifo.c #define usbhsf_dma_start(p, f)	__usbhsf_dma_ctrl(p, f, DREQE)
p                 785 drivers/usb/renesas_usbhs/fifo.c #define usbhsf_dma_stop(p, f)	__usbhsf_dma_ctrl(p, f, 0)
p                  37 drivers/usb/renesas_usbhs/fifo.h #define usbhsf_get_dnfifo(p, n)	(&((p)->fifo_info.dfifo[n]))
p                 100 drivers/usb/renesas_usbhs/mod_gadget.c #define usbhsg_pipe_to_uep(p)		((p)->mod_private)
p                 136 drivers/usb/renesas_usbhs/mod_host.c #define usbhsh_pipe_to_uep(p)	((p)->mod_private)
p                 149 drivers/usb/renesas_usbhs/mod_host.c #define usbhsh_pkt_to_ureq(p)	\
p                 150 drivers/usb/renesas_usbhs/mod_host.c 	container_of((void *)p, struct usbhsh_request, pkt)
p                  16 drivers/usb/renesas_usbhs/pipe.c #define usbhsp_addr_offset(p)	((usbhs_pipe_number(p) - 1) * 2)
p                  18 drivers/usb/renesas_usbhs/pipe.c #define usbhsp_flags_set(p, f)	((p)->flags |=  USBHS_PIPE_FLAGS_##f)
p                  19 drivers/usb/renesas_usbhs/pipe.c #define usbhsp_flags_clr(p, f)	((p)->flags &= ~USBHS_PIPE_FLAGS_##f)
p                  20 drivers/usb/renesas_usbhs/pipe.c #define usbhsp_flags_has(p, f)	((p)->flags &   USBHS_PIPE_FLAGS_##f)
p                  21 drivers/usb/renesas_usbhs/pipe.c #define usbhsp_flags_init(p)	do {(p)->flags = 0; } while (0)
p                 101 drivers/usb/renesas_usbhs/pipe.h #define usbhs_pipe_to_priv(p)	((p)->priv)
p                 102 drivers/usb/renesas_usbhs/pipe.h #define usbhs_pipe_number(p)	(int)((p) - (p)->priv->pipe_info.pipe)
p                 103 drivers/usb/renesas_usbhs/pipe.h #define usbhs_pipe_is_dcp(p)	((p)->priv->pipe_info.pipe == (p))
p                 104 drivers/usb/renesas_usbhs/pipe.h #define usbhs_pipe_to_fifo(p)	((p)->fifo)
p                 105 drivers/usb/renesas_usbhs/pipe.h #define usbhs_pipe_is_busy(p)	usbhs_pipe_to_fifo(p)
p                 107 drivers/usb/renesas_usbhs/pipe.h #define usbhs_pipe_type(p)		((p)->pipe_type)
p                 108 drivers/usb/renesas_usbhs/pipe.h #define usbhs_pipe_type_is(p, t)	((p)->pipe_type == t)
p                 245 drivers/usb/serial/console.c 	struct tty_driver **p = (struct tty_driver **)co->data;
p                 247 drivers/usb/serial/console.c 	if (!*p)
p                 251 drivers/usb/serial/console.c 	return *p;
p                  52 drivers/usb/serial/cp210x.c static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
p                1309 drivers/usb/serial/cp210x.c static void cp210x_dtr_rts(struct usb_serial_port *p, int on)
p                1312 drivers/usb/serial/cp210x.c 		cp210x_tiocmset_port(p, TIOCM_DTR|TIOCM_RTS, 0);
p                1314 drivers/usb/serial/cp210x.c 		cp210x_tiocmset_port(p, 0, TIOCM_DTR|TIOCM_RTS);
p                  31 drivers/usb/serial/qcserial.c #define DEVICE_G1K(v, p) \
p                  32 drivers/usb/serial/qcserial.c 	USB_DEVICE(v, p), .driver_info = QCSERIAL_G1K
p                  33 drivers/usb/serial/qcserial.c #define DEVICE_SWI(v, p) \
p                  34 drivers/usb/serial/qcserial.c 	USB_DEVICE(v, p), .driver_info = QCSERIAL_SWI
p                  35 drivers/usb/serial/qcserial.c #define DEVICE_HWI(v, p) \
p                  36 drivers/usb/serial/qcserial.c 	USB_DEVICE(v, p), .driver_info = QCSERIAL_HWI
p                 674 drivers/usb/serial/usb-serial.c 	struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
p                 675 drivers/usb/serial/usb-serial.c 	struct usb_serial_driver *drv = p->serial->type;
p                 678 drivers/usb/serial/usb-serial.c 		return drv->carrier_raised(p);
p                 685 drivers/usb/serial/usb-serial.c 	struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
p                 686 drivers/usb/serial/usb-serial.c 	struct usb_serial_driver *drv = p->serial->type;
p                 689 drivers/usb/serial/usb-serial.c 		drv->dtr_rts(p, on);
p                 477 drivers/usb/storage/usb.c 	char *p;
p                 493 drivers/usb/storage/usb.c 	p = quirks;
p                 494 drivers/usb/storage/usb.c 	while (*p) {
p                 496 drivers/usb/storage/usb.c 		if (vid == simple_strtoul(p, &p, 16) &&
p                 497 drivers/usb/storage/usb.c 				*p == ':' &&
p                 498 drivers/usb/storage/usb.c 				pid == simple_strtoul(p+1, &p, 16) &&
p                 499 drivers/usb/storage/usb.c 				*p == ':')
p                 503 drivers/usb/storage/usb.c 		while (*p) {
p                 504 drivers/usb/storage/usb.c 			if (*p++ == ',')
p                 508 drivers/usb/storage/usb.c 	if (!*p)	/* No match */
p                 512 drivers/usb/storage/usb.c 	while (*++p && *p != ',') {
p                 513 drivers/usb/storage/usb.c 		switch (TOLOWER(*p)) {
p                 772 drivers/usb/storage/usb.c 	int p;
p                 784 drivers/usb/storage/usb.c 		p = us->unusual_dev->initFunction(us);
p                 785 drivers/usb/storage/usb.c 		if (p)
p                 786 drivers/usb/storage/usb.c 			return p;
p                  95 drivers/usb/storage/usual-tables.c 	struct ignore_entry *p;
p                 102 drivers/usb/storage/usual-tables.c 	for (p = ignore_ids; p->vid; ++p) {
p                 103 drivers/usb/storage/usual-tables.c 		if (p->vid == vid && p->pid == pid &&
p                 104 drivers/usb/storage/usual-tables.c 				p->bcdmin <= bcd && p->bcdmax >= bcd)
p                 562 drivers/usb/typec/class.c 	struct typec_partner *p = to_typec_partner(dev);
p                 564 drivers/usb/typec/class.c 	return sprintf(buf, "%s\n", typec_accessory_modes[p->accessory]);
p                 572 drivers/usb/typec/class.c 	struct typec_partner *p = to_typec_partner(dev);
p                 574 drivers/usb/typec/class.c 	return sprintf(buf, "%s\n", p->usb_pd ? "yes" : "no");
p                1242 drivers/usb/typec/class.c 	struct typec_port *p = to_typec_port(dev);
p                1244 drivers/usb/typec/class.c 	return sprintf(buf, "%d\n", (p->cap->pd_revision >> 8) & 0xff);
p                 979 drivers/usb/typec/tcpm/tcpm.c 		u32 p = le32_to_cpu(payload[i]);
p                 982 drivers/usb/typec/tcpm/tcpm.c 		svid = (p >> 16) & 0xffff;
p                 992 drivers/usb/typec/tcpm/tcpm.c 		svid = p & 0xffff;
p                1060 drivers/usb/typec/tcpm/tcpm.c 	u32 p[PD_MAX_PAYLOAD];
p                1067 drivers/usb/typec/tcpm/tcpm.c 		p[i] = le32_to_cpu(payload[i]);
p                1069 drivers/usb/typec/tcpm/tcpm.c 	cmd_type = PD_VDO_CMDT(p[0]);
p                1070 drivers/usb/typec/tcpm/tcpm.c 	cmd = PD_VDO_CMD(p[0]);
p                1073 drivers/usb/typec/tcpm/tcpm.c 		 p[0], cmd_type, cmd, cnt);
p                1078 drivers/usb/typec/tcpm/tcpm.c 				   PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
p                1081 drivers/usb/typec/tcpm/tcpm.c 				   PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
p                1106 drivers/usb/typec/tcpm/tcpm.c 				typec_altmode_attention(adev, p[1]);
p                1112 drivers/usb/typec/tcpm/tcpm.c 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_ACK);
p                1114 drivers/usb/typec/tcpm/tcpm.c 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
p                1117 drivers/usb/typec/tcpm/tcpm.c 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_BUSY);
p                1161 drivers/usb/typec/tcpm/tcpm.c 				if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
p                1202 drivers/usb/typec/tcpm/tcpm.c 		typec_altmode_vdm(adev, p[0], &p[1], cnt);
p                  58 drivers/usb/typec/tps6598x.c #define TPS_POWER_STATUS_PWROPMODE(p)	(((p) & GENMASK(3, 2)) >> 2)
p                  58 drivers/usb/typec/ucsi/displayport.c 		const struct typec_altmode *p = typec_altmode_get_partner(alt);
p                  60 drivers/usb/typec/ucsi/displayport.c 		dev_warn(&p->dev,
p                 112 drivers/usb/typec/ucsi/displayport.c 		const struct typec_altmode *p = typec_altmode_get_partner(alt);
p                 114 drivers/usb/typec/ucsi/displayport.c 		dev_warn(&p->dev,
p                 195 drivers/usb/typec/ucsi/displayport.c 		const struct typec_altmode *p = typec_altmode_get_partner(alt);
p                 197 drivers/usb/typec/ucsi/displayport.c 		dev_warn(&p->dev,
p                 555 drivers/usb/typec/ucsi/ucsi_ccg.c 	u8 *p;
p                 558 drivers/usb/typec/ucsi/ucsi_ccg.c 	p = (u8 *)&cmd.data;
p                 560 drivers/usb/typec/ucsi/ucsi_ccg.c 	p[0] = RESET_SIG;
p                 561 drivers/usb/typec/ucsi/ucsi_ccg.c 	p[1] = CMD_RESET_DEV;
p                 650 drivers/usb/typec/ucsi/ucsi_ccg.c 	u8 *p;
p                 669 drivers/usb/typec/ucsi/ucsi_ccg.c 	p = (u8 *)&cmd.data;
p                 671 drivers/usb/typec/ucsi/ucsi_ccg.c 	p[0] = FLASH_SIG;
p                 672 drivers/usb/typec/ucsi/ucsi_ccg.c 	p[1] = fcmd;
p                 673 drivers/usb/typec/ucsi/ucsi_ccg.c 	put_unaligned_le16(row, &p[2]);
p                 831 drivers/usb/typec/ucsi/ucsi_ccg.c 	const char *p, *s;
p                 920 drivers/usb/typec/ucsi/ucsi_ccg.c 	p = strnchr(fw->data, fw->size, ':');
p                 921 drivers/usb/typec/ucsi/ucsi_ccg.c 	while (p < eof) {
p                 922 drivers/usb/typec/ucsi/ucsi_ccg.c 		s = strnchr(p + 1, eof - p - 1, ':');
p                 927 drivers/usb/typec/ucsi/ucsi_ccg.c 		line_sz = s - p;
p                 935 drivers/usb/typec/ucsi/ucsi_ccg.c 		if (hex2bin(wr_buf, p + 3, CCG4_ROW_SIZE + 4)) {
p                 954 drivers/usb/typec/ucsi/ucsi_ccg.c 		p = s;
p                  58 drivers/usb/usbip/usbip_common.c static void usbip_dump_pipe(unsigned int p)
p                  60 drivers/usb/usbip/usbip_common.c 	unsigned char type = usb_pipetype(p);
p                  61 drivers/usb/usbip/usbip_common.c 	unsigned char ep   = usb_pipeendpoint(p);
p                  62 drivers/usb/usbip/usbip_common.c 	unsigned char dev  = usb_pipedevice(p);
p                  63 drivers/usb/usbip/usbip_common.c 	unsigned char dir  = usb_pipein(p);
p                1638 drivers/vfio/pci/vfio_pci.c 	char *p, *id;
p                1646 drivers/vfio/pci/vfio_pci.c 	p = ids;
p                1647 drivers/vfio/pci/vfio_pci.c 	while ((id = strsep(&p, ","))) {
p                 378 drivers/vfio/pci/vfio_pci_config.c static inline void p_setb(struct perm_bits *p, int off, u8 virt, u8 write)
p                 380 drivers/vfio/pci/vfio_pci_config.c 	p->virt[off] = virt;
p                 381 drivers/vfio/pci/vfio_pci_config.c 	p->write[off] = write;
p                 385 drivers/vfio/pci/vfio_pci_config.c static inline void p_setw(struct perm_bits *p, int off, u16 virt, u16 write)
p                 387 drivers/vfio/pci/vfio_pci_config.c 	*(__le16 *)(&p->virt[off]) = cpu_to_le16(virt);
p                 388 drivers/vfio/pci/vfio_pci_config.c 	*(__le16 *)(&p->write[off]) = cpu_to_le16(write);
p                 392 drivers/vfio/pci/vfio_pci_config.c static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
p                 394 drivers/vfio/pci/vfio_pci_config.c 	*(__le32 *)(&p->virt[off]) = cpu_to_le32(virt);
p                 395 drivers/vfio/pci/vfio_pci_config.c 	*(__le32 *)(&p->write[off]) = cpu_to_le32(write);
p                1211 drivers/vfio/vfio_iommu_type1.c 				phys_addr_t p;
p                1222 drivers/vfio/vfio_iommu_type1.c 				p = phys + size;
p                1225 drivers/vfio/vfio_iommu_type1.c 				       p == iommu_iova_to_phys(d->domain, i)) {
p                1227 drivers/vfio/vfio_iommu_type1.c 					p += PAGE_SIZE;
p                1864 drivers/vfio/vfio_iommu_type1.c 	struct rb_node *n, *p;
p                1873 drivers/vfio/vfio_iommu_type1.c 		p = rb_first(&dma->pfn_list);
p                1874 drivers/vfio/vfio_iommu_type1.c 		for (; p; p = rb_next(p)) {
p                1875 drivers/vfio/vfio_iommu_type1.c 			struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn,
p                 675 drivers/vhost/scsi.c 	struct scatterlist *p = sg;
p                 681 drivers/vhost/scsi.c 			while (p < sg) {
p                 682 drivers/vhost/scsi.c 				struct page *page = sg_page(p++);
p                1912 drivers/vhost/scsi.c 			       struct se_session *se_sess, void *p)
p                1752 drivers/vhost/vhost.c 	u64 p;
p                1772 drivers/vhost/vhost.c 		if (copy_from_user(&p, argp, sizeof p)) {
p                1776 drivers/vhost/vhost.c 		if ((u64)(unsigned long)p != p) {
p                1782 drivers/vhost/vhost.c 			void __user *base = (void __user *)(unsigned long)p;
p                  35 drivers/vhost/vringh.c 						  u16 *val, const __virtio16 *p),
p                 406 drivers/vhost/vringh.c 						  __virtio16 *p, u16 val),
p                 454 drivers/vhost/vringh.c 						     const __virtio16 *p))
p                 500 drivers/vhost/vringh.c 							u16 *val, const __virtio16 *p),
p                 502 drivers/vhost/vringh.c 							__virtio16 *p, u16 val))
p                 540 drivers/vhost/vringh.c 							 __virtio16 *p, u16 val))
p                 553 drivers/vhost/vringh.c static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
p                 556 drivers/vhost/vringh.c 	int rc = get_user(v, (__force __virtio16 __user *)p);
p                 561 drivers/vhost/vringh.c static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
p                 564 drivers/vhost/vringh.c 	return put_user(v, (__force __virtio16 __user *)p);
p                 823 drivers/vhost/vringh.c 			      u16 *val, const __virtio16 *p)
p                 825 drivers/vhost/vringh.c 	*val = vringh16_to_cpu(vrh, READ_ONCE(*p));
p                 829 drivers/vhost/vringh.c static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
p                 831 drivers/vhost/vringh.c 	WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
p                  20 drivers/video/backlight/rave-sp-backlight.c 	const struct backlight_properties *p = &bd->props;
p                  22 drivers/video/backlight/rave-sp-backlight.c 		(p->power == FB_BLANK_UNBLANK) ? p->brightness : 0;
p                 178 drivers/video/backlight/tdo24m.c 	const uint32_t *p = array;
p                 182 drivers/video/backlight/tdo24m.c 	for (; *p != CMD_NULL; p++) {
p                 183 drivers/video/backlight/tdo24m.c 		if (!lcd->color_invert && *p == CMD0(0x21))
p                 186 drivers/video/backlight/tdo24m.c 		nparams = (*p >> 30) & 0x3;
p                 188 drivers/video/backlight/tdo24m.c 		data = *p << (7 - nparams);
p                 203 drivers/video/console/mdacon.c 	u16 *p, p_save;
p                 208 drivers/video/console/mdacon.c 	p = mda_vram_base;
p                 211 drivers/video/console/mdacon.c 	p_save = scr_readw(p);
p                 214 drivers/video/console/mdacon.c 	scr_writew(0xAA55, p);
p                 215 drivers/video/console/mdacon.c 	if (scr_readw(p) == 0xAA55)
p                 218 drivers/video/console/mdacon.c 	scr_writew(0x55AA, p);
p                 219 drivers/video/console/mdacon.c 	if (scr_readw(p) == 0x55AA)
p                 222 drivers/video/console/mdacon.c 	scr_writew(p_save, p);
p                 231 drivers/video/console/mdacon.c 	scr_writew(0x0000, p);
p                 236 drivers/video/console/mdacon.c 	scr_writew(0x0000, p);
p                 240 drivers/video/console/mdacon.c 	scr_writew(p_save, p);
p                 415 drivers/video/console/mdacon.c static void mdacon_invert_region(struct vc_data *c, u16 *p, int count)
p                 418 drivers/video/console/mdacon.c 		scr_writew(scr_readw(p) ^ 0x0800, p);
p                 419 drivers/video/console/mdacon.c 		p++;
p                 375 drivers/video/console/newport_con.c 	unsigned char *p;
p                 377 drivers/video/console/newport_con.c 	p = &font_data[vc->vc_num][(charattr & 0xff) << 4];
p                 398 drivers/video/console/newport_con.c 	RENDER(npregs, p);
p                 406 drivers/video/console/newport_con.c 	unsigned char *p;
p                 428 drivers/video/console/newport_con.c 		p = &font_data[vc->vc_num][(scr_readw(s++) & 0xff) << 4];
p                 438 drivers/video/console/newport_con.c 		RENDER(npregs, p);
p                 508 drivers/video/console/newport_con.c 	unsigned char *new_data, *data = op->data, *p;
p                 524 drivers/video/console/newport_con.c 	p = new_data;
p                 526 drivers/video/console/newport_con.c 		memcpy(p, data, h);
p                 528 drivers/video/console/newport_con.c 		p += h;
p                  92 drivers/video/console/sticon.c     if ((p->cursor_x == xpos) && (p->cursor_y == ypos)) {
p                 116 drivers/video/console/sticon.c     if ((p->cursor_y == ypos) && (xpos <= p->cursor_x) &&
p                 117 drivers/video/console/sticon.c 	(p->cursor_x < (xpos + count))) {
p                 242 drivers/video/console/sticon.c     unsigned long p;
p                 249 drivers/video/console/sticon.c     p = softback_curr + offset;
p                 250 drivers/video/console/sticon.c     if (p >= softback_end)
p                 251 drivers/video/console/sticon.c     	p += softback_buf - softback_end;
p                 252 drivers/video/console/sticon.c     return (u16 *)p;
p                 303 drivers/video/console/sticon.c static void sticon_invert_region(struct vc_data *conp, u16 *p, int count)
p                 308 drivers/video/console/sticon.c 	u16 a = scr_readw(p);
p                 315 drivers/video/console/sticon.c 	scr_writew(a, p++);
p                 650 drivers/video/console/sticore.c 	unsigned char *n, *p, *q;
p                 656 drivers/video/console/sticore.c 	p = n + 3;
p                 659 drivers/video/console/sticore.c 		*p = *q++;
p                 660 drivers/video/console/sticore.c 		p+=4;
p                  77 drivers/video/console/vgacon.c static void vgacon_invert_region(struct vc_data *c, u16 * p, int count);
p                 245 drivers/video/console/vgacon.c 	void *p;
p                 251 drivers/video/console/vgacon.c 	p = (void *) (c->vc_origin + t * c->vc_size_row);
p                 256 drivers/video/console/vgacon.c 			    p, c->vc_size_row);
p                 259 drivers/video/console/vgacon.c 		p += c->vc_size_row;
p                 394 drivers/video/console/vgacon.c 	volatile u16 *p;
p                 538 drivers/video/console/vgacon.c 	p = (volatile u16 *) vga_vram_base;
p                 539 drivers/video/console/vgacon.c 	saved1 = scr_readw(p);
p                 540 drivers/video/console/vgacon.c 	saved2 = scr_readw(p + 1);
p                 541 drivers/video/console/vgacon.c 	scr_writew(0xAA55, p);
p                 542 drivers/video/console/vgacon.c 	scr_writew(0x55AA, p + 1);
p                 543 drivers/video/console/vgacon.c 	if (scr_readw(p) != 0xAA55 || scr_readw(p + 1) != 0x55AA) {
p                 544 drivers/video/console/vgacon.c 		scr_writew(saved1, p);
p                 545 drivers/video/console/vgacon.c 		scr_writew(saved2, p + 1);
p                 548 drivers/video/console/vgacon.c 	scr_writew(0x55AA, p);
p                 549 drivers/video/console/vgacon.c 	scr_writew(0xAA55, p + 1);
p                 550 drivers/video/console/vgacon.c 	if (scr_readw(p) != 0x55AA || scr_readw(p + 1) != 0xAA55) {
p                 551 drivers/video/console/vgacon.c 		scr_writew(saved1, p);
p                 552 drivers/video/console/vgacon.c 		scr_writew(saved2, p + 1);
p                 555 drivers/video/console/vgacon.c 	scr_writew(saved1, p);
p                 556 drivers/video/console/vgacon.c 	scr_writew(saved2, p + 1);
p                 582 drivers/video/console/vgacon.c 	struct uni_pagedir *p;
p                 603 drivers/video/console/vgacon.c 	p = *c->vc_uni_pagedir_loc;
p                 609 drivers/video/console/vgacon.c 	if (!vgacon_uni_pagedir && p)
p                 664 drivers/video/console/vgacon.c static void vgacon_invert_region(struct vc_data *c, u16 * p, int count)
p                 669 drivers/video/console/vgacon.c 		u16 a = scr_readw(p);
p                 675 drivers/video/console/vgacon.c 		scr_writew(a, p++);
p                 284 drivers/video/fbdev/acornfb.c 	pal.p = 0;
p                 294 drivers/video/fbdev/acornfb.c 		pal.p = 0;
p                 300 drivers/video/fbdev/acornfb.c 			vidc_writel(pal.p);
p                 305 drivers/video/fbdev/acornfb.c 		vidc_writel(pal.p);
p                 764 drivers/video/fbdev/acornfb.c 	char *p = opt;
p                 768 drivers/video/fbdev/acornfb.c 	fb_info.monspecs.hfmin = simple_strtoul(p, &p, 0);
p                 769 drivers/video/fbdev/acornfb.c 	if (*p == '-')
p                 770 drivers/video/fbdev/acornfb.c 		fb_info.monspecs.hfmax = simple_strtoul(p + 1, &p, 0);
p                 774 drivers/video/fbdev/acornfb.c 	if (*p != ':')
p                 777 drivers/video/fbdev/acornfb.c 	fb_info.monspecs.vfmin = simple_strtoul(p + 1, &p, 0);
p                 778 drivers/video/fbdev/acornfb.c 	if (*p == '-')
p                 779 drivers/video/fbdev/acornfb.c 		fb_info.monspecs.vfmax = simple_strtoul(p + 1, &p, 0);
p                 783 drivers/video/fbdev/acornfb.c 	if (*p != ':')
p                 786 drivers/video/fbdev/acornfb.c 	fb_info.monspecs.dpms = simple_strtoul(p + 1, &p, 0);
p                 788 drivers/video/fbdev/acornfb.c 	if (*p != ':')
p                 791 drivers/video/fbdev/acornfb.c 	fb_info.var.width = simple_strtoul(p + 1, &p, 0);
p                 793 drivers/video/fbdev/acornfb.c 	if (*p != ':')
p                 796 drivers/video/fbdev/acornfb.c 	fb_info.var.height = simple_strtoul(p + 1, NULL, 0);
p                  39 drivers/video/fbdev/acornfb.h 	u_int	p;
p                2105 drivers/video/fbdev/amifb.c 	u_long p;
p                2115 drivers/video/fbdev/amifb.c 	p = ZTWO_PADDR(dummysprite);
p                2118 drivers/video/fbdev/amifb.c 		(cop++)->l = CMOVE(highw(p), sprpt[i]);
p                2119 drivers/video/fbdev/amifb.c 		(cop++)->l = CMOVE2(loww(p), sprpt[i]);
p                2150 drivers/video/fbdev/amifb.c 	u_long p;
p                2161 drivers/video/fbdev/amifb.c 	p = par->bplpt0;
p                2165 drivers/video/fbdev/amifb.c 				for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
p                2166 drivers/video/fbdev/amifb.c 					(copl++)->l = CMOVE(highw(p), bplpt[i]);
p                2167 drivers/video/fbdev/amifb.c 					(copl++)->l = CMOVE2(loww(p), bplpt[i]);
p                2178 drivers/video/fbdev/amifb.c 				p = par->bplpt0wrap;
p                2181 drivers/video/fbdev/amifb.c 			p = par->bplpt0wrap;
p                2183 drivers/video/fbdev/amifb.c 	for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
p                2184 drivers/video/fbdev/amifb.c 		(copl++)->l = CMOVE(highw(p), bplpt[i]);
p                2185 drivers/video/fbdev/amifb.c 		(copl++)->l = CMOVE2(loww(p), bplpt[i]);
p                2191 drivers/video/fbdev/amifb.c 		p = par->bplpt0;
p                2193 drivers/video/fbdev/amifb.c 			p -= par->next_line;
p                2195 drivers/video/fbdev/amifb.c 			p += par->next_line;
p                2199 drivers/video/fbdev/amifb.c 					for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
p                2200 drivers/video/fbdev/amifb.c 						(cops++)->l = CMOVE(highw(p), bplpt[i]);
p                2201 drivers/video/fbdev/amifb.c 						(cops++)->l = CMOVE2(loww(p), bplpt[i]);
p                2212 drivers/video/fbdev/amifb.c 					p = par->bplpt0wrap;
p                2215 drivers/video/fbdev/amifb.c 						p -= par->next_line;
p                2217 drivers/video/fbdev/amifb.c 						p += par->next_line;
p                2220 drivers/video/fbdev/amifb.c 				p = par->bplpt0wrap - par->next_line;
p                2222 drivers/video/fbdev/amifb.c 		for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
p                2223 drivers/video/fbdev/amifb.c 			(cops++)->l = CMOVE(highw(p), bplpt[i]);
p                2224 drivers/video/fbdev/amifb.c 			(cops++)->l = CMOVE2(loww(p), bplpt[i]);
p                2239 drivers/video/fbdev/amifb.c 	u_long p;
p                2278 drivers/video/fbdev/amifb.c 		p = ZTWO_PADDR(copdisplay.list[currentcop][0]);
p                2279 drivers/video/fbdev/amifb.c 		(copl++)->l = CMOVE(highw(p), cop2lc);
p                2280 drivers/video/fbdev/amifb.c 		(copl++)->l = CMOVE2(loww(p), cop2lc);
p                2281 drivers/video/fbdev/amifb.c 		p = ZTWO_PADDR(copdisplay.list[currentcop][1]);
p                2282 drivers/video/fbdev/amifb.c 		(cops++)->l = CMOVE(highw(p), cop2lc);
p                2283 drivers/video/fbdev/amifb.c 		(cops++)->l = CMOVE2(loww(p), cop2lc);
p                2309 drivers/video/fbdev/amifb.c 	char *p;
p                2317 drivers/video/fbdev/amifb.c 	if (!(p = strsep(&spec, ";")) || !*p)
p                2319 drivers/video/fbdev/amifb.c 	vmin = simple_strtoul(p, NULL, 10);
p                2322 drivers/video/fbdev/amifb.c 	if (!(p = strsep(&spec, ";")) || !*p)
p                2324 drivers/video/fbdev/amifb.c 	vmax = simple_strtoul(p, NULL, 10);
p                2327 drivers/video/fbdev/amifb.c 	if (!(p = strsep(&spec, ";")) || !*p)
p                2329 drivers/video/fbdev/amifb.c 	hmin = 1000 * simple_strtoul(p, NULL, 10);
p                2332 drivers/video/fbdev/amifb.c 	if (!(p = strsep(&spec, "")) || !*p)
p                2334 drivers/video/fbdev/amifb.c 	hmax = 1000 * simple_strtoul(p, NULL, 10);
p                 448 drivers/video/fbdev/arcfb.c 	unsigned long p;
p                 454 drivers/video/fbdev/arcfb.c 	p = *ppos;
p                 459 drivers/video/fbdev/arcfb.c 	if (p > fbmemlength)
p                 463 drivers/video/fbdev/arcfb.c 	if ((count + p) > fbmemlength) {
p                 464 drivers/video/fbdev/arcfb.c 		count = fbmemlength - p;
p                 472 drivers/video/fbdev/arcfb.c 		count -= copy_from_user(base_addr + p, buf, count);
p                 478 drivers/video/fbdev/arcfb.c 	bitppos = p*8;
p                  48 drivers/video/fbdev/asiliantfb.c #define mmio_base (p->screen_base + 0x400000)
p                  54 drivers/video/fbdev/asiliantfb.c static void mm_write_xr(struct fb_info *p, u8 reg, u8 data)
p                  58 drivers/video/fbdev/asiliantfb.c #define write_xr(num, val)	mm_write_xr(p, num, val)
p                  60 drivers/video/fbdev/asiliantfb.c static void mm_write_fr(struct fb_info *p, u8 reg, u8 data)
p                  64 drivers/video/fbdev/asiliantfb.c #define write_fr(num, val)	mm_write_fr(p, num, val)
p                  66 drivers/video/fbdev/asiliantfb.c static void mm_write_cr(struct fb_info *p, u8 reg, u8 data)
p                  70 drivers/video/fbdev/asiliantfb.c #define write_cr(num, val)	mm_write_cr(p, num, val)
p                  72 drivers/video/fbdev/asiliantfb.c static void mm_write_gr(struct fb_info *p, u8 reg, u8 data)
p                  76 drivers/video/fbdev/asiliantfb.c #define write_gr(num, val)	mm_write_gr(p, num, val)
p                  78 drivers/video/fbdev/asiliantfb.c static void mm_write_sr(struct fb_info *p, u8 reg, u8 data)
p                  82 drivers/video/fbdev/asiliantfb.c #define write_sr(num, val)	mm_write_sr(p, num, val)
p                  84 drivers/video/fbdev/asiliantfb.c static void mm_write_ar(struct fb_info *p, u8 reg, u8 data)
p                  89 drivers/video/fbdev/asiliantfb.c #define write_ar(num, val)	mm_write_ar(p, num, val)
p                 174 drivers/video/fbdev/asiliantfb.c static void asiliant_set_timing(struct fb_info *p)
p                 176 drivers/video/fbdev/asiliantfb.c 	unsigned hd = p->var.xres / 8;
p                 177 drivers/video/fbdev/asiliantfb.c 	unsigned hs = (p->var.xres + p->var.right_margin) / 8;
p                 178 drivers/video/fbdev/asiliantfb.c        	unsigned he = (p->var.xres + p->var.right_margin + p->var.hsync_len) / 8;
p                 179 drivers/video/fbdev/asiliantfb.c 	unsigned ht = (p->var.left_margin + p->var.xres + p->var.right_margin + p->var.hsync_len) / 8;
p                 180 drivers/video/fbdev/asiliantfb.c 	unsigned vd = p->var.yres;
p                 181 drivers/video/fbdev/asiliantfb.c 	unsigned vs = p->var.yres + p->var.lower_margin;
p                 182 drivers/video/fbdev/asiliantfb.c 	unsigned ve = p->var.yres + p->var.lower_margin + p->var.vsync_len;
p                 183 drivers/video/fbdev/asiliantfb.c 	unsigned vt = p->var.upper_margin + p->var.yres + p->var.lower_margin + p->var.vsync_len;
p                 184 drivers/video/fbdev/asiliantfb.c 	unsigned wd = (p->var.xres_virtual * ((p->var.bits_per_pixel+7)/8)) / 8;
p                 186 drivers/video/fbdev/asiliantfb.c 	if ((p->var.xres == 640) && (p->var.yres == 480) && (p->var.pixclock == 39722)) {
p                 218 drivers/video/fbdev/asiliantfb.c 	if (p->var.xres == 640) {
p                 226 drivers/video/fbdev/asiliantfb.c 			     struct fb_info *p)
p                 269 drivers/video/fbdev/asiliantfb.c static int asiliantfb_set_par(struct fb_info *p)
p                 276 drivers/video/fbdev/asiliantfb.c 	asiliant_calc_dclk2(&p->var.pixclock, &dclk2_m, &dclk2_n, &dclk2_div);
p                 279 drivers/video/fbdev/asiliantfb.c 	if (p->var.bits_per_pixel == 24) {
p                 283 drivers/video/fbdev/asiliantfb.c 	} else if (p->var.bits_per_pixel == 16) {
p                 284 drivers/video/fbdev/asiliantfb.c 		if (p->var.red.offset == 11)
p                 290 drivers/video/fbdev/asiliantfb.c 	} else if (p->var.bits_per_pixel == 8) {
p                 296 drivers/video/fbdev/asiliantfb.c 	p->fix.line_length = p->var.xres * (p->var.bits_per_pixel >> 3);
p                 297 drivers/video/fbdev/asiliantfb.c 	p->fix.visual = (p->var.bits_per_pixel == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
p                 302 drivers/video/fbdev/asiliantfb.c 	asiliant_set_timing(p);
p                 307 drivers/video/fbdev/asiliantfb.c 			     u_int transp, struct fb_info *p)
p                 323 drivers/video/fbdev/asiliantfb.c 		switch(p->var.red.offset) {
p                 325 drivers/video/fbdev/asiliantfb.c 			((u32 *)(p->pseudo_palette))[regno] =
p                 331 drivers/video/fbdev/asiliantfb.c 			((u32 *)(p->pseudo_palette))[regno] =
p                 337 drivers/video/fbdev/asiliantfb.c 			((u32 *)(p->pseudo_palette))[regno] =
p                 454 drivers/video/fbdev/asiliantfb.c static void chips_hw_init(struct fb_info *p)
p                 507 drivers/video/fbdev/asiliantfb.c static int init_asiliant(struct fb_info *p, unsigned long addr)
p                 511 drivers/video/fbdev/asiliantfb.c 	p->fix			= asiliantfb_fix;
p                 512 drivers/video/fbdev/asiliantfb.c 	p->fix.smem_start	= addr;
p                 513 drivers/video/fbdev/asiliantfb.c 	p->var			= asiliantfb_var;
p                 514 drivers/video/fbdev/asiliantfb.c 	p->fbops		= &asiliantfb_ops;
p                 515 drivers/video/fbdev/asiliantfb.c 	p->flags		= FBINFO_DEFAULT;
p                 517 drivers/video/fbdev/asiliantfb.c 	err = fb_alloc_cmap(&p->cmap, 256, 0);
p                 523 drivers/video/fbdev/asiliantfb.c 	err = register_framebuffer(p);
p                 526 drivers/video/fbdev/asiliantfb.c 		fb_dealloc_cmap(&p->cmap);
p                 530 drivers/video/fbdev/asiliantfb.c 	fb_info(p, "Asiliant 69000 frame buffer (%dK RAM detected)\n",
p                 531 drivers/video/fbdev/asiliantfb.c 		p->fix.smem_len / 1024);
p                 534 drivers/video/fbdev/asiliantfb.c 	chips_hw_init(p);
p                 542 drivers/video/fbdev/asiliantfb.c 	struct fb_info *p;
p                 554 drivers/video/fbdev/asiliantfb.c 	p = framebuffer_alloc(sizeof(u32) * 16, &dp->dev);
p                 555 drivers/video/fbdev/asiliantfb.c 	if (!p)	{
p                 559 drivers/video/fbdev/asiliantfb.c 	p->pseudo_palette = p->par;
p                 560 drivers/video/fbdev/asiliantfb.c 	p->par = NULL;
p                 562 drivers/video/fbdev/asiliantfb.c 	p->screen_base = ioremap(addr, 0x800000);
p                 563 drivers/video/fbdev/asiliantfb.c 	if (p->screen_base == NULL) {
p                 565 drivers/video/fbdev/asiliantfb.c 		framebuffer_release(p);
p                 570 drivers/video/fbdev/asiliantfb.c 	writeb(3, p->screen_base + 0x400784);
p                 572 drivers/video/fbdev/asiliantfb.c 	err = init_asiliant(p, addr);
p                 574 drivers/video/fbdev/asiliantfb.c 		iounmap(p->screen_base);
p                 576 drivers/video/fbdev/asiliantfb.c 		framebuffer_release(p);
p                 580 drivers/video/fbdev/asiliantfb.c 	pci_set_drvdata(dp, p);
p                 586 drivers/video/fbdev/asiliantfb.c 	struct fb_info *p = pci_get_drvdata(dp);
p                 588 drivers/video/fbdev/asiliantfb.c 	unregister_framebuffer(p);
p                 589 drivers/video/fbdev/asiliantfb.c 	fb_dealloc_cmap(&p->cmap);
p                 590 drivers/video/fbdev/asiliantfb.c 	iounmap(p->screen_base);
p                 592 drivers/video/fbdev/asiliantfb.c 	framebuffer_release(p);
p                2792 drivers/video/fbdev/atafb.c 	char *p;
p                2804 drivers/video/fbdev/atafb.c 	p = strsep(&spec, ";");
p                2805 drivers/video/fbdev/atafb.c 	if (!p || !*p)
p                2807 drivers/video/fbdev/atafb.c 	xres_virtual = xres = simple_strtoul(p, NULL, 10);
p                2811 drivers/video/fbdev/atafb.c 	p = strsep(&spec, ";");
p                2812 drivers/video/fbdev/atafb.c 	if (!p || !*p)
p                2814 drivers/video/fbdev/atafb.c 	yres = simple_strtoul(p, NULL, 10);
p                2818 drivers/video/fbdev/atafb.c 	p = strsep(&spec, ";");
p                2819 drivers/video/fbdev/atafb.c 	if (!p || !*p)
p                2821 drivers/video/fbdev/atafb.c 	depth = simple_strtoul(p, NULL, 10);
p                2826 drivers/video/fbdev/atafb.c 	p = strsep(&spec, ";");
p                2827 drivers/video/fbdev/atafb.c 	if (!p || !*p)
p                2829 drivers/video/fbdev/atafb.c 	if (*p == 'i')
p                2831 drivers/video/fbdev/atafb.c 	else if (*p == 'p')
p                2833 drivers/video/fbdev/atafb.c 	else if (*p == 'n')
p                2835 drivers/video/fbdev/atafb.c 	else if (*p == 't')
p                2840 drivers/video/fbdev/atafb.c 	p = strsep(&spec, ";");
p                2841 drivers/video/fbdev/atafb.c 	if (!p || !*p)
p                2843 drivers/video/fbdev/atafb.c 	addr = simple_strtoul(p, NULL, 0);
p                2845 drivers/video/fbdev/atafb.c 	p = strsep(&spec, ";");
p                2846 drivers/video/fbdev/atafb.c 	if (!p || !*p)
p                2849 drivers/video/fbdev/atafb.c 		len = simple_strtoul(p, NULL, 0);
p                2851 drivers/video/fbdev/atafb.c 	p = strsep(&spec, ";");
p                2852 drivers/video/fbdev/atafb.c 	if (p && *p)
p                2853 drivers/video/fbdev/atafb.c 		external_vgaiobase = simple_strtoul(p, NULL, 0);
p                2855 drivers/video/fbdev/atafb.c 	p = strsep(&spec, ";");
p                2856 drivers/video/fbdev/atafb.c 	if (p && *p) {
p                2857 drivers/video/fbdev/atafb.c 		external_bitspercol = simple_strtoul(p, NULL, 0);
p                2864 drivers/video/fbdev/atafb.c 	p = strsep(&spec, ";");
p                2865 drivers/video/fbdev/atafb.c 	if (p && *p) {
p                2866 drivers/video/fbdev/atafb.c 		if (!strcmp(p, "vga"))
p                2868 drivers/video/fbdev/atafb.c 		if (!strcmp(p, "mv300"))
p                2872 drivers/video/fbdev/atafb.c 	p = strsep(&spec, ";");
p                2873 drivers/video/fbdev/atafb.c 	if (p && *p) {
p                2874 drivers/video/fbdev/atafb.c 		xres_virtual = simple_strtoul(p, NULL, 10);
p                2920 drivers/video/fbdev/atafb.c 	char *p;
p                2922 drivers/video/fbdev/atafb.c 	if (!(p = strsep(&spec, ";")) || !*p)
p                2924 drivers/video/fbdev/atafb.c 	xres = simple_strtoul(p, NULL, 10);
p                2925 drivers/video/fbdev/atafb.c 	if (!(p = strsep(&spec, ";")) || !*p)
p                2928 drivers/video/fbdev/atafb.c 	tt_yres = st_yres = simple_strtoul(p, NULL, 10);
p                2929 drivers/video/fbdev/atafb.c 	if ((p = strsep(&spec, ";")) && *p)
p                2930 drivers/video/fbdev/atafb.c 		sttt_xres_virtual = simple_strtoul(p, NULL, 10);
p                2931 drivers/video/fbdev/atafb.c 	if ((p = strsep(&spec, ";")) && *p)
p                2932 drivers/video/fbdev/atafb.c 		sttt_yres_virtual = simple_strtoul(p, NULL, 0);
p                2933 drivers/video/fbdev/atafb.c 	if ((p = strsep(&spec, ";")) && *p)
p                2934 drivers/video/fbdev/atafb.c 		ovsc_offset = simple_strtoul(p, NULL, 0);
p                2943 drivers/video/fbdev/atafb.c 	char *p;
p                2950 drivers/video/fbdev/atafb.c 	if (!(p = strsep(&spec, ";")) || !*p)
p                2952 drivers/video/fbdev/atafb.c 	vmin = simple_strtoul(p, NULL, 10);
p                2955 drivers/video/fbdev/atafb.c 	if (!(p = strsep(&spec, ";")) || !*p)
p                2957 drivers/video/fbdev/atafb.c 	vmax = simple_strtoul(p, NULL, 10);
p                2960 drivers/video/fbdev/atafb.c 	if (!(p = strsep(&spec, ";")) || !*p)
p                2962 drivers/video/fbdev/atafb.c 	hmin = 1000 * simple_strtoul(p, NULL, 10);
p                2965 drivers/video/fbdev/atafb.c 	if (!(p = strsep(&spec, "")) || !*p)
p                2967 drivers/video/fbdev/atafb.c 	hmax = 1000 * simple_strtoul(p, NULL, 10);
p                2982 drivers/video/fbdev/atafb.c 	char *p;
p                2985 drivers/video/fbdev/atafb.c 	p = strsep(&spec, ";");
p                2986 drivers/video/fbdev/atafb.c 	if (!p || !*p)
p                2988 drivers/video/fbdev/atafb.c 	xres = simple_strtoul(p, NULL, 10);
p                2989 drivers/video/fbdev/atafb.c 	p = strsep(&spec, ";");
p                2990 drivers/video/fbdev/atafb.c 	if (!p || !*p)
p                2992 drivers/video/fbdev/atafb.c 	yres = simple_strtoul(p, NULL, 10);
p                2993 drivers/video/fbdev/atafb.c 	p = strsep(&spec, "");
p                2994 drivers/video/fbdev/atafb.c 	if (!p || !*p)
p                2996 drivers/video/fbdev/atafb.c 	depth = simple_strtoul(p, NULL, 10);
p                1438 drivers/video/fbdev/aty/aty128fb.c 	s32 x, b, p, ron, roff;
p                1463 drivers/video/fbdev/aty/aty128fb.c 	p = b + 1;
p                1465 drivers/video/fbdev/aty/aty128fb.c 	ron <<= (11 - p);
p                1467 drivers/video/fbdev/aty/aty128fb.c 	n <<= (11 - p);
p                1477 drivers/video/fbdev/aty/aty128fb.c 	    p, m->Rloop, x, ron, roff);
p                1479 drivers/video/fbdev/aty/aty128fb.c 	dsp->dda_config = p << 16 | m->Rloop << 20 | x;
p                2737 drivers/video/fbdev/aty/atyfb_base.c 	char *p;
p                2742 drivers/video/fbdev/aty/atyfb_base.c 	if (!(p = strsep(&video_str, ";")) || !*p)
p                2744 drivers/video/fbdev/aty/atyfb_base.c 	vmembase = simple_strtoul(p, NULL, 0);
p                2745 drivers/video/fbdev/aty/atyfb_base.c 	if (!(p = strsep(&video_str, ";")) || !*p)
p                2747 drivers/video/fbdev/aty/atyfb_base.c 	size = simple_strtoul(p, NULL, 0);
p                2748 drivers/video/fbdev/aty/atyfb_base.c 	if (!(p = strsep(&video_str, ";")) || !*p)
p                2750 drivers/video/fbdev/aty/atyfb_base.c 	guiregbase = simple_strtoul(p, NULL, 0);
p                 502 drivers/video/fbdev/aty/radeonfb.h extern void radeonfb_imageblit(struct fb_info *p, const struct fb_image *image);
p                1013 drivers/video/fbdev/broadsheetfb.c 	unsigned long p = *ppos;
p                1023 drivers/video/fbdev/broadsheetfb.c 	if (p > total_size)
p                1031 drivers/video/fbdev/broadsheetfb.c 	if (count + p > total_size) {
p                1035 drivers/video/fbdev/broadsheetfb.c 		count = total_size - p;
p                1038 drivers/video/fbdev/broadsheetfb.c 	dst = (void *)(info->screen_base + p);
p                 234 drivers/video/fbdev/bw2.c 	u8 *p;
p                 241 drivers/video/fbdev/bw2.c 			p = bw2regs_1600;
p                 246 drivers/video/fbdev/bw2.c 			p = bw2regs_ecl;
p                 250 drivers/video/fbdev/bw2.c 		p = bw2regs_analog;
p                 256 drivers/video/fbdev/bw2.c 			p = bw2regs_76hz;
p                 258 drivers/video/fbdev/bw2.c 			p = bw2regs_66hz;
p                 269 drivers/video/fbdev/bw2.c 	for ( ; *p; p += 2) {
p                 270 drivers/video/fbdev/bw2.c 		u8 __iomem *regp = &((u8 __iomem *)par->regs)[p[0]];
p                 271 drivers/video/fbdev/bw2.c 		sbus_writeb(p[1], regp);
p                  94 drivers/video/fbdev/c2p_iplan2.c 	void *p;
p                 104 drivers/video/fbdev/c2p_iplan2.c 		p = dst;
p                 113 drivers/video/fbdev/c2p_iplan2.c 			store_iplan2_masked(p, bpp, d.words, first);
p                 114 drivers/video/fbdev/c2p_iplan2.c 			p += bpp*2;
p                 125 drivers/video/fbdev/c2p_iplan2.c 				store_iplan2_masked(p, bpp, d.words, first);
p                 126 drivers/video/fbdev/c2p_iplan2.c 				p += bpp*2;
p                 134 drivers/video/fbdev/c2p_iplan2.c 				store_iplan2(p, bpp, d.words);
p                 135 drivers/video/fbdev/c2p_iplan2.c 				p += bpp*2;
p                 144 drivers/video/fbdev/c2p_iplan2.c 				store_iplan2_masked(p, bpp, d.words, last);
p                  96 drivers/video/fbdev/c2p_planar.c 	void *p;
p                 104 drivers/video/fbdev/c2p_planar.c 		p = dst;
p                 113 drivers/video/fbdev/c2p_planar.c 			store_planar_masked(p, dst_nextplane, bpp, d.words,
p                 115 drivers/video/fbdev/c2p_planar.c 			p += 4;
p                 126 drivers/video/fbdev/c2p_planar.c 				store_planar_masked(p, dst_nextplane, bpp,
p                 128 drivers/video/fbdev/c2p_planar.c 				p += 4;
p                 136 drivers/video/fbdev/c2p_planar.c 				store_planar(p, dst_nextplane, bpp, d.words);
p                 137 drivers/video/fbdev/c2p_planar.c 				p += 4;
p                 146 drivers/video/fbdev/c2p_planar.c 				store_planar_masked(p, dst_nextplane, bpp,
p                 264 drivers/video/fbdev/cg3.c 	char *p;
p                 269 drivers/video/fbdev/cg3.c 		ww = simple_strtoul(params, &p, 10);
p                 270 drivers/video/fbdev/cg3.c 		if (ww && *p == 'x') {
p                 271 drivers/video/fbdev/cg3.c 			hh = simple_strtoul(p + 1, &p, 10);
p                 272 drivers/video/fbdev/cg3.c 			if (hh && *p == '-') {
p                 315 drivers/video/fbdev/cg3.c 	u8 *p;
p                 335 drivers/video/fbdev/cg3.c 	for (p = cg3_regvals[type]; *p; p += 2) {
p                 336 drivers/video/fbdev/cg3.c 		u8 __iomem *regp = &((u8 __iomem *)par->regs)[p[0]];
p                 337 drivers/video/fbdev/cg3.c 		sbus_writeb(p[1], regp);
p                 339 drivers/video/fbdev/cg3.c 	for (p = cg3_dacvals; *p; p += 2) {
p                 343 drivers/video/fbdev/cg3.c 		sbus_writeb(p[0], regp);
p                 345 drivers/video/fbdev/cg3.c 		sbus_writeb(p[1], regp);
p                 332 drivers/video/fbdev/chipsfb.c static void init_chips(struct fb_info *p, unsigned long addr)
p                 334 drivers/video/fbdev/chipsfb.c 	memset(p->screen_base, 0, 0x100000);
p                 336 drivers/video/fbdev/chipsfb.c 	p->fix = chipsfb_fix;
p                 337 drivers/video/fbdev/chipsfb.c 	p->fix.smem_start = addr;
p                 339 drivers/video/fbdev/chipsfb.c 	p->var = chipsfb_var;
p                 341 drivers/video/fbdev/chipsfb.c 	p->fbops = &chipsfb_ops;
p                 342 drivers/video/fbdev/chipsfb.c 	p->flags = FBINFO_DEFAULT;
p                 344 drivers/video/fbdev/chipsfb.c 	fb_alloc_cmap(&p->cmap, 256, 0);
p                 351 drivers/video/fbdev/chipsfb.c 	struct fb_info *p;
p                 367 drivers/video/fbdev/chipsfb.c 	p = framebuffer_alloc(0, &dp->dev);
p                 368 drivers/video/fbdev/chipsfb.c 	if (p == NULL) {
p                 401 drivers/video/fbdev/chipsfb.c 	p->screen_base = ioremap_wc(addr, 0x200000);
p                 403 drivers/video/fbdev/chipsfb.c 	p->screen_base = ioremap(addr, 0x200000);
p                 405 drivers/video/fbdev/chipsfb.c 	if (p->screen_base == NULL) {
p                 411 drivers/video/fbdev/chipsfb.c 	pci_set_drvdata(dp, p);
p                 413 drivers/video/fbdev/chipsfb.c 	init_chips(p, addr);
p                 415 drivers/video/fbdev/chipsfb.c 	if (register_framebuffer(p) < 0) {
p                 422 drivers/video/fbdev/chipsfb.c 		 p->node, p->fix.smem_len / 1024);
p                 427 drivers/video/fbdev/chipsfb.c 	iounmap(p->screen_base);
p                 431 drivers/video/fbdev/chipsfb.c 	framebuffer_release(p);
p                 439 drivers/video/fbdev/chipsfb.c 	struct fb_info *p = pci_get_drvdata(dp);
p                 441 drivers/video/fbdev/chipsfb.c 	if (p->screen_base == NULL)
p                 443 drivers/video/fbdev/chipsfb.c 	unregister_framebuffer(p);
p                 444 drivers/video/fbdev/chipsfb.c 	iounmap(p->screen_base);
p                 445 drivers/video/fbdev/chipsfb.c 	p->screen_base = NULL;
p                 452 drivers/video/fbdev/chipsfb.c         struct fb_info *p = pci_get_drvdata(pdev);
p                 460 drivers/video/fbdev/chipsfb.c 	chipsfb_blank(1, p);
p                 461 drivers/video/fbdev/chipsfb.c 	fb_set_suspend(p, 1);
p                 470 drivers/video/fbdev/chipsfb.c         struct fb_info *p = pci_get_drvdata(pdev);
p                 473 drivers/video/fbdev/chipsfb.c 	fb_set_suspend(p, 0);
p                 474 drivers/video/fbdev/chipsfb.c 	chipsfb_blank(0, p);
p                 137 drivers/video/fbdev/controlfb.c static int init_control(struct fb_info_control *p);
p                 138 drivers/video/fbdev/controlfb.c static void control_set_hardware(struct fb_info_control *p,
p                 141 drivers/video/fbdev/controlfb.c static void find_vram_size(struct fb_info_control *p);
p                 142 drivers/video/fbdev/controlfb.c static int read_control_sense(struct fb_info_control *p);
p                 148 drivers/video/fbdev/controlfb.c static void control_init_info(struct fb_info *info, struct fb_info_control *p);
p                 219 drivers/video/fbdev/controlfb.c 	struct fb_info_control *p =
p                 230 drivers/video/fbdev/controlfb.c 	control_set_hardware(p, &par);
p                 232 drivers/video/fbdev/controlfb.c 	info->fix.visual = (p->par.cmode == CMODE_8) ?
p                 234 drivers/video/fbdev/controlfb.c 	info->fix.line_length = p->par.pitch;
p                 235 drivers/video/fbdev/controlfb.c 	info->fix.xpanstep = 32 >> p->par.cmode;
p                 245 drivers/video/fbdev/controlfb.c 	struct fb_info_control *p)
p                 247 drivers/video/fbdev/controlfb.c 	struct fb_par_control *par = &p->par;
p                 251 drivers/video/fbdev/controlfb.c 	out_le32(CNTRL_REG(p,start_addr),
p                 260 drivers/video/fbdev/controlfb.c 	struct fb_info_control *p =
p                 262 drivers/video/fbdev/controlfb.c 	struct fb_par_control *par = &p->par;
p                 274 drivers/video/fbdev/controlfb.c 	set_screen_start(xoffset, var->yoffset, p);
p                 312 drivers/video/fbdev/controlfb.c 	struct fb_info_control *p =
p                 316 drivers/video/fbdev/controlfb.c 	ctrl = le32_to_cpup(CNTRL_REG(p,ctrl));
p                 338 drivers/video/fbdev/controlfb.c 	out_le32(CNTRL_REG(p,ctrl), ctrl);
p                 346 drivers/video/fbdev/controlfb.c 	struct fb_info_control *p =
p                 357 drivers/video/fbdev/controlfb.c 	out_8(&p->cmap_regs->addr, regno);	/* tell clut what addr to fill	*/
p                 358 drivers/video/fbdev/controlfb.c 	out_8(&p->cmap_regs->lut, r);		/* send one color channel at	*/
p                 359 drivers/video/fbdev/controlfb.c 	out_8(&p->cmap_regs->lut, g);		/* a time...			*/
p                 360 drivers/video/fbdev/controlfb.c 	out_8(&p->cmap_regs->lut, b);
p                 364 drivers/video/fbdev/controlfb.c 		switch (p->par.cmode) {
p                 366 drivers/video/fbdev/controlfb.c 			p->pseudo_palette[regno] =
p                 371 drivers/video/fbdev/controlfb.c 			p->pseudo_palette[regno] = (i << 16) | i;
p                 403 drivers/video/fbdev/controlfb.c static int __init init_control(struct fb_info_control *p)
p                 411 drivers/video/fbdev/controlfb.c 	full = p->total_vram == 0x400000;
p                 425 drivers/video/fbdev/controlfb.c 		sense = read_control_sense(p);
p                 434 drivers/video/fbdev/controlfb.c 	control_init_info(&p->info, p);
p                 451 drivers/video/fbdev/controlfb.c 	vyres = (p->total_vram - CTRLFB_OFF) / (var.xres << cmode);
p                 457 drivers/video/fbdev/controlfb.c 	rc = fb_set_var(&p->info, &var);
p                 462 drivers/video/fbdev/controlfb.c 	if (register_framebuffer(&p->info) < 0)
p                 465 drivers/video/fbdev/controlfb.c 	fb_info(&p->info, "control display adapter\n");
p                 471 drivers/video/fbdev/controlfb.c 	out_8(&p->cmap_regs->addr, (a)); \
p                 472 drivers/video/fbdev/controlfb.c 	out_8(&p->cmap_regs->dat,   (d))
p                 476 drivers/video/fbdev/controlfb.c static void control_set_hardware(struct fb_info_control *p, struct fb_par_control *par)
p                 482 drivers/video/fbdev/controlfb.c 	if (PAR_EQUAL(&p->par, par)) {
p                 487 drivers/video/fbdev/controlfb.c 		if (p->par.xoffset != par->xoffset ||
p                 488 drivers/video/fbdev/controlfb.c 		    p->par.yoffset != par->yoffset)
p                 489 drivers/video/fbdev/controlfb.c 			set_screen_start(par->xoffset, par->yoffset, p);
p                 494 drivers/video/fbdev/controlfb.c 	p->par = *par;
p                 495 drivers/video/fbdev/controlfb.c 	cmode = p->par.cmode;
p                 499 drivers/video/fbdev/controlfb.c 	out_le32(CNTRL_REG(p,ctrl), 0x400 | par->ctrl);
p                 504 drivers/video/fbdev/controlfb.c 	RADACAL_WRITE(0x21, p->control_use_bank2 ? 0 : 1);
p                 508 drivers/video/fbdev/controlfb.c 	rp = &p->control_regs->vswin;
p                 512 drivers/video/fbdev/controlfb.c 	out_le32(CNTRL_REG(p,pitch), par->pitch);
p                 513 drivers/video/fbdev/controlfb.c 	out_le32(CNTRL_REG(p,mode), r->mode);
p                 514 drivers/video/fbdev/controlfb.c 	out_le32(CNTRL_REG(p,vram_attr), p->vram_attr);
p                 515 drivers/video/fbdev/controlfb.c 	out_le32(CNTRL_REG(p,start_addr), par->yoffset * par->pitch
p                 517 drivers/video/fbdev/controlfb.c 	out_le32(CNTRL_REG(p,rfrcnt), 0x1e5);
p                 518 drivers/video/fbdev/controlfb.c 	out_le32(CNTRL_REG(p,intr_ena), 0);
p                 521 drivers/video/fbdev/controlfb.c 	out_le32(CNTRL_REG(p,ctrl), par->ctrl);
p                 524 drivers/video/fbdev/controlfb.c 	btext_update_display(p->frame_buffer_phys + CTRLFB_OFF,
p                 525 drivers/video/fbdev/controlfb.c 			     p->par.xres, p->par.yres,
p                 527 drivers/video/fbdev/controlfb.c 			     p->par.pitch);
p                 595 drivers/video/fbdev/controlfb.c static void __init find_vram_size(struct fb_info_control *p)
p                 604 drivers/video/fbdev/controlfb.c 	out_le32(CNTRL_REG(p,vram_attr), 0x31);
p                 606 drivers/video/fbdev/controlfb.c 	out_8(&p->frame_buffer[0x600000], 0xb3);
p                 607 drivers/video/fbdev/controlfb.c 	out_8(&p->frame_buffer[0x600001], 0x71);
p                 608 drivers/video/fbdev/controlfb.c 	asm volatile("eieio; dcbf 0,%0" : : "r" (&p->frame_buffer[0x600000])
p                 611 drivers/video/fbdev/controlfb.c 	asm volatile("eieio; dcbi 0,%0" : : "r" (&p->frame_buffer[0x600000])
p                 615 drivers/video/fbdev/controlfb.c 	bank2 = (in_8(&p->frame_buffer[0x600000]) == 0xb3)
p                 616 drivers/video/fbdev/controlfb.c 		&& (in_8(&p->frame_buffer[0x600001]) == 0x71);
p                 623 drivers/video/fbdev/controlfb.c 	out_le32(CNTRL_REG(p,vram_attr), 0x39);
p                 625 drivers/video/fbdev/controlfb.c 	out_8(&p->frame_buffer[0], 0x5a);
p                 626 drivers/video/fbdev/controlfb.c 	out_8(&p->frame_buffer[1], 0xc7);
p                 627 drivers/video/fbdev/controlfb.c 	asm volatile("eieio; dcbf 0,%0" : : "r" (&p->frame_buffer[0])
p                 630 drivers/video/fbdev/controlfb.c 	asm volatile("eieio; dcbi 0,%0" : : "r" (&p->frame_buffer[0])
p                 634 drivers/video/fbdev/controlfb.c 	bank1 = (in_8(&p->frame_buffer[0]) == 0x5a)
p                 635 drivers/video/fbdev/controlfb.c 		&& (in_8(&p->frame_buffer[1]) == 0xc7);
p                 642 drivers/video/fbdev/controlfb.c 			p->control_use_bank2 = 1;
p                 643 drivers/video/fbdev/controlfb.c 			p->vram_attr = 0x39;
p                 644 drivers/video/fbdev/controlfb.c 			p->frame_buffer += 0x600000;
p                 645 drivers/video/fbdev/controlfb.c 			p->frame_buffer_phys += 0x600000;
p                 650 drivers/video/fbdev/controlfb.c 			p->vram_attr = 0x51;
p                 656 drivers/video/fbdev/controlfb.c 		p->vram_attr = 0x31;
p                 659 drivers/video/fbdev/controlfb.c         p->total_vram = (bank1 + bank2) * 0x200000;
p                 672 drivers/video/fbdev/controlfb.c 	struct fb_info_control	*p;
p                 685 drivers/video/fbdev/controlfb.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 686 drivers/video/fbdev/controlfb.c 	if (!p)
p                 688 drivers/video/fbdev/controlfb.c 	control_fb = p;	/* save it for cleanups */
p                 691 drivers/video/fbdev/controlfb.c 	p->fb_orig_base = fb_res.start;
p                 692 drivers/video/fbdev/controlfb.c 	p->fb_orig_size = resource_size(&fb_res);
p                 694 drivers/video/fbdev/controlfb.c 	p->frame_buffer_phys = fb_res.start + 0x800000;
p                 695 drivers/video/fbdev/controlfb.c 	p->control_regs_phys = reg_res.start;
p                 696 drivers/video/fbdev/controlfb.c 	p->control_regs_size = resource_size(&reg_res);
p                 698 drivers/video/fbdev/controlfb.c 	if (!p->fb_orig_base ||
p                 699 drivers/video/fbdev/controlfb.c 	    !request_mem_region(p->fb_orig_base,p->fb_orig_size,"controlfb")) {
p                 700 drivers/video/fbdev/controlfb.c 		p->fb_orig_base = 0;
p                 704 drivers/video/fbdev/controlfb.c 	p->frame_buffer = ioremap_wt(p->frame_buffer_phys, 0x800000);
p                 706 drivers/video/fbdev/controlfb.c 	if (!p->control_regs_phys ||
p                 707 drivers/video/fbdev/controlfb.c 	    !request_mem_region(p->control_regs_phys, p->control_regs_size,
p                 709 drivers/video/fbdev/controlfb.c 		p->control_regs_phys = 0;
p                 712 drivers/video/fbdev/controlfb.c 	p->control_regs = ioremap(p->control_regs_phys, p->control_regs_size);
p                 714 drivers/video/fbdev/controlfb.c 	p->cmap_regs_phys = 0xf301b000;	 /* XXX not in prom? */
p                 715 drivers/video/fbdev/controlfb.c 	if (!request_mem_region(p->cmap_regs_phys, 0x1000, "controlfb cmap")) {
p                 716 drivers/video/fbdev/controlfb.c 		p->cmap_regs_phys = 0;
p                 719 drivers/video/fbdev/controlfb.c 	p->cmap_regs = ioremap(p->cmap_regs_phys, 0x1000);
p                 721 drivers/video/fbdev/controlfb.c 	if (!p->cmap_regs || !p->control_regs || !p->frame_buffer)
p                 724 drivers/video/fbdev/controlfb.c 	find_vram_size(p);
p                 725 drivers/video/fbdev/controlfb.c 	if (!p->total_vram)
p                 728 drivers/video/fbdev/controlfb.c 	if (init_control(p) < 0)
p                 743 drivers/video/fbdev/controlfb.c static int read_control_sense(struct fb_info_control *p)
p                 747 drivers/video/fbdev/controlfb.c 	out_le32(CNTRL_REG(p,mon_sense), 7);	/* drive all lines high */
p                 749 drivers/video/fbdev/controlfb.c 	out_le32(CNTRL_REG(p,mon_sense), 077);	/* turn off drivers */
p                 751 drivers/video/fbdev/controlfb.c 	sense = (in_le32(CNTRL_REG(p,mon_sense)) & 0x1c0) << 2;
p                 754 drivers/video/fbdev/controlfb.c 	out_le32(CNTRL_REG(p,mon_sense), 033);	/* drive A low */
p                 756 drivers/video/fbdev/controlfb.c 	sense |= (in_le32(CNTRL_REG(p,mon_sense)) & 0xc0) >> 2;
p                 757 drivers/video/fbdev/controlfb.c 	out_le32(CNTRL_REG(p,mon_sense), 055);	/* drive B low */
p                 759 drivers/video/fbdev/controlfb.c 	sense |= ((in_le32(CNTRL_REG(p,mon_sense)) & 0x100) >> 5)
p                 760 drivers/video/fbdev/controlfb.c 		| ((in_le32(CNTRL_REG(p,mon_sense)) & 0x40) >> 4);
p                 761 drivers/video/fbdev/controlfb.c 	out_le32(CNTRL_REG(p,mon_sense), 066);	/* drive C low */
p                 763 drivers/video/fbdev/controlfb.c 	sense |= (in_le32(CNTRL_REG(p,mon_sense)) & 0x180) >> 7;
p                 765 drivers/video/fbdev/controlfb.c 	out_le32(CNTRL_REG(p,mon_sense), 077);	/* turn off drivers */
p                 825 drivers/video/fbdev/controlfb.c 	struct fb_info_control *p =
p                 832 drivers/video/fbdev/controlfb.c 		if (p->total_vram > 0x200000) {
p                 845 drivers/video/fbdev/controlfb.c 		if (p->total_vram > 0x200000) {
p                 857 drivers/video/fbdev/controlfb.c 		if (p->total_vram > 0x200000) {
p                 890 drivers/video/fbdev/controlfb.c 	if (par->pitch * par->vyres + CTRLFB_OFF > p->total_vram)
p                1025 drivers/video/fbdev/controlfb.c static void __init control_init_info(struct fb_info *info, struct fb_info_control *p)
p                1028 drivers/video/fbdev/controlfb.c 	info->par = &p->par;
p                1030 drivers/video/fbdev/controlfb.c 	info->pseudo_palette = p->pseudo_palette;
p                1032 drivers/video/fbdev/controlfb.c 	info->screen_base = p->frame_buffer + CTRLFB_OFF;
p                1038 drivers/video/fbdev/controlfb.c 	info->fix.mmio_start = p->control_regs_phys;
p                1041 drivers/video/fbdev/controlfb.c 	info->fix.smem_start = p->frame_buffer_phys + CTRLFB_OFF;
p                1042 drivers/video/fbdev/controlfb.c 	info->fix.smem_len = p->total_vram - CTRLFB_OFF;
p                1051 drivers/video/fbdev/controlfb.c 	struct fb_info_control	*p = control_fb;
p                1053 drivers/video/fbdev/controlfb.c 	if (!p)
p                1056 drivers/video/fbdev/controlfb.c 	if (p->cmap_regs)
p                1057 drivers/video/fbdev/controlfb.c 		iounmap(p->cmap_regs);
p                1058 drivers/video/fbdev/controlfb.c 	if (p->control_regs)
p                1059 drivers/video/fbdev/controlfb.c 		iounmap(p->control_regs);
p                1060 drivers/video/fbdev/controlfb.c 	if (p->frame_buffer) {
p                1061 drivers/video/fbdev/controlfb.c 		if (p->control_use_bank2)
p                1062 drivers/video/fbdev/controlfb.c 			p->frame_buffer -= 0x600000;
p                1063 drivers/video/fbdev/controlfb.c 		iounmap(p->frame_buffer);
p                1065 drivers/video/fbdev/controlfb.c 	if (p->cmap_regs_phys)
p                1066 drivers/video/fbdev/controlfb.c 		release_mem_region(p->cmap_regs_phys, 0x1000);
p                1067 drivers/video/fbdev/controlfb.c 	if (p->control_regs_phys)
p                1068 drivers/video/fbdev/controlfb.c 		release_mem_region(p->control_regs_phys, p->control_regs_size);
p                1069 drivers/video/fbdev/controlfb.c 	if (p->fb_orig_base)
p                1070 drivers/video/fbdev/controlfb.c 		release_mem_region(p->fb_orig_base, p->fb_orig_size);
p                1071 drivers/video/fbdev/controlfb.c 	kfree(p);
p                 243 drivers/video/fbdev/core/bitblit.c 	int y = real_y(ops->p, vc->vc_y);
p                 312 drivers/video/fbdev/core/bitblit.c 	    vc->vc_cursor_type != ops->p->cursor_shape ||
p                 325 drivers/video/fbdev/core/bitblit.c 		ops->p->cursor_shape = vc->vc_cursor_type;
p                 328 drivers/video/fbdev/core/bitblit.c 		switch (ops->p->cursor_shape & CUR_HWMASK) {
p                  46 drivers/video/fbdev/core/cfbcopyarea.c bitcpy(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx,
p                  63 drivers/video/fbdev/core/cfbcopyarea.c 	first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
p                  64 drivers/video/fbdev/core/cfbcopyarea.c 	last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);
p                 212 drivers/video/fbdev/core/cfbcopyarea.c bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx,
p                 236 drivers/video/fbdev/core/cfbcopyarea.c 	first = ~fb_shifted_pixels_mask_long(p, (dst_idx + 1) % bits, bswapmask);
p                 237 drivers/video/fbdev/core/cfbcopyarea.c 	last = fb_shifted_pixels_mask_long(p, (bits + dst_idx + 1 - n) % bits, bswapmask);
p                 381 drivers/video/fbdev/core/cfbcopyarea.c void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
p                 385 drivers/video/fbdev/core/cfbcopyarea.c 	unsigned long const bits_per_line = p->fix.line_length*8u;
p                 389 drivers/video/fbdev/core/cfbcopyarea.c 	u32 bswapmask = fb_compute_bswapmask(p);
p                 391 drivers/video/fbdev/core/cfbcopyarea.c 	if (p->state != FBINFO_STATE_RUNNING)
p                 404 drivers/video/fbdev/core/cfbcopyarea.c 	base = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1));
p                 405 drivers/video/fbdev/core/cfbcopyarea.c 	dst_idx = src_idx = 8*((unsigned long)p->screen_base & (bytes-1));
p                 407 drivers/video/fbdev/core/cfbcopyarea.c 	dst_idx += dy*bits_per_line + dx*p->var.bits_per_pixel;
p                 408 drivers/video/fbdev/core/cfbcopyarea.c 	src_idx += sy*bits_per_line + sx*p->var.bits_per_pixel;
p                 410 drivers/video/fbdev/core/cfbcopyarea.c 	if (p->fbops->fb_sync)
p                 411 drivers/video/fbdev/core/cfbcopyarea.c 		p->fbops->fb_sync(p);
p                 417 drivers/video/fbdev/core/cfbcopyarea.c 			bitcpy_rev(p, base + (dst_idx / bits), dst_idx % bits,
p                 419 drivers/video/fbdev/core/cfbcopyarea.c 				width*p->var.bits_per_pixel, bswapmask);
p                 423 drivers/video/fbdev/core/cfbcopyarea.c 			bitcpy(p, base + (dst_idx / bits), dst_idx % bits,
p                 425 drivers/video/fbdev/core/cfbcopyarea.c 				width*p->var.bits_per_pixel, bswapmask);
p                  35 drivers/video/fbdev/core/cfbfillrect.c bitfill_aligned(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
p                  43 drivers/video/fbdev/core/cfbfillrect.c 	first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
p                  44 drivers/video/fbdev/core/cfbfillrect.c 	last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);
p                  92 drivers/video/fbdev/core/cfbfillrect.c bitfill_unaligned(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
p                 100 drivers/video/fbdev/core/cfbfillrect.c 	first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
p                 101 drivers/video/fbdev/core/cfbfillrect.c 	last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
p                 146 drivers/video/fbdev/core/cfbfillrect.c bitfill_aligned_rev(struct fb_info *p, unsigned long __iomem *dst,
p                 156 drivers/video/fbdev/core/cfbfillrect.c 	first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
p                 157 drivers/video/fbdev/core/cfbfillrect.c 	last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);
p                 217 drivers/video/fbdev/core/cfbfillrect.c bitfill_unaligned_rev(struct fb_info *p, unsigned long __iomem *dst,
p                 226 drivers/video/fbdev/core/cfbfillrect.c 	first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
p                 227 drivers/video/fbdev/core/cfbfillrect.c 	last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
p                 278 drivers/video/fbdev/core/cfbfillrect.c void cfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
p                 283 drivers/video/fbdev/core/cfbfillrect.c 	u32 bpp = p->var.bits_per_pixel;
p                 287 drivers/video/fbdev/core/cfbfillrect.c 	if (p->state != FBINFO_STATE_RUNNING)
p                 290 drivers/video/fbdev/core/cfbfillrect.c 	if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
p                 291 drivers/video/fbdev/core/cfbfillrect.c 	    p->fix.visual == FB_VISUAL_DIRECTCOLOR )
p                 292 drivers/video/fbdev/core/cfbfillrect.c 		fg = ((u32 *) (p->pseudo_palette))[rect->color];
p                 298 drivers/video/fbdev/core/cfbfillrect.c 	dst = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1));
p                 299 drivers/video/fbdev/core/cfbfillrect.c 	dst_idx = ((unsigned long)p->screen_base & (bytes - 1))*8;
p                 300 drivers/video/fbdev/core/cfbfillrect.c 	dst_idx += rect->dy*p->fix.line_length*8+rect->dx*bpp;
p                 303 drivers/video/fbdev/core/cfbfillrect.c 	if (p->fbops->fb_sync)
p                 304 drivers/video/fbdev/core/cfbfillrect.c 		p->fbops->fb_sync(p);
p                 306 drivers/video/fbdev/core/cfbfillrect.c 		u32 bswapmask = fb_compute_bswapmask(p);
p                 307 drivers/video/fbdev/core/cfbfillrect.c 		void (*fill_op32)(struct fb_info *p,
p                 327 drivers/video/fbdev/core/cfbfillrect.c 			fill_op32(p, dst, dst_idx, pat, width*bpp, bits,
p                 329 drivers/video/fbdev/core/cfbfillrect.c 			dst_idx += p->fix.line_length*8;
p                 333 drivers/video/fbdev/core/cfbfillrect.c 		void (*fill_op)(struct fb_info *p, unsigned long __iomem *dst,
p                 360 drivers/video/fbdev/core/cfbfillrect.c 			fill_op(p, dst, dst_idx, pat2, left, right,
p                 362 drivers/video/fbdev/core/cfbfillrect.c 			dst_idx += p->fix.line_length*8;
p                  76 drivers/video/fbdev/core/cfbimgblt.c 				   struct fb_info *p, u8 __iomem *dst1, 
p                  83 drivers/video/fbdev/core/cfbimgblt.c 	int i, n, bpp = p->var.bits_per_pixel;
p                  85 drivers/video/fbdev/core/cfbimgblt.c 	u32 *palette = (u32 *) p->pseudo_palette;
p                  87 drivers/video/fbdev/core/cfbimgblt.c 	u32 bswapmask = fb_compute_bswapmask(p);
p                  97 drivers/video/fbdev/core/cfbimgblt.c 			u32 start_mask = ~fb_shifted_pixels_mask_u32(p,
p                 103 drivers/video/fbdev/core/cfbimgblt.c 			if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
p                 104 drivers/video/fbdev/core/cfbimgblt.c 			    p->fix.visual == FB_VISUAL_DIRECTCOLOR )
p                 108 drivers/video/fbdev/core/cfbimgblt.c 			color <<= FB_LEFT_POS(p, bpp);
p                 109 drivers/video/fbdev/core/cfbimgblt.c 			val |= FB_SHIFT_HIGH(p, color, shift ^ bswapmask);
p                 114 drivers/video/fbdev/core/cfbimgblt.c 					FB_SHIFT_LOW(p, color, 32 - shift);
p                 121 drivers/video/fbdev/core/cfbimgblt.c 			u32 end_mask = fb_shifted_pixels_mask_u32(p, shift,
p                 126 drivers/video/fbdev/core/cfbimgblt.c 		dst1 += p->fix.line_length;
p                 128 drivers/video/fbdev/core/cfbimgblt.c 			dst2 += p->fix.line_length;
p                 137 drivers/video/fbdev/core/cfbimgblt.c static inline void slow_imageblit(const struct fb_image *image, struct fb_info *p, 
p                 143 drivers/video/fbdev/core/cfbimgblt.c 	u32 shift, color = 0, bpp = p->var.bits_per_pixel;
p                 145 drivers/video/fbdev/core/cfbimgblt.c 	u32 val, pitch = p->fix.line_length;
p                 150 drivers/video/fbdev/core/cfbimgblt.c 	u32 bswapmask = fb_compute_bswapmask(p);
p                 153 drivers/video/fbdev/core/cfbimgblt.c 	fgcolor <<= FB_LEFT_POS(p, bpp);
p                 154 drivers/video/fbdev/core/cfbimgblt.c 	bgcolor <<= FB_LEFT_POS(p, bpp);
p                 165 drivers/video/fbdev/core/cfbimgblt.c 			u32 start_mask = ~fb_shifted_pixels_mask_u32(p,
p                 174 drivers/video/fbdev/core/cfbimgblt.c 			val |= FB_SHIFT_HIGH(p, color, shift ^ bswapmask);
p                 180 drivers/video/fbdev/core/cfbimgblt.c 					FB_SHIFT_LOW(p, color, 32 - shift);
p                 189 drivers/video/fbdev/core/cfbimgblt.c 			u32 end_mask = fb_shifted_pixels_mask_u32(p, shift,
p                 215 drivers/video/fbdev/core/cfbimgblt.c static inline void fast_imageblit(const struct fb_image *image, struct fb_info *p, 
p                 219 drivers/video/fbdev/core/cfbimgblt.c 	u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
p                 229 drivers/video/fbdev/core/cfbimgblt.c 		tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
p                 232 drivers/video/fbdev/core/cfbimgblt.c 		tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
p                 260 drivers/video/fbdev/core/cfbimgblt.c 		dst1 += p->fix.line_length;
p                 265 drivers/video/fbdev/core/cfbimgblt.c void cfb_imageblit(struct fb_info *p, const struct fb_image *image)
p                 268 drivers/video/fbdev/core/cfbimgblt.c 	u32 bpl = sizeof(u32), bpp = p->var.bits_per_pixel;
p                 273 drivers/video/fbdev/core/cfbimgblt.c 	if (p->state != FBINFO_STATE_RUNNING)
p                 276 drivers/video/fbdev/core/cfbimgblt.c 	bitstart = (dy * p->fix.line_length * 8) + (dx * bpp);
p                 278 drivers/video/fbdev/core/cfbimgblt.c 	pitch_index = (p->fix.line_length & (bpl - 1)) * 8;
p                 282 drivers/video/fbdev/core/cfbimgblt.c 	dst1 = p->screen_base + bitstart;
p                 284 drivers/video/fbdev/core/cfbimgblt.c 	if (p->fbops->fb_sync)
p                 285 drivers/video/fbdev/core/cfbimgblt.c 		p->fbops->fb_sync(p);
p                 288 drivers/video/fbdev/core/cfbimgblt.c 		if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
p                 289 drivers/video/fbdev/core/cfbimgblt.c 		    p->fix.visual == FB_VISUAL_DIRECTCOLOR) {
p                 290 drivers/video/fbdev/core/cfbimgblt.c 			fgcolor = ((u32*)(p->pseudo_palette))[image->fg_color];
p                 291 drivers/video/fbdev/core/cfbimgblt.c 			bgcolor = ((u32*)(p->pseudo_palette))[image->bg_color];
p                 300 drivers/video/fbdev/core/cfbimgblt.c 			fast_imageblit(image, p, dst1, fgcolor, bgcolor);
p                 302 drivers/video/fbdev/core/cfbimgblt.c 			slow_imageblit(image, p, dst1, fgcolor, bgcolor,
p                 305 drivers/video/fbdev/core/cfbimgblt.c 		color_imageblit(image, p, dst1, start_index, pitch_index);
p                 101 drivers/video/fbdev/core/fb_draw.h static inline u32 fb_shifted_pixels_mask_u32(struct fb_info *p, u32 index,
p                 107 drivers/video/fbdev/core/fb_draw.h 		mask = FB_SHIFT_HIGH(p, ~(u32)0, index);
p                 109 drivers/video/fbdev/core/fb_draw.h 		mask = 0xff << FB_LEFT_POS(p, 8);
p                 110 drivers/video/fbdev/core/fb_draw.h 		mask = FB_SHIFT_LOW(p, mask, index & (bswapmask)) & mask;
p                 111 drivers/video/fbdev/core/fb_draw.h 		mask = FB_SHIFT_HIGH(p, mask, index & ~(bswapmask));
p                 116 drivers/video/fbdev/core/fb_draw.h 			mask |= FB_SHIFT_HIGH(p, ~(u32)0,
p                 122 drivers/video/fbdev/core/fb_draw.h static inline unsigned long fb_shifted_pixels_mask_long(struct fb_info *p,
p                 129 drivers/video/fbdev/core/fb_draw.h 		mask = FB_SHIFT_HIGH(p, ~0UL, index);
p                 131 drivers/video/fbdev/core/fb_draw.h 		mask = 0xff << FB_LEFT_POS(p, 8);
p                 132 drivers/video/fbdev/core/fb_draw.h 		mask = FB_SHIFT_LOW(p, mask, index & (bswapmask)) & mask;
p                 133 drivers/video/fbdev/core/fb_draw.h 		mask = FB_SHIFT_HIGH(p, mask, index & ~(bswapmask));
p                 138 drivers/video/fbdev/core/fb_draw.h 			mask |= FB_SHIFT_HIGH(p, ~0UL,
p                 168 drivers/video/fbdev/core/fb_draw.h #define fb_shifted_pixels_mask_u32(p, i, b) FB_SHIFT_HIGH((p), ~(u32)0, (i))
p                 169 drivers/video/fbdev/core/fb_draw.h #define fb_shifted_pixels_mask_long(p, i, b) FB_SHIFT_HIGH((p), ~0UL, (i))
p                  19 drivers/video/fbdev/core/fb_sys_fops.c 	unsigned long p = *ppos;
p                  32 drivers/video/fbdev/core/fb_sys_fops.c 	if (p >= total_size)
p                  38 drivers/video/fbdev/core/fb_sys_fops.c 	if (count + p > total_size)
p                  39 drivers/video/fbdev/core/fb_sys_fops.c 		count = total_size - p;
p                  41 drivers/video/fbdev/core/fb_sys_fops.c 	src = (void __force *)(info->screen_base + p);
p                  59 drivers/video/fbdev/core/fb_sys_fops.c 	unsigned long p = *ppos;
p                  72 drivers/video/fbdev/core/fb_sys_fops.c 	if (p > total_size)
p                  80 drivers/video/fbdev/core/fb_sys_fops.c 	if (count + p > total_size) {
p                  84 drivers/video/fbdev/core/fb_sys_fops.c 		count = total_size - p;
p                  87 drivers/video/fbdev/core/fb_sys_fops.c 	dst = (void __force *) (info->screen_base + p);
p                 172 drivers/video/fbdev/core/fbcon.c #define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row)
p                 207 drivers/video/fbdev/core/fbcon.c static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
p                 211 drivers/video/fbdev/core/fbcon.c static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
p                 225 drivers/video/fbdev/core/fbcon.c 	    ops->p->con_rotate < 4)
p                 226 drivers/video/fbdev/core/fbcon.c 		ops->rotate = ops->p->con_rotate;
p                 242 drivers/video/fbdev/core/fbcon.c 		struct fbcon_display *p = &fb_display[ops->currcon];
p                 245 drivers/video/fbdev/core/fbcon.c 			p->con_rotate = rotate;
p                 247 drivers/video/fbdev/core/fbcon.c 			p->con_rotate = 0;
p                 257 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p;
p                 269 drivers/video/fbdev/core/fbcon.c 		p = &fb_display[vc->vc_num];
p                 270 drivers/video/fbdev/core/fbcon.c 		p->con_rotate = rotate;
p                 697 drivers/video/fbdev/core/fbcon.c 	ops->p = &fb_display[vc->vc_num];
p                 723 drivers/video/fbdev/core/fbcon.c 	ops->p = &fb_display[vc->vc_num];
p                 968 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p = &fb_display[fg_console];
p                1011 drivers/video/fbdev/core/fbcon.c 	p->con_rotate = initial_rotation;
p                1012 drivers/video/fbdev/core/fbcon.c 	if (p->con_rotate == -1)
p                1013 drivers/video/fbdev/core/fbcon.c 		p->con_rotate = info->fbcon_rotate_hint;
p                1014 drivers/video/fbdev/core/fbcon.c 	if (p->con_rotate == -1)
p                1015 drivers/video/fbdev/core/fbcon.c 		p->con_rotate = FB_ROTATE_UR;
p                1045 drivers/video/fbdev/core/fbcon.c 	if (!p->fontdata && !vc->vc_font.data) {
p                1053 drivers/video/fbdev/core/fbcon.c 		vc->vc_font.data = (void *)(p->fontdata = font->data);
p                1056 drivers/video/fbdev/core/fbcon.c 		p->fontdata = vc->vc_font.data;
p                1081 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *t, *p = &fb_display[vc->vc_num];
p                1101 drivers/video/fbdev/core/fbcon.c 	if (var_to_display(p, &info->var, info))
p                1110 drivers/video/fbdev/core/fbcon.c 	if (!p->fontdata) {
p                1114 drivers/video/fbdev/core/fbcon.c 			vc->vc_font.data = (void *)(p->fontdata =
p                1118 drivers/video/fbdev/core/fbcon.c 			p->userfont = t->userfont;
p                1120 drivers/video/fbdev/core/fbcon.c 			if (p->userfont)
p                1121 drivers/video/fbdev/core/fbcon.c 				REFCOUNT(p->fontdata)++;
p                1132 drivers/video/fbdev/core/fbcon.c 			vc->vc_font.data = (void *)(p->fontdata = font->data);
p                1138 drivers/video/fbdev/core/fbcon.c 	if (p->userfont)
p                1139 drivers/video/fbdev/core/fbcon.c 		charcnt = FNTCHARCNT(p->fontdata);
p                1159 drivers/video/fbdev/core/fbcon.c 	p->con_rotate = initial_rotation;
p                1160 drivers/video/fbdev/core/fbcon.c 	if (p->con_rotate == -1)
p                1161 drivers/video/fbdev/core/fbcon.c 		p->con_rotate = info->fbcon_rotate_hint;
p                1162 drivers/video/fbdev/core/fbcon.c 	if (p->con_rotate == -1)
p                1163 drivers/video/fbdev/core/fbcon.c 		p->con_rotate = FB_ROTATE_UR;
p                1199 drivers/video/fbdev/core/fbcon.c 		p->scrollmode = SCROLL_MOVE;
p                1201 drivers/video/fbdev/core/fbcon.c 		p->scrollmode = SCROLL_REDRAW;
p                1225 drivers/video/fbdev/core/fbcon.c 	ops->p = &fb_display[fg_console];
p                1228 drivers/video/fbdev/core/fbcon.c static void fbcon_free_font(struct fbcon_display *p, bool freefont)
p                1230 drivers/video/fbdev/core/fbcon.c 	if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
p                1231 drivers/video/fbdev/core/fbcon.c 		kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int));
p                1232 drivers/video/fbdev/core/fbcon.c 	p->fontdata = NULL;
p                1233 drivers/video/fbdev/core/fbcon.c 	p->userfont = 0;
p                1240 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p = &fb_display[vc->vc_num];
p                1269 drivers/video/fbdev/core/fbcon.c 	fbcon_free_font(p, free_font);
p                1316 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p = &fb_display[vc->vc_num];
p                1338 drivers/video/fbdev/core/fbcon.c 	y_break = p->vrows - p->yscroll;
p                1341 drivers/video/fbdev/core/fbcon.c 		ops->clear(vc, info, real_y(p, sy), sx, b, width);
p                1342 drivers/video/fbdev/core/fbcon.c 		ops->clear(vc, info, real_y(p, sy + b), sx, height - b,
p                1345 drivers/video/fbdev/core/fbcon.c 		ops->clear(vc, info, real_y(p, sy), sx, height, width);
p                1352 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p = &fb_display[vc->vc_num];
p                1356 drivers/video/fbdev/core/fbcon.c 		ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
p                1416 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p, *t;
p                1422 drivers/video/fbdev/core/fbcon.c 	p = &fb_display[unit];
p                1424 drivers/video/fbdev/core/fbcon.c 	if (var_to_display(p, var, info))
p                1437 drivers/video/fbdev/core/fbcon.c 		vc->vc_font.data = (void *)(p->fontdata = t->fontdata);
p                1440 drivers/video/fbdev/core/fbcon.c 		p->userfont = t->userfont;
p                1441 drivers/video/fbdev/core/fbcon.c 		if (p->userfont)
p                1442 drivers/video/fbdev/core/fbcon.c 			REFCOUNT(p->fontdata)++;
p                1444 drivers/video/fbdev/core/fbcon.c 	if (p->userfont)
p                1445 drivers/video/fbdev/core/fbcon.c 		charcnt = FNTCHARCNT(p->fontdata);
p                1485 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p = &fb_display[vc->vc_num];
p                1487 drivers/video/fbdev/core/fbcon.c 	p->yscroll += count;
p                1488 drivers/video/fbdev/core/fbcon.c 	if (p->yscroll >= p->vrows)	/* Deal with wrap */
p                1489 drivers/video/fbdev/core/fbcon.c 		p->yscroll -= p->vrows;
p                1491 drivers/video/fbdev/core/fbcon.c 	ops->var.yoffset = p->yscroll * vc->vc_font.height;
p                1504 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p = &fb_display[vc->vc_num];
p                1506 drivers/video/fbdev/core/fbcon.c 	p->yscroll -= count;
p                1507 drivers/video/fbdev/core/fbcon.c 	if (p->yscroll < 0)	/* Deal with wrap */
p                1508 drivers/video/fbdev/core/fbcon.c 		p->yscroll += p->vrows;
p                1510 drivers/video/fbdev/core/fbcon.c 	ops->var.yoffset = p->yscroll * vc->vc_font.height;
p                1522 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p = &fb_display[vc->vc_num];
p                1525 drivers/video/fbdev/core/fbcon.c 	p->yscroll += count;
p                1526 drivers/video/fbdev/core/fbcon.c 	if (p->yscroll > p->vrows - vc->vc_rows) {
p                1527 drivers/video/fbdev/core/fbcon.c 		ops->bmove(vc, info, p->vrows - vc->vc_rows,
p                1529 drivers/video/fbdev/core/fbcon.c 		p->yscroll -= p->vrows - vc->vc_rows;
p                1533 drivers/video/fbdev/core/fbcon.c 	ops->var.yoffset = p->yscroll * vc->vc_font.height;
p                1547 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p = &fb_display[vc->vc_num];
p                1549 drivers/video/fbdev/core/fbcon.c 	p->yscroll += count;
p                1551 drivers/video/fbdev/core/fbcon.c 	if (p->yscroll > p->vrows - vc->vc_rows) {
p                1552 drivers/video/fbdev/core/fbcon.c 		p->yscroll -= p->vrows - vc->vc_rows;
p                1553 drivers/video/fbdev/core/fbcon.c 		fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t);
p                1557 drivers/video/fbdev/core/fbcon.c 	ops->var.yoffset = p->yscroll * vc->vc_font.height;
p                1570 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p = &fb_display[vc->vc_num];
p                1573 drivers/video/fbdev/core/fbcon.c 	p->yscroll -= count;
p                1574 drivers/video/fbdev/core/fbcon.c 	if (p->yscroll < 0) {
p                1575 drivers/video/fbdev/core/fbcon.c 		ops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows,
p                1577 drivers/video/fbdev/core/fbcon.c 		p->yscroll += p->vrows - vc->vc_rows;
p                1581 drivers/video/fbdev/core/fbcon.c 	ops->var.yoffset = p->yscroll * vc->vc_font.height;
p                1595 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p = &fb_display[vc->vc_num];
p                1597 drivers/video/fbdev/core/fbcon.c 	p->yscroll -= count;
p                1599 drivers/video/fbdev/core/fbcon.c 	if (p->yscroll < 0) {
p                1600 drivers/video/fbdev/core/fbcon.c 		p->yscroll += p->vrows - vc->vc_rows;
p                1601 drivers/video/fbdev/core/fbcon.c 		fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count);
p                1605 drivers/video/fbdev/core/fbcon.c 	ops->var.yoffset = p->yscroll * vc->vc_font.height;
p                1615 drivers/video/fbdev/core/fbcon.c static void fbcon_redraw_softback(struct vc_data *vc, struct fbcon_display *p,
p                1708 drivers/video/fbdev/core/fbcon.c static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
p                1743 drivers/video/fbdev/core/fbcon.c 			struct fbcon_display *p, int line, int count, int ycount)
p                1792 drivers/video/fbdev/core/fbcon.c static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p,
p                1850 drivers/video/fbdev/core/fbcon.c 	unsigned short *p;
p                1854 drivers/video/fbdev/core/fbcon.c 	p = (unsigned short *) (vc->vc_origin + t * vc->vc_size_row);
p                1857 drivers/video/fbdev/core/fbcon.c 		scr_memcpyw((u16 *) softback_in, p, vc->vc_size_row);
p                1859 drivers/video/fbdev/core/fbcon.c 		p = advance_row(p, 1);
p                1876 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p = &fb_display[vc->vc_num];
p                1898 drivers/video/fbdev/core/fbcon.c 		switch (p->scrollmode) {
p                1900 drivers/video/fbdev/core/fbcon.c 			fbcon_redraw_blit(vc, info, p, t, b - t - count,
p                1930 drivers/video/fbdev/core/fbcon.c 			if ((p->yscroll + count <=
p                1931 drivers/video/fbdev/core/fbcon.c 			     2 * (p->vrows - vc->vc_rows))
p                1937 drivers/video/fbdev/core/fbcon.c 					fbcon_redraw_move(vc, p, 0, t, count);
p                1940 drivers/video/fbdev/core/fbcon.c 					fbcon_redraw_move(vc, p, b,
p                1943 drivers/video/fbdev/core/fbcon.c 				fbcon_redraw_move(vc, p, t + count, b - t - count, t);
p                1948 drivers/video/fbdev/core/fbcon.c 			if ((p->yscroll + count <=
p                1949 drivers/video/fbdev/core/fbcon.c 			     2 * (p->vrows - vc->vc_rows))
p                1972 drivers/video/fbdev/core/fbcon.c 			fbcon_redraw(vc, p, t, b - t - count,
p                1989 drivers/video/fbdev/core/fbcon.c 		switch (p->scrollmode) {
p                1991 drivers/video/fbdev/core/fbcon.c 			fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
p                2021 drivers/video/fbdev/core/fbcon.c 			if ((count - p->yscroll <= p->vrows - vc->vc_rows)
p                2043 drivers/video/fbdev/core/fbcon.c 			if ((count - p->yscroll <= p->vrows - vc->vc_rows)
p                2049 drivers/video/fbdev/core/fbcon.c 					fbcon_redraw_move(vc, p, b, vc->vc_rows - b,
p                2053 drivers/video/fbdev/core/fbcon.c 					fbcon_redraw_move(vc, p, count, t, 0);
p                2055 drivers/video/fbdev/core/fbcon.c 				fbcon_redraw_move(vc, p, t, b - t - count, t + count);
p                2061 drivers/video/fbdev/core/fbcon.c 			fbcon_redraw(vc, p, b - 1, b - t - count,
p                2080 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p = &fb_display[vc->vc_num];
p                2095 drivers/video/fbdev/core/fbcon.c 	fbcon_bmove_rec(vc, p, sy, sx, dy, dx, height, width,
p                2096 drivers/video/fbdev/core/fbcon.c 			p->vrows - p->yscroll);
p                2099 drivers/video/fbdev/core/fbcon.c static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
p                2109 drivers/video/fbdev/core/fbcon.c 			fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
p                2111 drivers/video/fbdev/core/fbcon.c 			fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
p                2114 drivers/video/fbdev/core/fbcon.c 			fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
p                2116 drivers/video/fbdev/core/fbcon.c 			fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
p                2125 drivers/video/fbdev/core/fbcon.c 			fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
p                2127 drivers/video/fbdev/core/fbcon.c 			fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
p                2130 drivers/video/fbdev/core/fbcon.c 			fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
p                2132 drivers/video/fbdev/core/fbcon.c 			fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
p                2137 drivers/video/fbdev/core/fbcon.c 	ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
p                2141 drivers/video/fbdev/core/fbcon.c static void updatescrollmode(struct fbcon_display *p,
p                2167 drivers/video/fbdev/core/fbcon.c 	p->vrows = vyres/fh;
p                2169 drivers/video/fbdev/core/fbcon.c 		p->vrows -= (yres - (fh * vc->vc_rows)) / fh;
p                2171 drivers/video/fbdev/core/fbcon.c 		p->vrows--;
p                2175 drivers/video/fbdev/core/fbcon.c 			p->scrollmode = good_wrap ?
p                2178 drivers/video/fbdev/core/fbcon.c 			p->scrollmode = good_wrap ? SCROLL_REDRAW :
p                2182 drivers/video/fbdev/core/fbcon.c 			p->scrollmode = SCROLL_MOVE;
p                2184 drivers/video/fbdev/core/fbcon.c 			p->scrollmode = SCROLL_REDRAW;
p                2193 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p = &fb_display[vc->vc_num];
p                2215 drivers/video/fbdev/core/fbcon.c 		display_to_var(&var, p);
p                2227 drivers/video/fbdev/core/fbcon.c 		var_to_display(p, &info->var, info);
p                2230 drivers/video/fbdev/core/fbcon.c 	updatescrollmode(p, info, vc);
p                2238 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p = &fb_display[vc->vc_num];
p                2281 drivers/video/fbdev/core/fbcon.c 	display_to_var(&var, p);
p                2325 drivers/video/fbdev/core/fbcon.c 	if (p->userfont)
p                2331 drivers/video/fbdev/core/fbcon.c 	updatescrollmode(p, info, vc);
p                2333 drivers/video/fbdev/core/fbcon.c 	switch (p->scrollmode) {
p                2335 drivers/video/fbdev/core/fbcon.c 		scrollback_phys_max = p->vrows - vc->vc_rows;
p                2339 drivers/video/fbdev/core/fbcon.c 		scrollback_phys_max = p->vrows - 2 * vc->vc_rows;
p                2352 drivers/video/fbdev/core/fbcon.c 	    ops->var.xoffset = ops->var.yoffset = p->yscroll = 0;
p                2570 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p = &fb_display[vc->vc_num];
p                2579 drivers/video/fbdev/core/fbcon.c 	if (p->userfont)
p                2585 drivers/video/fbdev/core/fbcon.c 	vc->vc_font.data = (void *)(p->fontdata = data);
p                2586 drivers/video/fbdev/core/fbcon.c 	if ((p->userfont = userfont))
p                2763 drivers/video/fbdev/core/fbcon.c 	unsigned long p;
p                2772 drivers/video/fbdev/core/fbcon.c 	p = softback_curr + offset;
p                2773 drivers/video/fbdev/core/fbcon.c 	if (p >= softback_end)
p                2774 drivers/video/fbdev/core/fbcon.c 		p += softback_buf - softback_end;
p                2775 drivers/video/fbdev/core/fbcon.c 	return (u16 *) p;
p                2819 drivers/video/fbdev/core/fbcon.c static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt)
p                2822 drivers/video/fbdev/core/fbcon.c 		u16 a = scr_readw(p);
p                2831 drivers/video/fbdev/core/fbcon.c 		scr_writew(a, p++);
p                2832 drivers/video/fbdev/core/fbcon.c 		if (p == (u16 *) softback_end)
p                2833 drivers/video/fbdev/core/fbcon.c 			p = (u16 *) softback_buf;
p                2834 drivers/video/fbdev/core/fbcon.c 		if (p == (u16 *) softback_in)
p                2835 drivers/video/fbdev/core/fbcon.c 			p = (u16 *) vc->vc_origin;
p                2858 drivers/video/fbdev/core/fbcon.c 				unsigned long p, q;
p                2861 drivers/video/fbdev/core/fbcon.c 				p = softback_in;
p                2865 drivers/video/fbdev/core/fbcon.c 					if (p == softback_top)
p                2867 drivers/video/fbdev/core/fbcon.c 					if (p == softback_buf)
p                2868 drivers/video/fbdev/core/fbcon.c 						p = softback_end;
p                2869 drivers/video/fbdev/core/fbcon.c 					p -= vc->vc_size_row;
p                2871 drivers/video/fbdev/core/fbcon.c 					scr_memcpyw((u16 *) q, (u16 *) p,
p                2874 drivers/video/fbdev/core/fbcon.c 				softback_in = softback_curr = p;
p                2964 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p;
p                2974 drivers/video/fbdev/core/fbcon.c 	p = &fb_display[vc->vc_num];
p                2978 drivers/video/fbdev/core/fbcon.c 		var_to_display(p, &info->var, info);
p                2984 drivers/video/fbdev/core/fbcon.c 		updatescrollmode(p, info, vc);
p                2989 drivers/video/fbdev/core/fbcon.c 		    ops->var.xoffset = ops->var.yoffset = p->yscroll = 0;
p                3004 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p;
p                3021 drivers/video/fbdev/core/fbcon.c 		p = &fb_display[vc->vc_num];
p                3023 drivers/video/fbdev/core/fbcon.c 		var_to_display(p, &info->var, info);
p                3049 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p;
p                3060 drivers/video/fbdev/core/fbcon.c 		p = &fb_display[i];
p                3061 drivers/video/fbdev/core/fbcon.c 		if (!p || !p->mode)
p                3063 drivers/video/fbdev/core/fbcon.c 		if (fb_mode_is_equal(p->mode, mode)) {
p                3313 drivers/video/fbdev/core/fbcon.c 	struct fbcon_display *p;
p                3322 drivers/video/fbdev/core/fbcon.c 				p = &fb_display[i];
p                3325 drivers/video/fbdev/core/fbcon.c 				charcnt = (p->userfont) ?
p                3326 drivers/video/fbdev/core/fbcon.c 					FNTCHARCNT(p->fontdata) : 256;
p                3336 drivers/video/fbdev/core/fbcon.c 			p = &fb_display[fg_console];
p                3339 drivers/video/fbdev/core/fbcon.c 			caps->len = (p->userfont) ?
p                3340 drivers/video/fbdev/core/fbcon.c 				FNTCHARCNT(p->fontdata) : 256;
p                  71 drivers/video/fbdev/core/fbcon.h 	struct fbcon_display *p;
p                 228 drivers/video/fbdev/core/fbcon.h static inline int real_y(struct fbcon_display *p, int ypos)
p                 230 drivers/video/fbdev/core/fbcon.h 	int rows = p->vrows;
p                 232 drivers/video/fbdev/core/fbcon.h 	ypos += p->yscroll;
p                  68 drivers/video/fbdev/core/fbcon_ccw.c 	u32 vyres = GETVYRES(ops->p->scrollmode, info);
p                  86 drivers/video/fbdev/core/fbcon_ccw.c 	u32 vyres = GETVYRES(ops->p->scrollmode, info);
p                 143 drivers/video/fbdev/core/fbcon_ccw.c 	u32 vyres = GETVYRES(ops->p->scrollmode, info);
p                 228 drivers/video/fbdev/core/fbcon_ccw.c 	int y = real_y(ops->p, vc->vc_y);
p                 232 drivers/video/fbdev/core/fbcon_ccw.c 	u32 vyres = GETVYRES(ops->p->scrollmode, info);
p                 304 drivers/video/fbdev/core/fbcon_ccw.c 	    vc->vc_cursor_type != ops->p->cursor_shape ||
p                 325 drivers/video/fbdev/core/fbcon_ccw.c 		ops->p->cursor_shape = vc->vc_cursor_type;
p                 328 drivers/video/fbdev/core/fbcon_ccw.c 		switch (ops->p->cursor_shape & CUR_HWMASK) {
p                 399 drivers/video/fbdev/core/fbcon_ccw.c 	u32 vyres = GETVYRES(ops->p->scrollmode, info);
p                  53 drivers/video/fbdev/core/fbcon_cw.c 	u32 vxres = GETVXRES(ops->p->scrollmode, info);
p                  71 drivers/video/fbdev/core/fbcon_cw.c 	u32 vxres = GETVXRES(ops->p->scrollmode, info);
p                 128 drivers/video/fbdev/core/fbcon_cw.c 	u32 vxres = GETVXRES(ops->p->scrollmode, info);
p                 211 drivers/video/fbdev/core/fbcon_cw.c 	int y = real_y(ops->p, vc->vc_y);
p                 215 drivers/video/fbdev/core/fbcon_cw.c 	u32 vxres = GETVXRES(ops->p->scrollmode, info);
p                 287 drivers/video/fbdev/core/fbcon_cw.c 	    vc->vc_cursor_type != ops->p->cursor_shape ||
p                 308 drivers/video/fbdev/core/fbcon_cw.c 		ops->p->cursor_shape = vc->vc_cursor_type;
p                 311 drivers/video/fbdev/core/fbcon_cw.c 		switch (ops->p->cursor_shape & CUR_HWMASK) {
p                 381 drivers/video/fbdev/core/fbcon_cw.c 	u32 vxres = GETVXRES(ops->p->scrollmode, info);
p                  30 drivers/video/fbdev/core/fbcon_rotate.c 	    ops->p->con_rotate == ops->cur_rotate)
p                  34 drivers/video/fbdev/core/fbcon_rotate.c 	ops->cur_rotate = ops->p->con_rotate;
p                  35 drivers/video/fbdev/core/fbcon_rotate.c 	len = (!ops->p->userfont) ? 256 : FNTCHARCNT(src);
p                  53 drivers/video/fbdev/core/fbcon_ud.c 	u32 vyres = GETVYRES(ops->p->scrollmode, info);
p                  54 drivers/video/fbdev/core/fbcon_ud.c 	u32 vxres = GETVXRES(ops->p->scrollmode, info);
p                  72 drivers/video/fbdev/core/fbcon_ud.c 	u32 vyres = GETVYRES(ops->p->scrollmode, info);
p                  73 drivers/video/fbdev/core/fbcon_ud.c 	u32 vxres = GETVXRES(ops->p->scrollmode, info);
p                 165 drivers/video/fbdev/core/fbcon_ud.c 	u32 vyres = GETVYRES(ops->p->scrollmode, info);
p                 166 drivers/video/fbdev/core/fbcon_ud.c 	u32 vxres = GETVXRES(ops->p->scrollmode, info);
p                 258 drivers/video/fbdev/core/fbcon_ud.c 	int y = real_y(ops->p, vc->vc_y);
p                 262 drivers/video/fbdev/core/fbcon_ud.c 	u32 vyres = GETVYRES(ops->p->scrollmode, info);
p                 263 drivers/video/fbdev/core/fbcon_ud.c 	u32 vxres = GETVXRES(ops->p->scrollmode, info);
p                 335 drivers/video/fbdev/core/fbcon_ud.c 	    vc->vc_cursor_type != ops->p->cursor_shape ||
p                 348 drivers/video/fbdev/core/fbcon_ud.c 		ops->p->cursor_shape = vc->vc_cursor_type;
p                 351 drivers/video/fbdev/core/fbcon_ud.c 		switch (ops->p->cursor_shape & CUR_HWMASK) {
p                 422 drivers/video/fbdev/core/fbcon_ud.c 	u32 vyres = GETVYRES(ops->p->scrollmode, info);
p                 423 drivers/video/fbdev/core/fbcon_ud.c 	u32 vxres = GETVXRES(ops->p->scrollmode, info);
p                 760 drivers/video/fbdev/core/fbmem.c 	unsigned long p = *ppos;
p                 781 drivers/video/fbdev/core/fbmem.c 	if (p >= total_size)
p                 787 drivers/video/fbdev/core/fbmem.c 	if (count + p > total_size)
p                 788 drivers/video/fbdev/core/fbmem.c 		count = total_size - p;
p                 795 drivers/video/fbdev/core/fbmem.c 	src = (u8 __iomem *) (info->screen_base + p);
p                 825 drivers/video/fbdev/core/fbmem.c 	unsigned long p = *ppos;
p                 846 drivers/video/fbdev/core/fbmem.c 	if (p > total_size)
p                 854 drivers/video/fbdev/core/fbmem.c 	if (count + p > total_size) {
p                 858 drivers/video/fbdev/core/fbmem.c 		count = total_size - p;
p                 866 drivers/video/fbdev/core/fbmem.c 	dst = (u8 __iomem *) (info->screen_base + p);
p                  42 drivers/video/fbdev/core/fbsysfs.c 	char *p;
p                  47 drivers/video/fbdev/core/fbsysfs.c 	p = kzalloc(fb_info_size + size, GFP_KERNEL);
p                  49 drivers/video/fbdev/core/fbsysfs.c 	if (!p)
p                  52 drivers/video/fbdev/core/fbsysfs.c 	info = (struct fb_info *) p;
p                  55 drivers/video/fbdev/core/fbsysfs.c 		info->par = p + fb_info_size;
p                  28 drivers/video/fbdev/core/syscopyarea.c bitcpy(struct fb_info *p, unsigned long *dst, unsigned dst_idx,
p                  35 drivers/video/fbdev/core/syscopyarea.c 	first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
p                  36 drivers/video/fbdev/core/syscopyarea.c 	last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
p                 170 drivers/video/fbdev/core/syscopyarea.c bitcpy_rev(struct fb_info *p, unsigned long *dst, unsigned dst_idx,
p                 184 drivers/video/fbdev/core/syscopyarea.c 	first = ~FB_SHIFT_HIGH(p, ~0UL, (dst_idx + 1) % bits);
p                 185 drivers/video/fbdev/core/syscopyarea.c 	last = FB_SHIFT_HIGH(p, ~0UL, (bits + dst_idx + 1 - n) % bits);
p                 315 drivers/video/fbdev/core/syscopyarea.c void sys_copyarea(struct fb_info *p, const struct fb_copyarea *area)
p                 319 drivers/video/fbdev/core/syscopyarea.c 	unsigned long const bits_per_line = p->fix.line_length*8u;
p                 324 drivers/video/fbdev/core/syscopyarea.c 	if (p->state != FBINFO_STATE_RUNNING)
p                 337 drivers/video/fbdev/core/syscopyarea.c 	base = (unsigned long *)((unsigned long)p->screen_base & ~(bytes-1));
p                 338 drivers/video/fbdev/core/syscopyarea.c 	dst_idx = src_idx = 8*((unsigned long)p->screen_base & (bytes-1));
p                 340 drivers/video/fbdev/core/syscopyarea.c 	dst_idx += dy*bits_per_line + dx*p->var.bits_per_pixel;
p                 341 drivers/video/fbdev/core/syscopyarea.c 	src_idx += sy*bits_per_line + sx*p->var.bits_per_pixel;
p                 343 drivers/video/fbdev/core/syscopyarea.c 	if (p->fbops->fb_sync)
p                 344 drivers/video/fbdev/core/syscopyarea.c 		p->fbops->fb_sync(p);
p                 350 drivers/video/fbdev/core/syscopyarea.c 			bitcpy_rev(p, base + (dst_idx / bits), dst_idx % bits,
p                 352 drivers/video/fbdev/core/syscopyarea.c 				width*p->var.bits_per_pixel);
p                 356 drivers/video/fbdev/core/syscopyarea.c 			bitcpy(p, base + (dst_idx / bits), dst_idx % bits,
p                 358 drivers/video/fbdev/core/syscopyarea.c 				width*p->var.bits_per_pixel);
p                  25 drivers/video/fbdev/core/sysfillrect.c bitfill_aligned(struct fb_info *p, unsigned long *dst, int dst_idx,
p                  33 drivers/video/fbdev/core/sysfillrect.c 	first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
p                  34 drivers/video/fbdev/core/sysfillrect.c 	last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
p                  81 drivers/video/fbdev/core/sysfillrect.c bitfill_unaligned(struct fb_info *p, unsigned long *dst, int dst_idx,
p                  89 drivers/video/fbdev/core/sysfillrect.c 	first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
p                  90 drivers/video/fbdev/core/sysfillrect.c 	last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
p                 135 drivers/video/fbdev/core/sysfillrect.c bitfill_aligned_rev(struct fb_info *p, unsigned long *dst, int dst_idx,
p                 144 drivers/video/fbdev/core/sysfillrect.c 	first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
p                 145 drivers/video/fbdev/core/sysfillrect.c 	last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
p                 191 drivers/video/fbdev/core/sysfillrect.c bitfill_unaligned_rev(struct fb_info *p, unsigned long *dst, int dst_idx,
p                 200 drivers/video/fbdev/core/sysfillrect.c 	first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
p                 201 drivers/video/fbdev/core/sysfillrect.c 	last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
p                 243 drivers/video/fbdev/core/sysfillrect.c void sys_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
p                 248 drivers/video/fbdev/core/sysfillrect.c 	u32 bpp = p->var.bits_per_pixel;
p                 252 drivers/video/fbdev/core/sysfillrect.c 	if (p->state != FBINFO_STATE_RUNNING)
p                 255 drivers/video/fbdev/core/sysfillrect.c 	if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
p                 256 drivers/video/fbdev/core/sysfillrect.c 	    p->fix.visual == FB_VISUAL_DIRECTCOLOR )
p                 257 drivers/video/fbdev/core/sysfillrect.c 		fg = ((u32 *) (p->pseudo_palette))[rect->color];
p                 263 drivers/video/fbdev/core/sysfillrect.c 	dst = (unsigned long *)((unsigned long)p->screen_base & ~(bytes-1));
p                 264 drivers/video/fbdev/core/sysfillrect.c 	dst_idx = ((unsigned long)p->screen_base & (bytes - 1))*8;
p                 265 drivers/video/fbdev/core/sysfillrect.c 	dst_idx += rect->dy*p->fix.line_length*8+rect->dx*bpp;
p                 268 drivers/video/fbdev/core/sysfillrect.c 	if (p->fbops->fb_sync)
p                 269 drivers/video/fbdev/core/sysfillrect.c 		p->fbops->fb_sync(p);
p                 271 drivers/video/fbdev/core/sysfillrect.c 		void (*fill_op32)(struct fb_info *p, unsigned long *dst,
p                 291 drivers/video/fbdev/core/sysfillrect.c 			fill_op32(p, dst, dst_idx, pat, width*bpp, bits);
p                 292 drivers/video/fbdev/core/sysfillrect.c 			dst_idx += p->fix.line_length*8;
p                 296 drivers/video/fbdev/core/sysfillrect.c 		void (*fill_op)(struct fb_info *p, unsigned long *dst,
p                 324 drivers/video/fbdev/core/sysfillrect.c 			fill_op(p, dst, dst_idx, pat2, left, right,
p                 326 drivers/video/fbdev/core/sysfillrect.c 			dst_idx += p->fix.line_length*8;
p                  52 drivers/video/fbdev/core/sysimgblt.c static void color_imageblit(const struct fb_image *image, struct fb_info *p,
p                  58 drivers/video/fbdev/core/sysimgblt.c 	int i, n, bpp = p->var.bits_per_pixel;
p                  60 drivers/video/fbdev/core/sysimgblt.c 	u32 *palette = (u32 *) p->pseudo_palette;
p                  71 drivers/video/fbdev/core/sysimgblt.c 			u32 start_mask = ~(FB_SHIFT_HIGH(p, ~(u32)0,
p                  77 drivers/video/fbdev/core/sysimgblt.c 			if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
p                  78 drivers/video/fbdev/core/sysimgblt.c 			    p->fix.visual == FB_VISUAL_DIRECTCOLOR )
p                  82 drivers/video/fbdev/core/sysimgblt.c 			color <<= FB_LEFT_POS(p, bpp);
p                  83 drivers/video/fbdev/core/sysimgblt.c 			val |= FB_SHIFT_HIGH(p, color, shift);
p                  88 drivers/video/fbdev/core/sysimgblt.c 					FB_SHIFT_LOW(p, color, 32 - shift);
p                  95 drivers/video/fbdev/core/sysimgblt.c 			u32 end_mask = FB_SHIFT_HIGH(p, ~(u32)0, shift);
p                 100 drivers/video/fbdev/core/sysimgblt.c 		dst1 += p->fix.line_length;
p                 102 drivers/video/fbdev/core/sysimgblt.c 			dst2 += p->fix.line_length;
p                 111 drivers/video/fbdev/core/sysimgblt.c static void slow_imageblit(const struct fb_image *image, struct fb_info *p,
p                 115 drivers/video/fbdev/core/sysimgblt.c 	u32 shift, color = 0, bpp = p->var.bits_per_pixel;
p                 117 drivers/video/fbdev/core/sysimgblt.c 	u32 val, pitch = p->fix.line_length;
p                 124 drivers/video/fbdev/core/sysimgblt.c 	fgcolor <<= FB_LEFT_POS(p, bpp);
p                 125 drivers/video/fbdev/core/sysimgblt.c 	bgcolor <<= FB_LEFT_POS(p, bpp);
p                 136 drivers/video/fbdev/core/sysimgblt.c 			u32 start_mask = ~(FB_SHIFT_HIGH(p, ~(u32)0,
p                 145 drivers/video/fbdev/core/sysimgblt.c 			val |= FB_SHIFT_HIGH(p, color, shift);
p                 151 drivers/video/fbdev/core/sysimgblt.c 					FB_SHIFT_LOW(p, color, 32 - shift);
p                 160 drivers/video/fbdev/core/sysimgblt.c 			u32 end_mask = FB_SHIFT_HIGH(p, ~(u32)0, shift);
p                 186 drivers/video/fbdev/core/sysimgblt.c static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
p                 189 drivers/video/fbdev/core/sysimgblt.c 	u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
p                 199 drivers/video/fbdev/core/sysimgblt.c 		tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
p                 202 drivers/video/fbdev/core/sysimgblt.c 		tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
p                 235 drivers/video/fbdev/core/sysimgblt.c 		dst1 += p->fix.line_length;
p                 240 drivers/video/fbdev/core/sysimgblt.c void sys_imageblit(struct fb_info *p, const struct fb_image *image)
p                 243 drivers/video/fbdev/core/sysimgblt.c 	u32 bpl = sizeof(u32), bpp = p->var.bits_per_pixel;
p                 248 drivers/video/fbdev/core/sysimgblt.c 	if (p->state != FBINFO_STATE_RUNNING)
p                 251 drivers/video/fbdev/core/sysimgblt.c 	bitstart = (dy * p->fix.line_length * 8) + (dx * bpp);
p                 253 drivers/video/fbdev/core/sysimgblt.c 	pitch_index = (p->fix.line_length & (bpl - 1)) * 8;
p                 257 drivers/video/fbdev/core/sysimgblt.c 	dst1 = (void __force *)p->screen_base + bitstart;
p                 259 drivers/video/fbdev/core/sysimgblt.c 	if (p->fbops->fb_sync)
p                 260 drivers/video/fbdev/core/sysimgblt.c 		p->fbops->fb_sync(p);
p                 263 drivers/video/fbdev/core/sysimgblt.c 		if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
p                 264 drivers/video/fbdev/core/sysimgblt.c 		    p->fix.visual == FB_VISUAL_DIRECTCOLOR) {
p                 265 drivers/video/fbdev/core/sysimgblt.c 			fgcolor = ((u32*)(p->pseudo_palette))[image->fg_color];
p                 266 drivers/video/fbdev/core/sysimgblt.c 			bgcolor = ((u32*)(p->pseudo_palette))[image->bg_color];
p                 275 drivers/video/fbdev/core/sysimgblt.c 			fast_imageblit(image, p, dst1, fgcolor, bgcolor);
p                 277 drivers/video/fbdev/core/sysimgblt.c 			slow_imageblit(image, p, dst1, fgcolor, bgcolor,
p                 280 drivers/video/fbdev/core/sysimgblt.c 		color_imageblit(image, p, dst1, start_index, pitch_index);
p                 143 drivers/video/fbdev/core/tileblit.c 	if (ops->p) {
p                 147 drivers/video/fbdev/core/tileblit.c 		map.length = (ops->p->userfont) ?
p                 148 drivers/video/fbdev/core/tileblit.c 			FNTCHARCNT(ops->p->fontdata) : 256;
p                 149 drivers/video/fbdev/core/tileblit.c 		map.data = ops->p->fontdata;
p                 407 drivers/video/fbdev/ffb.c static int ffb_sync(struct fb_info *p)
p                 409 drivers/video/fbdev/ffb.c 	struct ffb_par *par = (struct ffb_par *)p->par;
p                 390 drivers/video/fbdev/fsl-diu-fb.c #define DMA_ADDR(p, f) ((p)->dma_addr + offsetof(struct fsl_diu_data, f))
p                 871 drivers/video/fbdev/fsl-diu-fb.c 	void *p;
p                 873 drivers/video/fbdev/fsl-diu-fb.c 	p = alloc_pages_exact(smem_len, GFP_DMA | __GFP_ZERO);
p                 874 drivers/video/fbdev/fsl-diu-fb.c 	if (!p) {
p                 879 drivers/video/fbdev/fsl-diu-fb.c 	info->screen_base = p;
p                 890 drivers/video/fbdev/fsl-diu-fb.c 	void *p = info->screen_base;
p                 899 drivers/video/fbdev/fsl-diu-fb.c 	if (p)
p                 900 drivers/video/fbdev/fsl-diu-fb.c 		free_pages_exact(p, l);
p                 137 drivers/video/fbdev/g364fb.c 		    ((x * fontwidth(p)) << 12) | ((y * fontheight(p)) -
p                 162 drivers/video/fbdev/hecubafb.c 	unsigned long p = *ppos;
p                 172 drivers/video/fbdev/hecubafb.c 	if (p > total_size)
p                 180 drivers/video/fbdev/hecubafb.c 	if (count + p > total_size) {
p                 184 drivers/video/fbdev/hecubafb.c 		count = total_size - p;
p                 187 drivers/video/fbdev/hecubafb.c 	dst = (void __force *) (info->screen_base + p);
p                 282 drivers/video/fbdev/hgafb.c 	void __iomem *p, *q;
p                 298 drivers/video/fbdev/hgafb.c 	p = hga_vram;
p                 301 drivers/video/fbdev/hgafb.c 	p_save = readw(p); q_save = readw(q);
p                 303 drivers/video/fbdev/hgafb.c 	writew(0xaa55, p); if (readw(p) == 0xaa55) count++;
p                 304 drivers/video/fbdev/hgafb.c 	writew(0x55aa, p); if (readw(p) == 0x55aa) count++;
p                 305 drivers/video/fbdev/hgafb.c 	writew(p_save, p);
p                 125 drivers/video/fbdev/hitfb.c static void hitfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
p                 128 drivers/video/fbdev/hitfb.c 		cfb_fillrect(p, rect);
p                 134 drivers/video/fbdev/hitfb.c 		if (p->var.bits_per_pixel == 16) {
p                 135 drivers/video/fbdev/hitfb.c 			fb_writew(((u32 *) (p->pseudo_palette))[rect->color],
p                 149 drivers/video/fbdev/hitfb.c static void hitfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
p                 152 drivers/video/fbdev/hitfb.c 	hitfb_accel_bitblt(p->var.bits_per_pixel == 16, area->sx, area->sy,
p                 151 drivers/video/fbdev/hpfb.c static void hpfb_fillrect(struct fb_info *p, const struct fb_fillrect *region)
p                 534 drivers/video/fbdev/hyperv_fb.c 			 unsigned long e, void *p)
p                 596 drivers/video/fbdev/hyperv_fb.c static void hvfb_cfb_fillrect(struct fb_info *p,
p                 599 drivers/video/fbdev/hyperv_fb.c 	struct hvfb_par *par = p->par;
p                 601 drivers/video/fbdev/hyperv_fb.c 	cfb_fillrect(p, rect);
p                 603 drivers/video/fbdev/hyperv_fb.c 		synthvid_update(p);
p                 606 drivers/video/fbdev/hyperv_fb.c static void hvfb_cfb_copyarea(struct fb_info *p,
p                 609 drivers/video/fbdev/hyperv_fb.c 	struct hvfb_par *par = p->par;
p                 611 drivers/video/fbdev/hyperv_fb.c 	cfb_copyarea(p, area);
p                 613 drivers/video/fbdev/hyperv_fb.c 		synthvid_update(p);
p                 616 drivers/video/fbdev/hyperv_fb.c static void hvfb_cfb_imageblit(struct fb_info *p,
p                 619 drivers/video/fbdev/hyperv_fb.c 	struct hvfb_par *par = p->par;
p                 621 drivers/video/fbdev/hyperv_fb.c 	cfb_imageblit(p, image);
p                 623 drivers/video/fbdev/hyperv_fb.c 		synthvid_update(p);
p                 642 drivers/video/fbdev/hyperv_fb.c 	char *opt = NULL, *p;
p                 648 drivers/video/fbdev/hyperv_fb.c 	p = strsep(&opt, "x");
p                 649 drivers/video/fbdev/hyperv_fb.c 	if (!*p || kstrtouint(p, 0, &x) ||
p                 706 drivers/video/fbdev/i810/i810_main.c static void i810_calc_dclk(u32 freq, u32 *m, u32 *n, u32 *p)
p                 757 drivers/video/fbdev/i810/i810_main.c 	if (p) *p = (p_target << 4);
p                  27 drivers/video/fbdev/i810/i810_main.h extern void i810fb_fillrect (struct fb_info *p, 
p                  29 drivers/video/fbdev/i810/i810_main.h extern void i810fb_copyarea (struct fb_info *p, 
p                  31 drivers/video/fbdev/i810/i810_main.h extern void i810fb_imageblit(struct fb_info *p, const struct fb_image *image);
p                  32 drivers/video/fbdev/i810/i810_main.h extern int  i810fb_sync     (struct fb_info *p);
p                1556 drivers/video/fbdev/imsttfb.c 			char *p;
p                1559 drivers/video/fbdev/imsttfb.c 			p = this_opt + 5;
p                1561 drivers/video/fbdev/imsttfb.c 				if (!*p || *p == ' ' || *p == ',')
p                 275 drivers/video/fbdev/intelfb/intelfbdrv.c 	const char *p;
p                 279 drivers/video/fbdev/intelfb/intelfbdrv.c 	p = OPT_STRVAL(this_opt, name);
p                 281 drivers/video/fbdev/intelfb/intelfbdrv.c 	while (p[i] && p[i] != ' ' && p[i] != ',')
p                 285 drivers/video/fbdev/intelfb/intelfbdrv.c 		strncpy(ret, p, i);
p                 659 drivers/video/fbdev/intelfb/intelfbhw.c static int calc_vclock3(int index, int m, int n, int p)
p                 661 drivers/video/fbdev/intelfb/intelfbhw.c 	if (p == 0 || n == 0)
p                 663 drivers/video/fbdev/intelfb/intelfbhw.c 	return plls[index].ref_clk * m / n / p;
p                 670 drivers/video/fbdev/intelfb/intelfbhw.c 	u32 m, vco, p;
p                 677 drivers/video/fbdev/intelfb/intelfbhw.c 		p = ((p1 + 2) * (1 << (p2 + 1)));
p                 679 drivers/video/fbdev/intelfb/intelfbhw.c 		p = ((p1) * (p2 ? 5 : 10));
p                 680 drivers/video/fbdev/intelfb/intelfbhw.c 	return vco / p;
p                 902 drivers/video/fbdev/intelfb/intelfbhw.c static int splitp(int index, unsigned int p, unsigned int *retp1,
p                 909 drivers/video/fbdev/intelfb/intelfbhw.c 		p2 = (p % 10) ? 1 : 0;
p                 911 drivers/video/fbdev/intelfb/intelfbhw.c 		p1 = p / (p2 ? 5 : 10);
p                 918 drivers/video/fbdev/intelfb/intelfbhw.c 	if (p % 4 == 0)
p                 922 drivers/video/fbdev/intelfb/intelfbhw.c 	p1 = (p / (1 << (p2 + 1))) - 2;
p                 923 drivers/video/fbdev/intelfb/intelfbhw.c 	if (p % 4 == 0 && p1 < pll->min_p1) {
p                 925 drivers/video/fbdev/intelfb/intelfbhw.c 		p1 = (p / (1 << (p2 + 1))) - 2;
p                 928 drivers/video/fbdev/intelfb/intelfbhw.c 	    (p1 + 2) * (1 << (p2 + 1)) != p) {
p                 941 drivers/video/fbdev/intelfb/intelfbhw.c 	u32 f_vco, p, p_best = 0, m, f_out = 0;
p                 961 drivers/video/fbdev/intelfb/intelfbhw.c 	p = p_min;
p                 963 drivers/video/fbdev/intelfb/intelfbhw.c 		if (splitp(index, p, &p1, &p2)) {
p                 964 drivers/video/fbdev/intelfb/intelfbhw.c 			WRN_MSG("cannot split p = %d\n", p);
p                 965 drivers/video/fbdev/intelfb/intelfbhw.c 			p += p_inc;
p                 969 drivers/video/fbdev/intelfb/intelfbhw.c 		f_vco = clock * p;
p                 978 drivers/video/fbdev/intelfb/intelfbhw.c 				f_out = calc_vclock3(index, testm, n, p);
p                 992 drivers/video/fbdev/intelfb/intelfbhw.c 					p_best = p;
p                 998 drivers/video/fbdev/intelfb/intelfbhw.c 		p += p_inc;
p                 999 drivers/video/fbdev/intelfb/intelfbhw.c 	} while ((p <= p_max));
p                1007 drivers/video/fbdev/intelfb/intelfbhw.c 	p = p_best;
p                1009 drivers/video/fbdev/intelfb/intelfbhw.c 	splitp(index, p, &p1, &p2);
p                1014 drivers/video/fbdev/intelfb/intelfbhw.c 		m, m1, m2, n, n1, p, p1, p2,
p                1015 drivers/video/fbdev/intelfb/intelfbhw.c 		calc_vclock3(index, m, n, p),
p                1017 drivers/video/fbdev/intelfb/intelfbhw.c 		calc_vclock3(index, m, n, p) * p);
p                  20 drivers/video/fbdev/matrox/g450_pll.c static inline unsigned int g450_vco2f(unsigned char p, unsigned int fvco) {
p                  21 drivers/video/fbdev/matrox/g450_pll.c 	return (p & 0x40) ? fvco : fvco >> ((p & 3) + 1);
p                  24 drivers/video/fbdev/matrox/g450_pll.c static inline unsigned int g450_f2vco(unsigned char p, unsigned int fin) {
p                  25 drivers/video/fbdev/matrox/g450_pll.c 	return (p & 0x40) ? fin : fin << ((p & 3) + 1);
p                  59 drivers/video/fbdev/matrox/g450_pll.c 	unsigned int m, n, p;
p                  63 drivers/video/fbdev/matrox/g450_pll.c 	p = mnp & 0xFF;
p                  68 drivers/video/fbdev/matrox/g450_pll.c 				if (p & 0x40) {
p                  71 drivers/video/fbdev/matrox/g450_pll.c 			        if (p & 3) {
p                  72 drivers/video/fbdev/matrox/g450_pll.c 					p--;
p                  74 drivers/video/fbdev/matrox/g450_pll.c 					p = 0x40;
p                  83 drivers/video/fbdev/matrox/g450_pll.c 			p &= 0x43;
p                  87 drivers/video/fbdev/matrox/g450_pll.c 				p |= 0x08;
p                  89 drivers/video/fbdev/matrox/g450_pll.c 				p |= 0x10;
p                  91 drivers/video/fbdev/matrox/g450_pll.c 				p |= 0x18;
p                  93 drivers/video/fbdev/matrox/g450_pll.c 				p |= 0x20;
p                 101 drivers/video/fbdev/matrox/g450_pll.c 	return (m << 16) | (n << 8) | p;
p                 108 drivers/video/fbdev/matrox/g450_pll.c 	unsigned int p;
p                 118 drivers/video/fbdev/matrox/g450_pll.c 		p = 0x40;
p                 122 drivers/video/fbdev/matrox/g450_pll.c 		p = 3;
p                 123 drivers/video/fbdev/matrox/g450_pll.c 		tvco = g450_f2vco(p, fout);
p                 124 drivers/video/fbdev/matrox/g450_pll.c 		while (p && (tvco > vcomax)) {
p                 125 drivers/video/fbdev/matrox/g450_pll.c 			p--;
p                 133 drivers/video/fbdev/matrox/g450_pll.c 	return g450_nextpll(minfo, pi, vco, 0xFF0000 | p);
p                 178 drivers/video/fbdev/matrox/g450_pll.c 	unsigned char p = mnp;
p                 184 drivers/video/fbdev/matrox/g450_pll.c 				matroxfb_DAC_in(minfo, M1064_XPIXPLLAP) != p);
p                 189 drivers/video/fbdev/matrox/g450_pll.c 				matroxfb_DAC_in(minfo, M1064_XPIXPLLBP) != p);
p                 194 drivers/video/fbdev/matrox/g450_pll.c 				matroxfb_DAC_in(minfo, M1064_XPIXPLLCP) != p);
p                 199 drivers/video/fbdev/matrox/g450_pll.c 				matroxfb_DAC_in(minfo, DAC1064_XSYSPLLP) != p);
p                 204 drivers/video/fbdev/matrox/g450_pll.c 				matroxfb_DAC_in(minfo, M1064_XVIDPLLP) != p);
p                  43 drivers/video/fbdev/matrox/matroxfb_DAC1064.c 	unsigned int p;
p                  49 drivers/video/fbdev/matrox/matroxfb_DAC1064.c 	fvco = PLL_calcclock(minfo, freq, fmax, in, feed, &p);
p                  51 drivers/video/fbdev/matrox/matroxfb_DAC1064.c 	p = (1 << p) - 1;
p                  55 drivers/video/fbdev/matrox/matroxfb_DAC1064.c 		p |= 0x08;
p                  57 drivers/video/fbdev/matrox/matroxfb_DAC1064.c 		p |= 0x10;
p                  59 drivers/video/fbdev/matrox/matroxfb_DAC1064.c 		p |= 0x18;
p                  60 drivers/video/fbdev/matrox/matroxfb_DAC1064.c 	*post = p;
p                  90 drivers/video/fbdev/matrox/matroxfb_DAC1064.c 	unsigned int m, n, p;
p                  94 drivers/video/fbdev/matrox/matroxfb_DAC1064.c 	DAC1064_calcclock(minfo, fout, minfo->max_pixel_clock, &m, &n, &p);
p                  97 drivers/video/fbdev/matrox/matroxfb_DAC1064.c 	minfo->hw.DACclk[2] = p;
p                 127 drivers/video/fbdev/matrox/matroxfb_DAC1064.c 		unsigned int m, n, p;
p                 142 drivers/video/fbdev/matrox/matroxfb_DAC1064.c 		DAC1064_calcclock(minfo, fmem, minfo->max_pixel_clock, &m, &n, &p);
p                 145 drivers/video/fbdev/matrox/matroxfb_DAC1064.c 		outDAC1064(minfo, DAC1064_XSYSPLLP, hw->DACclk[5] = p);
p                 611 drivers/video/fbdev/matrox/matroxfb_DAC1064.c 				 int m, int n, int p)
p                 628 drivers/video/fbdev/matrox/matroxfb_DAC1064.c 	outDAC1064(minfo, reg, p);
p                 659 drivers/video/fbdev/matrox/matroxfb_DAC1064.c 	unsigned int m, n, p;
p                 663 drivers/video/fbdev/matrox/matroxfb_DAC1064.c 	DAC1064_calcclock(minfo, freq, minfo->max_pixel_clock, &m, &n, &p);
p                 664 drivers/video/fbdev/matrox/matroxfb_DAC1064.c 	MGAG100_progPixClock(minfo, flags, m, n, p);
p                1963 drivers/video/fbdev/matrox/matroxfb_base.c 		void* p;
p                1967 drivers/video/fbdev/matrox/matroxfb_base.c 		p = drv->probe(minfo);
p                1968 drivers/video/fbdev/matrox/matroxfb_base.c 		if (p) {
p                1969 drivers/video/fbdev/matrox/matroxfb_base.c 			minfo->drivers_data[minfo->drivers_count] = p;
p                2005 drivers/video/fbdev/matrox/matroxfb_base.c 			void *p = drv->probe(minfo);
p                2006 drivers/video/fbdev/matrox/matroxfb_base.c 			if (p) {
p                2007 drivers/video/fbdev/matrox/matroxfb_base.c 				minfo->drivers_data[i] = p;
p                 142 drivers/video/fbdev/matrox/matroxfb_g450.c static int g450_query_ctrl(void* md, struct v4l2_queryctrl *p) {
p                 145 drivers/video/fbdev/matrox/matroxfb_g450.c 	i = get_ctrl_id(p->id);
p                 147 drivers/video/fbdev/matrox/matroxfb_g450.c 		*p = g450_controls[i].desc;
p                 154 drivers/video/fbdev/matrox/matroxfb_g450.c 		i = p->id;
p                 155 drivers/video/fbdev/matrox/matroxfb_g450.c 		*p = disctrl;
p                 156 drivers/video/fbdev/matrox/matroxfb_g450.c 		p->id = i;
p                 157 drivers/video/fbdev/matrox/matroxfb_g450.c 		sprintf(p->name, "Ctrl #%08X", i);
p                 163 drivers/video/fbdev/matrox/matroxfb_g450.c static int g450_set_ctrl(void* md, struct v4l2_control *p) {
p                 167 drivers/video/fbdev/matrox/matroxfb_g450.c 	i = get_ctrl_id(p->id);
p                 173 drivers/video/fbdev/matrox/matroxfb_g450.c 	if (p->value == *get_ctrl_ptr(minfo, i)) return 0;
p                 178 drivers/video/fbdev/matrox/matroxfb_g450.c 	if (p->value > g450_controls[i].desc.maximum) return -EINVAL;
p                 179 drivers/video/fbdev/matrox/matroxfb_g450.c 	if (p->value < g450_controls[i].desc.minimum) return -EINVAL;
p                 184 drivers/video/fbdev/matrox/matroxfb_g450.c 	*get_ctrl_ptr(minfo, i) = p->value;
p                 186 drivers/video/fbdev/matrox/matroxfb_g450.c 	switch (p->id) {
p                 197 drivers/video/fbdev/matrox/matroxfb_g450.c 			cve2_set_reg(minfo, 0x20, p->value);
p                 198 drivers/video/fbdev/matrox/matroxfb_g450.c 			cve2_set_reg(minfo, 0x22, p->value);
p                 201 drivers/video/fbdev/matrox/matroxfb_g450.c 			cve2_set_reg(minfo, 0x25, p->value);
p                 206 drivers/video/fbdev/matrox/matroxfb_g450.c 				if (p->value) val |=  0x02;
p                 217 drivers/video/fbdev/matrox/matroxfb_g450.c static int g450_get_ctrl(void* md, struct v4l2_control *p) {
p                 221 drivers/video/fbdev/matrox/matroxfb_g450.c 	i = get_ctrl_id(p->id);
p                 223 drivers/video/fbdev/matrox/matroxfb_g450.c 	p->value = *get_ctrl_ptr(minfo, i);
p                 233 drivers/video/fbdev/matrox/matroxfb_maven.c 	unsigned int p;
p                 245 drivers/video/fbdev/matrox/matroxfb_maven.c 	for (p = 1; p <= pll->post_shift_max; p++) {
p                 252 drivers/video/fbdev/matrox/matroxfb_maven.c 	for (; p-- > 0; fwant >>= 1) {
p                 268 drivers/video/fbdev/matrox/matroxfb_maven.c 			dvd = m << p;
p                 279 drivers/video/fbdev/matrox/matroxfb_maven.c 			dprintk(KERN_DEBUG "Match: %u / %u / %u / %u\n", n, m, p, ln);
p                 283 drivers/video/fbdev/matrox/matroxfb_maven.c 				*post = p;
p                 303 drivers/video/fbdev/matrox/matroxfb_maven.c 	unsigned int uninitialized_var(p);
p                 305 drivers/video/fbdev/matrox/matroxfb_maven.c 	fvco = matroxfb_PLL_mavenclock(&maven1000_pll, ctl, htotal, vtotal, in, feed, &p, htotal2);
p                 308 drivers/video/fbdev/matrox/matroxfb_maven.c 	p = (1 << p) - 1;
p                 312 drivers/video/fbdev/matrox/matroxfb_maven.c 		p |= 0x08;
p                 314 drivers/video/fbdev/matrox/matroxfb_maven.c 		p |= 0x10;
p                 316 drivers/video/fbdev/matrox/matroxfb_maven.c 		p |= 0x18;
p                 317 drivers/video/fbdev/matrox/matroxfb_maven.c 	*post = p;
p                 324 drivers/video/fbdev/matrox/matroxfb_maven.c 	unsigned int p;
p                 326 drivers/video/fbdev/matrox/matroxfb_maven.c 	fvco = matroxfb_PLL_calcclock(&maven_pll, freq, fmax, in, feed, &p);
p                 327 drivers/video/fbdev/matrox/matroxfb_maven.c 	p = (1 << p) - 1;
p                 331 drivers/video/fbdev/matrox/matroxfb_maven.c 		p |= 0x08;
p                 333 drivers/video/fbdev/matrox/matroxfb_maven.c 		p |= 0x10;
p                 335 drivers/video/fbdev/matrox/matroxfb_maven.c 		p |= 0x18;
p                 336 drivers/video/fbdev/matrox/matroxfb_maven.c 	*post = p;
p                1034 drivers/video/fbdev/matrox/matroxfb_maven.c 				struct v4l2_queryctrl *p) {
p                1037 drivers/video/fbdev/matrox/matroxfb_maven.c 	i = get_ctrl_id(p->id);
p                1039 drivers/video/fbdev/matrox/matroxfb_maven.c 		*p = maven_controls[i].desc;
p                1046 drivers/video/fbdev/matrox/matroxfb_maven.c 		i = p->id;
p                1047 drivers/video/fbdev/matrox/matroxfb_maven.c 		*p = disctrl;
p                1048 drivers/video/fbdev/matrox/matroxfb_maven.c 		p->id = i;
p                1049 drivers/video/fbdev/matrox/matroxfb_maven.c 		sprintf(p->name, "Ctrl #%08X", i);
p                1056 drivers/video/fbdev/matrox/matroxfb_maven.c 			      struct v4l2_control *p) {
p                1059 drivers/video/fbdev/matrox/matroxfb_maven.c 	i = get_ctrl_id(p->id);
p                1065 drivers/video/fbdev/matrox/matroxfb_maven.c 	if (p->value == *get_ctrl_ptr(md, i)) return 0;
p                1070 drivers/video/fbdev/matrox/matroxfb_maven.c 	if (p->value > maven_controls[i].desc.maximum) return -EINVAL;
p                1071 drivers/video/fbdev/matrox/matroxfb_maven.c 	if (p->value < maven_controls[i].desc.minimum) return -EINVAL;
p                1076 drivers/video/fbdev/matrox/matroxfb_maven.c 	*get_ctrl_ptr(md, i) = p->value;
p                1078 drivers/video/fbdev/matrox/matroxfb_maven.c 	switch (p->id) {
p                1092 drivers/video/fbdev/matrox/matroxfb_maven.c 		  maven_set_reg(md->client, 0x20, p->value);
p                1093 drivers/video/fbdev/matrox/matroxfb_maven.c 		  maven_set_reg(md->client, 0x22, p->value);
p                1098 drivers/video/fbdev/matrox/matroxfb_maven.c 		  maven_set_reg(md->client, 0x25, p->value);
p                1120 drivers/video/fbdev/matrox/matroxfb_maven.c 			if (p->value) val |= 0x10;
p                1137 drivers/video/fbdev/matrox/matroxfb_maven.c 			      struct v4l2_control *p) {
p                1140 drivers/video/fbdev/matrox/matroxfb_maven.c 	i = get_ctrl_id(p->id);
p                1142 drivers/video/fbdev/matrox/matroxfb_maven.c 	p->value = *get_ctrl_ptr(md, i);
p                1178 drivers/video/fbdev/matrox/matroxfb_maven.c static int maven_out_get_queryctrl(void* md, struct v4l2_queryctrl* p) {
p                1179 drivers/video/fbdev/matrox/matroxfb_maven.c         return maven_get_queryctrl(md, p);
p                1182 drivers/video/fbdev/matrox/matroxfb_maven.c static int maven_out_get_ctrl(void* md, struct v4l2_control* p) {
p                1183 drivers/video/fbdev/matrox/matroxfb_maven.c 	return maven_get_control(md, p);
p                1186 drivers/video/fbdev/matrox/matroxfb_maven.c static int maven_out_set_ctrl(void* md, struct v4l2_control* p) {
p                1187 drivers/video/fbdev/matrox/matroxfb_maven.c 	return maven_set_control(md, p);
p                 135 drivers/video/fbdev/matrox/matroxfb_misc.c 	unsigned int p;
p                 152 drivers/video/fbdev/matrox/matroxfb_misc.c 	for (p = 1; p <= pll->post_shift_max; p++) {
p                 159 drivers/video/fbdev/matrox/matroxfb_misc.c 	for (; p-- > 0; fwant >>= 1, bestdiff >>= 1) {
p                 179 drivers/video/fbdev/matrox/matroxfb_misc.c 				*post = p;
p                 118 drivers/video/fbdev/mbx/mbxfb.c 	u8 p;
p                 124 drivers/video/fbdev/mbx/mbxfb.c 	u8 m, n, p;
p                 148 drivers/video/fbdev/mbx/mbxfb.c 			for (p = 0; p < 8; p++) {
p                 149 drivers/video/fbdev/mbx/mbxfb.c 				clk = (ref_clk * m) / (n * (1 << p));
p                 157 drivers/video/fbdev/mbx/mbxfb.c 					div->p = p;
p                 285 drivers/video/fbdev/mbx/mbxfb.c 		Disp_Pll_P(div.p) | DISP_PLL_EN), DISPPLL);
p                 523 drivers/video/fbdev/metronomefb.c 	unsigned long p = *ppos;
p                 533 drivers/video/fbdev/metronomefb.c 	if (p > total_size)
p                 541 drivers/video/fbdev/metronomefb.c 	if (count + p > total_size) {
p                 545 drivers/video/fbdev/metronomefb.c 		count = total_size - p;
p                 548 drivers/video/fbdev/metronomefb.c 	dst = (void __force *)(info->screen_base + p);
p                  62 drivers/video/fbdev/nvidia/nv_local.h #define NV_WR08(p,i,d)  (__raw_writeb((d), (void __iomem *)(p) + (i)))
p                  63 drivers/video/fbdev/nvidia/nv_local.h #define NV_RD08(p,i)    (__raw_readb((void __iomem *)(p) + (i)))
p                  64 drivers/video/fbdev/nvidia/nv_local.h #define NV_WR16(p,i,d)  (__raw_writew((d), (void __iomem *)(p) + (i)))
p                  65 drivers/video/fbdev/nvidia/nv_local.h #define NV_RD16(p,i)    (__raw_readw((void __iomem *)(p) + (i)))
p                  66 drivers/video/fbdev/nvidia/nv_local.h #define NV_WR32(p,i,d)  (__raw_writel((d), (void __iomem *)(p) + (i)))
p                  67 drivers/video/fbdev/nvidia/nv_local.h #define NV_RD32(p,i)    (__raw_readl((void __iomem *)(p) + (i)))
p                  70 drivers/video/fbdev/nvidia/nv_local.h #define VGA_WR08(p,i,d) (writeb((d), (void __iomem *)(p) + (i)))
p                  71 drivers/video/fbdev/nvidia/nv_local.h #define VGA_RD08(p,i)   (readb((void __iomem *)(p) + (i)))
p                  43 drivers/video/fbdev/nvidia/nv_proto.h #define nvidia_probe_i2c_connector(p, c, edid) (-1)
p                1459 drivers/video/fbdev/nvidia/nvidia.c 			char *p;
p                1461 drivers/video/fbdev/nvidia/nvidia.c 			p = this_opt + 9;
p                1462 drivers/video/fbdev/nvidia/nvidia.c 			if (!*p || !*(++p))
p                1464 drivers/video/fbdev/nvidia/nvidia.c 			forceCRTC = *p - '0';
p                  34 drivers/video/fbdev/omap/lcd_mipid.c #define to_mipid_device(p)		container_of(p, struct mipid_device, \
p                1066 drivers/video/fbdev/omap/omapfb_main.c 	} p;
p                1072 drivers/video/fbdev/omap/omapfb_main.c 		if (get_user(p.mirror, (int __user *)arg))
p                1075 drivers/video/fbdev/omap/omapfb_main.c 			omapfb_mirror(fbi, p.mirror);
p                1083 drivers/video/fbdev/omap/omapfb_main.c 		if (get_user(p.update_mode, (int __user *)arg))
p                1086 drivers/video/fbdev/omap/omapfb_main.c 			r = omapfb_set_update_mode(fbdev, p.update_mode);
p                1089 drivers/video/fbdev/omap/omapfb_main.c 		p.update_mode = omapfb_get_update_mode(fbdev);
p                1090 drivers/video/fbdev/omap/omapfb_main.c 		if (put_user(p.update_mode,
p                1095 drivers/video/fbdev/omap/omapfb_main.c 		if (copy_from_user(&p.update_window, (void __user *)arg,
p                1099 drivers/video/fbdev/omap/omapfb_main.c 			struct omapfb_update_window *u = &p.update_window;
p                1109 drivers/video/fbdev/omap/omapfb_main.c 		if (copy_from_user(&p.update_window, (void __user *)arg,
p                1110 drivers/video/fbdev/omap/omapfb_main.c 				   sizeof(p.update_window)))
p                1113 drivers/video/fbdev/omap/omapfb_main.c 			r = omapfb_update_win(fbi, &p.update_window);
p                1116 drivers/video/fbdev/omap/omapfb_main.c 		if (copy_from_user(&p.plane_info, (void __user *)arg,
p                1117 drivers/video/fbdev/omap/omapfb_main.c 				   sizeof(p.plane_info)))
p                1120 drivers/video/fbdev/omap/omapfb_main.c 			r = omapfb_setup_plane(fbi, &p.plane_info);
p                1123 drivers/video/fbdev/omap/omapfb_main.c 		if ((r = omapfb_query_plane(fbi, &p.plane_info)) < 0)
p                1125 drivers/video/fbdev/omap/omapfb_main.c 		if (copy_to_user((void __user *)arg, &p.plane_info,
p                1126 drivers/video/fbdev/omap/omapfb_main.c 				   sizeof(p.plane_info)))
p                1130 drivers/video/fbdev/omap/omapfb_main.c 		if (copy_from_user(&p.mem_info, (void __user *)arg,
p                1131 drivers/video/fbdev/omap/omapfb_main.c 				   sizeof(p.mem_info)))
p                1134 drivers/video/fbdev/omap/omapfb_main.c 			r = omapfb_setup_mem(fbi, &p.mem_info);
p                1137 drivers/video/fbdev/omap/omapfb_main.c 		if ((r = omapfb_query_mem(fbi, &p.mem_info)) < 0)
p                1139 drivers/video/fbdev/omap/omapfb_main.c 		if (copy_to_user((void __user *)arg, &p.mem_info,
p                1140 drivers/video/fbdev/omap/omapfb_main.c 				   sizeof(p.mem_info)))
p                1144 drivers/video/fbdev/omap/omapfb_main.c 		if (copy_from_user(&p.color_key, (void __user *)arg,
p                1145 drivers/video/fbdev/omap/omapfb_main.c 				   sizeof(p.color_key)))
p                1148 drivers/video/fbdev/omap/omapfb_main.c 			r = omapfb_set_color_key(fbdev, &p.color_key);
p                1151 drivers/video/fbdev/omap/omapfb_main.c 		if ((r = omapfb_get_color_key(fbdev, &p.color_key)) < 0)
p                1153 drivers/video/fbdev/omap/omapfb_main.c 		if (copy_to_user((void __user *)arg, &p.color_key,
p                1154 drivers/video/fbdev/omap/omapfb_main.c 				 sizeof(p.color_key)))
p                1158 drivers/video/fbdev/omap/omapfb_main.c 		omapfb_get_caps(fbdev, plane->idx, &p.caps);
p                1159 drivers/video/fbdev/omap/omapfb_main.c 		if (copy_to_user((void __user *)arg, &p.caps, sizeof(p.caps)))
p                  34 drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
p                  80 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
p                  55 drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
p                  83 drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
p                  56 drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
p                 103 drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
p                 156 drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
p                  92 drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
p                1952 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		unsigned p = dsi->lanes[i].polarity;
p                1955 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			l |= 1 << (i * 2 + (p ? 0 : 1));
p                1958 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			l |= 1 << (i * 2 + (p ? 1 : 0));
p                2687 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	u8 *p;
p                2704 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	p = data;
p                2709 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		b1 = *p++;
p                2710 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		b2 = *p++;
p                2711 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		b3 = *p++;
p                2712 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 		b4 = *p++;
p                2726 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			b1 = *p++;
p                2727 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			b2 = *p++;
p                2728 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			b3 = *p++;
p                2731 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			b1 = *p++;
p                2732 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			b2 = *p++;
p                2735 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 			b1 = *p++;
p                 146 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c 	struct omap_video_timings *p;
p                 159 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c 	p = &hdmi.cfg.timings;
p                 161 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c 	DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
p                 163 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c 	hdmi_pll_compute(&hdmi.pll, p->pixelclock, &hdmi_cinfo);
p                 194 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c 	dss_mgr_set_timings(mgr, p);
p                 163 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c 	struct omap_video_timings *p;
p                 171 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c 	p = &hdmi.cfg.timings;
p                 173 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c 	DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
p                 175 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c 	hdmi_pll_compute(&hdmi.pll, p->pixelclock, &hdmi_cinfo);
p                 211 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c 	dss_mgr_set_timings(mgr, p);
p                  34 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c 	const char *p = prop->value;
p                  38 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c 	for (i = 0; total < prop->length; total += l, p += l, i++)
p                  39 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c 		l = strlen(p) + 1;
p                 597 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 	} p;
p                 601 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 	memset(&p, 0, sizeof(p));
p                 622 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (copy_from_user(&p.uwnd_o,
p                 624 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 					sizeof(p.uwnd_o))) {
p                 629 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		r = omapfb_update_window(fbi, p.uwnd_o.x, p.uwnd_o.y,
p                 630 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 				p.uwnd_o.width, p.uwnd_o.height);
p                 640 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (copy_from_user(&p.uwnd, (void __user *)arg,
p                 641 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 					sizeof(p.uwnd))) {
p                 646 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		r = omapfb_update_window(fbi, p.uwnd.x, p.uwnd.y,
p                 647 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 				p.uwnd.width, p.uwnd.height);
p                 652 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (copy_from_user(&p.plane_info, (void __user *)arg,
p                 653 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 					sizeof(p.plane_info)))
p                 656 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 			r = omapfb_setup_plane(fbi, &p.plane_info);
p                 661 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		r = omapfb_query_plane(fbi, &p.plane_info);
p                 664 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (copy_to_user((void __user *)arg, &p.plane_info,
p                 665 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 					sizeof(p.plane_info)))
p                 671 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (copy_from_user(&p.mem_info, (void __user *)arg,
p                 672 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 					sizeof(p.mem_info)))
p                 675 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 			r = omapfb_setup_mem(fbi, &p.mem_info);
p                 680 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		r = omapfb_query_mem(fbi, &p.mem_info);
p                 683 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (copy_to_user((void __user *)arg, &p.mem_info,
p                 684 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 					sizeof(p.mem_info)))
p                 695 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		memset(&p.caps, 0, sizeof(p.caps));
p                 697 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 			p.caps.ctrl |= OMAPFB_CAPS_MANUAL_UPDATE;
p                 699 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 			p.caps.ctrl |= OMAPFB_CAPS_TEARSYNC;
p                 701 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (copy_to_user((void __user *)arg, &p.caps, sizeof(p.caps)))
p                 707 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (copy_from_user(&p.ovl_colormode, (void __user *)arg,
p                 708 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 				   sizeof(p.ovl_colormode))) {
p                 712 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		r = omapfb_get_ovl_colormode(fbdev, &p.ovl_colormode);
p                 715 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (copy_to_user((void __user *)arg, &p.ovl_colormode,
p                 716 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 				 sizeof(p.ovl_colormode)))
p                 722 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (get_user(p.update_mode, (int __user *)arg))
p                 725 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 			r = omapfb_set_update_mode(fbi, p.update_mode);
p                 730 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		r = omapfb_get_update_mode(fbi, &p.update_mode);
p                 733 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (put_user(p.update_mode,
p                 740 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (copy_from_user(&p.color_key, (void __user *)arg,
p                 741 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 				   sizeof(p.color_key)))
p                 744 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 			r = omapfb_set_color_key(fbi, &p.color_key);
p                 749 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		r = omapfb_get_color_key(fbi, &p.color_key);
p                 752 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (copy_to_user((void __user *)arg, &p.color_key,
p                 753 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 				 sizeof(p.color_key)))
p                 758 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (get_user(p.crt, (__u32 __user *)arg)) {
p                 762 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (p.crt != 0) {
p                 799 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (get_user(p.test_num, (int __user *)arg)) {
p                 808 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		r = display->driver->run_test(display, p.test_num);
p                 814 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (get_user(p.test_num, (int __user *)arg)) {
p                 823 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		r = display->driver->run_test(display, p.test_num);
p                 830 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (copy_from_user(&p.memory_read, (void __user *)arg,
p                 831 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 					sizeof(p.memory_read))) {
p                 836 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		r = omapfb_memory_read(fbi, &p.memory_read);
p                 847 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		p.vram_info.total = SZ_1M * 64;
p                 848 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		p.vram_info.free = SZ_1M * 64;
p                 849 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		p.vram_info.largest_free_block = SZ_1M * 64;
p                 851 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (copy_to_user((void __user *)arg, &p.vram_info,
p                 852 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 					sizeof(p.vram_info)))
p                 860 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (copy_from_user(&p.tearsync_info, (void __user *)arg,
p                 861 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 					sizeof(p.tearsync_info))) {
p                 872 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 				!!p.tearsync_info.enabled);
p                 889 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		p.display_info.xres = xres;
p                 890 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		p.display_info.yres = yres;
p                 895 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 			p.display_info.width = w;
p                 896 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 			p.display_info.height = h;
p                 898 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 			p.display_info.width = 0;
p                 899 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 			p.display_info.height = 0;
p                 902 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 		if (copy_to_user((void __user *)arg, &p.display_info,
p                 903 drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c 					sizeof(p.display_info)))
p                  67 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		u16 __iomem *p = (u16 __iomem *)addr;
p                  68 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		p += y * line_len + x;
p                  74 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		__raw_writew((r << 11) | (g << 5) | (b << 0), p);
p                  76 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		u8 __iomem *p = (u8 __iomem *)addr;
p                  77 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		p += (y * line_len + x) * 3;
p                  79 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		__raw_writeb(b, p + 0);
p                  80 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		__raw_writeb(g, p + 1);
p                  81 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		__raw_writeb(r, p + 2);
p                  83 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		u32 __iomem *p = (u32 __iomem *)addr;
p                  84 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		p += y * line_len + x;
p                  85 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		__raw_writel(color, p);
p                1473 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 	char *p, *start;
p                1478 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		p = start;
p                1480 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		fbnum = simple_strtoul(p, &p, 10);
p                1482 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		if (p == start)
p                1485 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		if (*p != ':')
p                1491 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		size = memparse(p + 1, &p);
p                1498 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		if (*p == '@') {
p                1499 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 			paddr = simple_strtoul(p + 1, &p, 16);
p                1512 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		if (*p == 0)
p                1515 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		if (*p != ',')
p                1518 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		++p;
p                1520 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		start = p;
p                2165 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		char *p, *display_str, *mode_str;
p                2169 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		p = strchr(this_opt, ':');
p                2170 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		if (!p) {
p                2175 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		*p = 0;
p                2177 drivers/video/fbdev/omap2/omapfb/omapfb-main.c 		mode_str = p + 1;
p                 201 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 		char *p = (char *)buf;
p                 204 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 		while (p < buf + len) {
p                 211 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 			ovlnum = simple_strtoul(p, &p, 0);
p                 228 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 			p++;
p                 359 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 		char *p = (char *)buf;
p                 361 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 		while (p < buf + len) {
p                 369 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 			rot = simple_strtoul(p, &p, 0);
p                 380 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c 			p++;
p                 144 drivers/video/fbdev/pm2fb.c static inline u32 pm2_RD(struct pm2fb_par *p, s32 off)
p                 146 drivers/video/fbdev/pm2fb.c 	return fb_readl(p->v_regs + off);
p                 149 drivers/video/fbdev/pm2fb.c static inline void pm2_WR(struct pm2fb_par *p, s32 off, u32 v)
p                 151 drivers/video/fbdev/pm2fb.c 	fb_writel(v, p->v_regs + off);
p                 154 drivers/video/fbdev/pm2fb.c static inline u32 pm2_RDAC_RD(struct pm2fb_par *p, s32 idx)
p                 156 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_RD_PALETTE_WRITE_ADDRESS, idx);
p                 158 drivers/video/fbdev/pm2fb.c 	return pm2_RD(p, PM2R_RD_INDEXED_DATA);
p                 161 drivers/video/fbdev/pm2fb.c static inline u32 pm2v_RDAC_RD(struct pm2fb_par *p, s32 idx)
p                 163 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff);
p                 165 drivers/video/fbdev/pm2fb.c 	return pm2_RD(p,  PM2VR_RD_INDEXED_DATA);
p                 168 drivers/video/fbdev/pm2fb.c static inline void pm2_RDAC_WR(struct pm2fb_par *p, s32 idx, u32 v)
p                 170 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_RD_PALETTE_WRITE_ADDRESS, idx);
p                 172 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_RD_INDEXED_DATA, v);
p                 176 drivers/video/fbdev/pm2fb.c static inline void pm2v_RDAC_WR(struct pm2fb_par *p, s32 idx, u32 v)
p                 178 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff);
p                 180 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2VR_RD_INDEXED_DATA, v);
p                 185 drivers/video/fbdev/pm2fb.c #define WAIT_FIFO(p, a)
p                 187 drivers/video/fbdev/pm2fb.c static inline void WAIT_FIFO(struct pm2fb_par *p, u32 a)
p                 189 drivers/video/fbdev/pm2fb.c 	while (pm2_RD(p, PM2R_IN_FIFO_SPACE) < a)
p                 255 drivers/video/fbdev/pm2fb.c 	unsigned char p;
p                 265 drivers/video/fbdev/pm2fb.c 				for (p = 0; p < 5; p++, f >>= 1) {
p                 271 drivers/video/fbdev/pm2fb.c 						*pp = p;
p                 284 drivers/video/fbdev/pm2fb.c 	unsigned char p;
p                 291 drivers/video/fbdev/pm2fb.c 			for (p = 0; p < 2; p++) {
p                 292 drivers/video/fbdev/pm2fb.c 				f = (PM2_REFERENCE_CLOCK >> (p + 1)) * n / m;
p                 297 drivers/video/fbdev/pm2fb.c 					*pp = p;
p                 304 drivers/video/fbdev/pm2fb.c static void clear_palette(struct pm2fb_par *p)
p                 308 drivers/video/fbdev/pm2fb.c 	WAIT_FIFO(p, 1);
p                 309 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_RD_PALETTE_WRITE_ADDRESS, 0);
p                 312 drivers/video/fbdev/pm2fb.c 		WAIT_FIFO(p, 3);
p                 313 drivers/video/fbdev/pm2fb.c 		pm2_WR(p, PM2R_RD_PALETTE_DATA, 0);
p                 314 drivers/video/fbdev/pm2fb.c 		pm2_WR(p, PM2R_RD_PALETTE_DATA, 0);
p                 315 drivers/video/fbdev/pm2fb.c 		pm2_WR(p, PM2R_RD_PALETTE_DATA, 0);
p                 319 drivers/video/fbdev/pm2fb.c static void reset_card(struct pm2fb_par *p)
p                 321 drivers/video/fbdev/pm2fb.c 	if (p->type == PM2_TYPE_PERMEDIA2V)
p                 322 drivers/video/fbdev/pm2fb.c 		pm2_WR(p, PM2VR_RD_INDEX_HIGH, 0);
p                 323 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_RESET_STATUS, 0);
p                 325 drivers/video/fbdev/pm2fb.c 	while (pm2_RD(p, PM2R_RESET_STATUS) & PM2F_BEING_RESET)
p                 330 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_FIFO_DISCON, 1);
p                 335 drivers/video/fbdev/pm2fb.c 	WAIT_FIFO(p, 3);
p                 336 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_MEM_CONTROL, p->mem_control);
p                 337 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_BOOT_ADDRESS, p->boot_address);
p                 339 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_MEM_CONFIG, p->mem_config);
p                 342 drivers/video/fbdev/pm2fb.c static void reset_config(struct pm2fb_par *p)
p                 344 drivers/video/fbdev/pm2fb.c 	WAIT_FIFO(p, 53);
p                 345 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_CHIP_CONFIG, pm2_RD(p, PM2R_CHIP_CONFIG) &
p                 347 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_BYPASS_WRITE_MASK, ~(0L));
p                 348 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_FRAMEBUFFER_WRITE_MASK, ~(0L));
p                 349 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_FIFO_CONTROL, 0);
p                 350 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_APERTURE_ONE, 0);
p                 351 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_APERTURE_TWO, 0);
p                 352 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_RASTERIZER_MODE, 0);
p                 353 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_DELTA_MODE, PM2F_DELTA_ORDER_RGB);
p                 354 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_LB_READ_FORMAT, 0);
p                 355 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_LB_WRITE_FORMAT, 0);
p                 356 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_LB_READ_MODE, 0);
p                 357 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_LB_SOURCE_OFFSET, 0);
p                 358 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_FB_SOURCE_OFFSET, 0);
p                 359 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_FB_PIXEL_OFFSET, 0);
p                 360 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_FB_WINDOW_BASE, 0);
p                 361 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_LB_WINDOW_BASE, 0);
p                 362 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_FB_SOFT_WRITE_MASK, ~(0L));
p                 363 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_FB_HARD_WRITE_MASK, ~(0L));
p                 364 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_FB_READ_PIXEL, 0);
p                 365 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_DITHER_MODE, 0);
p                 366 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_AREA_STIPPLE_MODE, 0);
p                 367 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_DEPTH_MODE, 0);
p                 368 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_STENCIL_MODE, 0);
p                 369 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_TEXTURE_ADDRESS_MODE, 0);
p                 370 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_TEXTURE_READ_MODE, 0);
p                 371 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_TEXEL_LUT_MODE, 0);
p                 372 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_YUV_MODE, 0);
p                 373 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_COLOR_DDA_MODE, 0);
p                 374 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_TEXTURE_COLOR_MODE, 0);
p                 375 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_FOG_MODE, 0);
p                 376 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_ALPHA_BLEND_MODE, 0);
p                 377 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_LOGICAL_OP_MODE, 0);
p                 378 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_STATISTICS_MODE, 0);
p                 379 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_SCISSOR_MODE, 0);
p                 380 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_FILTER_MODE, PM2F_SYNCHRONIZATION);
p                 381 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_RD_PIXEL_MASK, 0xff);
p                 382 drivers/video/fbdev/pm2fb.c 	switch (p->type) {
p                 384 drivers/video/fbdev/pm2fb.c 		pm2_RDAC_WR(p, PM2I_RD_MODE_CONTROL, 0); /* no overlay */
p                 385 drivers/video/fbdev/pm2fb.c 		pm2_RDAC_WR(p, PM2I_RD_CURSOR_CONTROL, 0);
p                 386 drivers/video/fbdev/pm2fb.c 		pm2_RDAC_WR(p, PM2I_RD_MISC_CONTROL, PM2F_RD_PALETTE_WIDTH_8);
p                 387 drivers/video/fbdev/pm2fb.c 		pm2_RDAC_WR(p, PM2I_RD_COLOR_KEY_CONTROL, 0);
p                 388 drivers/video/fbdev/pm2fb.c 		pm2_RDAC_WR(p, PM2I_RD_OVERLAY_KEY, 0);
p                 389 drivers/video/fbdev/pm2fb.c 		pm2_RDAC_WR(p, PM2I_RD_RED_KEY, 0);
p                 390 drivers/video/fbdev/pm2fb.c 		pm2_RDAC_WR(p, PM2I_RD_GREEN_KEY, 0);
p                 391 drivers/video/fbdev/pm2fb.c 		pm2_RDAC_WR(p, PM2I_RD_BLUE_KEY, 0);
p                 394 drivers/video/fbdev/pm2fb.c 		pm2v_RDAC_WR(p, PM2VI_RD_MISC_CONTROL, 1); /* 8bit */
p                 399 drivers/video/fbdev/pm2fb.c static void set_aperture(struct pm2fb_par *p, u32 depth)
p                 406 drivers/video/fbdev/pm2fb.c 	WAIT_FIFO(p, 2);
p                 408 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_APERTURE_ONE, PM2F_APERTURE_STANDARD);
p                 418 drivers/video/fbdev/pm2fb.c 		pm2_WR(p, PM2R_APERTURE_ONE, PM2F_APERTURE_STANDARD);
p                 421 drivers/video/fbdev/pm2fb.c 		pm2_WR(p, PM2R_APERTURE_ONE, PM2F_APERTURE_HALFWORDSWAP);
p                 424 drivers/video/fbdev/pm2fb.c 		pm2_WR(p, PM2R_APERTURE_ONE, PM2F_APERTURE_BYTESWAP);
p                 430 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_APERTURE_TWO, PM2F_APERTURE_STANDARD);
p                 433 drivers/video/fbdev/pm2fb.c static void set_color(struct pm2fb_par *p, unsigned char regno,
p                 436 drivers/video/fbdev/pm2fb.c 	WAIT_FIFO(p, 4);
p                 437 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_RD_PALETTE_WRITE_ADDRESS, regno);
p                 439 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_RD_PALETTE_DATA, r);
p                 441 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_RD_PALETTE_DATA, g);
p                 443 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_RD_PALETTE_DATA, b);
p                 449 drivers/video/fbdev/pm2fb.c 	unsigned char m, n, p;
p                 453 drivers/video/fbdev/pm2fb.c 		pm2v_mnp(clk/2, &m, &n, &p);
p                 459 drivers/video/fbdev/pm2fb.c 		pm2v_RDAC_WR(par, PM2VI_RD_MCLK_POSTSCALE, p);
p                 468 drivers/video/fbdev/pm2fb.c 		pm2_mnp(clk, &m, &n, &p);
p                 473 drivers/video/fbdev/pm2fb.c 		pm2_RDAC_WR(par, PM2I_RD_MEMORY_CLOCK_3, 8|p);
p                 486 drivers/video/fbdev/pm2fb.c 	unsigned char m, n, p;
p                 490 drivers/video/fbdev/pm2fb.c 		pm2_mnp(clk, &m, &n, &p);
p                 495 drivers/video/fbdev/pm2fb.c 		pm2_RDAC_WR(par, PM2I_RD_PIXEL_CLOCK_A3, 8|p);
p                 503 drivers/video/fbdev/pm2fb.c 		pm2v_mnp(clk/2, &m, &n, &p);
p                 508 drivers/video/fbdev/pm2fb.c 		pm2v_RDAC_WR(par, PM2VI_RD_CLK0_POSTSCALE, p);
p                 514 drivers/video/fbdev/pm2fb.c static void set_video(struct pm2fb_par *p, u32 video)
p                 530 drivers/video/fbdev/pm2fb.c 	WAIT_FIFO(p, 3);
p                 531 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_VIDEO_CONTROL, vsync);
p                 533 drivers/video/fbdev/pm2fb.c 	switch (p->type) {
p                 540 drivers/video/fbdev/pm2fb.c 		pm2_RDAC_WR(p, PM2I_RD_MISC_CONTROL, tmp);
p                 548 drivers/video/fbdev/pm2fb.c 		pm2v_RDAC_WR(p, PM2VI_RD_SYNC_CONTROL, tmp);
p                 969 drivers/video/fbdev/pm2fb.c 	struct pm2fb_par *p = info->par;
p                 976 drivers/video/fbdev/pm2fb.c 	WAIT_FIFO(p, 1);
p                 977 drivers/video/fbdev/pm2fb.c 	pm2_WR(p, PM2R_SCREEN_BASE, base);
p                 826 drivers/video/fbdev/pm3fb.c 		unsigned char uninitialized_var(p);	/* ClkPostScale */
p                 829 drivers/video/fbdev/pm3fb.c 		(void)pm3fb_calculate_clock(pixclock, &m, &n, &p);
p                 832 drivers/video/fbdev/pm3fb.c 			pixclock, (int) m, (int) n, (int) p);
p                 836 drivers/video/fbdev/pm3fb.c 		PM3_WRITE_DAC_REG(par, PM3RD_DClk0PostScale, p);
p                 725 drivers/video/fbdev/pvr2fb.c static int pvr2_get_param_val(const struct pvr2_params *p, const char *s,
p                 731 drivers/video/fbdev/pvr2fb.c 		if (!strncasecmp(p[i].name, s, strlen(s)))
p                 732 drivers/video/fbdev/pvr2fb.c 			return p[i].val;
p                 738 drivers/video/fbdev/pvr2fb.c static char *pvr2_get_param_name(const struct pvr2_params *p, int val,
p                 744 drivers/video/fbdev/pvr2fb.c 		if (p[i].val == val)
p                 745 drivers/video/fbdev/pvr2fb.c 			return p[i].name;
p                2121 drivers/video/fbdev/riva/fbdev.c 			char *p;
p                2123 drivers/video/fbdev/riva/fbdev.c 			p = this_opt + 9;
p                2124 drivers/video/fbdev/riva/fbdev.c 			if (!*p || !*(++p)) continue; 
p                2125 drivers/video/fbdev/riva/fbdev.c 			forceCRTC = *p - '0';
p                 535 drivers/video/fbdev/riva/riva_hw.c     int done, g,v, p;
p                 538 drivers/video/fbdev/riva/riva_hw.c     for (p=0; p < 2; p++)
p                 544 drivers/video/fbdev/riva/riva_hw.c                 ainfo->priority = p;
p                1249 drivers/video/fbdev/riva/riva_hw.c         uninitialized_var(n),	uninitialized_var(p);
p                1261 drivers/video/fbdev/riva/riva_hw.c     if (!CalcVClock(dotClock, &VClk, &m, &n, &p, chip))
p                1329 drivers/video/fbdev/riva/riva_hw.c     state->vpll     = (p << 16) | (n << 8) | m;
p                  78 drivers/video/fbdev/riva/riva_hw.h #define NV_WR08(p,i,d)  (__raw_writeb((d), (void __iomem *)(p) + (i)))
p                  79 drivers/video/fbdev/riva/riva_hw.h #define NV_RD08(p,i)    (__raw_readb((void __iomem *)(p) + (i)))
p                  80 drivers/video/fbdev/riva/riva_hw.h #define NV_WR16(p,i,d)  (__raw_writew((d), (void __iomem *)(p) + (i)))
p                  81 drivers/video/fbdev/riva/riva_hw.h #define NV_RD16(p,i)    (__raw_readw((void __iomem *)(p) + (i)))
p                  82 drivers/video/fbdev/riva/riva_hw.h #define NV_WR32(p,i,d)  (__raw_writel((d), (void __iomem *)(p) + (i)))
p                  83 drivers/video/fbdev/riva/riva_hw.h #define NV_RD32(p,i)    (__raw_readl((void __iomem *)(p) + (i)))
p                  85 drivers/video/fbdev/riva/riva_hw.h #define VGA_WR08(p,i,d) (writeb((d), (void __iomem *)(p) + (i)))
p                  86 drivers/video/fbdev/riva/riva_hw.h #define VGA_RD08(p,i)   (readb((void __iomem *)(p) + (i)))
p                 198 drivers/video/fbdev/sbuslib.c 	struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p));
p                 202 drivers/video/fbdev/sbuslib.c 	ret = copy_in_user(p, argp, 2 * sizeof(int));
p                 204 drivers/video/fbdev/sbuslib.c 	ret |= put_user(compat_ptr(addr), &p->red);
p                 206 drivers/video/fbdev/sbuslib.c 	ret |= put_user(compat_ptr(addr), &p->green);
p                 208 drivers/video/fbdev/sbuslib.c 	ret |= put_user(compat_ptr(addr), &p->blue);
p                 214 drivers/video/fbdev/sbuslib.c 			(unsigned long)p);
p                 219 drivers/video/fbdev/sbuslib.c 	struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p));
p                 224 drivers/video/fbdev/sbuslib.c 	ret = copy_in_user(p, argp,
p                 226 drivers/video/fbdev/sbuslib.c 	ret |= copy_in_user(&p->size, &argp->size, sizeof(struct fbcurpos));
p                 227 drivers/video/fbdev/sbuslib.c 	ret |= copy_in_user(&p->cmap, &argp->cmap, 2 * sizeof(int));
p                 229 drivers/video/fbdev/sbuslib.c 	ret |= put_user(compat_ptr(addr), &p->cmap.red);
p                 231 drivers/video/fbdev/sbuslib.c 	ret |= put_user(compat_ptr(addr), &p->cmap.green);
p                 233 drivers/video/fbdev/sbuslib.c 	ret |= put_user(compat_ptr(addr), &p->cmap.blue);
p                 235 drivers/video/fbdev/sbuslib.c 	ret |= put_user(compat_ptr(addr), &p->mask);
p                 237 drivers/video/fbdev/sbuslib.c 	ret |= put_user(compat_ptr(addr), &p->image);
p                 240 drivers/video/fbdev/sbuslib.c 	return info->fbops->fb_ioctl(info, FBIOSCURSOR, (unsigned long)p);
p                1826 drivers/video/fbdev/sh_mobile_lcdcfb.c 	struct sh_mobile_lcdc_priv *p = ch->lcdc;
p                1871 drivers/video/fbdev/sh_mobile_lcdcfb.c 	if (p->forced_fourcc &&
p                1872 drivers/video/fbdev/sh_mobile_lcdcfb.c 	    p->forced_fourcc != sh_mobile_format_fourcc(var))
p                1926 drivers/video/fbdev/sh_mobile_lcdcfb.c 	struct sh_mobile_lcdc_priv *p = ch->lcdc;
p                1938 drivers/video/fbdev/sh_mobile_lcdcfb.c 		sh_mobile_lcdc_clk_on(p);
p                1951 drivers/video/fbdev/sh_mobile_lcdcfb.c 		sh_mobile_lcdc_clk_off(p);
p                 318 drivers/video/fbdev/simplefb.c 	const char *p;
p                 326 drivers/video/fbdev/simplefb.c 		p = strstr(prop->name, SUPPLY_SUFFIX);
p                 327 drivers/video/fbdev/simplefb.c 		if (p && p != prop->name)
p                 343 drivers/video/fbdev/simplefb.c 		p = strstr(prop->name, SUPPLY_SUFFIX);
p                 344 drivers/video/fbdev/simplefb.c 		if (!p || p == prop->name)
p                6609 drivers/video/fbdev/sis/init301.c    unsigned short      a, b, p = 0;
p                6636 drivers/video/fbdev/sis/init301.c 	 if((tableptr[p] | tableptr[p+1] << 8) == a) break;
p                6637 drivers/video/fbdev/sis/init301.c 	 p += 0x42;
p                6638 drivers/video/fbdev/sis/init301.c       } while((tableptr[p] | tableptr[p+1] << 8) != 0xffff);
p                6639 drivers/video/fbdev/sis/init301.c       if((tableptr[p] | tableptr[p+1] << 8) == 0xffff) p -= 0x42;
p                6641 drivers/video/fbdev/sis/init301.c    p += 2;
p                6642 drivers/video/fbdev/sis/init301.c    return ((unsigned char *)&tableptr[p]);
p                 493 drivers/video/fbdev/skeletonfb.c void xxxfb_fillrect(struct fb_info *p, const struct fb_fillrect *region)
p                 519 drivers/video/fbdev/skeletonfb.c void xxxfb_copyarea(struct fb_info *p, const struct fb_copyarea *area) 
p                 544 drivers/video/fbdev/skeletonfb.c void xxxfb_imageblit(struct fb_info *p, const struct fb_image *image) 
p                 103 drivers/video/fbdev/sm712.h #define big_pixel_depth(p, d)	{if (p == 24) {p = 32; d = 32; } }
p                 104 drivers/video/fbdev/sm712.h #define big_swap(p)		((p & 0xff00ff00 >> 8) | (p & 0x00ff00ff << 8))
p                 110 drivers/video/fbdev/sm712.h #define big_pixel_depth(p, d)	do { } while (0)
p                 111 drivers/video/fbdev/sm712.h #define big_swap(p)		p
p                1023 drivers/video/fbdev/sm712fb.c 	unsigned long p = *ppos;
p                1041 drivers/video/fbdev/sm712fb.c 	if (p >= total_size)
p                1047 drivers/video/fbdev/sm712fb.c 	if (count + p > total_size)
p                1048 drivers/video/fbdev/sm712fb.c 		count = total_size - p;
p                1054 drivers/video/fbdev/sm712fb.c 	src = (u32 __iomem *)(info->screen_base + p);
p                1100 drivers/video/fbdev/sm712fb.c 	unsigned long p = *ppos;
p                1118 drivers/video/fbdev/sm712fb.c 	if (p > total_size)
p                1126 drivers/video/fbdev/sm712fb.c 	if (count + p > total_size) {
p                1130 drivers/video/fbdev/sm712fb.c 		count = total_size - p;
p                1137 drivers/video/fbdev/sm712fb.c 	dst = (u32 __iomem *)(info->screen_base + p);
p                 226 drivers/video/fbdev/ssd1307fb.c 	unsigned long p = *ppos;
p                 231 drivers/video/fbdev/ssd1307fb.c 	if (p > total_size)
p                 234 drivers/video/fbdev/ssd1307fb.c 	if (count + p > total_size)
p                 235 drivers/video/fbdev/ssd1307fb.c 		count = total_size - p;
p                 240 drivers/video/fbdev/ssd1307fb.c 	dst = info->screen_buffer + p;
p                 293 drivers/video/fbdev/sstfb.c 	int m, m2, n, p, best_err, fout;
p                 298 drivers/video/fbdev/sstfb.c 	p = 3;
p                 300 drivers/video/fbdev/sstfb.c 	while (((1 << p) * freq > VCO_MAX) && (p >= 0))
p                 301 drivers/video/fbdev/sstfb.c 		p--;
p                 302 drivers/video/fbdev/sstfb.c 	if (p == -1)
p                 306 drivers/video/fbdev/sstfb.c 		m2 = (2 * freq * (1 << p) * (n + 2) ) / DAC_FREF - 4 ;
p                 311 drivers/video/fbdev/sstfb.c 		fout = (DAC_FREF * (m + 2)) / ((1 << p) * (n + 2));
p                 322 drivers/video/fbdev/sstfb.c 	t->p = p;
p                 325 drivers/video/fbdev/sstfb.c 	*freq_out = (DAC_FREF * (t->m + 2)) / ((1 << t->p) * (t->n + 2));
p                 327 drivers/video/fbdev/sstfb.c 		  t->m, t->n, t->p, *freq_out);
p                 997 drivers/video/fbdev/sstfb.c 		dac_i_write(DACREG_AC1_I, t->p << 6 | t->n);
p                1003 drivers/video/fbdev/sstfb.c 		dac_i_write(DACREG_BD1_I, t->p << 6 | t->n);
p                1032 drivers/video/fbdev/sstfb.c 		sst_dac_write(DACREG_ICS_PLLDATA, t->p << 5 | t->n);
p                1043 drivers/video/fbdev/sstfb.c 		sst_dac_write(DACREG_ICS_PLLDATA, t->p << 5 | t->n);
p                  45 drivers/video/fbdev/sticore.h #define STI_PTR(p)	( virt_to_phys(p) )
p                  46 drivers/video/fbdev/sticore.h #define PTR_STI(p)	( phys_to_virt((unsigned long)p) )
p                 129 drivers/video/fbdev/tcx.c 	u32 __iomem *p, *pend;
p                 134 drivers/video/fbdev/tcx.c 	p = par->cplane;
p                 135 drivers/video/fbdev/tcx.c 	if (p == NULL)
p                 137 drivers/video/fbdev/tcx.c 	for (pend = p + info->fix.smem_len; p < pend; p++) {
p                 138 drivers/video/fbdev/tcx.c 		u32 tmp = sbus_readl(p);
p                 141 drivers/video/fbdev/tcx.c 		sbus_writel(tmp, p);
p                 164 drivers/video/fbdev/tridentfb.c static inline void t_outb(struct tridentfb_par *p, u8 val, u16 reg)
p                 166 drivers/video/fbdev/tridentfb.c 	fb_writeb(val, p->io_virt + reg);
p                 169 drivers/video/fbdev/tridentfb.c static inline u8 t_inb(struct tridentfb_par *p, u16 reg)
p                 171 drivers/video/fbdev/tridentfb.c 	return fb_readb(p->io_virt + reg);
p                1961 drivers/video/fbdev/uvesafb.c #define param_check_scroll(name, p) __param_check(name, p, void)
p                 107 drivers/video/fbdev/valkyriefb.c static int read_valkyrie_sense(struct fb_info_valkyrie *p);
p                 112 drivers/video/fbdev/valkyriefb.c static int valkyrie_init_info(struct fb_info *info, struct fb_info_valkyrie *p);
p                 114 drivers/video/fbdev/valkyriefb.c static void valkyrie_init_fix(struct fb_fix_screeninfo *fix, struct fb_info_valkyrie *p);
p                 130 drivers/video/fbdev/valkyriefb.c 	struct fb_info_valkyrie *p =
p                 132 drivers/video/fbdev/valkyriefb.c 	volatile struct valkyrie_regs __iomem *valkyrie_regs = p->valkyrie_regs;
p                 189 drivers/video/fbdev/valkyriefb.c 	struct fb_info_valkyrie *p =
p                 199 drivers/video/fbdev/valkyriefb.c 		out_8(&p->valkyrie_regs->mode.r, init->mode);
p                 210 drivers/video/fbdev/valkyriefb.c 		out_8(&p->valkyrie_regs->mode.r, init->mode | 0x40);
p                 213 drivers/video/fbdev/valkyriefb.c 		out_8(&p->valkyrie_regs->mode.r, 0x66);
p                 222 drivers/video/fbdev/valkyriefb.c 	struct fb_info_valkyrie *p =
p                 224 drivers/video/fbdev/valkyriefb.c 	volatile struct cmap_regs __iomem *cmap_regs = p->cmap_regs;
p                 234 drivers/video/fbdev/valkyriefb.c 	out_8(&p->cmap_regs->addr, regno);
p                 273 drivers/video/fbdev/valkyriefb.c static void __init valkyrie_choose_mode(struct fb_info_valkyrie *p)
p                 275 drivers/video/fbdev/valkyriefb.c 	p->sense = read_valkyrie_sense(p);
p                 276 drivers/video/fbdev/valkyriefb.c 	printk(KERN_INFO "Monitor sense value = 0x%x\n", p->sense);
p                 285 drivers/video/fbdev/valkyriefb.c 		default_vmode = mac_map_monitor_sense(p->sense);
p                 299 drivers/video/fbdev/valkyriefb.c 	    || valkyrie_vram_reqd(default_vmode, default_cmode) > p->total_vram)
p                 308 drivers/video/fbdev/valkyriefb.c 	struct fb_info_valkyrie	*p;
p                 347 drivers/video/fbdev/valkyriefb.c 	p = kzalloc(sizeof(*p), GFP_ATOMIC);
p                 348 drivers/video/fbdev/valkyriefb.c 	if (p == 0)
p                 353 drivers/video/fbdev/valkyriefb.c 		kfree(p);
p                 356 drivers/video/fbdev/valkyriefb.c 	p->total_vram = 0x100000;
p                 357 drivers/video/fbdev/valkyriefb.c 	p->frame_buffer_phys = frame_buffer_phys;
p                 359 drivers/video/fbdev/valkyriefb.c 	p->frame_buffer = ioremap_nocache(frame_buffer_phys, p->total_vram);
p                 361 drivers/video/fbdev/valkyriefb.c 	p->frame_buffer = ioremap_wt(frame_buffer_phys, p->total_vram);
p                 363 drivers/video/fbdev/valkyriefb.c 	p->cmap_regs_phys = cmap_regs_phys;
p                 364 drivers/video/fbdev/valkyriefb.c 	p->cmap_regs = ioremap(p->cmap_regs_phys, 0x1000);
p                 365 drivers/video/fbdev/valkyriefb.c 	p->valkyrie_regs_phys = cmap_regs_phys+0x6000;
p                 366 drivers/video/fbdev/valkyriefb.c 	p->valkyrie_regs = ioremap(p->valkyrie_regs_phys, 0x1000);
p                 368 drivers/video/fbdev/valkyriefb.c 	if (p->frame_buffer == NULL || p->cmap_regs == NULL
p                 369 drivers/video/fbdev/valkyriefb.c 	    || p->valkyrie_regs == NULL) {
p                 374 drivers/video/fbdev/valkyriefb.c 	valkyrie_choose_mode(p);
p                 375 drivers/video/fbdev/valkyriefb.c 	mac_vmode_to_var(default_vmode, default_cmode, &p->info.var);
p                 376 drivers/video/fbdev/valkyriefb.c 	err = valkyrie_init_info(&p->info, p);
p                 379 drivers/video/fbdev/valkyriefb.c 	valkyrie_init_fix(&p->info.fix, p);
p                 380 drivers/video/fbdev/valkyriefb.c 	if (valkyriefb_set_par(&p->info))
p                 384 drivers/video/fbdev/valkyriefb.c 	if ((err = register_framebuffer(&p->info)) != 0)
p                 387 drivers/video/fbdev/valkyriefb.c 	fb_info(&p->info, "valkyrie frame buffer device\n");
p                 391 drivers/video/fbdev/valkyriefb.c 	fb_dealloc_cmap(&p->info.cmap);
p                 393 drivers/video/fbdev/valkyriefb.c 	if (p->frame_buffer)
p                 394 drivers/video/fbdev/valkyriefb.c 		iounmap(p->frame_buffer);
p                 395 drivers/video/fbdev/valkyriefb.c 	if (p->cmap_regs)
p                 396 drivers/video/fbdev/valkyriefb.c 		iounmap(p->cmap_regs);
p                 397 drivers/video/fbdev/valkyriefb.c 	if (p->valkyrie_regs)
p                 398 drivers/video/fbdev/valkyriefb.c 		iounmap(p->valkyrie_regs);
p                 399 drivers/video/fbdev/valkyriefb.c 	kfree(p);
p                 406 drivers/video/fbdev/valkyriefb.c static int read_valkyrie_sense(struct fb_info_valkyrie *p)
p                 410 drivers/video/fbdev/valkyriefb.c 	out_8(&p->valkyrie_regs->msense.r, 0);   /* release all lines */
p                 412 drivers/video/fbdev/valkyriefb.c 	sense = ((in = in_8(&p->valkyrie_regs->msense.r)) & 0x70) << 4;
p                 414 drivers/video/fbdev/valkyriefb.c 	out_8(&p->valkyrie_regs->msense.r, 4);   /* drive A low */
p                 416 drivers/video/fbdev/valkyriefb.c 	sense |= ((in = in_8(&p->valkyrie_regs->msense.r)) & 0x30);
p                 417 drivers/video/fbdev/valkyriefb.c 	out_8(&p->valkyrie_regs->msense.r, 2);   /* drive B low */
p                 419 drivers/video/fbdev/valkyriefb.c 	sense |= ((in = in_8(&p->valkyrie_regs->msense.r)) & 0x40) >> 3;
p                 421 drivers/video/fbdev/valkyriefb.c 	out_8(&p->valkyrie_regs->msense.r, 1);   /* drive C low */
p                 423 drivers/video/fbdev/valkyriefb.c 	sense |= ((in = in_8(&p->valkyrie_regs->msense.r)) & 0x60) >> 5;
p                 425 drivers/video/fbdev/valkyriefb.c 	out_8(&p->valkyrie_regs->msense.r, 7);
p                 461 drivers/video/fbdev/valkyriefb.c 	struct fb_info_valkyrie *p =
p                 493 drivers/video/fbdev/valkyriefb.c 	if (valkyrie_vram_reqd(vmode, cmode) > p->total_vram) {
p                 510 drivers/video/fbdev/valkyriefb.c static void valkyrie_init_fix(struct fb_fix_screeninfo *fix, struct fb_info_valkyrie *p)
p                 514 drivers/video/fbdev/valkyriefb.c 	fix->mmio_start = p->valkyrie_regs_phys;
p                 517 drivers/video/fbdev/valkyriefb.c 	fix->smem_start = p->frame_buffer_phys + 0x1000;
p                 518 drivers/video/fbdev/valkyriefb.c 	fix->smem_len = p->total_vram;
p                 539 drivers/video/fbdev/valkyriefb.c 		struct fb_info_valkyrie *p)
p                 542 drivers/video/fbdev/valkyriefb.c 	info->screen_base = p->frame_buffer + 0x1000;
p                 544 drivers/video/fbdev/valkyriefb.c 	info->pseudo_palette = p->pseudo_palette;
p                 545 drivers/video/fbdev/valkyriefb.c 	info->par = &p->par;
p                 112 drivers/video/fbdev/vga16fb.c static inline void rmw(volatile char __iomem *p)
p                 114 drivers/video/fbdev/vga16fb.c 	readb(p);
p                 115 drivers/video/fbdev/vga16fb.c 	writeb(1, p);
p                  17 drivers/video/fbdev/via/hw.h #define viafb_read_reg(p, i)			via_read_reg(p, i)
p                  18 drivers/video/fbdev/via/hw.h #define viafb_write_reg(i, p, d)		via_write_reg(p, i, d)
p                  19 drivers/video/fbdev/via/hw.h #define viafb_write_reg_mask(i, p, d, m)	via_write_reg_mask(p, i, d, m)
p                  41 drivers/video/fbdev/wmt_ge_rops.c void wmt_ge_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
p                  45 drivers/video/fbdev/wmt_ge_rops.c 	if (p->state != FBINFO_STATE_RUNNING)
p                  48 drivers/video/fbdev/wmt_ge_rops.c 	if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
p                  49 drivers/video/fbdev/wmt_ge_rops.c 	    p->fix.visual == FB_VISUAL_DIRECTCOLOR)
p                  50 drivers/video/fbdev/wmt_ge_rops.c 		fg = ((u32 *) (p->pseudo_palette))[rect->color];
p                  54 drivers/video/fbdev/wmt_ge_rops.c 	pat = pixel_to_pat(p->var.bits_per_pixel, fg);
p                  56 drivers/video/fbdev/wmt_ge_rops.c 	if (p->fbops->fb_sync)
p                  57 drivers/video/fbdev/wmt_ge_rops.c 		p->fbops->fb_sync(p);
p                  59 drivers/video/fbdev/wmt_ge_rops.c 	writel(p->var.bits_per_pixel == 32 ? 3 :
p                  60 drivers/video/fbdev/wmt_ge_rops.c 	      (p->var.bits_per_pixel == 8 ? 0 : 1), regbase + GE_DEPTH_OFF);
p                  61 drivers/video/fbdev/wmt_ge_rops.c 	writel(p->var.bits_per_pixel == 15 ? 1 : 0, regbase + GE_HIGHCOLOR_OFF);
p                  62 drivers/video/fbdev/wmt_ge_rops.c 	writel(p->fix.smem_start, regbase + GE_DESTBASE_OFF);
p                  63 drivers/video/fbdev/wmt_ge_rops.c 	writel(p->var.xres_virtual - 1, regbase + GE_DESTDISPW_OFF);
p                  64 drivers/video/fbdev/wmt_ge_rops.c 	writel(p->var.yres_virtual - 1, regbase + GE_DESTDISPH_OFF);
p                  77 drivers/video/fbdev/wmt_ge_rops.c void wmt_ge_copyarea(struct fb_info *p, const struct fb_copyarea *area)
p                  79 drivers/video/fbdev/wmt_ge_rops.c 	if (p->state != FBINFO_STATE_RUNNING)
p                  82 drivers/video/fbdev/wmt_ge_rops.c 	if (p->fbops->fb_sync)
p                  83 drivers/video/fbdev/wmt_ge_rops.c 		p->fbops->fb_sync(p);
p                  85 drivers/video/fbdev/wmt_ge_rops.c 	writel(p->var.bits_per_pixel > 16 ? 3 :
p                  86 drivers/video/fbdev/wmt_ge_rops.c 	      (p->var.bits_per_pixel > 8 ? 1 : 0), regbase + GE_DEPTH_OFF);
p                  88 drivers/video/fbdev/wmt_ge_rops.c 	writel(p->fix.smem_start, regbase + GE_SRCBASE_OFF);
p                  89 drivers/video/fbdev/wmt_ge_rops.c 	writel(p->var.xres_virtual - 1, regbase + GE_SRCDISPW_OFF);
p                  90 drivers/video/fbdev/wmt_ge_rops.c 	writel(p->var.yres_virtual - 1, regbase + GE_SRCDISPH_OFF);
p                  96 drivers/video/fbdev/wmt_ge_rops.c 	writel(p->fix.smem_start, regbase + GE_DESTBASE_OFF);
p                  97 drivers/video/fbdev/wmt_ge_rops.c 	writel(p->var.xres_virtual - 1, regbase + GE_DESTDISPW_OFF);
p                  98 drivers/video/fbdev/wmt_ge_rops.c 	writel(p->var.yres_virtual - 1, regbase + GE_DESTDISPH_OFF);
p                 110 drivers/video/fbdev/wmt_ge_rops.c int wmt_ge_sync(struct fb_info *p)
p                  12 drivers/video/fbdev/wmt_ge_rops.h static inline int wmt_ge_sync(struct fb_info *p)
p                  17 drivers/video/fbdev/wmt_ge_rops.h static inline void wmt_ge_fillrect(struct fb_info *p,
p                  20 drivers/video/fbdev/wmt_ge_rops.h 	sys_fillrect(p, rect);
p                  23 drivers/video/fbdev/wmt_ge_rops.h static inline void wmt_ge_copyarea(struct fb_info *p,
p                  26 drivers/video/fbdev/wmt_ge_rops.h 	sys_copyarea(p, area);
p                 245 drivers/video/fbdev/xen-fbfront.c static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
p                 247 drivers/video/fbdev/xen-fbfront.c 	struct xenfb_info *info = p->par;
p                 249 drivers/video/fbdev/xen-fbfront.c 	sys_fillrect(p, rect);
p                 253 drivers/video/fbdev/xen-fbfront.c static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
p                 255 drivers/video/fbdev/xen-fbfront.c 	struct xenfb_info *info = p->par;
p                 257 drivers/video/fbdev/xen-fbfront.c 	sys_imageblit(p, image);
p                 261 drivers/video/fbdev/xen-fbfront.c static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
p                 263 drivers/video/fbdev/xen-fbfront.c 	struct xenfb_info *info = p->par;
p                 265 drivers/video/fbdev/xen-fbfront.c 	sys_copyarea(p, area);
p                 269 drivers/video/fbdev/xen-fbfront.c static ssize_t xenfb_write(struct fb_info *p, const char __user *buf,
p                 272 drivers/video/fbdev/xen-fbfront.c 	struct xenfb_info *info = p->par;
p                 275 drivers/video/fbdev/xen-fbfront.c 	res = fb_sys_write(p, buf, count, ppos);
p                  55 drivers/virt/fsl_hypervisor.c static long ioctl_restart(struct fsl_hv_ioctl_restart __user *p)
p                  60 drivers/virt/fsl_hypervisor.c 	if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_restart)))
p                  65 drivers/virt/fsl_hypervisor.c 	if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
p                  76 drivers/virt/fsl_hypervisor.c static long ioctl_status(struct fsl_hv_ioctl_status __user *p)
p                  82 drivers/virt/fsl_hypervisor.c 	if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_status)))
p                  89 drivers/virt/fsl_hypervisor.c 	if (copy_to_user(p, &param, sizeof(struct fsl_hv_ioctl_status)))
p                 100 drivers/virt/fsl_hypervisor.c static long ioctl_start(struct fsl_hv_ioctl_start __user *p)
p                 105 drivers/virt/fsl_hypervisor.c 	if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_start)))
p                 111 drivers/virt/fsl_hypervisor.c 	if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
p                 122 drivers/virt/fsl_hypervisor.c static long ioctl_stop(struct fsl_hv_ioctl_stop __user *p)
p                 127 drivers/virt/fsl_hypervisor.c 	if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_stop)))
p                 132 drivers/virt/fsl_hypervisor.c 	if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
p                 147 drivers/virt/fsl_hypervisor.c static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
p                 165 drivers/virt/fsl_hypervisor.c 	if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_memcpy)))
p                 305 drivers/virt/fsl_hypervisor.c 		if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
p                 316 drivers/virt/fsl_hypervisor.c static long ioctl_doorbell(struct fsl_hv_ioctl_doorbell __user *p)
p                 321 drivers/virt/fsl_hypervisor.c 	if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_doorbell)))
p                 326 drivers/virt/fsl_hypervisor.c 	if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
p                 332 drivers/virt/fsl_hypervisor.c static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
p                 342 drivers/virt/fsl_hypervisor.c 	if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_prop)))
p                 390 drivers/virt/fsl_hypervisor.c 			    put_user(param.proplen, &p->proplen)) {
p                 397 drivers/virt/fsl_hypervisor.c 	if (put_user(param.ret, &p->ret))
p                 571 drivers/virt/fsl_hypervisor.c static __poll_t fsl_hv_poll(struct file *filp, struct poll_table_struct *p)
p                 579 drivers/virt/fsl_hypervisor.c 	poll_wait(filp, &dbq->wait, p);
p                 598 drivers/virt/fsl_hypervisor.c 	uint32_t __user *p = (uint32_t __user *) buf; /* for put_user() */
p                 643 drivers/virt/fsl_hypervisor.c 		if (put_user(dbell, p))
p                 645 drivers/virt/fsl_hypervisor.c 		p++;
p                 556 drivers/virt/vboxguest/vboxguest_utils.c 	void __user *p;
p                 583 drivers/virt/vboxguest/vboxguest_utils.c 			p = (void __user *)dst_parm->u.pointer.u.linear_addr;
p                 584 drivers/virt/vboxguest/vboxguest_utils.c 			ret = copy_to_user(p, bounce_bufs[i],
p                 582 drivers/virtio/virtio_balloon.c 	void *p;
p                 597 drivers/virtio/virtio_balloon.c 	p = page_address(page);
p                 598 drivers/virtio/virtio_balloon.c 	sg_init_one(&sg, p, VIRTIO_BALLOON_FREE_PAGE_SIZE);
p                 601 drivers/virtio/virtio_balloon.c 		err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL);
p                 603 drivers/virtio/virtio_balloon.c 			free_pages((unsigned long)p,
p                 617 drivers/virtio/virtio_balloon.c 		free_pages((unsigned long)p, VIRTIO_BALLOON_FREE_PAGE_ORDER);
p                  73 drivers/virtio/virtio_pci_modern.c 	void __iomem *p;
p                 131 drivers/virtio/virtio_pci_modern.c 	p = pci_iomap_range(dev, bar, offset, length);
p                 132 drivers/virtio/virtio_pci_modern.c 	if (!p)
p                 136 drivers/virtio/virtio_pci_modern.c 	return p;
p                  25 drivers/visorbus/visorbus_private.h void visorbus_response(struct visor_device *p, int response, int controlvm_id);
p                  26 drivers/visorbus/visorbus_private.h void visorbus_device_changestate_response(struct visor_device *p, int response,
p                 431 drivers/visorbus/visorchipset.c 					struct visor_device *p, int response,
p                 436 drivers/visorbus/visorchipset.c 	if (p->pending_msg_hdr->id != cmd_id)
p                 439 drivers/visorbus/visorchipset.c 	controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
p                 440 drivers/visorbus/visorchipset.c 	outmsg.cmd.device_change_state.bus_no = p->chipset_bus_no;
p                 441 drivers/visorbus/visorchipset.c 	outmsg.cmd.device_change_state.dev_no = p->chipset_dev_no;
p                  42 drivers/w1/slaves/w1_ds2423.c 	int p;
p                  54 drivers/w1/slaves/w1_ds2423.c 		for (p = 0; p < 4; p++) {
p                  63 drivers/w1/slaves/w1_ds2423.c 				rbuf + (p * READ_BYTE_COUNT), READ_BYTE_COUNT);
p                  67 drivers/w1/slaves/w1_ds2423.c 					rbuf[(p * READ_BYTE_COUNT) + ii]);
p                  68 drivers/w1/slaves/w1_ds2423.c 			if (read_byte_count != (p + 1) * READ_BYTE_COUNT) {
p                  77 drivers/w1/slaves/w1_ds2423.c 				if (p == 0) {
p                  87 drivers/w1/slaves/w1_ds2423.c 						((p - 1) * READ_BYTE_COUNT),
p                  94 drivers/w1/slaves/w1_ds2423.c 						result |= rbuf[(p *
p                 195 drivers/w1/slaves/w1_ds28e17.c 	const u8 *p;
p                 218 drivers/w1/slaves/w1_ds28e17.c 		p = buffer;
p                 221 drivers/w1/slaves/w1_ds28e17.c 		result = __w1_f19_i2c_write(sl, command, 2, p,
p                 231 drivers/w1/slaves/w1_ds28e17.c 		p += W1_F19_WRITE_DATA_LIMIT;
p                 237 drivers/w1/slaves/w1_ds28e17.c 			result = __w1_f19_i2c_write(sl, command, 1, p,
p                 247 drivers/w1/slaves/w1_ds28e17.c 			p += W1_F19_WRITE_DATA_LIMIT;
p                 254 drivers/w1/slaves/w1_ds28e17.c 		result = __w1_f19_i2c_write(sl, command, 1, p, remaining);
p                 148 drivers/watchdog/acquirewdt.c 	int __user *p = argp;
p                 161 drivers/watchdog/acquirewdt.c 		return put_user(0, p);
p                 165 drivers/watchdog/acquirewdt.c 		if (get_user(options, p))
p                 182 drivers/watchdog/acquirewdt.c 		return put_user(WATCHDOG_HEARTBEAT, p);
p                 135 drivers/watchdog/advantechwdt.c 	int __user *p = argp;
p                 152 drivers/watchdog/advantechwdt.c 		return put_user(0, p);
p                 158 drivers/watchdog/advantechwdt.c 		if (get_user(options, p))
p                 175 drivers/watchdog/advantechwdt.c 		if (get_user(new_timeout, p))
p                 182 drivers/watchdog/advantechwdt.c 		return put_user(timeout, p);
p                 179 drivers/watchdog/alim1535_wdt.c 	int __user *p = argp;
p                 194 drivers/watchdog/alim1535_wdt.c 		return put_user(0, p);
p                 199 drivers/watchdog/alim1535_wdt.c 		if (get_user(new_options, p))
p                 217 drivers/watchdog/alim1535_wdt.c 		if (get_user(new_timeout, p))
p                 225 drivers/watchdog/alim1535_wdt.c 		return put_user(timeout, p);
p                 237 drivers/watchdog/alim7101_wdt.c 	int __user *p = argp;
p                 250 drivers/watchdog/alim7101_wdt.c 		return put_user(0, p);
p                 255 drivers/watchdog/alim7101_wdt.c 		if (get_user(new_options, p))
p                 274 drivers/watchdog/alim7101_wdt.c 		if (get_user(new_timeout, p))
p                 284 drivers/watchdog/alim7101_wdt.c 		return put_user(timeout, p);
p                 163 drivers/watchdog/at91rm9200_wdt.c 	int __user *p = argp;
p                 172 drivers/watchdog/at91rm9200_wdt.c 		return put_user(0, p);
p                 174 drivers/watchdog/at91rm9200_wdt.c 		if (get_user(new_value, p))
p                 185 drivers/watchdog/at91rm9200_wdt.c 		if (get_user(new_value, p))
p                 192 drivers/watchdog/at91rm9200_wdt.c 		return put_user(wdt_time, p);
p                 194 drivers/watchdog/at91rm9200_wdt.c 		return put_user(wdt_time, p);
p                 187 drivers/watchdog/ath79_wdt.c 	int __user *p = argp;
p                 198 drivers/watchdog/ath79_wdt.c 		err = put_user(0, p);
p                 202 drivers/watchdog/ath79_wdt.c 		err = put_user(boot_status, p);
p                 211 drivers/watchdog/ath79_wdt.c 		err = get_user(t, p);
p                 221 drivers/watchdog/ath79_wdt.c 		err = put_user(timeout, p);
p                 170 drivers/watchdog/bcm63xx_wdt.c 	int __user *p = argp;
p                 180 drivers/watchdog/bcm63xx_wdt.c 		return put_user(0, p);
p                 183 drivers/watchdog/bcm63xx_wdt.c 		if (get_user(new_value, p))
p                 202 drivers/watchdog/bcm63xx_wdt.c 		if (get_user(new_value, p))
p                 211 drivers/watchdog/bcm63xx_wdt.c 		return put_user(wdt_time, p);
p                 142 drivers/watchdog/cpu5wdt.c 	int __user *p = argp;
p                 157 drivers/watchdog/cpu5wdt.c 		return put_user(value, p);
p                 159 drivers/watchdog/cpu5wdt.c 		return put_user(0, p);
p                 161 drivers/watchdog/cpu5wdt.c 		if (get_user(value, p))
p                 205 drivers/watchdog/cpwd.c static void cpwd_toggleintr(struct cpwd *p, int index, int enable)
p                 207 drivers/watchdog/cpwd.c 	unsigned char curregs = cpwd_readb(p->regs + PLD_IMASK);
p                 211 drivers/watchdog/cpwd.c 		(p->devs[index].intr_mask);
p                 218 drivers/watchdog/cpwd.c 	cpwd_writeb(curregs, p->regs + PLD_IMASK);
p                 224 drivers/watchdog/cpwd.c static void cpwd_resetbrokentimer(struct cpwd *p, int index)
p                 226 drivers/watchdog/cpwd.c 	cpwd_toggleintr(p, index, WD_INTR_ON);
p                 227 drivers/watchdog/cpwd.c 	cpwd_writew(WD_BLIMIT, p->devs[index].regs + WD_LIMIT);
p                 237 drivers/watchdog/cpwd.c 	struct cpwd *p = cpwd_device;
p                 247 drivers/watchdog/cpwd.c 		if (p->devs[id].runstatus & WD_STAT_BSTOP) {
p                 249 drivers/watchdog/cpwd.c 			cpwd_resetbrokentimer(p, id);
p                 263 drivers/watchdog/cpwd.c static void cpwd_pingtimer(struct cpwd *p, int index)
p                 265 drivers/watchdog/cpwd.c 	if (cpwd_readb(p->devs[index].regs + WD_STATUS) & WD_S_RUNNING)
p                 266 drivers/watchdog/cpwd.c 		cpwd_readw(p->devs[index].regs + WD_DCNTR);
p                 273 drivers/watchdog/cpwd.c static void cpwd_stoptimer(struct cpwd *p, int index)
p                 275 drivers/watchdog/cpwd.c 	if (cpwd_readb(p->devs[index].regs + WD_STATUS) & WD_S_RUNNING) {
p                 276 drivers/watchdog/cpwd.c 		cpwd_toggleintr(p, index, WD_INTR_OFF);
p                 278 drivers/watchdog/cpwd.c 		if (p->broken) {
p                 279 drivers/watchdog/cpwd.c 			p->devs[index].runstatus |= WD_STAT_BSTOP;
p                 292 drivers/watchdog/cpwd.c static void cpwd_starttimer(struct cpwd *p, int index)
p                 294 drivers/watchdog/cpwd.c 	if (p->broken)
p                 295 drivers/watchdog/cpwd.c 		p->devs[index].runstatus &= ~WD_STAT_BSTOP;
p                 297 drivers/watchdog/cpwd.c 	p->devs[index].runstatus &= ~WD_STAT_SVCD;
p                 299 drivers/watchdog/cpwd.c 	cpwd_writew(p->devs[index].timeout, p->devs[index].regs + WD_LIMIT);
p                 300 drivers/watchdog/cpwd.c 	cpwd_toggleintr(p, index, WD_INTR_ON);
p                 303 drivers/watchdog/cpwd.c static int cpwd_getstatus(struct cpwd *p, int index)
p                 305 drivers/watchdog/cpwd.c 	unsigned char stat = cpwd_readb(p->devs[index].regs + WD_STATUS);
p                 306 drivers/watchdog/cpwd.c 	unsigned char intr = cpwd_readb(p->devs[index].regs + PLD_IMASK);
p                 317 drivers/watchdog/cpwd.c 		if (intr & p->devs[index].intr_mask) {
p                 331 drivers/watchdog/cpwd.c 			if (p->broken &&
p                 332 drivers/watchdog/cpwd.c 			    (p->devs[index].runstatus & WD_STAT_BSTOP)) {
p                 333 drivers/watchdog/cpwd.c 				if (p->devs[index].runstatus & WD_STAT_SVCD) {
p                 347 drivers/watchdog/cpwd.c 	if (p->devs[index].runstatus & WD_STAT_SVCD)
p                 355 drivers/watchdog/cpwd.c 	struct cpwd *p = dev_id;
p                 360 drivers/watchdog/cpwd.c 	spin_lock_irq(&p->lock);
p                 362 drivers/watchdog/cpwd.c 	cpwd_stoptimer(p, WD0_ID);
p                 363 drivers/watchdog/cpwd.c 	p->devs[WD0_ID].runstatus |=  WD_STAT_SVCD;
p                 365 drivers/watchdog/cpwd.c 	spin_unlock_irq(&p->lock);
p                 372 drivers/watchdog/cpwd.c 	struct cpwd *p = cpwd_device;
p                 387 drivers/watchdog/cpwd.c 	if (!p->initialized) {
p                 388 drivers/watchdog/cpwd.c 		if (request_irq(p->irq, &cpwd_interrupt,
p                 389 drivers/watchdog/cpwd.c 				IRQF_SHARED, DRIVER_NAME, p)) {
p                 390 drivers/watchdog/cpwd.c 			pr_err("Cannot register IRQ %d\n", p->irq);
p                 394 drivers/watchdog/cpwd.c 		p->initialized = true;
p                 417 drivers/watchdog/cpwd.c 	struct cpwd *p = cpwd_device;
p                 434 drivers/watchdog/cpwd.c 		cpwd_pingtimer(p, index);
p                 442 drivers/watchdog/cpwd.c 			if (p->enabled)
p                 444 drivers/watchdog/cpwd.c 			cpwd_stoptimer(p, index);
p                 446 drivers/watchdog/cpwd.c 			cpwd_starttimer(p, index);
p                 454 drivers/watchdog/cpwd.c 		setopt = cpwd_getstatus(p, index);
p                 460 drivers/watchdog/cpwd.c 		cpwd_starttimer(p, index);
p                 464 drivers/watchdog/cpwd.c 		if (p->enabled)
p                 467 drivers/watchdog/cpwd.c 		cpwd_stoptimer(p, index);
p                 486 drivers/watchdog/cpwd.c 	struct cpwd *p = cpwd_device;
p                 490 drivers/watchdog/cpwd.c 		cpwd_pingtimer(p, index);
p                 520 drivers/watchdog/cpwd.c 	struct cpwd *p;
p                 525 drivers/watchdog/cpwd.c 	p = devm_kzalloc(&op->dev, sizeof(*p), GFP_KERNEL);
p                 526 drivers/watchdog/cpwd.c 	if (!p)
p                 529 drivers/watchdog/cpwd.c 	p->irq = op->archdata.irqs[0];
p                 531 drivers/watchdog/cpwd.c 	spin_lock_init(&p->lock);
p                 533 drivers/watchdog/cpwd.c 	p->regs = of_ioremap(&op->resource[0], 0,
p                 535 drivers/watchdog/cpwd.c 	if (!p->regs) {
p                 548 drivers/watchdog/cpwd.c 	p->enabled = (prop_val ? true : false);
p                 551 drivers/watchdog/cpwd.c 	p->reboot = (prop_val ? true : false);
p                 555 drivers/watchdog/cpwd.c 		p->timeout = simple_strtoul(str_prop, NULL, 10);
p                 564 drivers/watchdog/cpwd.c 	p->broken = (str_prop && !strcmp(str_prop, WD_BADMODEL));
p                 566 drivers/watchdog/cpwd.c 	if (!p->enabled)
p                 567 drivers/watchdog/cpwd.c 		cpwd_toggleintr(p, -1, WD_INTR_OFF);
p                 574 drivers/watchdog/cpwd.c 		struct miscdevice *mp = &p->devs[i].misc;
p                 580 drivers/watchdog/cpwd.c 		p->devs[i].regs = p->regs + (i * WD_TIMER_REGSZ);
p                 581 drivers/watchdog/cpwd.c 		p->devs[i].intr_mask = (WD0_INTR_MASK << i);
p                 582 drivers/watchdog/cpwd.c 		p->devs[i].runstatus &= ~WD_STAT_BSTOP;
p                 583 drivers/watchdog/cpwd.c 		p->devs[i].runstatus |= WD_STAT_INIT;
p                 584 drivers/watchdog/cpwd.c 		p->devs[i].timeout = p->timeout;
p                 586 drivers/watchdog/cpwd.c 			p->devs[i].timeout = *parms[i];
p                 588 drivers/watchdog/cpwd.c 		err = misc_register(&p->devs[i].misc);
p                 596 drivers/watchdog/cpwd.c 	if (p->broken) {
p                 604 drivers/watchdog/cpwd.c 	platform_set_drvdata(op, p);
p                 605 drivers/watchdog/cpwd.c 	cpwd_device = p;
p                 610 drivers/watchdog/cpwd.c 		misc_deregister(&p->devs[i].misc);
p                 613 drivers/watchdog/cpwd.c 	of_iounmap(&op->resource[0], p->regs, 4 * WD_TIMER_REGSZ);
p                 620 drivers/watchdog/cpwd.c 	struct cpwd *p = platform_get_drvdata(op);
p                 624 drivers/watchdog/cpwd.c 		misc_deregister(&p->devs[i].misc);
p                 626 drivers/watchdog/cpwd.c 		if (!p->enabled) {
p                 627 drivers/watchdog/cpwd.c 			cpwd_stoptimer(p, i);
p                 628 drivers/watchdog/cpwd.c 			if (p->devs[i].runstatus & WD_STAT_BSTOP)
p                 629 drivers/watchdog/cpwd.c 				cpwd_resetbrokentimer(p, i);
p                 633 drivers/watchdog/cpwd.c 	if (p->broken)
p                 636 drivers/watchdog/cpwd.c 	if (p->initialized)
p                 637 drivers/watchdog/cpwd.c 		free_irq(p->irq, p);
p                 639 drivers/watchdog/cpwd.c 	of_iounmap(&op->resource[0], p->regs, 4 * WD_TIMER_REGSZ);
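The cpwd.c entries above trace a multi-timer probe: devm_kzalloc for the state, of_ioremap of the register window, one misc_register per timer with unwinding when a later registration fails, and of_iounmap on remove. A minimal sketch of that register-then-unwind shape, with hypothetical names (not the driver's actual code):

#include <linux/miscdevice.h>
#include <linux/errno.h>

struct my_wd_timer { struct miscdevice misc; };
struct my_wd { struct my_wd_timer devs[4]; };

static int example_register_timers(struct my_wd *p, int ntimers)
{
	int i, err;

	for (i = 0; i < ntimers; i++) {
		err = misc_register(&p->devs[i].misc);
		if (err)
			goto out_unregister;	/* undo everything registered so far */
	}
	return 0;

out_unregister:
	while (--i >= 0)
		misc_deregister(&p->devs[i].misc);
	return err;
}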
p                 236 drivers/watchdog/eurotechwdt.c 	int __user *p = argp;
p                 253 drivers/watchdog/eurotechwdt.c 		return put_user(0, p);
p                 256 drivers/watchdog/eurotechwdt.c 		if (get_user(options, p))
p                 278 drivers/watchdog/eurotechwdt.c 		if (copy_from_user(&time, p, sizeof(int)))
p                 292 drivers/watchdog/eurotechwdt.c 		return put_user(eurwdt_timeout, p);
p                 139 drivers/watchdog/geodewdt.c 	int __user *p = argp;
p                 157 drivers/watchdog/geodewdt.c 		return put_user(0, p);
p                 163 drivers/watchdog/geodewdt.c 		if (get_user(options, p))
p                 183 drivers/watchdog/geodewdt.c 		if (get_user(interval, p))
p                 190 drivers/watchdog/geodewdt.c 		return put_user(timeout, p);
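eurotechwdt.c, geodewdt.c and most of the older drivers below share the legacy misc-device ioctl shape: the argument is an int __user *p that is copied in and out with get_user()/put_user() for the WDIOC_* commands. A minimal, hedged sketch of that idiom; my_wdt_ping() and my_wdt_settimeout() are hypothetical placeholders, not any driver's real helpers:

#include <linux/fs.h>
#include <linux/watchdog.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

static int my_timeout = 60;				/* hypothetical driver state */
static void my_wdt_ping(void) { }			/* placeholder */
static int my_wdt_settimeout(int t) { my_timeout = t; return 0; }

static long my_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	int new_timeout;

	switch (cmd) {
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);			/* copy a plain int out to userspace */
	case WDIOC_KEEPALIVE:
		my_wdt_ping();
		return 0;
	case WDIOC_SETTIMEOUT:
		if (get_user(new_timeout, p))		/* copy a plain int in from userspace */
			return -EFAULT;
		if (my_wdt_settimeout(new_timeout))
			return -EINVAL;
		my_wdt_ping();
		/* fall through: report the timeout actually programmed */
	case WDIOC_GETTIMEOUT:
		return put_user(my_timeout, p);
	default:
		return -ENOTTY;
	}
}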
p                  72 drivers/watchdog/iTCO_wdt.c #define TCOBASE(p)	((p)->tco_res->start)
p                  74 drivers/watchdog/iTCO_wdt.c #define SMI_EN(p)	((p)->smi_res->start)
p                  76 drivers/watchdog/iTCO_wdt.c #define TCO_RLD(p)	(TCOBASE(p) + 0x00) /* TCO Timer Reload/Curr. Value */
p                  77 drivers/watchdog/iTCO_wdt.c #define TCOv1_TMR(p)	(TCOBASE(p) + 0x01) /* TCOv1 Timer Initial Value*/
p                  78 drivers/watchdog/iTCO_wdt.c #define TCO_DAT_IN(p)	(TCOBASE(p) + 0x02) /* TCO Data In Register	*/
p                  79 drivers/watchdog/iTCO_wdt.c #define TCO_DAT_OUT(p)	(TCOBASE(p) + 0x03) /* TCO Data Out Register	*/
p                  80 drivers/watchdog/iTCO_wdt.c #define TCO1_STS(p)	(TCOBASE(p) + 0x04) /* TCO1 Status Register	*/
p                  81 drivers/watchdog/iTCO_wdt.c #define TCO2_STS(p)	(TCOBASE(p) + 0x06) /* TCO2 Status Register	*/
p                  82 drivers/watchdog/iTCO_wdt.c #define TCO1_CNT(p)	(TCOBASE(p) + 0x08) /* TCO1 Control Register	*/
p                  83 drivers/watchdog/iTCO_wdt.c #define TCO2_CNT(p)	(TCOBASE(p) + 0x0a) /* TCO2 Control Register	*/
p                  84 drivers/watchdog/iTCO_wdt.c #define TCOv2_TMR(p)	(TCOBASE(p) + 0x12) /* TCOv2 Timer Initial Value*/
p                 109 drivers/watchdog/iTCO_wdt.c 	int (*update_no_reboot_bit)(void *p, bool set);
p                 140 drivers/watchdog/iTCO_wdt.c static inline unsigned int seconds_to_ticks(struct iTCO_wdt_private *p,
p                 143 drivers/watchdog/iTCO_wdt.c 	return p->iTCO_version == 3 ? secs : (secs * 10) / 6;
p                 146 drivers/watchdog/iTCO_wdt.c static inline unsigned int ticks_to_seconds(struct iTCO_wdt_private *p,
p                 149 drivers/watchdog/iTCO_wdt.c 	return p->iTCO_version == 3 ? ticks : (ticks * 6) / 10;
p                 152 drivers/watchdog/iTCO_wdt.c static inline u32 no_reboot_bit(struct iTCO_wdt_private *p)
p                 156 drivers/watchdog/iTCO_wdt.c 	switch (p->iTCO_version) {
p                 181 drivers/watchdog/iTCO_wdt.c 	struct iTCO_wdt_private *p = priv;
p                 184 drivers/watchdog/iTCO_wdt.c 	pci_read_config_dword(p->pci_dev, 0xd4, &val32);
p                 186 drivers/watchdog/iTCO_wdt.c 		val32 |= no_reboot_bit(p);
p                 188 drivers/watchdog/iTCO_wdt.c 		val32 &= ~no_reboot_bit(p);
p                 189 drivers/watchdog/iTCO_wdt.c 	pci_write_config_dword(p->pci_dev, 0xd4, val32);
p                 190 drivers/watchdog/iTCO_wdt.c 	pci_read_config_dword(p->pci_dev, 0xd4, &newval32);
p                 201 drivers/watchdog/iTCO_wdt.c 	struct iTCO_wdt_private *p = priv;
p                 204 drivers/watchdog/iTCO_wdt.c 	val32 = readl(p->gcs_pmc);
p                 206 drivers/watchdog/iTCO_wdt.c 		val32 |= no_reboot_bit(p);
p                 208 drivers/watchdog/iTCO_wdt.c 		val32 &= ~no_reboot_bit(p);
p                 209 drivers/watchdog/iTCO_wdt.c 	writel(val32, p->gcs_pmc);
p                 210 drivers/watchdog/iTCO_wdt.c 	newval32 = readl(p->gcs_pmc);
p                 221 drivers/watchdog/iTCO_wdt.c 	struct iTCO_wdt_private *p = priv;
p                 224 drivers/watchdog/iTCO_wdt.c 	val = inw(TCO1_CNT(p));
p                 229 drivers/watchdog/iTCO_wdt.c 	outw(val, TCO1_CNT(p));
p                 230 drivers/watchdog/iTCO_wdt.c 	newval = inw(TCO1_CNT(p));
p                 236 drivers/watchdog/iTCO_wdt.c static void iTCO_wdt_no_reboot_bit_setup(struct iTCO_wdt_private *p,
p                 240 drivers/watchdog/iTCO_wdt.c 		p->update_no_reboot_bit = pdata->update_no_reboot_bit;
p                 241 drivers/watchdog/iTCO_wdt.c 		p->no_reboot_priv = pdata->no_reboot_priv;
p                 245 drivers/watchdog/iTCO_wdt.c 	if (p->iTCO_version >= 6)
p                 246 drivers/watchdog/iTCO_wdt.c 		p->update_no_reboot_bit = update_no_reboot_bit_cnt;
p                 247 drivers/watchdog/iTCO_wdt.c 	else if (p->iTCO_version >= 2)
p                 248 drivers/watchdog/iTCO_wdt.c 		p->update_no_reboot_bit = update_no_reboot_bit_mem;
p                 249 drivers/watchdog/iTCO_wdt.c 	else if (p->iTCO_version == 1)
p                 250 drivers/watchdog/iTCO_wdt.c 		p->update_no_reboot_bit = update_no_reboot_bit_pci;
p                 252 drivers/watchdog/iTCO_wdt.c 		p->update_no_reboot_bit = update_no_reboot_bit_def;
p                 254 drivers/watchdog/iTCO_wdt.c 	p->no_reboot_priv = p;
p                 259 drivers/watchdog/iTCO_wdt.c 	struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev);
p                 262 drivers/watchdog/iTCO_wdt.c 	spin_lock(&p->io_lock);
p                 264 drivers/watchdog/iTCO_wdt.c 	iTCO_vendor_pre_start(p->smi_res, wd_dev->timeout);
p                 267 drivers/watchdog/iTCO_wdt.c 	if (p->update_no_reboot_bit(p->no_reboot_priv, false)) {
p                 268 drivers/watchdog/iTCO_wdt.c 		spin_unlock(&p->io_lock);
p                 275 drivers/watchdog/iTCO_wdt.c 	if (p->iTCO_version >= 2)
p                 276 drivers/watchdog/iTCO_wdt.c 		outw(0x01, TCO_RLD(p));
p                 277 drivers/watchdog/iTCO_wdt.c 	else if (p->iTCO_version == 1)
p                 278 drivers/watchdog/iTCO_wdt.c 		outb(0x01, TCO_RLD(p));
p                 281 drivers/watchdog/iTCO_wdt.c 	val = inw(TCO1_CNT(p));
p                 283 drivers/watchdog/iTCO_wdt.c 	outw(val, TCO1_CNT(p));
p                 284 drivers/watchdog/iTCO_wdt.c 	val = inw(TCO1_CNT(p));
p                 285 drivers/watchdog/iTCO_wdt.c 	spin_unlock(&p->io_lock);
p                 294 drivers/watchdog/iTCO_wdt.c 	struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev);
p                 297 drivers/watchdog/iTCO_wdt.c 	spin_lock(&p->io_lock);
p                 299 drivers/watchdog/iTCO_wdt.c 	iTCO_vendor_pre_stop(p->smi_res);
p                 302 drivers/watchdog/iTCO_wdt.c 	val = inw(TCO1_CNT(p));
p                 304 drivers/watchdog/iTCO_wdt.c 	outw(val, TCO1_CNT(p));
p                 305 drivers/watchdog/iTCO_wdt.c 	val = inw(TCO1_CNT(p));
p                 308 drivers/watchdog/iTCO_wdt.c 	p->update_no_reboot_bit(p->no_reboot_priv, true);
p                 310 drivers/watchdog/iTCO_wdt.c 	spin_unlock(&p->io_lock);
p                 319 drivers/watchdog/iTCO_wdt.c 	struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev);
p                 321 drivers/watchdog/iTCO_wdt.c 	spin_lock(&p->io_lock);
p                 324 drivers/watchdog/iTCO_wdt.c 	if (p->iTCO_version >= 2) {
p                 325 drivers/watchdog/iTCO_wdt.c 		outw(0x01, TCO_RLD(p));
p                 326 drivers/watchdog/iTCO_wdt.c 	} else if (p->iTCO_version == 1) {
p                 329 drivers/watchdog/iTCO_wdt.c 		outw(0x0008, TCO1_STS(p));	/* write 1 to clear bit */
p                 331 drivers/watchdog/iTCO_wdt.c 		outb(0x01, TCO_RLD(p));
p                 334 drivers/watchdog/iTCO_wdt.c 	spin_unlock(&p->io_lock);
p                 340 drivers/watchdog/iTCO_wdt.c 	struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev);
p                 345 drivers/watchdog/iTCO_wdt.c 	tmrval = seconds_to_ticks(p, t);
p                 348 drivers/watchdog/iTCO_wdt.c 	if (p->iTCO_version == 1)
p                 355 drivers/watchdog/iTCO_wdt.c 	if ((p->iTCO_version >= 2 && tmrval > 0x3ff) ||
p                 356 drivers/watchdog/iTCO_wdt.c 	    (p->iTCO_version == 1 && tmrval > 0x03f))
p                 360 drivers/watchdog/iTCO_wdt.c 	if (p->iTCO_version >= 2) {
p                 361 drivers/watchdog/iTCO_wdt.c 		spin_lock(&p->io_lock);
p                 362 drivers/watchdog/iTCO_wdt.c 		val16 = inw(TCOv2_TMR(p));
p                 365 drivers/watchdog/iTCO_wdt.c 		outw(val16, TCOv2_TMR(p));
p                 366 drivers/watchdog/iTCO_wdt.c 		val16 = inw(TCOv2_TMR(p));
p                 367 drivers/watchdog/iTCO_wdt.c 		spin_unlock(&p->io_lock);
p                 371 drivers/watchdog/iTCO_wdt.c 	} else if (p->iTCO_version == 1) {
p                 372 drivers/watchdog/iTCO_wdt.c 		spin_lock(&p->io_lock);
p                 373 drivers/watchdog/iTCO_wdt.c 		val8 = inb(TCOv1_TMR(p));
p                 376 drivers/watchdog/iTCO_wdt.c 		outb(val8, TCOv1_TMR(p));
p                 377 drivers/watchdog/iTCO_wdt.c 		val8 = inb(TCOv1_TMR(p));
p                 378 drivers/watchdog/iTCO_wdt.c 		spin_unlock(&p->io_lock);
p                 390 drivers/watchdog/iTCO_wdt.c 	struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev);
p                 396 drivers/watchdog/iTCO_wdt.c 	if (p->iTCO_version >= 2) {
p                 397 drivers/watchdog/iTCO_wdt.c 		spin_lock(&p->io_lock);
p                 398 drivers/watchdog/iTCO_wdt.c 		val16 = inw(TCO_RLD(p));
p                 400 drivers/watchdog/iTCO_wdt.c 		spin_unlock(&p->io_lock);
p                 402 drivers/watchdog/iTCO_wdt.c 		time_left = ticks_to_seconds(p, val16);
p                 403 drivers/watchdog/iTCO_wdt.c 	} else if (p->iTCO_version == 1) {
p                 404 drivers/watchdog/iTCO_wdt.c 		spin_lock(&p->io_lock);
p                 405 drivers/watchdog/iTCO_wdt.c 		val8 = inb(TCO_RLD(p));
p                 407 drivers/watchdog/iTCO_wdt.c 		if (!(inw(TCO1_STS(p)) & 0x0008))
p                 408 drivers/watchdog/iTCO_wdt.c 			val8 += (inb(TCOv1_TMR(p)) & 0x3f);
p                 409 drivers/watchdog/iTCO_wdt.c 		spin_unlock(&p->io_lock);
p                 411 drivers/watchdog/iTCO_wdt.c 		time_left = ticks_to_seconds(p, val8);
p                 445 drivers/watchdog/iTCO_wdt.c 	struct iTCO_wdt_private *p;
p                 452 drivers/watchdog/iTCO_wdt.c 	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
p                 453 drivers/watchdog/iTCO_wdt.c 	if (!p)
p                 456 drivers/watchdog/iTCO_wdt.c 	spin_lock_init(&p->io_lock);
p                 458 drivers/watchdog/iTCO_wdt.c 	p->tco_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_TCO);
p                 459 drivers/watchdog/iTCO_wdt.c 	if (!p->tco_res)
p                 462 drivers/watchdog/iTCO_wdt.c 	p->iTCO_version = pdata->version;
p                 463 drivers/watchdog/iTCO_wdt.c 	p->pci_dev = to_pci_dev(dev->parent);
p                 465 drivers/watchdog/iTCO_wdt.c 	p->smi_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_SMI);
p                 466 drivers/watchdog/iTCO_wdt.c 	if (p->smi_res) {
p                 468 drivers/watchdog/iTCO_wdt.c 		if (!devm_request_region(dev, p->smi_res->start,
p                 469 drivers/watchdog/iTCO_wdt.c 					 resource_size(p->smi_res),
p                 472 drivers/watchdog/iTCO_wdt.c 			       (u64)SMI_EN(p));
p                 476 drivers/watchdog/iTCO_wdt.c 		   turn_SMI_watchdog_clear_off >= p->iTCO_version) {
p                 481 drivers/watchdog/iTCO_wdt.c 	iTCO_wdt_no_reboot_bit_setup(p, pdata);
p                 487 drivers/watchdog/iTCO_wdt.c 	if (p->iTCO_version >= 2 && p->iTCO_version < 6 &&
p                 489 drivers/watchdog/iTCO_wdt.c 		p->gcs_pmc_res = platform_get_resource(pdev,
p                 492 drivers/watchdog/iTCO_wdt.c 		p->gcs_pmc = devm_ioremap_resource(dev, p->gcs_pmc_res);
p                 493 drivers/watchdog/iTCO_wdt.c 		if (IS_ERR(p->gcs_pmc))
p                 494 drivers/watchdog/iTCO_wdt.c 			return PTR_ERR(p->gcs_pmc);
p                 498 drivers/watchdog/iTCO_wdt.c 	if (p->update_no_reboot_bit(p->no_reboot_priv, false) &&
p                 505 drivers/watchdog/iTCO_wdt.c 	p->update_no_reboot_bit(p->no_reboot_priv, true);
p                 507 drivers/watchdog/iTCO_wdt.c 	if (turn_SMI_watchdog_clear_off >= p->iTCO_version) {
p                 512 drivers/watchdog/iTCO_wdt.c 		val32 = inl(SMI_EN(p));
p                 514 drivers/watchdog/iTCO_wdt.c 		outl(val32, SMI_EN(p));
p                 517 drivers/watchdog/iTCO_wdt.c 	if (!devm_request_region(dev, p->tco_res->start,
p                 518 drivers/watchdog/iTCO_wdt.c 				 resource_size(p->tco_res),
p                 521 drivers/watchdog/iTCO_wdt.c 		       (u64)TCOBASE(p));
p                 526 drivers/watchdog/iTCO_wdt.c 		pdata->name, pdata->version, (u64)TCOBASE(p));
p                 529 drivers/watchdog/iTCO_wdt.c 	switch (p->iTCO_version) {
p                 533 drivers/watchdog/iTCO_wdt.c 		outw(0x0008, TCO1_STS(p)); /* Clear the Time Out Status bit */
p                 534 drivers/watchdog/iTCO_wdt.c 		outw(0x0002, TCO2_STS(p)); /* Clear SECOND_TO_STS bit */
p                 537 drivers/watchdog/iTCO_wdt.c 		outl(0x20008, TCO1_STS(p));
p                 542 drivers/watchdog/iTCO_wdt.c 		outw(0x0008, TCO1_STS(p)); /* Clear the Time Out Status bit */
p                 543 drivers/watchdog/iTCO_wdt.c 		outw(0x0002, TCO2_STS(p)); /* Clear SECOND_TO_STS bit */
p                 544 drivers/watchdog/iTCO_wdt.c 		outw(0x0004, TCO2_STS(p)); /* Clear BOOT_STS bit */
p                 548 drivers/watchdog/iTCO_wdt.c 	p->wddev.info =	&ident,
p                 549 drivers/watchdog/iTCO_wdt.c 	p->wddev.ops = &iTCO_wdt_ops,
p                 550 drivers/watchdog/iTCO_wdt.c 	p->wddev.bootstatus = 0;
p                 551 drivers/watchdog/iTCO_wdt.c 	p->wddev.timeout = WATCHDOG_TIMEOUT;
p                 552 drivers/watchdog/iTCO_wdt.c 	watchdog_set_nowayout(&p->wddev, nowayout);
p                 553 drivers/watchdog/iTCO_wdt.c 	p->wddev.parent = dev;
p                 555 drivers/watchdog/iTCO_wdt.c 	watchdog_set_drvdata(&p->wddev, p);
p                 556 drivers/watchdog/iTCO_wdt.c 	platform_set_drvdata(pdev, p);
p                 559 drivers/watchdog/iTCO_wdt.c 	iTCO_wdt_stop(&p->wddev);
p                 563 drivers/watchdog/iTCO_wdt.c 	if (iTCO_wdt_set_timeout(&p->wddev, heartbeat)) {
p                 564 drivers/watchdog/iTCO_wdt.c 		iTCO_wdt_set_timeout(&p->wddev, WATCHDOG_TIMEOUT);
p                 569 drivers/watchdog/iTCO_wdt.c 	watchdog_stop_on_reboot(&p->wddev);
p                 570 drivers/watchdog/iTCO_wdt.c 	watchdog_stop_on_unregister(&p->wddev);
p                 571 drivers/watchdog/iTCO_wdt.c 	ret = devm_watchdog_register_device(dev, &p->wddev);
p                 601 drivers/watchdog/iTCO_wdt.c 	struct iTCO_wdt_private *p = dev_get_drvdata(dev);
p                 604 drivers/watchdog/iTCO_wdt.c 	p->suspended = false;
p                 605 drivers/watchdog/iTCO_wdt.c 	if (watchdog_active(&p->wddev) && need_suspend()) {
p                 606 drivers/watchdog/iTCO_wdt.c 		ret = iTCO_wdt_stop(&p->wddev);
p                 608 drivers/watchdog/iTCO_wdt.c 			p->suspended = true;
p                 615 drivers/watchdog/iTCO_wdt.c 	struct iTCO_wdt_private *p = dev_get_drvdata(dev);
p                 617 drivers/watchdog/iTCO_wdt.c 	if (p->suspended)
p                 618 drivers/watchdog/iTCO_wdt.c 		iTCO_wdt_start(&p->wddev);
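Two ideas recur through the iTCO_wdt.c lines above: every TCO register is addressed relative to TCOBASE(p), so a single macro set covers all chipset generations, and the NO_REBOOT handling is chosen once in iTCO_wdt_no_reboot_bit_setup() and afterwards reached only through p->update_no_reboot_bit(), so the hot paths never re-test the version. A simplified, hedged sketch of that dispatch shape; the struct and macro below are stand-ins, not the driver's actual code:

#include <linux/spinlock.h>
#include <linux/io.h>

struct my_tco {					/* simplified stand-in for the private struct */
	spinlock_t io_lock;
	unsigned int base;			/* base I/O port, as TCOBASE() would return */
	int (*update_no_reboot_bit)(void *priv, bool set);
	void *no_reboot_priv;
};

#define MY_TCO_RLD(p)	((p)->base + 0x00)	/* reload register, base-relative */

static int my_tco_kick(struct my_tco *p)
{
	int ret;

	spin_lock(&p->io_lock);
	/* clearing NO_REBOOT can fail; report that rather than kicking blindly */
	ret = p->update_no_reboot_bit(p->no_reboot_priv, false);
	if (!ret)
		outw(0x01, MY_TCO_RLD(p));	/* any write reloads the timer */
	spin_unlock(&p->io_lock);
	return ret;
}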
p                 171 drivers/watchdog/ib700wdt.c 	int __user *p = argp;
p                 188 drivers/watchdog/ib700wdt.c 		return put_user(0, p);
p                 194 drivers/watchdog/ib700wdt.c 		if (get_user(options, p))
p                 212 drivers/watchdog/ib700wdt.c 		if (get_user(new_margin, p))
p                 220 drivers/watchdog/ib700wdt.c 		return put_user(timeout, p);
p                 278 drivers/watchdog/ibmasr.c 	int __user *p = argp;
p                 286 drivers/watchdog/ibmasr.c 		return put_user(0, p);
p                 290 drivers/watchdog/ibmasr.c 		if (get_user(new_options, p))
p                 312 drivers/watchdog/ibmasr.c 		return put_user(heartbeat, p);
p                 352 drivers/watchdog/intel_scu_watchdog.c 	u32 __user *p = argp;
p                 371 drivers/watchdog/intel_scu_watchdog.c 		return put_user(0, p);
p                 377 drivers/watchdog/intel_scu_watchdog.c 		if (get_user(new_margin, p))
p                 387 drivers/watchdog/intel_scu_watchdog.c 		return put_user(watchdog_device.soft_threshold, p);
p                 258 drivers/watchdog/it8712f_wdt.c 	int __user *p = argp;
p                 283 drivers/watchdog/it8712f_wdt.c 		return put_user(value, p);
p                 285 drivers/watchdog/it8712f_wdt.c 		return put_user(0, p);
p                 290 drivers/watchdog/it8712f_wdt.c 		if (get_user(value, p))
p                 308 drivers/watchdog/it8712f_wdt.c 		if (put_user(margin, p))
p                 344 drivers/watchdog/kempld_wdt.c 	int __user *p = argp;
p                 349 drivers/watchdog/kempld_wdt.c 		if (get_user(new_value, p))
p                 306 drivers/watchdog/machzwd.c 	int __user *p = argp;
p                 314 drivers/watchdog/machzwd.c 		return put_user(0, p);
p                 188 drivers/watchdog/max63xx_wdt.c static int max63xx_mmap_init(struct platform_device *p, struct max63xx_wdt *wdt)
p                 190 drivers/watchdog/max63xx_wdt.c 	wdt->base = devm_platform_ioremap_resource(p, 0);
p                 196 drivers/watchdog/mixcomwd.c 	int __user *p = argp;
p                 213 drivers/watchdog/mixcomwd.c 		return put_user(status, p);
p                 215 drivers/watchdog/mixcomwd.c 		return put_user(0, p);
p                 135 drivers/watchdog/mtx-1_wdt.c 	int __user *p = (int __user *)argp;
p                 149 drivers/watchdog/mtx-1_wdt.c 		put_user(0, p);
p                 152 drivers/watchdog/mtx-1_wdt.c 		if (get_user(value, p))
p                 216 drivers/watchdog/nv_tco.c 	int __user *p = argp;
p                 230 drivers/watchdog/nv_tco.c 		return put_user(0, p);
p                 232 drivers/watchdog/nv_tco.c 		if (get_user(new_options, p))
p                 248 drivers/watchdog/nv_tco.c 		if (get_user(new_heartbeat, p))
p                 255 drivers/watchdog/nv_tco.c 		return put_user(heartbeat, p);
p                 467 drivers/watchdog/pcwd_pci.c 	int __user *p = argp;
p                 486 drivers/watchdog/pcwd_pci.c 		return put_user(status, p);
p                 490 drivers/watchdog/pcwd_pci.c 		return put_user(pcipcwd_private.boot_status, p);
p                 499 drivers/watchdog/pcwd_pci.c 		return put_user(temperature, p);
p                 506 drivers/watchdog/pcwd_pci.c 		if (get_user(new_options, p))
p                 537 drivers/watchdog/pcwd_pci.c 		if (get_user(new_heartbeat, p))
p                 548 drivers/watchdog/pcwd_pci.c 		return put_user(heartbeat, p);
p                 557 drivers/watchdog/pcwd_pci.c 		return put_user(time_left, p);
p                 392 drivers/watchdog/pcwd_usb.c 	int __user *p = argp;
p                 407 drivers/watchdog/pcwd_usb.c 		return put_user(0, p);
p                 416 drivers/watchdog/pcwd_usb.c 		return put_user(temperature, p);
p                 423 drivers/watchdog/pcwd_usb.c 		if (get_user(new_options, p))
p                 447 drivers/watchdog/pcwd_usb.c 		if (get_user(new_heartbeat, p))
p                 458 drivers/watchdog/pcwd_usb.c 		return put_user(heartbeat, p);
p                 467 drivers/watchdog/pcwd_usb.c 		return put_user(time_left, p);
p                 177 drivers/watchdog/pika_wdt.c 	int __user *p = argp;
p                 185 drivers/watchdog/pika_wdt.c 		return put_user(0, p);
p                 188 drivers/watchdog/pika_wdt.c 		return put_user(pikawdt_private.bootstatus, p);
p                 195 drivers/watchdog/pika_wdt.c 		if (get_user(new_value, p))
p                 201 drivers/watchdog/pika_wdt.c 		return put_user(new_value, p);  /* return current value */
p                 204 drivers/watchdog/pika_wdt.c 		return put_user(heartbeat, p);
p                  40 drivers/watchdog/pnx4008_wdt.c #define WDTIM_INT(p)     ((p) + 0x0)
p                  41 drivers/watchdog/pnx4008_wdt.c #define WDTIM_CTRL(p)    ((p) + 0x4)
p                  42 drivers/watchdog/pnx4008_wdt.c #define WDTIM_COUNTER(p) ((p) + 0x8)
p                  43 drivers/watchdog/pnx4008_wdt.c #define WDTIM_MCTRL(p)   ((p) + 0xC)
p                  44 drivers/watchdog/pnx4008_wdt.c #define WDTIM_MATCH0(p)  ((p) + 0x10)
p                  45 drivers/watchdog/pnx4008_wdt.c #define WDTIM_EMR(p)     ((p) + 0x14)
p                  46 drivers/watchdog/pnx4008_wdt.c #define WDTIM_PULSE(p)   ((p) + 0x18)
p                  47 drivers/watchdog/pnx4008_wdt.c #define WDTIM_RES(p)     ((p) + 0x1C)
p                  35 drivers/watchdog/renesas_wdt.c #define MUL_BY_CLKS_PER_SEC(p, d) \
p                  36 drivers/watchdog/renesas_wdt.c 	DIV_ROUND_UP((d) * (p)->clk_rate, clk_divs[(p)->cks])
p                  39 drivers/watchdog/renesas_wdt.c #define DIV_BY_CLKS_PER_SEC(p, d) ((d) * clk_divs[(p)->cks] / (p)->clk_rate)
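The renesas_wdt.c macros above convert between seconds and hardware ticks using the input clock rate and a divider table, rounding up on the multiply so a fractional tick is not dropped. A small hedged sketch of the same arithmetic over hypothetical fields:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

struct my_wdt_clk {
	unsigned long clk_rate;		/* input clock in Hz */
	unsigned int div;		/* selected divider */
};

/* seconds -> ticks, rounding up so a partial tick still gets counted */
static unsigned long my_secs_to_ticks(const struct my_wdt_clk *w, unsigned int secs)
{
	return DIV_ROUND_UP((unsigned long)secs * w->clk_rate, w->div);
}

/* ticks -> seconds, truncating, e.g. for reporting time left */
static unsigned int my_ticks_to_secs(const struct my_wdt_clk *w, unsigned long ticks)
{
	return ticks * w->div / w->clk_rate;
}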
p                  68 drivers/watchdog/riowd.c static void riowd_writereg(struct riowd *p, u8 val, int index)
p                  72 drivers/watchdog/riowd.c 	spin_lock_irqsave(&p->lock, flags);
p                  73 drivers/watchdog/riowd.c 	writeb(index, p->regs + 0);
p                  74 drivers/watchdog/riowd.c 	writeb(val, p->regs + 1);
p                  75 drivers/watchdog/riowd.c 	spin_unlock_irqrestore(&p->lock, flags);
p                  97 drivers/watchdog/riowd.c 	struct riowd *p = riowd_device;
p                 114 drivers/watchdog/riowd.c 		riowd_writereg(p, riowd_timeout, WDTO_INDEX);
p                 122 drivers/watchdog/riowd.c 			riowd_writereg(p, 0, WDTO_INDEX);
p                 124 drivers/watchdog/riowd.c 			riowd_writereg(p, riowd_timeout, WDTO_INDEX);
p                 136 drivers/watchdog/riowd.c 		riowd_writereg(p, riowd_timeout, WDTO_INDEX);
p                 152 drivers/watchdog/riowd.c 	struct riowd *p = riowd_device;
p                 155 drivers/watchdog/riowd.c 		riowd_writereg(p, riowd_timeout, WDTO_INDEX);
p                 179 drivers/watchdog/riowd.c 	struct riowd *p;
p                 186 drivers/watchdog/riowd.c 	p = devm_kzalloc(&op->dev, sizeof(*p), GFP_KERNEL);
p                 187 drivers/watchdog/riowd.c 	if (!p)
p                 190 drivers/watchdog/riowd.c 	spin_lock_init(&p->lock);
p                 192 drivers/watchdog/riowd.c 	p->regs = of_ioremap(&op->resource[0], 0, 2, DRIVER_NAME);
p                 193 drivers/watchdog/riowd.c 	if (!p->regs) {
p                 198 drivers/watchdog/riowd.c 	riowd_device = p;
p                 207 drivers/watchdog/riowd.c 		riowd_timeout, p->regs);
p                 209 drivers/watchdog/riowd.c 	platform_set_drvdata(op, p);
p                 214 drivers/watchdog/riowd.c 	of_iounmap(&op->resource[0], p->regs, 2);
p                 222 drivers/watchdog/riowd.c 	struct riowd *p = platform_get_drvdata(op);
p                 225 drivers/watchdog/riowd.c 	of_iounmap(&op->resource[0], p->regs, 2);
p                  97 drivers/watchdog/sa1100_wdt.c 	int __user *p = argp;
p                 106 drivers/watchdog/sa1100_wdt.c 		ret = put_user(0, p);
p                 110 drivers/watchdog/sa1100_wdt.c 		ret = put_user(boot_status, p);
p                 119 drivers/watchdog/sa1100_wdt.c 		ret = get_user(time, p);
p                 133 drivers/watchdog/sa1100_wdt.c 		ret = put_user(pre_margin / oscr_freq, p);
p                 175 drivers/watchdog/sb_wdog.c 	int __user *p = argp;
p                 184 drivers/watchdog/sb_wdog.c 		ret = put_user(0, p);
p                 193 drivers/watchdog/sb_wdog.c 		ret = get_user(time, p);
p                 212 drivers/watchdog/sb_wdog.c 		ret = put_user((u32)__raw_readq(user_dog - 8) / 1000000, p);
p                 224 drivers/watchdog/sbc60xxwdt.c 	int __user *p = argp;
p                 237 drivers/watchdog/sbc60xxwdt.c 		return put_user(0, p);
p                 241 drivers/watchdog/sbc60xxwdt.c 		if (get_user(new_options, p))
p                 259 drivers/watchdog/sbc60xxwdt.c 		if (get_user(new_timeout, p))
p                 270 drivers/watchdog/sbc60xxwdt.c 		return put_user(timeout, p);
p                 185 drivers/watchdog/sc1200wdt.c 	int __user *p = argp;
p                 200 drivers/watchdog/sc1200wdt.c 		return put_user(sc1200wdt_status(), p);
p                 203 drivers/watchdog/sc1200wdt.c 		return put_user(0, p);
p                 209 drivers/watchdog/sc1200wdt.c 		if (get_user(options, p))
p                 229 drivers/watchdog/sc1200wdt.c 		if (get_user(new_timeout, p))
p                 240 drivers/watchdog/sc1200wdt.c 		return put_user(timeout * 60, p);
p                 276 drivers/watchdog/sc520_wdt.c 	int __user *p = argp;
p                 289 drivers/watchdog/sc520_wdt.c 		return put_user(0, p);
p                 294 drivers/watchdog/sc520_wdt.c 		if (get_user(new_options, p))
p                 316 drivers/watchdog/sc520_wdt.c 		if (get_user(new_timeout, p))
p                 326 drivers/watchdog/sc520_wdt.c 		return put_user(timeout, p);
p                 249 drivers/watchdog/sch311x_wdt.c 	int __user *p = argp;
p                 267 drivers/watchdog/sch311x_wdt.c 		return put_user(status, p);
p                 270 drivers/watchdog/sch311x_wdt.c 		return put_user(sch311x_wdt_data.boot_status, p);
p                 276 drivers/watchdog/sch311x_wdt.c 		if (get_user(options, p))
p                 293 drivers/watchdog/sch311x_wdt.c 		if (get_user(new_timeout, p))
p                 300 drivers/watchdog/sch311x_wdt.c 		return put_user(timeout, p);
p                 159 drivers/watchdog/scx200_wdt.c 	int __user *p = argp;
p                 175 drivers/watchdog/scx200_wdt.c 		if (put_user(0, p))
p                 182 drivers/watchdog/scx200_wdt.c 		if (get_user(new_margin, p))
p                 191 drivers/watchdog/scx200_wdt.c 		if (put_user(margin, p))
p                 242 drivers/watchdog/w83877f_wdt.c 	int __user *p = argp;
p                 255 drivers/watchdog/w83877f_wdt.c 		return put_user(0, p);
p                 260 drivers/watchdog/w83877f_wdt.c 		if (get_user(new_options, p))
p                 282 drivers/watchdog/w83877f_wdt.c 		if (get_user(new_timeout, p))
p                 294 drivers/watchdog/w83877f_wdt.c 		return put_user(timeout, p);
p                 127 drivers/watchdog/wafer5823wdt.c 	int __user *p = argp;
p                 143 drivers/watchdog/wafer5823wdt.c 		return put_user(0, p);
p                 149 drivers/watchdog/wafer5823wdt.c 		if (get_user(options, p))
p                 170 drivers/watchdog/wafer5823wdt.c 		if (get_user(new_timeout, p))
p                 179 drivers/watchdog/wafer5823wdt.c 		return put_user(timeout, p);
p                  67 drivers/watchdog/watchdog_core.c 	struct list_head *p, *n;
p                  70 drivers/watchdog/watchdog_core.c 	list_for_each_safe(p, n, &wtd_deferred_reg_list) {
p                  71 drivers/watchdog/watchdog_core.c 		wdd_tmp = list_entry(p, struct watchdog_device,
p                 698 drivers/watchdog/watchdog_dev.c 	int __user *p = argp;
p                 721 drivers/watchdog/watchdog_dev.c 		err = put_user(val, p);
p                 724 drivers/watchdog/watchdog_dev.c 		err = put_user(wdd->bootstatus, p);
p                 727 drivers/watchdog/watchdog_dev.c 		if (get_user(val, p)) {
p                 747 drivers/watchdog/watchdog_dev.c 		if (get_user(val, p)) {
p                 767 drivers/watchdog/watchdog_dev.c 		err = put_user(wdd->timeout, p);
p                 773 drivers/watchdog/watchdog_dev.c 		err = put_user(val, p);
p                 776 drivers/watchdog/watchdog_dev.c 		if (get_user(val, p)) {
p                 783 drivers/watchdog/watchdog_dev.c 		err = put_user(wdd->pretimeout, p);
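The watchdog_dev.c lines above are the framework side of the same WDIOC_* traffic: the core owns the ioctl switch and does the get_user()/put_user() copies, so a modern driver only fills in a watchdog_device and a watchdog_ops table. A minimal hedged sketch of that registration; all names here are hypothetical:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#include <linux/errno.h>

static int my_wd_start(struct watchdog_device *wdd) { return 0; }	/* placeholders */
static int my_wd_stop(struct watchdog_device *wdd) { return 0; }

static const struct watchdog_info my_wd_info = {
	.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
	.identity = "example watchdog",
};

static const struct watchdog_ops my_wd_ops = {
	.owner = THIS_MODULE,
	.start = my_wd_start,
	.stop  = my_wd_stop,
};

static int my_wd_probe(struct platform_device *pdev)
{
	struct watchdog_device *wdd;

	wdd = devm_kzalloc(&pdev->dev, sizeof(*wdd), GFP_KERNEL);
	if (!wdd)
		return -ENOMEM;

	wdd->info = &my_wd_info;
	wdd->ops = &my_wd_ops;
	wdd->timeout = 60;
	wdd->parent = &pdev->dev;

	/* the core now answers WDIOC_SETTIMEOUT, WDIOC_KEEPALIVE, ... on our behalf */
	return devm_watchdog_register_device(&pdev->dev, wdd);
}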
p                 116 drivers/watchdog/watchdog_pretimeout.c 	struct watchdog_pretimeout *p;
p                 139 drivers/watchdog/watchdog_pretimeout.c 		list_for_each_entry(p, &pretimeout_list, entry)
p                 140 drivers/watchdog/watchdog_pretimeout.c 			if (!p->wdd->gov)
p                 141 drivers/watchdog/watchdog_pretimeout.c 				p->wdd->gov = default_gov;
p                 153 drivers/watchdog/watchdog_pretimeout.c 	struct watchdog_pretimeout *p;
p                 167 drivers/watchdog/watchdog_pretimeout.c 	list_for_each_entry(p, &pretimeout_list, entry)
p                 168 drivers/watchdog/watchdog_pretimeout.c 		if (p->wdd->gov == gov)
p                 169 drivers/watchdog/watchdog_pretimeout.c 			p->wdd->gov = default_gov;
p                 178 drivers/watchdog/watchdog_pretimeout.c 	struct watchdog_pretimeout *p;
p                 183 drivers/watchdog/watchdog_pretimeout.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 184 drivers/watchdog/watchdog_pretimeout.c 	if (!p)
p                 188 drivers/watchdog/watchdog_pretimeout.c 	list_add(&p->entry, &pretimeout_list);
p                 189 drivers/watchdog/watchdog_pretimeout.c 	p->wdd = wdd;
p                 198 drivers/watchdog/watchdog_pretimeout.c 	struct watchdog_pretimeout *p, *t;
p                 206 drivers/watchdog/watchdog_pretimeout.c 	list_for_each_entry_safe(p, t, &pretimeout_list, entry) {
p                 207 drivers/watchdog/watchdog_pretimeout.c 		if (p->wdd == wdd) {
p                 208 drivers/watchdog/watchdog_pretimeout.c 			list_del(&p->entry);
p                 214 drivers/watchdog/watchdog_pretimeout.c 	kfree(p);
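watchdog_pretimeout.c above shows the usual lifetime pattern for entries on a shared list: kzalloc plus list_add under a lock on registration, and list_for_each_entry_safe on teardown because the node under the cursor is deleted mid-walk. A small hedged sketch of that pattern in isolation:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

struct my_item {
	struct list_head entry;
	int key;
};

static LIST_HEAD(my_list);
static DEFINE_SPINLOCK(my_lock);

static int my_item_add(int key)
{
	struct my_item *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	p->key = key;

	spin_lock(&my_lock);
	list_add(&p->entry, &my_list);
	spin_unlock(&my_lock);
	return 0;
}

static void my_item_del(int key)
{
	struct my_item *p, *t;

	spin_lock(&my_lock);
	/* the _safe variant caches the next node in 't', so deleting 'p' is legal */
	list_for_each_entry_safe(p, t, &my_list, entry) {
		if (p->key == key) {
			list_del(&p->entry);
			spin_unlock(&my_lock);
			kfree(p);		/* free outside the lock */
			return;
		}
	}
	spin_unlock(&my_lock);
}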
p                 354 drivers/watchdog/wdt.c 	int __user *p = argp;
p                 380 drivers/watchdog/wdt.c 		return put_user(status, p);
p                 382 drivers/watchdog/wdt.c 		return put_user(0, p);
p                 387 drivers/watchdog/wdt.c 		if (get_user(new_heartbeat, p))
p                 394 drivers/watchdog/wdt.c 		return put_user(heartbeat, p);
p                 391 drivers/watchdog/wdt_pci.c 	int __user *p = argp;
p                 417 drivers/watchdog/wdt_pci.c 		return put_user(status, p);
p                 419 drivers/watchdog/wdt_pci.c 		return put_user(0, p);
p                 424 drivers/watchdog/wdt_pci.c 		if (get_user(new_heartbeat, p))
p                 431 drivers/watchdog/wdt_pci.c 		return put_user(heartbeat, p);
p                 371 drivers/xen/balloon.c 	struct page *p;
p                 376 drivers/xen/balloon.c 		p = pfn_to_page(start_pfn + i);
p                 377 drivers/xen/balloon.c 		__online_page_set_limits(p);
p                 378 drivers/xen/balloon.c 		balloon_append(p);
p                 194 drivers/xen/evtchn.c 	unsigned int c, p, bytes1 = 0, bytes2 = 0;
p                 214 drivers/xen/evtchn.c 		p = u->ring_prod;
p                 215 drivers/xen/evtchn.c 		if (c != p)
p                 230 drivers/xen/evtchn.c 	if (((c ^ p) & u->ring_size) != 0) {
p                 233 drivers/xen/evtchn.c 		bytes2 = evtchn_ring_offset(u, p) * sizeof(evtchn_port_t);
p                 235 drivers/xen/evtchn.c 		bytes1 = (p - c) * sizeof(evtchn_port_t);
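The evtchn.c read path above works on a power-of-two ring with free-running producer/consumer indices: (c ^ p) & ring_size tells whether the two indices sit in different laps, in which case the copy has to be split at the physical end of the buffer. A simplified, hedged sketch of that split for a ring of u32 slots:

#include <linux/types.h>
#include <linux/string.h>

/* Copy everything between free-running 'cons' and 'prod' out of a power-of-two ring. */
static size_t my_ring_copy_out(const u32 *ring, unsigned int size,
			       unsigned int cons, unsigned int prod, u32 *dst)
{
	unsigned int mask = size - 1;
	size_t n1, n2;

	if (((cons ^ prod) & size) != 0) {
		/* indices are in different laps: copy the tail, then the wrapped head */
		n1 = size - (cons & mask);
		n2 = prod & mask;
	} else {
		n1 = prod - cons;		/* contiguous: one copy is enough */
		n2 = 0;
	}

	memcpy(dst, ring + (cons & mask), n1 * sizeof(u32));
	memcpy(dst + n1, ring, n2 * sizeof(u32));
	return n1 + n2;
}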
p                 155 drivers/xen/mcelog.c 	int __user *p = (int __user *)arg;
p                 162 drivers/xen/mcelog.c 		return put_user(sizeof(struct xen_mce), p);
p                 164 drivers/xen/mcelog.c 		return put_user(XEN_MCE_LOG_LEN, p);
p                 172 drivers/xen/mcelog.c 		return put_user(flags, p);
p                  95 drivers/xen/privcmd.c 	struct page *p, *n;
p                  97 drivers/xen/privcmd.c 	list_for_each_entry_safe(p, n, pages, lru)
p                  98 drivers/xen/privcmd.c 		__free_page(p);
p                  88 drivers/xen/swiotlb-xen.c static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
p                  90 drivers/xen/swiotlb-xen.c 	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
p                  91 drivers/xen/swiotlb-xen.c 	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
p                 125 drivers/xen/swiotlb-xen.c 	phys_addr_t p = virt_to_phys(buf);
p                 135 drivers/xen/swiotlb-xen.c 				p + (i << IO_TLB_SHIFT),
p                  27 drivers/xen/time.c static u64 get64(const u64 *p)
p                  32 drivers/xen/time.c 		u32 *p32 = (u32 *)p;
p                  49 drivers/xen/time.c 		ret = READ_ONCE(*p);
p                1501 drivers/xen/xen-scsiback.c 				  struct se_session *se_sess, void *p)
p                1506 drivers/xen/xen-scsiback.c 	tpg->tpg_nexus = p;
p                 561 drivers/xen/xenbus/xenbus_probe.c 	const char *p, *root;
p                 573 drivers/xen/xenbus/xenbus_probe.c 	p = strchr(node, '/') + 1;
p                 574 drivers/xen/xenbus/xenbus_probe.c 	snprintf(type, XEN_BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
p                 379 drivers/xen/xenbus/xenbus_xs.c 	const char *p;
p                 381 drivers/xen/xenbus/xenbus_xs.c 	for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
p                 401 drivers/xen/xenbus/xenbus_xs.c 	char *p, **ret;
p                 416 drivers/xen/xenbus/xenbus_xs.c 	for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
p                 417 drivers/xen/xenbus/xenbus_xs.c 		ret[(*num)++] = p;
p                 639 drivers/xen/xenbus/xenbus_xs.c 		char *p;
p                 641 drivers/xen/xenbus/xenbus_xs.c 		p = xenbus_read(t, dir, name, NULL);
p                 642 drivers/xen/xenbus/xenbus_xs.c 		if (IS_ERR(p)) {
p                 643 drivers/xen/xenbus/xenbus_xs.c 			ret = PTR_ERR(p);
p                 647 drivers/xen/xenbus/xenbus_xs.c 			if (sscanf(p, fmt, result) == 0)
p                 649 drivers/xen/xenbus/xenbus_xs.c 			kfree(p);
p                 651 drivers/xen/xenbus/xenbus_xs.c 			*(char **)result = p;
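The xenbus_xs.c lines show how a reply consisting of NUL-separated strings is handled: one pass counts the entries by hopping strlen()+1 at a time, a second pass records a pointer to each. A hedged sketch of the same two-pass split (unlike the real code, this version returns pointers into the caller's buffer rather than copying it):

#include <linux/string.h>
#include <linux/slab.h>

static char **my_split_strings(char *strings, unsigned int len, unsigned int *num)
{
	char *p, **ret;
	unsigned int n = 0;

	for (p = strings; p < strings + len; p += strlen(p) + 1)
		n++;				/* first pass: count entries */

	ret = kmalloc_array(n + 1, sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	*num = 0;
	for (p = strings; p < strings + len; p += strlen(p) + 1)
		ret[(*num)++] = p;		/* second pass: record each start */
	ret[*num] = NULL;

	return ret;
}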
p                  77 drivers/xen/xenfs/xensyms.c static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos)
p                  86 drivers/xen/xenfs/xensyms.c 	return p;
p                  89 drivers/xen/xenfs/xensyms.c static int xensyms_show(struct seq_file *m, void *p)
p                 100 drivers/xen/xenfs/xensyms.c static void xensyms_stop(struct seq_file *m, void *p)
p                 166 fs/9p/v9fs.c   	char *p;
p                 190 fs/9p/v9fs.c   	while ((p = strsep(&options, ",")) != NULL) {
p                 192 fs/9p/v9fs.c   		if (!*p)
p                 194 fs/9p/v9fs.c   		token = match_token(p, tokens, args);
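The v9fs.c lines above are the standard mount-option loop, and the same idiom reappears in the adfs, affs, autofs and befs entries below: strsep() splits the option string on commas, match_token() classifies each word against a match_table_t, and match_int()/match_strdup() pull out any argument. A hedged, self-contained sketch of the idiom; the token names here are hypothetical:

#include <linux/parser.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/errno.h>

enum { Opt_debug, Opt_uid, Opt_err };

static const match_table_t my_tokens = {
	{ Opt_debug, "debug" },
	{ Opt_uid,   "uid=%d" },
	{ Opt_err,   NULL }
};

static int my_parse_options(char *options, int *debug, int *uid)
{
	substring_t args[MAX_OPT_ARGS];
	char *p;
	int token, option;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)		/* skip empty fields produced by ",," */
			continue;

		token = match_token(p, my_tokens, args);
		switch (token) {
		case Opt_debug:
			*debug = 1;
			break;
		case Opt_uid:
			if (match_int(&args[0], &option))
				return -EINVAL;
			*uid = option;
			break;
		default:
			pr_err("unrecognized mount option \"%s\"\n", p);
			return -EINVAL;
		}
	}
	return 0;
}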
p                  54 fs/9p/vfs_dentry.c 	struct hlist_node *p, *n;
p                  57 fs/9p/vfs_dentry.c 	hlist_for_each_safe(p, n, (struct hlist_head *)&dentry->d_fsdata)
p                  58 fs/9p/vfs_dentry.c 		p9_client_clunk(hlist_entry(p, struct p9_fid, dlist));
p                  17 fs/adfs/dir_f.c static inline unsigned int adfs_readval(unsigned char *p, int len)
p                  22 fs/adfs/dir_f.c 	case 4:		val |= p[3] << 24;
p                  24 fs/adfs/dir_f.c 	case 3:		val |= p[2] << 16;
p                  26 fs/adfs/dir_f.c 	case 2:		val |= p[1] << 8;
p                  28 fs/adfs/dir_f.c 	default:	val |= p[0];
p                  33 fs/adfs/dir_f.c static inline void adfs_writeval(unsigned char *p, int len, unsigned int val)
p                  36 fs/adfs/dir_f.c 	case 4:		p[3] = val >> 24;
p                  38 fs/adfs/dir_f.c 	case 3:		p[2] = val >> 16;
p                  40 fs/adfs/dir_f.c 	case 2:		p[1] = val >> 8;
p                  42 fs/adfs/dir_f.c 	default:	p[0] = val;
p                 179 fs/adfs/super.c 	char *p;
p                 185 fs/adfs/super.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 188 fs/adfs/super.c 		if (!*p)
p                 191 fs/adfs/super.c 		token = match_token(p, tokens, args);
p                 225 fs/adfs/super.c 				 p);
p                  17 fs/affs/affs.h #define GET_END_PTR(st,p,sz)		 ((st *)((char *)(p)+((sz)-sizeof(st))))
p                 319 fs/affs/namei.c 	char			*p;
p                 342 fs/affs/namei.c 	p  = (char *)AFFS_HEAD(bh)->table;
p                 350 fs/affs/namei.c 			*p++ = sbi->s_volume[i++];
p                 355 fs/affs/namei.c 			*p++ = '/';
p                 363 fs/affs/namei.c 			*p++ = c;
p                 371 fs/affs/namei.c 	*p = 0;
p                 193 fs/affs/super.c 	char *p;
p                 209 fs/affs/super.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 211 fs/affs/super.c 		if (!*p)
p                 214 fs/affs/super.c 		token = match_token(p, tokens, args);
p                 288 fs/affs/super.c 				p);
p                  71 fs/afs/addr_list.c 	const char *p, *end = text + len;
p                  87 fs/afs/addr_list.c 	p = text;
p                  89 fs/afs/addr_list.c 		if (!*p) {
p                  93 fs/afs/addr_list.c 		if (*p == delim)
p                  96 fs/afs/addr_list.c 		if (*p == '[') {
p                  97 fs/afs/addr_list.c 			p++;
p                  98 fs/afs/addr_list.c 			if (p == end) {
p                 102 fs/afs/addr_list.c 			p = memchr(p, ']', end - p);
p                 103 fs/afs/addr_list.c 			if (!p) {
p                 107 fs/afs/addr_list.c 			p++;
p                 108 fs/afs/addr_list.c 			if (p >= end)
p                 112 fs/afs/addr_list.c 		p = memchr(p, delim, end - p);
p                 113 fs/afs/addr_list.c 		if (!p)
p                 115 fs/afs/addr_list.c 		p++;
p                 116 fs/afs/addr_list.c 	} while (p < end);
p                 134 fs/afs/addr_list.c 	p = text;
p                 141 fs/afs/addr_list.c 		if (*p == delim) {
p                 142 fs/afs/addr_list.c 			p++;
p                 146 fs/afs/addr_list.c 		if (*p == '[') {
p                 147 fs/afs/addr_list.c 			p++;
p                 148 fs/afs/addr_list.c 			q = memchr(p, ']', end - p);
p                 150 fs/afs/addr_list.c 			for (q = p; q < end; q++)
p                 155 fs/afs/addr_list.c 		if (in4_pton(p, q - p, (u8 *)&x[0], -1, &stop)) {
p                 157 fs/afs/addr_list.c 		} else if (in6_pton(p, q - p, (u8 *)x, -1, &stop)) {
p                 164 fs/afs/addr_list.c 		p = q;
p                 165 fs/afs/addr_list.c 		if (stop != p) {
p                 171 fs/afs/addr_list.c 			p++;
p                 173 fs/afs/addr_list.c 		if (p < end) {
p                 174 fs/afs/addr_list.c 			if (*p == '+') {
p                 177 fs/afs/addr_list.c 				p++;
p                 178 fs/afs/addr_list.c 				if (p >= end || !isdigit(*p)) {
p                 184 fs/afs/addr_list.c 					xport += *p - '0';
p                 189 fs/afs/addr_list.c 					p++;
p                 190 fs/afs/addr_list.c 				} while (p < end && isdigit(*p));
p                 191 fs/afs/addr_list.c 			} else if (*p == delim) {
p                 192 fs/afs/addr_list.c 				p++;
p                 204 fs/afs/addr_list.c 	} while (p < end);
p                 212 fs/afs/addr_list.c 	       problem, p - text, (int)len, (int)len, text);
p                 216 fs/afs/addr_list.c 	       problem, p - text, (int)len, (int)len, text);
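The addr_list.c walk above leans on in4_pton()/in6_pton() to convert one textual address at a time, using the end-pointer out-parameter to learn where the parse stopped so a trailing "+port" or delimiter can then be handled by hand. A small hedged sketch of that usage:

#include <linux/inet.h>
#include <linux/errno.h>
#include <net/ipv6.h>

/* Parse one "a.b.c.d" or IPv6 literal from p..end; returns bytes consumed or -EINVAL. */
static int my_parse_one_addr(const char *p, const char *end,
			     struct in6_addr *out, bool *is_v4)
{
	const char *stop;
	__be32 v4;

	if (in4_pton(p, end - p, (u8 *)&v4, -1, &stop)) {
		/* store as a v4-mapped address so callers handle one format */
		ipv6_addr_set_v4mapped(v4, out);
		*is_v4 = true;
	} else if (in6_pton(p, end - p, (u8 *)out, -1, &stop)) {
		*is_v4 = false;
	} else {
		return -EINVAL;
	}

	return stop - p;		/* caller resumes parsing at p + return value */
}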
p                  51 fs/afs/cell.c  	struct rb_node *p;
p                  84 fs/afs/cell.c  		p = rcu_dereference_raw(net->cells.rb_node);
p                  85 fs/afs/cell.c  		while (p) {
p                  86 fs/afs/cell.c  			cell = rb_entry(p, struct afs_cell, net_node);
p                  93 fs/afs/cell.c  				p = rcu_dereference_raw(p->rb_left);
p                  95 fs/afs/cell.c  				p = rcu_dereference_raw(p->rb_right);
p                 383 fs/afs/cell.c  	struct afs_vlserver_list *vllist, *old = NULL, *p;
p                 456 fs/afs/cell.c  	p = rcu_dereference_protected(cell->vl_servers, true);
p                 457 fs/afs/cell.c  	if (vllist->nr_servers > 0 || p->nr_servers == 0) {
p                 460 fs/afs/cell.c  		old = p;
p                 583 fs/afs/cell.c  	struct hlist_node **p;
p                 605 fs/afs/cell.c  	for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
p                 606 fs/afs/cell.c  		pcell = hlist_entry(*p, struct afs_cell, proc_link);
p                 611 fs/afs/cell.c  	cell->proc_link.pprev = p;
p                 612 fs/afs/cell.c  	cell->proc_link.next = *p;
p                 613 fs/afs/cell.c  	rcu_assign_pointer(*p, &cell->proc_link.next);
p                 857 fs/afs/dir.c   	char *buf, *p, *name;
p                 863 fs/afs/dir.c   	p = buf = kmalloc(AFSNAMEMAX, GFP_KERNEL);
p                 867 fs/afs/dir.c   		memcpy(p, dentry->d_name.name, dentry->d_name.len - 4);
p                 868 fs/afs/dir.c   		p += dentry->d_name.len - 4;
p                 885 fs/afs/dir.c   		strcpy(p, name);
p                  83 fs/afs/file.c  	struct afs_wb_key *wbk, *p;
p                  92 fs/afs/file.c  	list_for_each_entry(p, &vnode->wb_keys, vnode_link) {
p                  93 fs/afs/file.c  		if (p->key == wbk->key)
p                 104 fs/afs/file.c  	refcount_inc(&p->usage);
p                 106 fs/afs/file.c  	af->wb = p;
p                 445 fs/afs/file.c  	struct list_head *p;
p                 457 fs/afs/file.c  	for (p = first->lru.prev; p != pages; p = p->prev) {
p                 458 fs/afs/file.c  		page = list_entry(p, struct page, lru);
p                  91 fs/afs/flock.c 	struct file_lock *p, *_p;
p                  94 fs/afs/flock.c 	list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
p                  95 fs/afs/flock.c 		if (!exclusive && p->fl_type == F_WRLCK)
p                  98 fs/afs/flock.c 		list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks);
p                  99 fs/afs/flock.c 		p->fl_u.afs.state = AFS_LOCK_GRANTED;
p                 100 fs/afs/flock.c 		trace_afs_flock_op(vnode, p, afs_flock_op_grant);
p                 101 fs/afs/flock.c 		wake_up(&p->fl_wait);
p                 112 fs/afs/flock.c 	struct file_lock *p, *_p, *next = NULL;
p                 121 fs/afs/flock.c 	list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
p                 123 fs/afs/flock.c 		    p->fl_type == fl_type &&
p                 124 fs/afs/flock.c 		    afs_file_key(p->fl_file) == key) {
p                 125 fs/afs/flock.c 			list_del_init(&p->fl_u.afs.link);
p                 126 fs/afs/flock.c 			p->fl_u.afs.state = error;
p                 127 fs/afs/flock.c 			wake_up(&p->fl_wait);
p                 132 fs/afs/flock.c 		    (next->fl_type == F_WRLCK || p->fl_type == F_RDLCK))
p                 134 fs/afs/flock.c 		next = p;
p                 159 fs/afs/flock.c 	struct file_lock *p;
p                 164 fs/afs/flock.c 		p = list_entry(vnode->pending_locks.next,
p                 166 fs/afs/flock.c 		list_del_init(&p->fl_u.afs.link);
p                 167 fs/afs/flock.c 		p->fl_u.afs.state = -ENOENT;
p                 168 fs/afs/flock.c 		wake_up(&p->fl_wait);
p                1470 fs/afs/fsclient.c 	char *p;
p                1518 fs/afs/fsclient.c 		p = call->buffer;
p                1519 fs/afs/fsclient.c 		p[call->count] = 0;
p                1520 fs/afs/fsclient.c 		_debug("volname '%s'", p);
p                1548 fs/afs/fsclient.c 		p = call->buffer;
p                1549 fs/afs/fsclient.c 		p[call->count] = 0;
p                1550 fs/afs/fsclient.c 		_debug("offline '%s'", p);
p                1579 fs/afs/fsclient.c 		p = call->buffer;
p                1580 fs/afs/fsclient.c 		p[call->count] = 0;
p                1581 fs/afs/fsclient.c 		_debug("motd '%s'", p);
p                  78 fs/afs/mntpt.c 	const char *p;
p                 101 fs/afs/mntpt.c 		p = mntpt->d_name.name;
p                 104 fs/afs/mntpt.c 			p++;
p                 111 fs/afs/mntpt.c 		cell = afs_lookup_cell(ctx->net, p, size, NULL, false);
p                 486 fs/afs/proc.c  	char *s, *p, *sub;
p                 495 fs/afs/proc.c  	p = buf;
p                 496 fs/afs/proc.c  	while ((s = strsep(&p, " \t\n"))) {
p                 650 fs/afs/proc.c  	struct proc_dir_entry *p;
p                 654 fs/afs/proc.c  	p = proc_net_mkdir(net->net, "afs", net->net->proc_net);
p                 655 fs/afs/proc.c  	if (!p)
p                 658 fs/afs/proc.c  	if (!proc_create_net_data_write("cells", 0644, p,
p                 663 fs/afs/proc.c  	    !proc_create_net_single_write("rootcell", 0644, p,
p                 667 fs/afs/proc.c  	    !proc_create_net("servers", 0444, p, &afs_proc_servers_ops,
p                 669 fs/afs/proc.c  	    !proc_create_net_single("stats", 0444, p, afs_proc_stats_show, NULL) ||
p                 670 fs/afs/proc.c  	    !proc_create_net_data_write("sysname", 0644, p,
p                 677 fs/afs/proc.c  	net->proc_afs = p;
p                 682 fs/afs/proc.c  	proc_remove(p);
p                 100 fs/afs/server.c 	struct rb_node *p;
p                 116 fs/afs/server.c 		p = net->fs_servers.rb_node;
p                 117 fs/afs/server.c 		while (p) {
p                 118 fs/afs/server.c 			server = rb_entry(p, struct afs_server, uuid_rb);
p                 122 fs/afs/server.c 				p = p->rb_left;
p                 124 fs/afs/server.c 				p = p->rb_right;
p                 148 fs/afs/server.c 	struct rb_node **pp, *p;
p                 157 fs/afs/server.c 	p = NULL;
p                 159 fs/afs/server.c 		p = *pp;
p                 160 fs/afs/server.c 		_debug("- consider %p", p);
p                 161 fs/afs/server.c 		server = rb_entry(p, struct afs_server, uuid_rb);
p                 172 fs/afs/server.c 	rb_link_node(&server->uuid_rb, p, pp);
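cell.c above walks an rbtree on the read side (compare the key, go rb_left or rb_right), and server.c shows the matching insert: remember the parent link while walking down, then rb_link_node() followed by rb_insert_color(). A hedged sketch of both halves for a tree keyed by a plain integer:

#include <linux/rbtree.h>

struct my_node {
	struct rb_node rb;
	int key;
};

static struct my_node *my_tree_lookup(struct rb_root *root, int key)
{
	struct rb_node *p = root->rb_node;

	while (p) {
		struct my_node *n = rb_entry(p, struct my_node, rb);

		if (key < n->key)
			p = p->rb_left;
		else if (key > n->key)
			p = p->rb_right;
		else
			return n;
	}
	return NULL;
}

static void my_tree_insert(struct rb_root *root, struct my_node *new)
{
	struct rb_node **pp = &root->rb_node, *parent = NULL;

	while (*pp) {
		struct my_node *n = rb_entry(*pp, struct my_node, rb);

		parent = *pp;
		if (new->key < n->key)
			pp = &(*pp)->rb_left;
		else
			pp = &(*pp)->rb_right;
	}
	rb_link_node(&new->rb, parent, pp);	/* link as a leaf... */
	rb_insert_color(&new->rb, root);	/* ...then rebalance and recolour */
}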
p                 198 fs/afs/super.c 	const char *p = NULL;
p                 206 fs/afs/super.c 	case afs_flock_mode_local:	p = "local";	break;
p                 207 fs/afs/super.c 	case afs_flock_mode_openafs:	p = "openafs";	break;
p                 208 fs/afs/super.c 	case afs_flock_mode_strict:	p = "strict";	break;
p                 209 fs/afs/super.c 	case afs_flock_mode_write:	p = "write";	break;
p                 211 fs/afs/super.c 	if (p)
p                 212 fs/afs/super.c 		seq_printf(m, ",flock=%s", p);
p                 234 fs/afs/vl_list.c 			struct afs_vlserver *p = previous->servers[i].server;
p                 236 fs/afs/vl_list.c 			if (p->name_len == bs.name_len &&
p                 237 fs/afs/vl_list.c 			    p->port == bs.port &&
p                 238 fs/afs/vl_list.c 			    strncasecmp(b, p->name, bs.name_len) == 0) {
p                 239 fs/afs/vl_list.c 				server = afs_get_vlserver(p);
p                  32 fs/afs/write.c 	size_t p;
p                  39 fs/afs/write.c 		p = pos & ~PAGE_MASK;
p                  40 fs/afs/write.c 		ASSERTCMP(p + len, <=, PAGE_SIZE);
p                  42 fs/afs/write.c 		memset(data + p, 0, len);
p                 362 fs/afs/write.c 	struct list_head *p;
p                 377 fs/afs/write.c 	p = vnode->wb_keys.next;
p                 381 fs/afs/write.c 	while (p != &vnode->wb_keys) {
p                 382 fs/afs/write.c 		wbk = list_entry(p, struct afs_wb_key, vnode_link);
p                 389 fs/afs/write.c 		p = p->next;
p                 436 fs/afs/write.c 		p = wbk->vnode_link.next;
p                  68 fs/afs/yfsclient.c static __be32 *xdr_encode_string(__be32 *bp, const char *p, unsigned int len)
p                  71 fs/afs/yfsclient.c 	bp = memcpy(bp, p, len);
p                1427 fs/afs/yfsclient.c 	char *p;
p                1475 fs/afs/yfsclient.c 		p = call->buffer;
p                1476 fs/afs/yfsclient.c 		p[call->count] = 0;
p                1477 fs/afs/yfsclient.c 		_debug("volname '%s'", p);
p                1505 fs/afs/yfsclient.c 		p = call->buffer;
p                1506 fs/afs/yfsclient.c 		p[call->count] = 0;
p                1507 fs/afs/yfsclient.c 		_debug("offline '%s'", p);
p                1536 fs/afs/yfsclient.c 		p = call->buffer;
p                1537 fs/afs/yfsclient.c 		p[call->count] = 0;
p                1538 fs/afs/yfsclient.c 		_debug("motd '%s'", p);
p                1726 fs/aio.c       		struct poll_table_struct *p)
p                1728 fs/aio.c       	struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
p                 209 fs/autofs/dev-ioctl.c static int test_by_dev(const struct path *path, void *p)
p                 211 fs/autofs/dev-ioctl.c 	return path->dentry->d_sb->s_dev == *(dev_t *)p;
p                 214 fs/autofs/dev-ioctl.c static int test_by_type(const struct path *path, void *p)
p                 218 fs/autofs/dev-ioctl.c 	return ino && ino->sbi->type & *(unsigned *)p;
p                  74 fs/autofs/expire.c static struct dentry *positive_after(struct dentry *p, struct dentry *child)
p                  79 fs/autofs/expire.c 		child = list_first_entry(&p->d_subdirs, struct dentry, d_child);
p                  81 fs/autofs/expire.c 	list_for_each_entry_from(child, &p->d_subdirs, d_child) {
p                 119 fs/autofs/expire.c 	struct dentry *p = prev, *ret = NULL, *d = NULL;
p                 125 fs/autofs/expire.c 	spin_lock(&p->d_lock);
p                 129 fs/autofs/expire.c 		ret = positive_after(p, d);
p                 130 fs/autofs/expire.c 		if (ret || p == root)
p                 132 fs/autofs/expire.c 		parent = p->d_parent;
p                 133 fs/autofs/expire.c 		spin_unlock(&p->d_lock);
p                 135 fs/autofs/expire.c 		d = p;
p                 136 fs/autofs/expire.c 		p = parent;
p                 138 fs/autofs/expire.c 	spin_unlock(&p->d_lock);
p                 188 fs/autofs/expire.c 	struct dentry *p;
p                 196 fs/autofs/expire.c 	p = NULL;
p                 197 fs/autofs/expire.c 	while ((p = get_next_positive_dentry(p, top))) {
p                 198 fs/autofs/expire.c 		pr_debug("dentry %p %pd\n", p, p);
p                 206 fs/autofs/expire.c 		if (d_mountpoint(p)) {
p                 207 fs/autofs/expire.c 			if (autofs_mount_busy(mnt, p, how)) {
p                 209 fs/autofs/expire.c 				dput(p);
p                 213 fs/autofs/expire.c 			struct autofs_info *ino = autofs_dentry_ino(p);
p                 217 fs/autofs/expire.c 			if (p == top)
p                 222 fs/autofs/expire.c 			if (d_count(p) > ino_count) {
p                 224 fs/autofs/expire.c 				dput(p);
p                 246 fs/autofs/expire.c 	struct dentry *p;
p                 250 fs/autofs/expire.c 	p = NULL;
p                 251 fs/autofs/expire.c 	while ((p = get_next_positive_dentry(p, parent))) {
p                 252 fs/autofs/expire.c 		pr_debug("dentry %p %pd\n", p, p);
p                 254 fs/autofs/expire.c 		if (d_mountpoint(p)) {
p                 256 fs/autofs/expire.c 			if (autofs_mount_busy(mnt, p, how))
p                 263 fs/autofs/expire.c 				return p;
p                 266 fs/autofs/expire.c 			if (autofs_can_expire(p, timeout, how))
p                 267 fs/autofs/expire.c 				return p;
p                 135 fs/autofs/inode.c 	char *p;
p                 153 fs/autofs/inode.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 156 fs/autofs/inode.c 		if (!*p)
p                 159 fs/autofs/inode.c 		token = match_token(p, tokens, args);
p                 134 fs/autofs/root.c 	struct list_head *p, *head;
p                 140 fs/autofs/root.c 	list_for_each(p, head) {
p                 145 fs/autofs/root.c 		ino = list_entry(p, struct autofs_info, active);
p                 189 fs/autofs/root.c 	struct list_head *p, *head;
p                 195 fs/autofs/root.c 	list_for_each(p, head) {
p                 205 fs/autofs/root.c 		ino = list_entry(p, struct autofs_info, expiring);
p                 776 fs/autofs/root.c 						 compat_ulong_t __user *p)
p                 781 fs/autofs/root.c 	rv = get_user(ntimeout, p);
p                 785 fs/autofs/root.c 	rv = put_user(sbi->exp_timeout/HZ, p);
p                 801 fs/autofs/root.c 					  unsigned long __user *p)
p                 806 fs/autofs/root.c 	rv = get_user(ntimeout, p);
p                 810 fs/autofs/root.c 	rv = put_user(sbi->exp_timeout/HZ, p);
p                 826 fs/autofs/root.c 				       int __user *p)
p                 828 fs/autofs/root.c 	return put_user(sbi->version, p);
p                 833 fs/autofs/root.c 					  int __user *p)
p                 835 fs/autofs/root.c 	return put_user(sbi->sub_version, p);
p                 841 fs/autofs/root.c static inline int autofs_ask_umount(struct vfsmount *mnt, int __user *p)
p                 850 fs/autofs/root.c 	status = put_user(status, p);
p                 874 fs/autofs/root.c 	void __user *p = (void __user *)arg;
p                 895 fs/autofs/root.c 		return autofs_get_protover(sbi, p);
p                 897 fs/autofs/root.c 		return autofs_get_protosubver(sbi, p);
p                 899 fs/autofs/root.c 		return autofs_get_set_timeout(sbi, p);
p                 902 fs/autofs/root.c 		return autofs_compat_get_set_timeout(sbi, p);
p                 906 fs/autofs/root.c 		return autofs_ask_umount(filp->f_path.mnt, p);
p                 910 fs/autofs/root.c 		return autofs_expire_run(inode->i_sb, filp->f_path.mnt, sbi, p);
p                 914 fs/autofs/root.c 					   filp->f_path.mnt, sbi, p);
p                 184 fs/autofs/waitq.c 	char *p;
p                 207 fs/autofs/waitq.c 	p = buf + len - dentry->d_name.len;
p                 208 fs/autofs/waitq.c 	strncpy(p, dentry->d_name.name, dentry->d_name.len);
p                 211 fs/autofs/waitq.c 		*(--p) = '/';
p                 212 fs/autofs/waitq.c 		p -= tmp->d_name.len;
p                 213 fs/autofs/waitq.c 		strncpy(p, tmp->d_name.name, tmp->d_name.len);
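The waitq.c lines build the dentry's path right to left: the leaf name goes at the end of the buffer, then each ancestor's name and a '/' are prepended while walking up. A hedged sketch of the same right-to-left fill over a simple parent-linked structure; the caller is assumed to have sized the buffer beforehand, as the autofs code does by summing the name lengths first:

#include <linux/string.h>

struct my_name {
	const struct my_name *parent;	/* NULL at the root */
	const char *name;
	unsigned int len;
};

/* Fill buf[0..buflen) right-to-left with "/a/b/leaf"; returns the start of the string. */
static char *my_build_path(const struct my_name *leaf, char *buf, unsigned int buflen)
{
	char *p = buf + buflen;
	const struct my_name *n;

	*--p = '\0';
	for (n = leaf; n && n->parent; n = n->parent) {
		p -= n->len;
		memcpy(p, n->name, n->len);	/* the component itself... */
		*--p = '/';			/* ...then the separator in front of it */
	}
	return p;
}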
p                 694 fs/befs/linuxvfs.c 	char *p;
p                 711 fs/befs/linuxvfs.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 714 fs/befs/linuxvfs.c 		if (!*p)
p                 717 fs/befs/linuxvfs.c 		token = match_token(p, befs_tokens, args);
p                 761 fs/befs/linuxvfs.c 			       "or missing value\n", p);
p                  99 fs/bfs/inode.c static struct bfs_inode *find_inode(struct super_block *sb, u16 ino, struct buffer_head **p)
p                 108 fs/bfs/inode.c 	*p = sb_bread(sb, 1 + ino / BFS_INODES_PER_BLOCK);
p                 109 fs/bfs/inode.c 	if (!*p) {
p                 114 fs/bfs/inode.c 	return (struct bfs_inode *)(*p)->b_data +  ino % BFS_INODES_PER_BLOCK;
p                  59 fs/binfmt_aout.c static unsigned long __user *create_aout_tables(char __user *p, struct linux_binprm * bprm)
p                  67 fs/binfmt_aout.c 	sp = (void __user *)((-(unsigned long)sizeof(char *)) & (unsigned long) p);
p                  90 fs/binfmt_aout.c 	current->mm->arg_start = (unsigned long) p;
p                  93 fs/binfmt_aout.c 		put_user(p,argv++);
p                  95 fs/binfmt_aout.c 			get_user(c,p++);
p                  99 fs/binfmt_aout.c 	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
p                 102 fs/binfmt_aout.c 		put_user(p,envp++);
p                 104 fs/binfmt_aout.c 			get_user(c,p++);
p                 108 fs/binfmt_aout.c 	current->mm->env_end = (unsigned long) p;
p                 247 fs/binfmt_aout.c 		(unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
p                 167 fs/binfmt_elf.c 	unsigned long p = bprm->p;
p                 189 fs/binfmt_elf.c 	p = arch_align_stack(p);
p                 201 fs/binfmt_elf.c 		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
p                 214 fs/binfmt_elf.c 		u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
p                 224 fs/binfmt_elf.c 		       STACK_ALLOC(p, sizeof(k_rand_bytes));
p                 284 fs/binfmt_elf.c 	sp = STACK_ADD(p, ei_index);
p                 287 fs/binfmt_elf.c 	bprm->p = STACK_ROUND(sp, items);
p                 291 fs/binfmt_elf.c 	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
p                 294 fs/binfmt_elf.c 	sp = (elf_addr_t __user *)bprm->p;
p                 302 fs/binfmt_elf.c 	vma = find_extend_vma(current->mm, bprm->p);
p                 311 fs/binfmt_elf.c 	p = current->mm->arg_end = current->mm->arg_start;
p                 314 fs/binfmt_elf.c 		if (__put_user((elf_addr_t)p, sp++))
p                 316 fs/binfmt_elf.c 		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
p                 319 fs/binfmt_elf.c 		p += len;
p                 323 fs/binfmt_elf.c 	current->mm->arg_end = p;
p                 326 fs/binfmt_elf.c 	current->mm->env_end = current->mm->env_start = p;
p                 329 fs/binfmt_elf.c 		if (__put_user((elf_addr_t)p, sp++))
p                 331 fs/binfmt_elf.c 		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
p                 334 fs/binfmt_elf.c 		p += len;
p                 338 fs/binfmt_elf.c 	current->mm->env_end = p;
p                1107 fs/binfmt_elf.c 	current->mm->start_stack = bprm->p;
p                1154 fs/binfmt_elf.c 	start_thread(regs, elf_entry, bprm->p);
p                1473 fs/binfmt_elf.c 		struct task_struct *p, long signr)
p                1476 fs/binfmt_elf.c 	prstatus->pr_sigpend = p->pending.signal.sig[0];
p                1477 fs/binfmt_elf.c 	prstatus->pr_sighold = p->blocked.sig[0];
p                1479 fs/binfmt_elf.c 	prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
p                1481 fs/binfmt_elf.c 	prstatus->pr_pid = task_pid_vnr(p);
p                1482 fs/binfmt_elf.c 	prstatus->pr_pgrp = task_pgrp_vnr(p);
p                1483 fs/binfmt_elf.c 	prstatus->pr_sid = task_session_vnr(p);
p                1484 fs/binfmt_elf.c 	if (thread_group_leader(p)) {
p                1491 fs/binfmt_elf.c 		thread_group_cputime(p, &cputime);
p                1497 fs/binfmt_elf.c 		task_cputime(p, &utime, &stime);
p                1502 fs/binfmt_elf.c 	prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
p                1503 fs/binfmt_elf.c 	prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);
p                1506 fs/binfmt_elf.c static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
p                1527 fs/binfmt_elf.c 	psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
p                1529 fs/binfmt_elf.c 	psinfo->pr_pid = task_pid_vnr(p);
p                1530 fs/binfmt_elf.c 	psinfo->pr_pgrp = task_pgrp_vnr(p);
p                1531 fs/binfmt_elf.c 	psinfo->pr_sid = task_session_vnr(p);
p                1533 fs/binfmt_elf.c 	i = p->state ? ffz(~p->state) + 1 : 0;
p                1537 fs/binfmt_elf.c 	psinfo->pr_nice = task_nice(p);
p                1538 fs/binfmt_elf.c 	psinfo->pr_flag = p->flags;
p                1540 fs/binfmt_elf.c 	cred = __task_cred(p);
p                1544 fs/binfmt_elf.c 	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
p                1938 fs/binfmt_elf.c 	struct task_struct *p = t->thread;
p                1941 fs/binfmt_elf.c 	fill_prstatus(&t->prstatus, p, signr);
p                1942 fs/binfmt_elf.c 	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);	
p                1949 fs/binfmt_elf.c 	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
p                1958 fs/binfmt_elf.c 	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
p                 507 fs/binfmt_elf_fdpic.c 	char __user *u_platform, *u_base_platform, *p;
p                 517 fs/binfmt_elf_fdpic.c 	sp = arch_align_stack(bprm->p);
p                 684 fs/binfmt_elf_fdpic.c 	current->mm->arg_start = bprm->p;
p                 687 fs/binfmt_elf_fdpic.c 		(MAX_ARG_PAGES * PAGE_SIZE - bprm->p);
p                 690 fs/binfmt_elf_fdpic.c 	p = (char __user *) current->mm->arg_start;
p                 692 fs/binfmt_elf_fdpic.c 		__put_user((elf_caddr_t) p, argv++);
p                 693 fs/binfmt_elf_fdpic.c 		len = strnlen_user(p, MAX_ARG_STRLEN);
p                 696 fs/binfmt_elf_fdpic.c 		p += len;
p                 699 fs/binfmt_elf_fdpic.c 	current->mm->arg_end = (unsigned long) p;
p                 702 fs/binfmt_elf_fdpic.c 	current->mm->env_start = (unsigned long) p;
p                 704 fs/binfmt_elf_fdpic.c 		__put_user((elf_caddr_t)(unsigned long) p, envp++);
p                 705 fs/binfmt_elf_fdpic.c 		len = strnlen_user(p, MAX_ARG_STRLEN);
p                 708 fs/binfmt_elf_fdpic.c 		p += len;
p                 711 fs/binfmt_elf_fdpic.c 	current->mm->env_end = (unsigned long) p;
p                1343 fs/binfmt_elf_fdpic.c 			  struct task_struct *p, long signr)
p                1346 fs/binfmt_elf_fdpic.c 	prstatus->pr_sigpend = p->pending.signal.sig[0];
p                1347 fs/binfmt_elf_fdpic.c 	prstatus->pr_sighold = p->blocked.sig[0];
p                1349 fs/binfmt_elf_fdpic.c 	prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
p                1351 fs/binfmt_elf_fdpic.c 	prstatus->pr_pid = task_pid_vnr(p);
p                1352 fs/binfmt_elf_fdpic.c 	prstatus->pr_pgrp = task_pgrp_vnr(p);
p                1353 fs/binfmt_elf_fdpic.c 	prstatus->pr_sid = task_session_vnr(p);
p                1354 fs/binfmt_elf_fdpic.c 	if (thread_group_leader(p)) {
p                1361 fs/binfmt_elf_fdpic.c 		thread_group_cputime(p, &cputime);
p                1367 fs/binfmt_elf_fdpic.c 		task_cputime(p, &utime, &stime);
p                1371 fs/binfmt_elf_fdpic.c 	prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
p                1372 fs/binfmt_elf_fdpic.c 	prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);
p                1374 fs/binfmt_elf_fdpic.c 	prstatus->pr_exec_fdpic_loadmap = p->mm->context.exec_fdpic_loadmap;
p                1375 fs/binfmt_elf_fdpic.c 	prstatus->pr_interp_fdpic_loadmap = p->mm->context.interp_fdpic_loadmap;
p                1378 fs/binfmt_elf_fdpic.c static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
p                1399 fs/binfmt_elf_fdpic.c 	psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
p                1401 fs/binfmt_elf_fdpic.c 	psinfo->pr_pid = task_pid_vnr(p);
p                1402 fs/binfmt_elf_fdpic.c 	psinfo->pr_pgrp = task_pgrp_vnr(p);
p                1403 fs/binfmt_elf_fdpic.c 	psinfo->pr_sid = task_session_vnr(p);
p                1405 fs/binfmt_elf_fdpic.c 	i = p->state ? ffz(~p->state) + 1 : 0;
p                1409 fs/binfmt_elf_fdpic.c 	psinfo->pr_nice = task_nice(p);
p                1410 fs/binfmt_elf_fdpic.c 	psinfo->pr_flag = p->flags;
p                1412 fs/binfmt_elf_fdpic.c 	cred = __task_cred(p);
p                1416 fs/binfmt_elf_fdpic.c 	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
p                1442 fs/binfmt_elf_fdpic.c 	struct task_struct *p = t->thread;
p                1447 fs/binfmt_elf_fdpic.c 	fill_prstatus(&t->prstatus, p, signr);
p                1448 fs/binfmt_elf_fdpic.c 	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
p                1455 fs/binfmt_elf_fdpic.c 	t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu);
p                1464 fs/binfmt_elf_fdpic.c 	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
p                  90 fs/binfmt_flat.c static int load_flat_shared_library(int id, struct lib_info *p);
p                 125 fs/binfmt_flat.c 	char __user *p;
p                 129 fs/binfmt_flat.c 	p = (char __user *)arg_start;
p                 150 fs/binfmt_flat.c 	current->mm->arg_start = (unsigned long)p;
p                 152 fs/binfmt_flat.c 		__put_user((unsigned long)p, sp++);
p                 153 fs/binfmt_flat.c 		len = strnlen_user(p, MAX_ARG_STRLEN);
p                 156 fs/binfmt_flat.c 		p += len;
p                 159 fs/binfmt_flat.c 	current->mm->arg_end = (unsigned long)p;
p                 161 fs/binfmt_flat.c 	current->mm->env_start = (unsigned long) p;
p                 163 fs/binfmt_flat.c 		__put_user((unsigned long)p, sp++);
p                 164 fs/binfmt_flat.c 		len = strnlen_user(p, MAX_ARG_STRLEN);
p                 167 fs/binfmt_flat.c 		p += len;
p                 170 fs/binfmt_flat.c 	current->mm->env_end = (unsigned long)p;
p                 314 fs/binfmt_flat.c calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp)
p                 339 fs/binfmt_flat.c 		} else if (!p->lib_list[id].loaded &&
p                 340 fs/binfmt_flat.c 			   load_flat_shared_library(id, p) < 0) {
p                 345 fs/binfmt_flat.c 		if (p->lib_list[id].build_date && p->lib_list[curid].build_date &&
p                 346 fs/binfmt_flat.c 				p->lib_list[curid].build_date < p->lib_list[id].build_date) {
p                 355 fs/binfmt_flat.c 	start_brk = p->lib_list[id].start_brk;
p                 356 fs/binfmt_flat.c 	start_data = p->lib_list[id].start_data;
p                 357 fs/binfmt_flat.c 	start_code = p->lib_list[id].start_code;
p                 358 fs/binfmt_flat.c 	text_len = p->lib_list[id].text_len;
p                 941 fs/binfmt_flat.c 	stack_len += PAGE_SIZE * MAX_ARG_PAGES - bprm->p; /* the strings */
p                 958 fs/binfmt_flat.c 			unsigned long __user *p = (unsigned long __user *)
p                 960 fs/binfmt_flat.c 			p -= j + 1;
p                 961 fs/binfmt_flat.c 			if (put_user(val, p))
p                 973 fs/binfmt_flat.c 		res = create_flat_tables(bprm, bprm->p);
p                  92 fs/binfmt_misc.c 	char *p = strrchr(bprm->interp, '.');
p                 107 fs/binfmt_misc.c 			if (p && !strcmp(e->magic, p + 1))
p                 279 fs/binfmt_misc.c 	char *p = sfs;
p                 284 fs/binfmt_misc.c 		switch (*p) {
p                 287 fs/binfmt_misc.c 			p++;
p                 292 fs/binfmt_misc.c 			p++;
p                 297 fs/binfmt_misc.c 			p++;
p                 305 fs/binfmt_misc.c 			p++;
p                 313 fs/binfmt_misc.c 	return p;
p                 325 fs/binfmt_misc.c 	char *buf, *p;
p                 341 fs/binfmt_misc.c 	p = buf = (char *)e + sizeof(Node);
p                 347 fs/binfmt_misc.c 	del = *p++;	/* delimiter */
p                 355 fs/binfmt_misc.c 	e->name = p;
p                 356 fs/binfmt_misc.c 	p = strchr(p, del);
p                 357 fs/binfmt_misc.c 	if (!p)
p                 359 fs/binfmt_misc.c 	*p++ = '\0';
p                 369 fs/binfmt_misc.c 	switch (*p++) {
p                 381 fs/binfmt_misc.c 	if (*p++ != del)
p                 389 fs/binfmt_misc.c 		s = strchr(p, del);
p                 393 fs/binfmt_misc.c 		if (p != s) {
p                 394 fs/binfmt_misc.c 			int r = kstrtoint(p, 10, &e->offset);
p                 398 fs/binfmt_misc.c 		p = s;
p                 399 fs/binfmt_misc.c 		if (*p++)
p                 404 fs/binfmt_misc.c 		e->magic = p;
p                 405 fs/binfmt_misc.c 		p = scanarg(p, del);
p                 406 fs/binfmt_misc.c 		if (!p)
p                 413 fs/binfmt_misc.c 				DUMP_PREFIX_NONE, e->magic, p - e->magic);
p                 416 fs/binfmt_misc.c 		e->mask = p;
p                 417 fs/binfmt_misc.c 		p = scanarg(p, del);
p                 418 fs/binfmt_misc.c 		if (!p)
p                 426 fs/binfmt_misc.c 				DUMP_PREFIX_NONE, e->mask, p - e->mask);
p                 470 fs/binfmt_misc.c 		p = strchr(p, del);
p                 471 fs/binfmt_misc.c 		if (!p)
p                 473 fs/binfmt_misc.c 		*p++ = '\0';
p                 476 fs/binfmt_misc.c 		e->magic = p;
p                 477 fs/binfmt_misc.c 		p = strchr(p, del);
p                 478 fs/binfmt_misc.c 		if (!p)
p                 480 fs/binfmt_misc.c 		*p++ = '\0';
p                 486 fs/binfmt_misc.c 		p = strchr(p, del);
p                 487 fs/binfmt_misc.c 		if (!p)
p                 489 fs/binfmt_misc.c 		*p++ = '\0';
p                 493 fs/binfmt_misc.c 	e->interpreter = p;
p                 494 fs/binfmt_misc.c 	p = strchr(p, del);
p                 495 fs/binfmt_misc.c 	if (!p)
p                 497 fs/binfmt_misc.c 	*p++ = '\0';
p                 503 fs/binfmt_misc.c 	p = check_special_flags(p, e);
p                 504 fs/binfmt_misc.c 	if (*p == '\n')
p                 505 fs/binfmt_misc.c 		p++;
p                 506 fs/binfmt_misc.c 	if (p != buf + count)
p                 229 fs/btrfs/backref.c 	struct rb_node **p;
p                 236 fs/btrfs/backref.c 	p = &root->rb_root.rb_node;
p                 238 fs/btrfs/backref.c 	while (*p) {
p                 239 fs/btrfs/backref.c 		parent = *p;
p                 243 fs/btrfs/backref.c 			p = &(*p)->rb_left;
p                 245 fs/btrfs/backref.c 			p = &(*p)->rb_right;
p                 276 fs/btrfs/backref.c 	rb_link_node(&newref->rbnode, parent, p);
p                 154 fs/btrfs/block-group.c 	struct rb_node **p;
p                 159 fs/btrfs/block-group.c 	p = &info->block_group_cache_tree.rb_node;
p                 161 fs/btrfs/block-group.c 	while (*p) {
p                 162 fs/btrfs/block-group.c 		parent = *p;
p                 166 fs/btrfs/block-group.c 			p = &(*p)->rb_left;
p                 168 fs/btrfs/block-group.c 			p = &(*p)->rb_right;
p                 175 fs/btrfs/block-group.c 	rb_link_node(&block_group->cache_node, parent, p);
p                2934 fs/btrfs/check-integrity.c 		const char *p;
p                2949 fs/btrfs/check-integrity.c 		p = kbasename(ds->name);
p                2950 fs/btrfs/check-integrity.c 		strlcpy(ds->name, p, sizeof(ds->name));
p                1251 fs/btrfs/compression.c 	u32 p, p_base, sz_base;
p                1256 fs/btrfs/compression.c 		p = ws->bucket[i].count;
p                1257 fs/btrfs/compression.c 		p_base = ilog2_w(p);
p                1258 fs/btrfs/compression.c 		entropy_sum += p * (sz_base - p_base);
p                  63 fs/btrfs/ctree.c noinline void btrfs_set_path_blocking(struct btrfs_path *p)
p                  67 fs/btrfs/ctree.c 		if (!p->nodes[i] || !p->locks[i])
p                  74 fs/btrfs/ctree.c 		if (p->locks[i] == BTRFS_READ_LOCK) {
p                  75 fs/btrfs/ctree.c 			btrfs_set_lock_blocking_read(p->nodes[i]);
p                  76 fs/btrfs/ctree.c 			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
p                  77 fs/btrfs/ctree.c 		} else if (p->locks[i] == BTRFS_WRITE_LOCK) {
p                  78 fs/btrfs/ctree.c 			btrfs_set_lock_blocking_write(p->nodes[i]);
p                  79 fs/btrfs/ctree.c 			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
p                  85 fs/btrfs/ctree.c void btrfs_free_path(struct btrfs_path *p)
p                  87 fs/btrfs/ctree.c 	if (!p)
p                  89 fs/btrfs/ctree.c 	btrfs_release_path(p);
p                  90 fs/btrfs/ctree.c 	kmem_cache_free(btrfs_path_cachep, p);
p                  99 fs/btrfs/ctree.c noinline void btrfs_release_path(struct btrfs_path *p)
p                 104 fs/btrfs/ctree.c 		p->slots[i] = 0;
p                 105 fs/btrfs/ctree.c 		if (!p->nodes[i])
p                 107 fs/btrfs/ctree.c 		if (p->locks[i]) {
p                 108 fs/btrfs/ctree.c 			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
p                 109 fs/btrfs/ctree.c 			p->locks[i] = 0;
p                 111 fs/btrfs/ctree.c 		free_extent_buffer(p->nodes[i]);
p                 112 fs/btrfs/ctree.c 		p->nodes[i] = NULL;
p                1707 fs/btrfs/ctree.c 				       unsigned long p, int item_size,
p                1733 fs/btrfs/ctree.c 		offset = p + mid * item_size;
p                2412 fs/btrfs/ctree.c read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
p                2454 fs/btrfs/ctree.c 		btrfs_set_path_blocking(p);
p                2463 fs/btrfs/ctree.c 		btrfs_release_path(p);
p                2474 fs/btrfs/ctree.c 	btrfs_unlock_up_safe(p, level + 1);
p                2475 fs/btrfs/ctree.c 	btrfs_set_path_blocking(p);
p                2477 fs/btrfs/ctree.c 	if (p->reada != READA_NONE)
p                2478 fs/btrfs/ctree.c 		reada_for_search(fs_info, p, level, slot, key->objectid);
p                2497 fs/btrfs/ctree.c 	btrfs_release_path(p);
p                2512 fs/btrfs/ctree.c 		       struct btrfs_root *root, struct btrfs_path *p,
p                2519 fs/btrfs/ctree.c 	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
p                2525 fs/btrfs/ctree.c 			btrfs_release_path(p);
p                2529 fs/btrfs/ctree.c 		btrfs_set_path_blocking(p);
p                2530 fs/btrfs/ctree.c 		reada_for_balance(fs_info, p, level);
p                2531 fs/btrfs/ctree.c 		sret = split_node(trans, root, p, level);
p                2538 fs/btrfs/ctree.c 		b = p->nodes[level];
p                2545 fs/btrfs/ctree.c 			btrfs_release_path(p);
p                2549 fs/btrfs/ctree.c 		btrfs_set_path_blocking(p);
p                2550 fs/btrfs/ctree.c 		reada_for_balance(fs_info, p, level);
p                2551 fs/btrfs/ctree.c 		sret = balance_level(trans, root, p, level);
p                2557 fs/btrfs/ctree.c 		b = p->nodes[level];
p                2559 fs/btrfs/ctree.c 			btrfs_release_path(p);
p                2621 fs/btrfs/ctree.c 							struct btrfs_path *p,
p                2632 fs/btrfs/ctree.c 	if (p->search_commit_root) {
p                2642 fs/btrfs/ctree.c 		if (p->need_commit_sem) {
p                2658 fs/btrfs/ctree.c 		ASSERT(p->skip_locking == 1);
p                2663 fs/btrfs/ctree.c 	if (p->skip_locking) {
p                2695 fs/btrfs/ctree.c 	p->nodes[level] = b;
p                2696 fs/btrfs/ctree.c 	if (!p->skip_locking)
p                2697 fs/btrfs/ctree.c 		p->locks[level] = root_lock;
p                2731 fs/btrfs/ctree.c 		      const struct btrfs_key *key, struct btrfs_path *p,
p                2746 fs/btrfs/ctree.c 	lowest_level = p->lowest_level;
p                2748 fs/btrfs/ctree.c 	WARN_ON(p->nodes[0] != NULL);
p                2770 fs/btrfs/ctree.c 	if (cow && (p->keep_locks || p->lowest_level))
p                2777 fs/btrfs/ctree.c 	b = btrfs_search_slot_get_root(root, p, write_lock_level);
p                2810 fs/btrfs/ctree.c 			    p->nodes[level + 1])) {
p                2812 fs/btrfs/ctree.c 				btrfs_release_path(p);
p                2816 fs/btrfs/ctree.c 			btrfs_set_path_blocking(p);
p                2822 fs/btrfs/ctree.c 						      p->nodes[level + 1],
p                2823 fs/btrfs/ctree.c 						      p->slots[level + 1], &b);
p                2830 fs/btrfs/ctree.c 		p->nodes[level] = b;
p                2847 fs/btrfs/ctree.c 		if (!ins_len && !p->keep_locks) {
p                2850 fs/btrfs/ctree.c 			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
p                2851 fs/btrfs/ctree.c 				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
p                2852 fs/btrfs/ctree.c 				p->locks[u] = 0;
p                2866 fs/btrfs/ctree.c 			p->slots[level] = slot;
p                2867 fs/btrfs/ctree.c 			err = setup_nodes_for_search(trans, root, p, b, level,
p                2875 fs/btrfs/ctree.c 			b = p->nodes[level];
p                2876 fs/btrfs/ctree.c 			slot = p->slots[level];
p                2887 fs/btrfs/ctree.c 				btrfs_release_path(p);
p                2891 fs/btrfs/ctree.c 			unlock_up(p, level, lowest_unlock,
p                2896 fs/btrfs/ctree.c 					p->slots[level]++;
p                2900 fs/btrfs/ctree.c 			err = read_block_for_search(root, p, &b, level,
p                2909 fs/btrfs/ctree.c 			if (!p->skip_locking) {
p                2913 fs/btrfs/ctree.c 						btrfs_set_path_blocking(p);
p                2916 fs/btrfs/ctree.c 					p->locks[level] = BTRFS_WRITE_LOCK;
p                2919 fs/btrfs/ctree.c 						btrfs_set_path_blocking(p);
p                2922 fs/btrfs/ctree.c 					p->locks[level] = BTRFS_READ_LOCK;
p                2924 fs/btrfs/ctree.c 				p->nodes[level] = b;
p                2927 fs/btrfs/ctree.c 			p->slots[level] = slot;
p                2932 fs/btrfs/ctree.c 					btrfs_release_path(p);
p                2936 fs/btrfs/ctree.c 				btrfs_set_path_blocking(p);
p                2938 fs/btrfs/ctree.c 						 p, ins_len, ret == 0);
p                2946 fs/btrfs/ctree.c 			if (!p->search_for_split)
p                2947 fs/btrfs/ctree.c 				unlock_up(p, level, lowest_unlock,
p                2958 fs/btrfs/ctree.c 	if (!p->leave_spinning)
p                2959 fs/btrfs/ctree.c 		btrfs_set_path_blocking(p);
p                2960 fs/btrfs/ctree.c 	if (ret < 0 && !p->skip_release_on_error)
p                2961 fs/btrfs/ctree.c 		btrfs_release_path(p);
p                2977 fs/btrfs/ctree.c 			  struct btrfs_path *p, u64 time_seq)
p                2989 fs/btrfs/ctree.c 	lowest_level = p->lowest_level;
p                2990 fs/btrfs/ctree.c 	WARN_ON(p->nodes[0] != NULL);
p                2992 fs/btrfs/ctree.c 	if (p->search_commit_root) {
p                2994 fs/btrfs/ctree.c 		return btrfs_search_slot(NULL, root, key, p, 0, 0);
p                3004 fs/btrfs/ctree.c 	p->locks[level] = BTRFS_READ_LOCK;
p                3008 fs/btrfs/ctree.c 		p->nodes[level] = b;
p                3016 fs/btrfs/ctree.c 		btrfs_unlock_up_safe(p, level + 1);
p                3033 fs/btrfs/ctree.c 			p->slots[level] = slot;
p                3034 fs/btrfs/ctree.c 			unlock_up(p, level, lowest_unlock, 0, NULL);
p                3038 fs/btrfs/ctree.c 					p->slots[level]++;
p                3042 fs/btrfs/ctree.c 			err = read_block_for_search(root, p, &b, level,
p                3053 fs/btrfs/ctree.c 				btrfs_set_path_blocking(p);
p                3056 fs/btrfs/ctree.c 			b = tree_mod_log_rewind(fs_info, p, b, time_seq);
p                3061 fs/btrfs/ctree.c 			p->locks[level] = BTRFS_READ_LOCK;
p                3062 fs/btrfs/ctree.c 			p->nodes[level] = b;
p                3064 fs/btrfs/ctree.c 			p->slots[level] = slot;
p                3065 fs/btrfs/ctree.c 			unlock_up(p, level, lowest_unlock, 0, NULL);
p                3071 fs/btrfs/ctree.c 	if (!p->leave_spinning)
p                3072 fs/btrfs/ctree.c 		btrfs_set_path_blocking(p);
p                3074 fs/btrfs/ctree.c 		btrfs_release_path(p);
p                3093 fs/btrfs/ctree.c 			       struct btrfs_path *p, int find_higher,
p                3100 fs/btrfs/ctree.c 	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
p                3110 fs/btrfs/ctree.c 	leaf = p->nodes[0];
p                3113 fs/btrfs/ctree.c 		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
p                3114 fs/btrfs/ctree.c 			ret = btrfs_next_leaf(root, p);
p                3125 fs/btrfs/ctree.c 			btrfs_release_path(p);
p                3129 fs/btrfs/ctree.c 		if (p->slots[0] == 0) {
p                3130 fs/btrfs/ctree.c 			ret = btrfs_prev_leaf(root, p);
p                3134 fs/btrfs/ctree.c 				leaf = p->nodes[0];
p                3135 fs/btrfs/ctree.c 				if (p->slots[0] == btrfs_header_nritems(leaf))
p                3136 fs/btrfs/ctree.c 					p->slots[0]--;
p                3147 fs/btrfs/ctree.c 			btrfs_release_path(p);
p                3150 fs/btrfs/ctree.c 			--p->slots[0];
p                1373 fs/btrfs/ctree.h 	const type *p = page_address(eb->pages[0]);			\
p                1374 fs/btrfs/ctree.h 	u##bits res = le##bits##_to_cpu(p->member);			\
p                1380 fs/btrfs/ctree.h 	type *p = page_address(eb->pages[0]);				\
p                1381 fs/btrfs/ctree.h 	p->member = cpu_to_le##bits(val);				\
p                2553 fs/btrfs/ctree.h 		      const struct btrfs_key *key, struct btrfs_path *p,
p                2556 fs/btrfs/ctree.h 			  struct btrfs_path *p, u64 time_seq);
p                2559 fs/btrfs/ctree.h 			       struct btrfs_path *p, int find_higher,
p                2565 fs/btrfs/ctree.h void btrfs_release_path(struct btrfs_path *p);
p                2567 fs/btrfs/ctree.h void btrfs_free_path(struct btrfs_path *p);
p                2568 fs/btrfs/ctree.h void btrfs_set_path_blocking(struct btrfs_path *p);
p                2569 fs/btrfs/ctree.h void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
p                2605 fs/btrfs/ctree.h 				      struct btrfs_path *p, u64 time_seq)
p                2607 fs/btrfs/ctree.h 	++p->slots[0];
p                2608 fs/btrfs/ctree.h 	if (p->slots[0] >= btrfs_header_nritems(p->nodes[0]))
p                2609 fs/btrfs/ctree.h 		return btrfs_next_old_leaf(root, p, time_seq);
p                2612 fs/btrfs/ctree.h static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
p                2614 fs/btrfs/ctree.h 	return btrfs_next_old_item(root, p, 0);
p                 208 fs/btrfs/delayed-inode.c 	struct list_head *p;
p                 215 fs/btrfs/delayed-inode.c 	p = delayed_root->node_list.next;
p                 216 fs/btrfs/delayed-inode.c 	node = list_entry(p, struct btrfs_delayed_node, n_list);
p                 228 fs/btrfs/delayed-inode.c 	struct list_head *p;
p                 237 fs/btrfs/delayed-inode.c 		p = delayed_root->node_list.next;
p                 241 fs/btrfs/delayed-inode.c 		p = node->n_list.next;
p                 243 fs/btrfs/delayed-inode.c 	next = list_entry(p, struct btrfs_delayed_node, n_list);
p                 293 fs/btrfs/delayed-inode.c 	struct list_head *p;
p                 300 fs/btrfs/delayed-inode.c 	p = delayed_root->prepare_list.next;
p                 301 fs/btrfs/delayed-inode.c 	list_del_init(p);
p                 302 fs/btrfs/delayed-inode.c 	node = list_entry(p, struct btrfs_delayed_node, p_list);
p                 403 fs/btrfs/delayed-inode.c 	struct rb_node **p, *node;
p                 416 fs/btrfs/delayed-inode.c 	p = &root->rb_root.rb_node;
p                 419 fs/btrfs/delayed-inode.c 	while (*p) {
p                 420 fs/btrfs/delayed-inode.c 		parent_node = *p;
p                 426 fs/btrfs/delayed-inode.c 			p = &(*p)->rb_right;
p                 429 fs/btrfs/delayed-inode.c 			p = &(*p)->rb_left;
p                 435 fs/btrfs/delayed-inode.c 	rb_link_node(node, parent_node, p);
p                 511 fs/btrfs/delayed-inode.c 	struct rb_node *p;
p                 514 fs/btrfs/delayed-inode.c 	p = rb_first_cached(&delayed_node->ins_root);
p                 515 fs/btrfs/delayed-inode.c 	if (p)
p                 516 fs/btrfs/delayed-inode.c 		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
p                 524 fs/btrfs/delayed-inode.c 	struct rb_node *p;
p                 527 fs/btrfs/delayed-inode.c 	p = rb_first_cached(&delayed_node->del_root);
p                 528 fs/btrfs/delayed-inode.c 	if (p)
p                 529 fs/btrfs/delayed-inode.c 		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
p                 537 fs/btrfs/delayed-inode.c 	struct rb_node *p;
p                 540 fs/btrfs/delayed-inode.c 	p = rb_next(&item->rb_node);
p                 541 fs/btrfs/delayed-inode.c 	if (p)
p                 542 fs/btrfs/delayed-inode.c 		next = rb_entry(p, struct btrfs_delayed_item, rb_node);
p                 281 fs/btrfs/delayed-ref.c 	struct rb_node **p = &root->rb_root.rb_node;
p                 290 fs/btrfs/delayed-ref.c 	while (*p) {
p                 291 fs/btrfs/delayed-ref.c 		parent_node = *p;
p                 296 fs/btrfs/delayed-ref.c 			p = &(*p)->rb_left;
p                 298 fs/btrfs/delayed-ref.c 			p = &(*p)->rb_right;
p                 305 fs/btrfs/delayed-ref.c 	rb_link_node(node, parent_node, p);
p                 313 fs/btrfs/delayed-ref.c 	struct rb_node **p = &root->rb_root.rb_node;
p                 319 fs/btrfs/delayed-ref.c 	while (*p) {
p                 322 fs/btrfs/delayed-ref.c 		parent_node = *p;
p                 327 fs/btrfs/delayed-ref.c 			p = &(*p)->rb_left;
p                 329 fs/btrfs/delayed-ref.c 			p = &(*p)->rb_right;
p                 336 fs/btrfs/delayed-ref.c 	rb_link_node(node, parent_node, p);
p                 333 fs/btrfs/extent_io.c 	struct rb_node **p;
p                 338 fs/btrfs/extent_io.c 		p = *p_in;
p                 343 fs/btrfs/extent_io.c 	p = search_start ? &search_start : &root->rb_node;
p                 344 fs/btrfs/extent_io.c 	while (*p) {
p                 345 fs/btrfs/extent_io.c 		parent = *p;
p                 349 fs/btrfs/extent_io.c 			p = &(*p)->rb_left;
p                 351 fs/btrfs/extent_io.c 			p = &(*p)->rb_right;
p                 357 fs/btrfs/extent_io.c 	rb_link_node(node, parent, p);
p                 519 fs/btrfs/extent_io.c 			struct rb_node ***p,
p                 535 fs/btrfs/extent_io.c 	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
p                 945 fs/btrfs/extent_io.c 	struct rb_node **p;
p                 979 fs/btrfs/extent_io.c 	node = tree_search_for_insert(tree, start, &p, &parent);
p                 984 fs/btrfs/extent_io.c 				   &p, &parent, &bits, changeset);
p                1177 fs/btrfs/extent_io.c 	struct rb_node **p;
p                1216 fs/btrfs/extent_io.c 	node = tree_search_for_insert(tree, start, &p, &parent);
p                1224 fs/btrfs/extent_io.c 				   &p, &parent, &bits, NULL);
p                2271 fs/btrfs/extent_io.c 		struct page *p = eb->pages[i];
p                2273 fs/btrfs/extent_io.c 		ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
p                2274 fs/btrfs/extent_io.c 					start - page_offset(p), mirror_num);
p                3709 fs/btrfs/extent_io.c 		struct page *p = eb->pages[i];
p                3711 fs/btrfs/extent_io.c 		if (!trylock_page(p)) {
p                3723 fs/btrfs/extent_io.c 			lock_page(p);
p                3886 fs/btrfs/extent_io.c 		struct page *p = eb->pages[i];
p                3888 fs/btrfs/extent_io.c 		clear_page_dirty_for_io(p);
p                3889 fs/btrfs/extent_io.c 		set_page_writeback(p);
p                3891 fs/btrfs/extent_io.c 					 p, offset, PAGE_SIZE, 0, bdev,
p                3896 fs/btrfs/extent_io.c 			set_btree_ioerr(p);
p                3897 fs/btrfs/extent_io.c 			if (PageWriteback(p))
p                3898 fs/btrfs/extent_io.c 				end_page_writeback(p);
p                3906 fs/btrfs/extent_io.c 		unlock_page(p);
p                3911 fs/btrfs/extent_io.c 			struct page *p = eb->pages[i];
p                3912 fs/btrfs/extent_io.c 			clear_page_dirty_for_io(p);
p                3913 fs/btrfs/extent_io.c 			unlock_page(p);
p                4963 fs/btrfs/extent_io.c 	struct page *p;
p                4972 fs/btrfs/extent_io.c 		p = alloc_page(GFP_NOFS);
p                4973 fs/btrfs/extent_io.c 		if (!p) {
p                4977 fs/btrfs/extent_io.c 		attach_extent_buffer_page(new, p);
p                4978 fs/btrfs/extent_io.c 		WARN_ON(PageDirty(p));
p                4979 fs/btrfs/extent_io.c 		SetPageUptodate(p);
p                4980 fs/btrfs/extent_io.c 		new->pages[i] = p;
p                4981 fs/btrfs/extent_io.c 		copy_page(page_address(p), page_address(src->pages[i]));
p                5067 fs/btrfs/extent_io.c 		struct page *p = eb->pages[i];
p                5069 fs/btrfs/extent_io.c 		if (p != accessed)
p                5070 fs/btrfs/extent_io.c 			mark_page_accessed(p);
p                5162 fs/btrfs/extent_io.c 	struct page *p;
p                5182 fs/btrfs/extent_io.c 		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
p                5183 fs/btrfs/extent_io.c 		if (!p) {
p                5189 fs/btrfs/extent_io.c 		if (PagePrivate(p)) {
p                5197 fs/btrfs/extent_io.c 			exists = (struct extent_buffer *)p->private;
p                5200 fs/btrfs/extent_io.c 				unlock_page(p);
p                5201 fs/btrfs/extent_io.c 				put_page(p);
p                5202 fs/btrfs/extent_io.c 				mark_extent_buffer_accessed(exists, p);
p                5211 fs/btrfs/extent_io.c 			ClearPagePrivate(p);
p                5212 fs/btrfs/extent_io.c 			WARN_ON(PageDirty(p));
p                5213 fs/btrfs/extent_io.c 			put_page(p);
p                5215 fs/btrfs/extent_io.c 		attach_extent_buffer_page(eb, p);
p                5217 fs/btrfs/extent_io.c 		WARN_ON(PageDirty(p));
p                5218 fs/btrfs/extent_io.c 		eb->pages[i] = p;
p                5219 fs/btrfs/extent_io.c 		if (!PageUptodate(p))
p                5638 fs/btrfs/extent_io.c 	struct page *p;
p                5661 fs/btrfs/extent_io.c 	p = eb->pages[i];
p                5662 fs/btrfs/extent_io.c 	kaddr = page_address(p);
p                  96 fs/btrfs/extent_map.c 	struct rb_node **p = &root->rb_root.rb_node;
p                 103 fs/btrfs/extent_map.c 	while (*p) {
p                 104 fs/btrfs/extent_map.c 		parent = *p;
p                 108 fs/btrfs/extent_map.c 			p = &(*p)->rb_left;
p                 110 fs/btrfs/extent_map.c 			p = &(*p)->rb_right;
p                 136 fs/btrfs/extent_map.c 	rb_link_node(&em->rb_node, orig_parent, p);
p                  86 fs/btrfs/file.c 	struct rb_node **p;
p                  90 fs/btrfs/file.c 	p = &fs_info->defrag_inodes.rb_node;
p                  91 fs/btrfs/file.c 	while (*p) {
p                  92 fs/btrfs/file.c 		parent = *p;
p                  97 fs/btrfs/file.c 			p = &parent->rb_left;
p                  99 fs/btrfs/file.c 			p = &parent->rb_right;
p                 113 fs/btrfs/file.c 	rb_link_node(&defrag->rb_node, parent, p);
p                 215 fs/btrfs/file.c 	struct rb_node *p;
p                 223 fs/btrfs/file.c 	p = fs_info->defrag_inodes.rb_node;
p                 224 fs/btrfs/file.c 	while (p) {
p                 225 fs/btrfs/file.c 		parent = p;
p                 230 fs/btrfs/file.c 			p = parent->rb_left;
p                 232 fs/btrfs/file.c 			p = parent->rb_right;
p                 567 fs/btrfs/file.c 		struct page *p = pages[i];
p                 568 fs/btrfs/file.c 		SetPageUptodate(p);
p                 569 fs/btrfs/file.c 		ClearPageChecked(p);
p                 570 fs/btrfs/file.c 		set_page_dirty(p);
p                1452 fs/btrfs/free-space-cache.c 	struct rb_node **p = &root->rb_node;
p                1456 fs/btrfs/free-space-cache.c 	while (*p) {
p                1457 fs/btrfs/free-space-cache.c 		parent = *p;
p                1461 fs/btrfs/free-space-cache.c 			p = &(*p)->rb_left;
p                1463 fs/btrfs/free-space-cache.c 			p = &(*p)->rb_right;
p                1483 fs/btrfs/free-space-cache.c 				p = &(*p)->rb_right;
p                1489 fs/btrfs/free-space-cache.c 				p = &(*p)->rb_left;
p                1494 fs/btrfs/free-space-cache.c 	rb_link_node(node, parent, p);
p                 113 fs/btrfs/free-space-tree.c 				  struct btrfs_key *key, struct btrfs_path *p,
p                 118 fs/btrfs/free-space-tree.c 	ret = btrfs_search_slot(trans, root, key, p, ins_len, cow);
p                 127 fs/btrfs/free-space-tree.c 	if (p->slots[0] == 0) {
p                 131 fs/btrfs/free-space-tree.c 	p->slots[0]--;
p                 163 fs/btrfs/free-space-tree.c 	u8 *p = ((u8 *)map) + BIT_BYTE(start);
p                 169 fs/btrfs/free-space-tree.c 		*p |= mask_to_set;
p                 173 fs/btrfs/free-space-tree.c 		p++;
p                 177 fs/btrfs/free-space-tree.c 		*p |= mask_to_set;
p                 559 fs/btrfs/free-space-tree.c 				  struct btrfs_root *root, struct btrfs_path *p)
p                 563 fs/btrfs/free-space-tree.c 	if (p->slots[0] + 1 < btrfs_header_nritems(p->nodes[0])) {
p                 564 fs/btrfs/free-space-tree.c 		p->slots[0]++;
p                 568 fs/btrfs/free-space-tree.c 	btrfs_item_key_to_cpu(p->nodes[0], &key, p->slots[0]);
p                 569 fs/btrfs/free-space-tree.c 	btrfs_release_path(p);
p                 575 fs/btrfs/free-space-tree.c 	return btrfs_search_prev_slot(trans, root, &key, p, 0, 1);
p                 884 fs/btrfs/inode.c 			struct page *p = async_extent->pages[0];
p                 888 fs/btrfs/inode.c 			p->mapping = inode->i_mapping;
p                 889 fs/btrfs/inode.c 			btrfs_writepage_endio_finish_ordered(p, start, end, 0);
p                 891 fs/btrfs/inode.c 			p->mapping = NULL;
p                2516 fs/btrfs/inode.c 	struct rb_node **p = &root->rb_node;
p                2521 fs/btrfs/inode.c 	while (*p) {
p                2522 fs/btrfs/inode.c 		parent = *p;
p                2527 fs/btrfs/inode.c 			p = &(*p)->rb_left;
p                2529 fs/btrfs/inode.c 			p = &(*p)->rb_right;
p                2532 fs/btrfs/inode.c 	rb_link_node(&backref->node, parent, p);
p                5771 fs/btrfs/inode.c 	struct rb_node **p;
p                5780 fs/btrfs/inode.c 	p = &root->inode_tree.rb_node;
p                5781 fs/btrfs/inode.c 	while (*p) {
p                5782 fs/btrfs/inode.c 		parent = *p;
p                5786 fs/btrfs/inode.c 			p = &parent->rb_left;
p                5788 fs/btrfs/inode.c 			p = &parent->rb_right;
p                5798 fs/btrfs/inode.c 	rb_link_node(new, parent, p);
p                5826 fs/btrfs/inode.c static int btrfs_init_locked_inode(struct inode *inode, void *p)
p                5828 fs/btrfs/inode.c 	struct btrfs_iget_args *args = p;
p                10739 fs/btrfs/inode.c 	struct rb_node **p;
p                10750 fs/btrfs/inode.c 	p = &fs_info->swapfile_pins.rb_node;
p                10751 fs/btrfs/inode.c 	while (*p) {
p                10752 fs/btrfs/inode.c 		parent = *p;
p                10756 fs/btrfs/inode.c 			p = &(*p)->rb_left;
p                10759 fs/btrfs/inode.c 			p = &(*p)->rb_right;
p                10766 fs/btrfs/inode.c 	rb_link_node(&sp->node, parent, p);
p                4337 fs/btrfs/ioctl.c 	struct btrfs_ioctl_dev_replace_args *p;
p                4343 fs/btrfs/ioctl.c 	p = memdup_user(arg, sizeof(*p));
p                4344 fs/btrfs/ioctl.c 	if (IS_ERR(p))
p                4345 fs/btrfs/ioctl.c 		return PTR_ERR(p);
p                4347 fs/btrfs/ioctl.c 	switch (p->cmd) {
p                4356 fs/btrfs/ioctl.c 			ret = btrfs_dev_replace_by_ioctl(fs_info, p);
p                4361 fs/btrfs/ioctl.c 		btrfs_dev_replace_status(fs_info, p);
p                4365 fs/btrfs/ioctl.c 		p->result = btrfs_dev_replace_cancel(fs_info);
p                4373 fs/btrfs/ioctl.c 	if ((ret == 0 || ret == -ECANCELED) && copy_to_user(arg, p, sizeof(*p)))
p                4376 fs/btrfs/ioctl.c 	kfree(p);
p                  34 fs/btrfs/ordered-data.c 	struct rb_node **p = &root->rb_node;
p                  38 fs/btrfs/ordered-data.c 	while (*p) {
p                  39 fs/btrfs/ordered-data.c 		parent = *p;
p                  43 fs/btrfs/ordered-data.c 			p = &(*p)->rb_left;
p                  45 fs/btrfs/ordered-data.c 			p = &(*p)->rb_right;
p                  50 fs/btrfs/ordered-data.c 	rb_link_node(node, parent, p);
p                 443 fs/btrfs/props.c 		struct prop_handler *p = &prop_handlers[i];
p                 444 fs/btrfs/props.c 		u64 h = btrfs_name_hash(p->xattr_name, strlen(p->xattr_name));
p                 446 fs/btrfs/props.c 		hash_add(prop_handlers_ht, &p->node, h);
p                 192 fs/btrfs/qgroup.c 	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
p                 196 fs/btrfs/qgroup.c 	while (*p) {
p                 197 fs/btrfs/qgroup.c 		parent = *p;
p                 201 fs/btrfs/qgroup.c 			p = &(*p)->rb_left;
p                 203 fs/btrfs/qgroup.c 			p = &(*p)->rb_right;
p                 217 fs/btrfs/qgroup.c 	rb_link_node(&qgroup->node, parent, p);
p                1548 fs/btrfs/qgroup.c 	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
p                1556 fs/btrfs/qgroup.c 	while (*p) {
p                1557 fs/btrfs/qgroup.c 		parent_node = *p;
p                1561 fs/btrfs/qgroup.c 			p = &(*p)->rb_left;
p                1563 fs/btrfs/qgroup.c 			p = &(*p)->rb_right;
p                1574 fs/btrfs/qgroup.c 	rb_link_node(&record->node, parent_node, p);
p                 948 fs/btrfs/raid56.c 	struct page *p = NULL;
p                 953 fs/btrfs/raid56.c 	p = rbio->bio_pages[chunk_page];
p                 956 fs/btrfs/raid56.c 	if (p || bio_list_only)
p                 957 fs/btrfs/raid56.c 		return p;
p                 984 fs/btrfs/raid56.c 	void *p;
p                1018 fs/btrfs/raid56.c 	p = rbio + 1;
p                1020 fs/btrfs/raid56.c 		ptr = p;						\
p                1021 fs/btrfs/raid56.c 		p = (unsigned char *)p + sizeof(*(ptr)) * (count);	\
p                1248 fs/btrfs/raid56.c 		struct page *p;
p                1251 fs/btrfs/raid56.c 			p = page_in_rbio(rbio, stripe, pagenr, 0);
p                1252 fs/btrfs/raid56.c 			pointers[stripe] = kmap(p);
p                1256 fs/btrfs/raid56.c 		p = rbio_pstripe_page(rbio, pagenr);
p                1257 fs/btrfs/raid56.c 		SetPageUptodate(p);
p                1258 fs/btrfs/raid56.c 		pointers[stripe++] = kmap(p);
p                1266 fs/btrfs/raid56.c 			p = rbio_qstripe_page(rbio, pagenr);
p                1267 fs/btrfs/raid56.c 			SetPageUptodate(p);
p                1268 fs/btrfs/raid56.c 			pointers[stripe++] = kmap(p);
p                1922 fs/btrfs/raid56.c 			void *p;
p                1931 fs/btrfs/raid56.c 			p = pointers[faila];
p                1934 fs/btrfs/raid56.c 			pointers[rbio->nr_data - 1] = p;
p                2085 fs/btrfs/raid56.c 			struct page *p;
p                2091 fs/btrfs/raid56.c 			p = rbio_stripe_page(rbio, stripe, pagenr);
p                2092 fs/btrfs/raid56.c 			if (PageUptodate(p))
p                2410 fs/btrfs/raid56.c 		struct page *p;
p                2414 fs/btrfs/raid56.c 			p = page_in_rbio(rbio, stripe, pagenr, 0);
p                2415 fs/btrfs/raid56.c 			pointers[stripe] = kmap(p);
p                2438 fs/btrfs/raid56.c 		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
p                2439 fs/btrfs/raid56.c 		parity = kmap(p);
p                2445 fs/btrfs/raid56.c 		kunmap(p);
p                  78 fs/btrfs/ref-verify.c 	struct rb_node **p = &root->rb_node;
p                  82 fs/btrfs/ref-verify.c 	while (*p) {
p                  83 fs/btrfs/ref-verify.c 		parent_node = *p;
p                  86 fs/btrfs/ref-verify.c 			p = &(*p)->rb_left;
p                  88 fs/btrfs/ref-verify.c 			p = &(*p)->rb_right;
p                  93 fs/btrfs/ref-verify.c 	rb_link_node(&be->node, parent_node, p);
p                 119 fs/btrfs/ref-verify.c 	struct rb_node **p = &root->rb_node;
p                 123 fs/btrfs/ref-verify.c 	while (*p) {
p                 124 fs/btrfs/ref-verify.c 		parent_node = *p;
p                 127 fs/btrfs/ref-verify.c 			p = &(*p)->rb_left;
p                 129 fs/btrfs/ref-verify.c 			p = &(*p)->rb_right;
p                 134 fs/btrfs/ref-verify.c 	rb_link_node(&re->node, parent_node, p);
p                 164 fs/btrfs/ref-verify.c 	struct rb_node **p = &root->rb_node;
p                 169 fs/btrfs/ref-verify.c 	while (*p) {
p                 170 fs/btrfs/ref-verify.c 		parent_node = *p;
p                 174 fs/btrfs/ref-verify.c 			p = &(*p)->rb_left;
p                 176 fs/btrfs/ref-verify.c 			p = &(*p)->rb_right;
p                 181 fs/btrfs/ref-verify.c 	rb_link_node(&ref->node, parent_node, p);
p                 285 fs/btrfs/relocation.c 	struct rb_node **p = &root->rb_node;
p                 289 fs/btrfs/relocation.c 	while (*p) {
p                 290 fs/btrfs/relocation.c 		parent = *p;
p                 294 fs/btrfs/relocation.c 			p = &(*p)->rb_left;
p                 296 fs/btrfs/relocation.c 			p = &(*p)->rb_right;
p                 301 fs/btrfs/relocation.c 	rb_link_node(node, parent, p);
p                 323 fs/btrfs/scrub.c 	struct rb_node **p;
p                 330 fs/btrfs/scrub.c 	p = &locks_root->root.rb_node;
p                 331 fs/btrfs/scrub.c 	while (*p) {
p                 332 fs/btrfs/scrub.c 		parent = *p;
p                 335 fs/btrfs/scrub.c 			p = &(*p)->rb_left;
p                 337 fs/btrfs/scrub.c 			p = &(*p)->rb_right;
p                 354 fs/btrfs/scrub.c 	rb_link_node(&ret->node, parent, p);
p                1846 fs/btrfs/scrub.c 	void *p;
p                1881 fs/btrfs/scrub.c 	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
p                1886 fs/btrfs/scrub.c 		crypto_shash_update(shash, p, l);
p                1897 fs/btrfs/scrub.c 		p = mapped_buffer;
p                1918 fs/btrfs/scrub.c 	void *p;
p                1944 fs/btrfs/scrub.c 	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
p                1949 fs/btrfs/scrub.c 		crypto_shash_update(shash, p, l);
p                1960 fs/btrfs/scrub.c 		p = mapped_buffer;
p                 334 fs/btrfs/send.c static void fs_path_reset(struct fs_path *p)
p                 336 fs/btrfs/send.c 	if (p->reversed) {
p                 337 fs/btrfs/send.c 		p->start = p->buf + p->buf_len - 1;
p                 338 fs/btrfs/send.c 		p->end = p->start;
p                 339 fs/btrfs/send.c 		*p->start = 0;
p                 341 fs/btrfs/send.c 		p->start = p->buf;
p                 342 fs/btrfs/send.c 		p->end = p->start;
p                 343 fs/btrfs/send.c 		*p->start = 0;
p                 349 fs/btrfs/send.c 	struct fs_path *p;
p                 351 fs/btrfs/send.c 	p = kmalloc(sizeof(*p), GFP_KERNEL);
p                 352 fs/btrfs/send.c 	if (!p)
p                 354 fs/btrfs/send.c 	p->reversed = 0;
p                 355 fs/btrfs/send.c 	p->buf = p->inline_buf;
p                 356 fs/btrfs/send.c 	p->buf_len = FS_PATH_INLINE_SIZE;
p                 357 fs/btrfs/send.c 	fs_path_reset(p);
p                 358 fs/btrfs/send.c 	return p;
p                 363 fs/btrfs/send.c 	struct fs_path *p;
p                 365 fs/btrfs/send.c 	p = fs_path_alloc();
p                 366 fs/btrfs/send.c 	if (!p)
p                 368 fs/btrfs/send.c 	p->reversed = 1;
p                 369 fs/btrfs/send.c 	fs_path_reset(p);
p                 370 fs/btrfs/send.c 	return p;
p                 373 fs/btrfs/send.c static void fs_path_free(struct fs_path *p)
p                 375 fs/btrfs/send.c 	if (!p)
p                 377 fs/btrfs/send.c 	if (p->buf != p->inline_buf)
p                 378 fs/btrfs/send.c 		kfree(p->buf);
p                 379 fs/btrfs/send.c 	kfree(p);
p                 382 fs/btrfs/send.c static int fs_path_len(struct fs_path *p)
p                 384 fs/btrfs/send.c 	return p->end - p->start;
p                 387 fs/btrfs/send.c static int fs_path_ensure_buf(struct fs_path *p, int len)
p                 395 fs/btrfs/send.c 	if (p->buf_len >= len)
p                 403 fs/btrfs/send.c 	path_len = p->end - p->start;
p                 404 fs/btrfs/send.c 	old_buf_len = p->buf_len;
p                 409 fs/btrfs/send.c 	if (p->buf == p->inline_buf) {
p                 412 fs/btrfs/send.c 			memcpy(tmp_buf, p->buf, old_buf_len);
p                 414 fs/btrfs/send.c 		tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
p                 418 fs/btrfs/send.c 	p->buf = tmp_buf;
p                 423 fs/btrfs/send.c 	p->buf_len = ksize(p->buf);
p                 425 fs/btrfs/send.c 	if (p->reversed) {
p                 426 fs/btrfs/send.c 		tmp_buf = p->buf + old_buf_len - path_len - 1;
p                 427 fs/btrfs/send.c 		p->end = p->buf + p->buf_len - 1;
p                 428 fs/btrfs/send.c 		p->start = p->end - path_len;
p                 429 fs/btrfs/send.c 		memmove(p->start, tmp_buf, path_len + 1);
p                 431 fs/btrfs/send.c 		p->start = p->buf;
p                 432 fs/btrfs/send.c 		p->end = p->start + path_len;
p                 437 fs/btrfs/send.c static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
p                 443 fs/btrfs/send.c 	new_len = p->end - p->start + name_len;
p                 444 fs/btrfs/send.c 	if (p->start != p->end)
p                 446 fs/btrfs/send.c 	ret = fs_path_ensure_buf(p, new_len);
p                 450 fs/btrfs/send.c 	if (p->reversed) {
p                 451 fs/btrfs/send.c 		if (p->start != p->end)
p                 452 fs/btrfs/send.c 			*--p->start = '/';
p                 453 fs/btrfs/send.c 		p->start -= name_len;
p                 454 fs/btrfs/send.c 		*prepared = p->start;
p                 456 fs/btrfs/send.c 		if (p->start != p->end)
p                 457 fs/btrfs/send.c 			*p->end++ = '/';
p                 458 fs/btrfs/send.c 		*prepared = p->end;
p                 459 fs/btrfs/send.c 		p->end += name_len;
p                 460 fs/btrfs/send.c 		*p->end = 0;
p                 467 fs/btrfs/send.c static int fs_path_add(struct fs_path *p, const char *name, int name_len)
p                 472 fs/btrfs/send.c 	ret = fs_path_prepare_for_add(p, name_len, &prepared);
p                 481 fs/btrfs/send.c static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
p                 486 fs/btrfs/send.c 	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
p                 495 fs/btrfs/send.c static int fs_path_add_from_extent_buffer(struct fs_path *p,
p                 502 fs/btrfs/send.c 	ret = fs_path_prepare_for_add(p, len, &prepared);
p                 512 fs/btrfs/send.c static int fs_path_copy(struct fs_path *p, struct fs_path *from)
p                 516 fs/btrfs/send.c 	p->reversed = from->reversed;
p                 517 fs/btrfs/send.c 	fs_path_reset(p);
p                 519 fs/btrfs/send.c 	ret = fs_path_add_path(p, from);
p                 525 fs/btrfs/send.c static void fs_path_unreverse(struct fs_path *p)
p                 530 fs/btrfs/send.c 	if (!p->reversed)
p                 533 fs/btrfs/send.c 	tmp = p->start;
p                 534 fs/btrfs/send.c 	len = p->end - p->start;
p                 535 fs/btrfs/send.c 	p->start = p->buf;
p                 536 fs/btrfs/send.c 	p->end = p->start + len;
p                 537 fs/btrfs/send.c 	memmove(p->start, tmp, len + 1);
p                 538 fs/btrfs/send.c 	p->reversed = 0;
p                 652 fs/btrfs/send.c #define TLV_PUT_PATH(sctx, attrtype, p) \
p                 654 fs/btrfs/send.c 		ret = tlv_put_string(sctx, attrtype, p->start, \
p                 655 fs/btrfs/send.c 			p->end - p->start); \
p                 878 fs/btrfs/send.c 				   struct fs_path *p,
p                 898 fs/btrfs/send.c 	struct fs_path *p;
p                 912 fs/btrfs/send.c 	p = fs_path_alloc_reversed();
p                 913 fs/btrfs/send.c 	if (!p)
p                 918 fs/btrfs/send.c 		fs_path_free(p);
p                 936 fs/btrfs/send.c 		fs_path_reset(p);
p                 955 fs/btrfs/send.c 						  p->buf, p->buf_len);
p                 960 fs/btrfs/send.c 			if (start < p->buf) {
p                 962 fs/btrfs/send.c 				ret = fs_path_ensure_buf(p,
p                 963 fs/btrfs/send.c 						p->buf_len + p->buf - start);
p                 969 fs/btrfs/send.c 							  p->buf, p->buf_len);
p                 974 fs/btrfs/send.c 				BUG_ON(start < p->buf);
p                 976 fs/btrfs/send.c 			p->start = start;
p                 978 fs/btrfs/send.c 			ret = fs_path_add_from_extent_buffer(p, eb, name_off,
p                 985 fs/btrfs/send.c 		ret = iterate(num, dir, index, p, ctx);
p                 993 fs/btrfs/send.c 	fs_path_free(p);
p                1123 fs/btrfs/send.c 			    struct fs_path *p, void *ctx)
p                1128 fs/btrfs/send.c 	ret = fs_path_copy(pt, p);
p                1145 fs/btrfs/send.c 	struct btrfs_path *p;
p                1147 fs/btrfs/send.c 	p = alloc_path_for_send();
p                1148 fs/btrfs/send.c 	if (!p)
p                1157 fs/btrfs/send.c 	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
p                1164 fs/btrfs/send.c 	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
p                1172 fs/btrfs/send.c 	ret = iterate_inode_ref(root, p, &found_key, 1,
p                1179 fs/btrfs/send.c 	btrfs_free_path(p);
p                2435 fs/btrfs/send.c 	struct fs_path *p;
p                2439 fs/btrfs/send.c 	p = fs_path_alloc();
p                2440 fs/btrfs/send.c 	if (!p)
p                2447 fs/btrfs/send.c 	ret = get_cur_path(sctx, ino, gen, p);
p                2450 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
p                2457 fs/btrfs/send.c 	fs_path_free(p);
p                2465 fs/btrfs/send.c 	struct fs_path *p;
p                2469 fs/btrfs/send.c 	p = fs_path_alloc();
p                2470 fs/btrfs/send.c 	if (!p)
p                2477 fs/btrfs/send.c 	ret = get_cur_path(sctx, ino, gen, p);
p                2480 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
p                2487 fs/btrfs/send.c 	fs_path_free(p);
p                2495 fs/btrfs/send.c 	struct fs_path *p;
p                2500 fs/btrfs/send.c 	p = fs_path_alloc();
p                2501 fs/btrfs/send.c 	if (!p)
p                2508 fs/btrfs/send.c 	ret = get_cur_path(sctx, ino, gen, p);
p                2511 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
p                2519 fs/btrfs/send.c 	fs_path_free(p);
p                2527 fs/btrfs/send.c 	struct fs_path *p = NULL;
p                2536 fs/btrfs/send.c 	p = fs_path_alloc();
p                2537 fs/btrfs/send.c 	if (!p)
p                2563 fs/btrfs/send.c 	ret = get_cur_path(sctx, ino, gen, p);
p                2566 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
p                2576 fs/btrfs/send.c 	fs_path_free(p);
p                2590 fs/btrfs/send.c 	struct fs_path *p;
p                2598 fs/btrfs/send.c 	p = fs_path_alloc();
p                2599 fs/btrfs/send.c 	if (!p)
p                2636 fs/btrfs/send.c 	ret = gen_unique_name(sctx, ino, gen, p);
p                2640 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
p                2644 fs/btrfs/send.c 		fs_path_reset(p);
p                2645 fs/btrfs/send.c 		ret = read_symlink(sctx->send_root, ino, p);
p                2648 fs/btrfs/send.c 		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
p                2662 fs/btrfs/send.c 	fs_path_free(p);
p                2861 fs/btrfs/send.c 	struct rb_node **p = &sctx->orphan_dirs.rb_node;
p                2865 fs/btrfs/send.c 	while (*p) {
p                2866 fs/btrfs/send.c 		parent = *p;
p                2869 fs/btrfs/send.c 			p = &(*p)->rb_left;
p                2871 fs/btrfs/send.c 			p = &(*p)->rb_right;
p                2884 fs/btrfs/send.c 	rb_link_node(&odi->node, parent, p);
p                3029 fs/btrfs/send.c 	struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
p                3040 fs/btrfs/send.c 	while (*p) {
p                3041 fs/btrfs/send.c 		parent = *p;
p                3044 fs/btrfs/send.c 			p = &(*p)->rb_left;
p                3046 fs/btrfs/send.c 			p = &(*p)->rb_right;
p                3053 fs/btrfs/send.c 	rb_link_node(&dm->node, parent, p);
p                3093 fs/btrfs/send.c 	struct rb_node **p = &sctx->pending_dir_moves.rb_node;
p                3110 fs/btrfs/send.c 	while (*p) {
p                3111 fs/btrfs/send.c 		parent = *p;
p                3114 fs/btrfs/send.c 			p = &(*p)->rb_left;
p                3116 fs/btrfs/send.c 			p = &(*p)->rb_right;
p                3141 fs/btrfs/send.c 		rb_link_node(&pm->node, parent, p);
p                4213 fs/btrfs/send.c 	struct fs_path *p;
p                4216 fs/btrfs/send.c 	p = fs_path_alloc();
p                4217 fs/btrfs/send.c 	if (!p)
p                4225 fs/btrfs/send.c 	ret = get_cur_path(sctx, dir, gen, p);
p                4228 fs/btrfs/send.c 	ret = fs_path_add_path(p, name);
p                4232 fs/btrfs/send.c 	ret = __record_ref(refs, dir, gen, p);
p                4236 fs/btrfs/send.c 		fs_path_free(p);
p                4536 fs/btrfs/send.c 	struct fs_path *p;
p                4539 fs/btrfs/send.c 	p = fs_path_alloc();
p                4540 fs/btrfs/send.c 	if (!p)
p                4559 fs/btrfs/send.c 	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
p                4563 fs/btrfs/send.c 	ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
p                4566 fs/btrfs/send.c 	fs_path_free(p);
p                4577 fs/btrfs/send.c 	struct fs_path *p;
p                4579 fs/btrfs/send.c 	p = fs_path_alloc();
p                4580 fs/btrfs/send.c 	if (!p)
p                4583 fs/btrfs/send.c 	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
p                4587 fs/btrfs/send.c 	ret = send_remove_xattr(sctx, p, name, name_len);
p                4590 fs/btrfs/send.c 	fs_path_free(p);
p                4880 fs/btrfs/send.c 	struct fs_path *p;
p                4883 fs/btrfs/send.c 	p = fs_path_alloc();
p                4884 fs/btrfs/send.c 	if (!p)
p                4900 fs/btrfs/send.c 	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
p                4904 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
p                4912 fs/btrfs/send.c 	fs_path_free(p);
p                4926 fs/btrfs/send.c 	struct fs_path *p;
p                4934 fs/btrfs/send.c 	p = fs_path_alloc();
p                4935 fs/btrfs/send.c 	if (!p)
p                4942 fs/btrfs/send.c 	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
p                4948 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
p                4955 fs/btrfs/send.c 		ret = get_cur_path(sctx, clone_root->ino, gen, p);
p                4957 fs/btrfs/send.c 		ret = get_inode_path(clone_root->root, clone_root->ino, p);
p                4979 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
p                4987 fs/btrfs/send.c 	fs_path_free(p);
p                4998 fs/btrfs/send.c 	struct fs_path *p;
p                5000 fs/btrfs/send.c 	p = fs_path_alloc();
p                5001 fs/btrfs/send.c 	if (!p)
p                5008 fs/btrfs/send.c 	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
p                5012 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
p                5020 fs/btrfs/send.c 	fs_path_free(p);
p                5026 fs/btrfs/send.c 	struct fs_path *p = NULL;
p                5049 fs/btrfs/send.c 	p = fs_path_alloc();
p                5050 fs/btrfs/send.c 	if (!p)
p                5052 fs/btrfs/send.c 	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
p                5062 fs/btrfs/send.c 		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
p                5072 fs/btrfs/send.c 	fs_path_free(p);
p                  10 fs/btrfs/struct-funcs.c static inline u8 get_unaligned_le8(const void *p)
p                  12 fs/btrfs/struct-funcs.c        return *(u8 *)p;
p                  15 fs/btrfs/struct-funcs.c static inline void put_unaligned_le8(u8 val, void *p)
p                  17 fs/btrfs/struct-funcs.c        *(u8 *)p = val;
p                  47 fs/btrfs/struct-funcs.c 	void *p;							\
p                  61 fs/btrfs/struct-funcs.c 		p = kaddr + part_offset - token->offset;		\
p                  62 fs/btrfs/struct-funcs.c 		res = get_unaligned_le##bits(p + off);			\
p                  73 fs/btrfs/struct-funcs.c 	p = kaddr + part_offset - map_start;				\
p                  74 fs/btrfs/struct-funcs.c 	res = get_unaligned_le##bits(p + off);				\
p                  84 fs/btrfs/struct-funcs.c 	void *p;							\
p                 100 fs/btrfs/struct-funcs.c 	p = kaddr + part_offset - map_start;				\
p                 101 fs/btrfs/struct-funcs.c 	res = get_unaligned_le##bits(p + off);				\
p                 111 fs/btrfs/struct-funcs.c 	void *p;							\
p                 124 fs/btrfs/struct-funcs.c 		p = kaddr + part_offset - token->offset;		\
p                 125 fs/btrfs/struct-funcs.c 		put_unaligned_le##bits(val, p + off);			\
p                 137 fs/btrfs/struct-funcs.c 	p = kaddr + part_offset - map_start;				\
p                 138 fs/btrfs/struct-funcs.c 	put_unaligned_le##bits(val, p + off);				\
p                 147 fs/btrfs/struct-funcs.c 	void *p;							\
p                 163 fs/btrfs/struct-funcs.c 	p = kaddr + part_offset - map_start;				\
p                 164 fs/btrfs/struct-funcs.c 	put_unaligned_le##bits(val, p + off);				\
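struct-funcs.c builds its field accessors by pasting the field width into get_unaligned_le##bits / put_unaligned_le##bits. Below is a standalone sketch of that token-pasting technique, assuming nothing beyond standard C; the helper and macro names are made up for illustration.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Minimal stand-ins for the kernel's get_unaligned_le{16,32}(): read the
 * bytes one at a time so alignment and host endianness do not matter.
 */
static inline uint16_t get_unaligned_le16(const void *p)
{
        const uint8_t *b = p;

        return (uint16_t)b[0] | ((uint16_t)b[1] << 8);
}

static inline uint32_t get_unaligned_le32(const void *p)
{
        const uint8_t *b = p;

        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

/*
 * The struct-funcs.c macros paste the width into the helper name with
 * "##bits"; the same trick generates typed field readers here.
 */
#define DEFINE_FIELD_READER(bits)                                       \
static inline uint##bits##_t read_le##bits(const void *base, size_t off) \
{                                                                       \
        return get_unaligned_le##bits((const uint8_t *)base + off);     \
}

DEFINE_FIELD_READER(16)
DEFINE_FIELD_READER(32)

int main(void)
{
        uint8_t raw[] = { 0x34, 0x12, 0x78, 0x56, 0x34, 0x12 };

        printf("%#x %#x\n", read_le16(raw, 0), read_le32(raw, 2));
        return 0;
}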
p                 431 fs/btrfs/super.c 	char *p, *num;
p                 454 fs/btrfs/super.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 456 fs/btrfs/super.c 		if (!*p)
p                 459 fs/btrfs/super.c 		token = match_token(p, tokens, args);
p                 857 fs/btrfs/super.c 			btrfs_info(info, "unrecognized mount option '%s'", p);
p                 898 fs/btrfs/super.c 	char *device_name, *opts, *orig, *p;
p                 916 fs/btrfs/super.c 	while ((p = strsep(&opts, ",")) != NULL) {
p                 919 fs/btrfs/super.c 		if (!*p)
p                 922 fs/btrfs/super.c 		token = match_token(p, tokens, args);
p                 953 fs/btrfs/super.c 	char *opts, *orig, *p;
p                 969 fs/btrfs/super.c 	while ((p = strsep(&opts, ",")) != NULL) {
p                 971 fs/btrfs/super.c 		if (!*p)
p                 974 fs/btrfs/super.c 		token = match_token(p, tokens, args);
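The super.c loops above tokenize the mount option string with strsep(), skip empty tokens, and hand each token to match_token(). A userspace sketch of the same loop follows, with strcmp()/strncmp() standing in for match_token() and all option names invented for the example.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Split a comma-separated option string the way the loops above do:
 * strsep() consumes one token per iteration and empty tokens (e.g. from
 * "a,,b") are skipped.  strcmp()/strncmp() stand in for match_token().
 */
static void parse_options(const char *input)
{
        char *orig, *opts, *p;

        opts = orig = strdup(input);
        if (!orig)
                return;

        while ((p = strsep(&opts, ",")) != NULL) {
                if (!*p)
                        continue;       /* empty token */
                if (!strcmp(p, "compress"))
                        printf("enable compression\n");
                else if (!strncmp(p, "subvol=", 7))
                        printf("subvolume: %s\n", p + 7);
                else
                        printf("unrecognized mount option '%s'\n", p);
        }
        free(orig);
}

int main(void)
{
        parse_options("compress,,subvol=data,bogus");
        return 0;
}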
p                1381 fs/btrfs/super.c 	struct btrfs_fs_info *p = data;
p                1384 fs/btrfs/super.c 	return fs_info->fs_devices == p->fs_devices;
p                 146 fs/btrfs/ulist.c 	struct rb_node **p = &ulist->root.rb_node;
p                 150 fs/btrfs/ulist.c 	while (*p) {
p                 151 fs/btrfs/ulist.c 		parent = *p;
p                 155 fs/btrfs/ulist.c 			p = &(*p)->rb_right;
p                 157 fs/btrfs/ulist.c 			p = &(*p)->rb_left;
p                 161 fs/btrfs/ulist.c 	rb_link_node(&ins->rb_node, parent, p);
p                1443 fs/btrfs/volumes.c 	void *p;
p                1466 fs/btrfs/volumes.c 	p = kmap(*page);
p                1469 fs/btrfs/volumes.c 	*disk_super = p + offset_in_page(bytenr);
p                 477 fs/buffer.c    	struct list_head *p;
p                 482 fs/buffer.c    	list_for_each_prev(p, list) {
p                 483 fs/buffer.c    		bh = BH_ENTRY(p);
p                  31 fs/cachefiles/interface.c 	void *buffer, *p;
p                  66 fs/cachefiles/interface.c 		p = cookie->inline_key;
p                  68 fs/cachefiles/interface.c 		p = cookie->key;
p                  69 fs/cachefiles/interface.c 	memcpy(buffer + 2, p, keylen);
p                  86 fs/cachefiles/interface.c 			p = cookie->inline_aux;
p                  88 fs/cachefiles/interface.c 			p = cookie->aux;
p                  89 fs/cachefiles/interface.c 		memcpy(auxdata->data, p, auxlen);
p                  89 fs/cachefiles/namei.c 	struct rb_node *p;
p                  95 fs/cachefiles/namei.c 	p = cache->active_nodes.rb_node;
p                  96 fs/cachefiles/namei.c 	while (p) {
p                  97 fs/cachefiles/namei.c 		object = rb_entry(p, struct cachefiles_object, active_node);
p                  99 fs/cachefiles/namei.c 			p = p->rb_left;
p                 101 fs/cachefiles/namei.c 			p = p->rb_right;
p                1820 fs/ceph/addr.c 	struct rb_node **p, *parent;
p                1827 fs/ceph/addr.c 	p = &mdsc->pool_perm_tree.rb_node;
p                1828 fs/ceph/addr.c 	while (*p) {
p                1829 fs/ceph/addr.c 		perm = rb_entry(*p, struct ceph_pool_perm, node);
p                1831 fs/ceph/addr.c 			p = &(*p)->rb_left;
p                1833 fs/ceph/addr.c 			p = &(*p)->rb_right;
p                1839 fs/ceph/addr.c 				p = &(*p)->rb_left;
p                1841 fs/ceph/addr.c 				p = &(*p)->rb_right;
p                1849 fs/ceph/addr.c 	if (*p)
p                1859 fs/ceph/addr.c 	p = &mdsc->pool_perm_tree.rb_node;
p                1861 fs/ceph/addr.c 	while (*p) {
p                1862 fs/ceph/addr.c 		parent = *p;
p                1865 fs/ceph/addr.c 			p = &(*p)->rb_left;
p                1867 fs/ceph/addr.c 			p = &(*p)->rb_right;
p                1873 fs/ceph/addr.c 				p = &(*p)->rb_left;
p                1875 fs/ceph/addr.c 				p = &(*p)->rb_right;
p                1882 fs/ceph/addr.c 	if (*p) {
p                1971 fs/ceph/addr.c 	rb_link_node(&perm->node, parent, p);
p                 466 fs/ceph/caps.c 	struct rb_node **p = &ci->i_caps.rb_node;
p                 470 fs/ceph/caps.c 	while (*p) {
p                 471 fs/ceph/caps.c 		parent = *p;
p                 474 fs/ceph/caps.c 			p = &(*p)->rb_left;
p                 476 fs/ceph/caps.c 			p = &(*p)->rb_right;
p                 481 fs/ceph/caps.c 	rb_link_node(&new->ci_node, parent, p);
p                 794 fs/ceph/caps.c 	struct rb_node *p;
p                 798 fs/ceph/caps.c 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
p                 799 fs/ceph/caps.c 		cap = rb_entry(p, struct ceph_cap, ci_node);
p                 827 fs/ceph/caps.c 	struct rb_node *p;
p                 829 fs/ceph/caps.c 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
p                 830 fs/ceph/caps.c 		cap = rb_entry(p, struct ceph_cap, ci_node);
p                 868 fs/ceph/caps.c 	struct rb_node *p;
p                 879 fs/ceph/caps.c 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
p                 880 fs/ceph/caps.c 		cap = rb_entry(p, struct ceph_cap, ci_node);
p                 905 fs/ceph/caps.c 				for (q = rb_first(&ci->i_caps); q != p;
p                 928 fs/ceph/caps.c 	struct rb_node *p;
p                 930 fs/ceph/caps.c 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
p                 931 fs/ceph/caps.c 		cap = rb_entry(p, struct ceph_cap, ci_node);
p                 991 fs/ceph/caps.c 	struct rb_node *p;
p                 994 fs/ceph/caps.c 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
p                 995 fs/ceph/caps.c 		cap = rb_entry(p, struct ceph_cap, ci_node);
p                1139 fs/ceph/caps.c 	void *p;
p                1196 fs/ceph/caps.c 	p = fc + 1;
p                1198 fs/ceph/caps.c 	ceph_encode_32(&p, 0);
p                1200 fs/ceph/caps.c 	ceph_encode_64(&p, arg->inline_data ? 0 : CEPH_INLINE_NONE);
p                1202 fs/ceph/caps.c 	ceph_encode_32(&p, 0);
p                1208 fs/ceph/caps.c 	ceph_encode_32(&p, READ_ONCE(osdc->epoch_barrier));
p                1210 fs/ceph/caps.c 	ceph_encode_64(&p, arg->oldest_flush_tid);
p                1219 fs/ceph/caps.c 	ceph_encode_32(&p, 0);
p                1220 fs/ceph/caps.c 	ceph_encode_32(&p, 0);
p                1223 fs/ceph/caps.c 	ceph_encode_32(&p, 0);
p                1226 fs/ceph/caps.c 	ceph_encode_timespec64(p, &arg->btime);
p                1227 fs/ceph/caps.c 	p += sizeof(struct ceph_timespec);
p                1228 fs/ceph/caps.c 	ceph_encode_64(&p, arg->change_attr);
p                1231 fs/ceph/caps.c 	ceph_encode_32(&p, arg->flags);
p                1242 fs/ceph/caps.c 	struct rb_node *p;
p                1247 fs/ceph/caps.c 	p = rb_first(&ci->i_caps);
p                1248 fs/ceph/caps.c 	while (p) {
p                1249 fs/ceph/caps.c 		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
p                1250 fs/ceph/caps.c 		p = rb_next(p);
p                1830 fs/ceph/caps.c 	struct rb_node *p;
p                1923 fs/ceph/caps.c 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
p                1924 fs/ceph/caps.c 		cap = rb_entry(p, struct ceph_cap, ci_node);
p                3817 fs/ceph/caps.c 	void *p, *end;
p                3835 fs/ceph/caps.c 	p = snaptrace + snaptrace_len;
p                3839 fs/ceph/caps.c 		ceph_decode_32_safe(&p, end, flock_len, bad);
p                3840 fs/ceph/caps.c 		if (p + flock_len > end)
p                3842 fs/ceph/caps.c 		p += flock_len;
p                3847 fs/ceph/caps.c 			if (p + sizeof(*peer) > end)
p                3849 fs/ceph/caps.c 			peer = p;
p                3850 fs/ceph/caps.c 			p += sizeof(*peer);
p                3858 fs/ceph/caps.c 		ceph_decode_64_safe(&p, end, extra_info.inline_version, bad);
p                3859 fs/ceph/caps.c 		ceph_decode_32_safe(&p, end, extra_info.inline_len, bad);
p                3860 fs/ceph/caps.c 		if (p + extra_info.inline_len > end)
p                3862 fs/ceph/caps.c 		extra_info.inline_data = p;
p                3863 fs/ceph/caps.c 		p += extra_info.inline_len;
p                3870 fs/ceph/caps.c 		ceph_decode_32_safe(&p, end, epoch_barrier, bad);
p                3880 fs/ceph/caps.c 		ceph_decode_64_safe(&p, end, flush_tid, bad);
p                3882 fs/ceph/caps.c 		ceph_decode_32_safe(&p, end, caller_uid, bad);
p                3883 fs/ceph/caps.c 		ceph_decode_32_safe(&p, end, caller_gid, bad);
p                3885 fs/ceph/caps.c 		ceph_decode_32_safe(&p, end, pool_ns_len, bad);
p                3887 fs/ceph/caps.c 			ceph_decode_need(&p, end, pool_ns_len, bad);
p                3889 fs/ceph/caps.c 				ceph_find_or_create_string(p, pool_ns_len);
p                3890 fs/ceph/caps.c 			p += pool_ns_len;
p                3897 fs/ceph/caps.c 		if (p + sizeof(*btime) > end)
p                3899 fs/ceph/caps.c 		btime = p;
p                3901 fs/ceph/caps.c 		p += sizeof(*btime);
p                3902 fs/ceph/caps.c 		ceph_decode_64_safe(&p, end, extra_info.change_attr, bad);
p                3908 fs/ceph/caps.c 		ceph_decode_32_safe(&p, end, flags, bad);
p                3911 fs/ceph/caps.c 		ceph_decode_64_safe(&p, end, extra_info.nfiles, bad);
p                3912 fs/ceph/caps.c 		ceph_decode_64_safe(&p, end, extra_info.nsubdirs, bad);
p                4165 fs/ceph/caps.c int ceph_encode_inode_release(void **p, struct inode *inode,
p                4170 fs/ceph/caps.c 	struct ceph_mds_request_release *rel = *p;
p                4229 fs/ceph/caps.c 			*p += sizeof(*rel);
p                4240 fs/ceph/caps.c int ceph_encode_dentry_release(void **p, struct dentry *dentry,
p                4245 fs/ceph/caps.c 	struct ceph_mds_request_release *rel = *p;
p                4265 fs/ceph/caps.c 	ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
p                4273 fs/ceph/caps.c 		memcpy(*p, dentry->d_name.name, dentry->d_name.len);
p                4274 fs/ceph/caps.c 		*p += dentry->d_name.len;
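The cap-message paths above encode and decode through cursor helpers such as ceph_encode_32() and ceph_decode_32_safe(): every access checks the remaining space against end, then advances p. Below is a standalone sketch of the decode side under that convention; decode_u32le() is an illustrative stand-in, not the ceph helper itself.

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Cursor-style decoding in the spirit of ceph_decode_32_safe(): each read
 * first checks that the cursor cannot run past 'end', then advances it.
 */
static bool decode_u32le(const uint8_t **p, const uint8_t *end, uint32_t *out)
{
        if (end - *p < 4)
                return false;           /* would overrun: bail out ("goto bad") */
        *out = (uint32_t)(*p)[0] | ((uint32_t)(*p)[1] << 8) |
               ((uint32_t)(*p)[2] << 16) | ((uint32_t)(*p)[3] << 24);
        *p += 4;
        return true;
}

int main(void)
{
        uint8_t msg[] = { 0x02, 0, 0, 0, 0x2a, 0, 0, 0 };
        const uint8_t *p = msg, *end = msg + sizeof(msg);
        uint32_t count, value;

        if (!decode_u32le(&p, end, &count) || !decode_u32le(&p, end, &value))
                return 1;
        printf("count=%u value=%u\n", count, value);
        /* a third read fails cleanly instead of reading past the buffer */
        printf("extra read ok? %d\n", decode_u32le(&p, end, &value));
        return 0;
}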
p                  22 fs/ceph/debugfs.c static int mdsmap_show(struct seq_file *s, void *p)
p                  49 fs/ceph/debugfs.c static int mdsc_show(struct seq_file *s, void *p)
p                 127 fs/ceph/debugfs.c static int caps_show_cb(struct inode *inode, struct ceph_cap *cap, void *p)
p                 129 fs/ceph/debugfs.c 	struct seq_file *s = p;
p                 137 fs/ceph/debugfs.c static int caps_show(struct seq_file *s, void *p)
p                  73 fs/ceph/dir.c  static bool is_hash_order(loff_t p)
p                  75 fs/ceph/dir.c  	return (p & HASH_ORDER) == HASH_ORDER;
p                  78 fs/ceph/dir.c  static unsigned fpos_frag(loff_t p)
p                  80 fs/ceph/dir.c  	return p >> OFFSET_BITS;
p                  83 fs/ceph/dir.c  static unsigned fpos_hash(loff_t p)
p                  85 fs/ceph/dir.c  	return ceph_frag_value(fpos_frag(p));
p                  88 fs/ceph/dir.c  static unsigned fpos_off(loff_t p)
p                  90 fs/ceph/dir.c  	return p & OFFSET_MASK;
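The fpos_frag()/fpos_hash()/fpos_off() helpers above split one 64-bit directory position into a fragment (high bits) and an offset (low bits) using OFFSET_BITS and OFFSET_MASK. The sketch below shows the same packing with a made-up 24-bit offset field; the real constants live in fs/ceph/dir.c and are not reproduced here.

#include <stdint.h>
#include <stdio.h>

/* Field width invented for the sketch only. */
#define SKETCH_OFFSET_BITS 24
#define SKETCH_OFFSET_MASK ((1ULL << SKETCH_OFFSET_BITS) - 1)

static uint64_t make_fpos(uint32_t frag, uint32_t off)
{
        return ((uint64_t)frag << SKETCH_OFFSET_BITS) | (off & SKETCH_OFFSET_MASK);
}

static uint32_t fpos_frag(uint64_t pos)
{
        return (uint32_t)(pos >> SKETCH_OFFSET_BITS);
}

static uint32_t fpos_off(uint64_t pos)
{
        return (uint32_t)(pos & SKETCH_OFFSET_MASK);
}

int main(void)
{
        uint64_t pos = make_fpos(0x1234, 100);

        printf("frag=%#x off=%u\n", fpos_frag(pos), fpos_off(pos));
        return 0;
}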
p                 118 fs/ceph/inode.c 	struct rb_node **p;
p                 123 fs/ceph/inode.c 	p = &ci->i_fragtree.rb_node;
p                 124 fs/ceph/inode.c 	while (*p) {
p                 125 fs/ceph/inode.c 		parent = *p;
p                 129 fs/ceph/inode.c 			p = &(*p)->rb_left;
p                 131 fs/ceph/inode.c 			p = &(*p)->rb_right;
p                 145 fs/ceph/inode.c 	rb_link_node(&frag->node, parent, p);
p                  70 fs/ceph/mds_client.c static int parse_reply_info_quota(void **p, void *end,
p                  76 fs/ceph/mds_client.c 	ceph_decode_8_safe(p, end, struct_v, bad);
p                  77 fs/ceph/mds_client.c 	ceph_decode_8_safe(p, end, struct_compat, bad);
p                  82 fs/ceph/mds_client.c 	ceph_decode_32_safe(p, end, struct_len, bad);
p                  83 fs/ceph/mds_client.c 	ceph_decode_need(p, end, struct_len, bad);
p                  84 fs/ceph/mds_client.c 	end = *p + struct_len;
p                  85 fs/ceph/mds_client.c 	ceph_decode_64_safe(p, end, info->max_bytes, bad);
p                  86 fs/ceph/mds_client.c 	ceph_decode_64_safe(p, end, info->max_files, bad);
p                  87 fs/ceph/mds_client.c 	*p = end;
p                  96 fs/ceph/mds_client.c static int parse_reply_info_in(void **p, void *end,
p                 106 fs/ceph/mds_client.c 		ceph_decode_8_safe(p, end, struct_v, bad);
p                 107 fs/ceph/mds_client.c 		ceph_decode_8_safe(p, end, struct_compat, bad);
p                 112 fs/ceph/mds_client.c 		ceph_decode_32_safe(p, end, struct_len, bad);
p                 113 fs/ceph/mds_client.c 		ceph_decode_need(p, end, struct_len, bad);
p                 114 fs/ceph/mds_client.c 		end = *p + struct_len;
p                 117 fs/ceph/mds_client.c 	ceph_decode_need(p, end, sizeof(struct ceph_mds_reply_inode), bad);
p                 118 fs/ceph/mds_client.c 	info->in = *p;
p                 119 fs/ceph/mds_client.c 	*p += sizeof(struct ceph_mds_reply_inode) +
p                 123 fs/ceph/mds_client.c 	ceph_decode_32_safe(p, end, info->symlink_len, bad);
p                 124 fs/ceph/mds_client.c 	ceph_decode_need(p, end, info->symlink_len, bad);
p                 125 fs/ceph/mds_client.c 	info->symlink = *p;
p                 126 fs/ceph/mds_client.c 	*p += info->symlink_len;
p                 128 fs/ceph/mds_client.c 	ceph_decode_copy_safe(p, end, &info->dir_layout,
p                 130 fs/ceph/mds_client.c 	ceph_decode_32_safe(p, end, info->xattr_len, bad);
p                 131 fs/ceph/mds_client.c 	ceph_decode_need(p, end, info->xattr_len, bad);
p                 132 fs/ceph/mds_client.c 	info->xattr_data = *p;
p                 133 fs/ceph/mds_client.c 	*p += info->xattr_len;
p                 137 fs/ceph/mds_client.c 		ceph_decode_64_safe(p, end, info->inline_version, bad);
p                 138 fs/ceph/mds_client.c 		ceph_decode_32_safe(p, end, info->inline_len, bad);
p                 139 fs/ceph/mds_client.c 		ceph_decode_need(p, end, info->inline_len, bad);
p                 140 fs/ceph/mds_client.c 		info->inline_data = *p;
p                 141 fs/ceph/mds_client.c 		*p += info->inline_len;
p                 143 fs/ceph/mds_client.c 		err = parse_reply_info_quota(p, end, info);
p                 147 fs/ceph/mds_client.c 		ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
p                 149 fs/ceph/mds_client.c 			ceph_decode_need(p, end, info->pool_ns_len, bad);
p                 150 fs/ceph/mds_client.c 			info->pool_ns_data = *p;
p                 151 fs/ceph/mds_client.c 			*p += info->pool_ns_len;
p                 155 fs/ceph/mds_client.c 		ceph_decode_need(p, end, sizeof(info->btime), bad);
p                 156 fs/ceph/mds_client.c 		ceph_decode_copy(p, &info->btime, sizeof(info->btime));
p                 159 fs/ceph/mds_client.c 		ceph_decode_64_safe(p, end, info->change_attr, bad);
p                 163 fs/ceph/mds_client.c 			ceph_decode_32_safe(p, end, info->dir_pin, bad);
p                 170 fs/ceph/mds_client.c 			ceph_decode_need(p, end, sizeof(info->snap_btime), bad);
p                 171 fs/ceph/mds_client.c 			ceph_decode_copy(p, &info->snap_btime,
p                 177 fs/ceph/mds_client.c 		*p = end;
p                 180 fs/ceph/mds_client.c 			ceph_decode_64_safe(p, end, info->inline_version, bad);
p                 181 fs/ceph/mds_client.c 			ceph_decode_32_safe(p, end, info->inline_len, bad);
p                 182 fs/ceph/mds_client.c 			ceph_decode_need(p, end, info->inline_len, bad);
p                 183 fs/ceph/mds_client.c 			info->inline_data = *p;
p                 184 fs/ceph/mds_client.c 			*p += info->inline_len;
p                 189 fs/ceph/mds_client.c 			err = parse_reply_info_quota(p, end, info);
p                 200 fs/ceph/mds_client.c 			ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
p                 202 fs/ceph/mds_client.c 				ceph_decode_need(p, end, info->pool_ns_len, bad);
p                 203 fs/ceph/mds_client.c 				info->pool_ns_data = *p;
p                 204 fs/ceph/mds_client.c 				*p += info->pool_ns_len;
p                 209 fs/ceph/mds_client.c 			ceph_decode_need(p, end, sizeof(info->btime), bad);
p                 210 fs/ceph/mds_client.c 			ceph_decode_copy(p, &info->btime, sizeof(info->btime));
p                 211 fs/ceph/mds_client.c 			ceph_decode_64_safe(p, end, info->change_attr, bad);
p                 224 fs/ceph/mds_client.c static int parse_reply_info_dir(void **p, void *end,
p                 231 fs/ceph/mds_client.c 		ceph_decode_8_safe(p, end, struct_v, bad);
p                 232 fs/ceph/mds_client.c 		ceph_decode_8_safe(p, end, struct_compat, bad);
p                 237 fs/ceph/mds_client.c 		ceph_decode_32_safe(p, end, struct_len, bad);
p                 238 fs/ceph/mds_client.c 		ceph_decode_need(p, end, struct_len, bad);
p                 239 fs/ceph/mds_client.c 		end = *p + struct_len;
p                 242 fs/ceph/mds_client.c 	ceph_decode_need(p, end, sizeof(**dirfrag), bad);
p                 243 fs/ceph/mds_client.c 	*dirfrag = *p;
p                 244 fs/ceph/mds_client.c 	*p += sizeof(**dirfrag) + sizeof(u32) * le32_to_cpu((*dirfrag)->ndist);
p                 245 fs/ceph/mds_client.c 	if (unlikely(*p > end))
p                 248 fs/ceph/mds_client.c 		*p = end;
p                 254 fs/ceph/mds_client.c static int parse_reply_info_lease(void **p, void *end,
p                 261 fs/ceph/mds_client.c 		ceph_decode_8_safe(p, end, struct_v, bad);
p                 262 fs/ceph/mds_client.c 		ceph_decode_8_safe(p, end, struct_compat, bad);
p                 267 fs/ceph/mds_client.c 		ceph_decode_32_safe(p, end, struct_len, bad);
p                 268 fs/ceph/mds_client.c 		ceph_decode_need(p, end, struct_len, bad);
p                 269 fs/ceph/mds_client.c 		end = *p + struct_len;
p                 272 fs/ceph/mds_client.c 	ceph_decode_need(p, end, sizeof(**lease), bad);
p                 273 fs/ceph/mds_client.c 	*lease = *p;
p                 274 fs/ceph/mds_client.c 	*p += sizeof(**lease);
p                 276 fs/ceph/mds_client.c 		*p = end;
p                 286 fs/ceph/mds_client.c static int parse_reply_info_trace(void **p, void *end,
p                 293 fs/ceph/mds_client.c 		err = parse_reply_info_in(p, end, &info->diri, features);
p                 297 fs/ceph/mds_client.c 		err = parse_reply_info_dir(p, end, &info->dirfrag, features);
p                 301 fs/ceph/mds_client.c 		ceph_decode_32_safe(p, end, info->dname_len, bad);
p                 302 fs/ceph/mds_client.c 		ceph_decode_need(p, end, info->dname_len, bad);
p                 303 fs/ceph/mds_client.c 		info->dname = *p;
p                 304 fs/ceph/mds_client.c 		*p += info->dname_len;
p                 306 fs/ceph/mds_client.c 		err = parse_reply_info_lease(p, end, &info->dlease, features);
p                 312 fs/ceph/mds_client.c 		err = parse_reply_info_in(p, end, &info->targeti, features);
p                 317 fs/ceph/mds_client.c 	if (unlikely(*p != end))
p                 331 fs/ceph/mds_client.c static int parse_reply_info_readdir(void **p, void *end,
p                 338 fs/ceph/mds_client.c 	err = parse_reply_info_dir(p, end, &info->dir_dir, features);
p                 342 fs/ceph/mds_client.c 	ceph_decode_need(p, end, sizeof(num) + 2, bad);
p                 343 fs/ceph/mds_client.c 	num = ceph_decode_32(p);
p                 345 fs/ceph/mds_client.c 		u16 flags = ceph_decode_16(p);
p                 366 fs/ceph/mds_client.c 		ceph_decode_32_safe(p, end, rde->name_len, bad);
p                 367 fs/ceph/mds_client.c 		ceph_decode_need(p, end, rde->name_len, bad);
p                 368 fs/ceph/mds_client.c 		rde->name = *p;
p                 369 fs/ceph/mds_client.c 		*p += rde->name_len;
p                 373 fs/ceph/mds_client.c 		err = parse_reply_info_lease(p, end, &rde->lease, features);
p                 377 fs/ceph/mds_client.c 		err = parse_reply_info_in(p, end, &rde->inode, features);
p                 388 fs/ceph/mds_client.c 	*p = end;
p                 401 fs/ceph/mds_client.c static int parse_reply_info_filelock(void **p, void *end,
p                 405 fs/ceph/mds_client.c 	if (*p + sizeof(*info->filelock_reply) > end)
p                 408 fs/ceph/mds_client.c 	info->filelock_reply = *p;
p                 411 fs/ceph/mds_client.c 	*p = end;
p                 420 fs/ceph/mds_client.c static int parse_reply_info_create(void **p, void *end,
p                 427 fs/ceph/mds_client.c 		if (*p == end) {
p                 431 fs/ceph/mds_client.c 			ceph_decode_64_safe(p, end, info->ino, bad);
p                 434 fs/ceph/mds_client.c 		if (*p != end)
p                 439 fs/ceph/mds_client.c 	*p = end;
p                 448 fs/ceph/mds_client.c static int parse_reply_info_extra(void **p, void *end,
p                 455 fs/ceph/mds_client.c 		return parse_reply_info_filelock(p, end, info, features);
p                 457 fs/ceph/mds_client.c 		return parse_reply_info_readdir(p, end, info, features);
p                 459 fs/ceph/mds_client.c 		return parse_reply_info_create(p, end, info, features);
p                 471 fs/ceph/mds_client.c 	void *p, *end;
p                 476 fs/ceph/mds_client.c 	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
p                 477 fs/ceph/mds_client.c 	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);
p                 480 fs/ceph/mds_client.c 	ceph_decode_32_safe(&p, end, len, bad);
p                 482 fs/ceph/mds_client.c 		ceph_decode_need(&p, end, len, bad);
p                 483 fs/ceph/mds_client.c 		err = parse_reply_info_trace(&p, p+len, info, features);
p                 489 fs/ceph/mds_client.c 	ceph_decode_32_safe(&p, end, len, bad);
p                 491 fs/ceph/mds_client.c 		ceph_decode_need(&p, end, len, bad);
p                 492 fs/ceph/mds_client.c 		err = parse_reply_info_extra(&p, p+len, info, features);
p                 498 fs/ceph/mds_client.c 	ceph_decode_32_safe(&p, end, len, bad);
p                 500 fs/ceph/mds_client.c 	info->snapblob = p;
p                 501 fs/ceph/mds_client.c 	p += len;
p                 503 fs/ceph/mds_client.c 	if (p != end)
p                 809 fs/ceph/mds_client.c 		struct rb_node *p = rb_next(&req->r_node);
p                 811 fs/ceph/mds_client.c 		while (p) {
p                 813 fs/ceph/mds_client.c 				rb_entry(p, struct ceph_mds_request, r_node);
p                 818 fs/ceph/mds_client.c 			p = rb_next(p);
p                1048 fs/ceph/mds_client.c static void encode_supported_features(void **p, void *end)
p                1057 fs/ceph/mds_client.c 		BUG_ON(*p + 4 + size > end);
p                1058 fs/ceph/mds_client.c 		ceph_encode_32(p, size);
p                1059 fs/ceph/mds_client.c 		memset(*p, 0, size);
p                1061 fs/ceph/mds_client.c 			((unsigned char*)(*p))[i / 8] |= 1 << (bits[i] % 8);
p                1062 fs/ceph/mds_client.c 		*p += size;
p                1064 fs/ceph/mds_client.c 		BUG_ON(*p + 4 > end);
p                1065 fs/ceph/mds_client.c 		ceph_encode_32(p, 0);
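encode_supported_features() above packs feature bit numbers into a length-prefixed byte array, setting one bit per advertised feature. A minimal standalone bitmap sketch of that byte/bit arithmetic follows; the helper names are illustrative.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Bit n lands in byte n/8 at bit position n%8. */
static void bitmap_set(uint8_t *map, unsigned int bit)
{
        map[bit / 8] |= 1u << (bit % 8);
}

static int bitmap_test(const uint8_t *map, unsigned int bit)
{
        return (map[bit / 8] >> (bit % 8)) & 1;
}

int main(void)
{
        uint8_t features[8];
        unsigned int bits[] = { 0, 5, 11, 42 };

        memset(features, 0, sizeof(features));
        for (unsigned int i = 0; i < sizeof(bits) / sizeof(bits[0]); i++)
                bitmap_set(features, bits[i]);

        printf("bit 11 set: %d, bit 12 set: %d\n",
               bitmap_test(features, 11), bitmap_test(features, 12));
        return 0;
}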
p                1082 fs/ceph/mds_client.c 	void *p, *end;
p                1109 fs/ceph/mds_client.c 	p = msg->front.iov_base;
p                1110 fs/ceph/mds_client.c 	end = p + msg->front.iov_len;
p                1112 fs/ceph/mds_client.c 	h = p;
p                1126 fs/ceph/mds_client.c 	p += sizeof(*h);
p                1129 fs/ceph/mds_client.c 	ceph_encode_32(&p, metadata_key_count);
p                1136 fs/ceph/mds_client.c 		ceph_encode_32(&p, key_len);
p                1137 fs/ceph/mds_client.c 		memcpy(p, metadata[i][0], key_len);
p                1138 fs/ceph/mds_client.c 		p += key_len;
p                1139 fs/ceph/mds_client.c 		ceph_encode_32(&p, val_len);
p                1140 fs/ceph/mds_client.c 		memcpy(p, metadata[i][1], val_len);
p                1141 fs/ceph/mds_client.c 		p += val_len;
p                1144 fs/ceph/mds_client.c 	encode_supported_features(&p, end);
p                1145 fs/ceph/mds_client.c 	msg->front.iov_len = p - msg->front.iov_base;
p                1274 fs/ceph/mds_client.c 	struct rb_node *p;
p                1297 fs/ceph/mds_client.c 	p = rb_first(&mdsc->request_tree);
p                1298 fs/ceph/mds_client.c 	while (p) {
p                1299 fs/ceph/mds_client.c 		req = rb_entry(p, struct ceph_mds_request, r_node);
p                1300 fs/ceph/mds_client.c 		p = rb_next(p);
p                1318 fs/ceph/mds_client.c 	struct list_head *p;
p                1326 fs/ceph/mds_client.c 	p = session->s_caps.next;
p                1327 fs/ceph/mds_client.c 	while (p != &session->s_caps) {
p                1328 fs/ceph/mds_client.c 		cap = list_entry(p, struct ceph_cap, session_caps);
p                1331 fs/ceph/mds_client.c 			p = p->next;
p                1352 fs/ceph/mds_client.c 		p = p->next;
p                2297 fs/ceph/mds_client.c 	void *p, *end;
p                2343 fs/ceph/mds_client.c 	p = msg->front.iov_base + sizeof(*head);
p                2352 fs/ceph/mds_client.c 	ceph_encode_filepath(&p, end, ino1, path1);
p                2353 fs/ceph/mds_client.c 	ceph_encode_filepath(&p, end, ino2, path2);
p                2356 fs/ceph/mds_client.c 	req->r_request_release_offset = p - msg->front.iov_base;
p                2361 fs/ceph/mds_client.c 		releases += ceph_encode_inode_release(&p,
p                2365 fs/ceph/mds_client.c 		releases += ceph_encode_dentry_release(&p, req->r_dentry,
p                2369 fs/ceph/mds_client.c 		releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
p                2374 fs/ceph/mds_client.c 		releases += ceph_encode_inode_release(&p,
p                2380 fs/ceph/mds_client.c 		p = msg->front.iov_base + req->r_request_release_offset;
p                2389 fs/ceph/mds_client.c 		ceph_encode_copy(&p, &ts, sizeof(ts));
p                2392 fs/ceph/mds_client.c 	BUG_ON(p > end);
p                2393 fs/ceph/mds_client.c 	msg->front.iov_len = p - msg->front.iov_base;
p                2453 fs/ceph/mds_client.c 		void *p;
p                2476 fs/ceph/mds_client.c 		p = msg->front.iov_base + req->r_request_release_offset;
p                2480 fs/ceph/mds_client.c 			ceph_encode_copy(&p, &ts, sizeof(ts));
p                2483 fs/ceph/mds_client.c 		msg->front.iov_len = p - msg->front.iov_base;
p                2647 fs/ceph/mds_client.c 	struct rb_node *p = rb_first(&mdsc->request_tree);
p                2650 fs/ceph/mds_client.c 	while (p) {
p                2651 fs/ceph/mds_client.c 		req = rb_entry(p, struct ceph_mds_request, r_node);
p                2652 fs/ceph/mds_client.c 		p = rb_next(p);
p                2998 fs/ceph/mds_client.c 	void *p = msg->front.iov_base;
p                2999 fs/ceph/mds_client.c 	void *end = p + msg->front.iov_len;
p                3001 fs/ceph/mds_client.c 	ceph_decode_need(&p, end, 2*sizeof(u32), bad);
p                3002 fs/ceph/mds_client.c 	next_mds = ceph_decode_32(&p);
p                3003 fs/ceph/mds_client.c 	fwd_seq = ceph_decode_32(&p);
p                3038 fs/ceph/mds_client.c static int __decode_session_metadata(void **p, void *end,
p                3044 fs/ceph/mds_client.c 	ceph_decode_32_safe(p, end, n, bad);
p                3047 fs/ceph/mds_client.c 		ceph_decode_32_safe(p, end, len, bad);
p                3048 fs/ceph/mds_client.c 		ceph_decode_need(p, end, len, bad);
p                3049 fs/ceph/mds_client.c 		err_str = !strncmp(*p, "error_string", len);
p                3050 fs/ceph/mds_client.c 		*p += len;
p                3051 fs/ceph/mds_client.c 		ceph_decode_32_safe(p, end, len, bad);
p                3052 fs/ceph/mds_client.c 		ceph_decode_need(p, end, len, bad);
p                3053 fs/ceph/mds_client.c 		if (err_str && strnstr(*p, "blacklisted", len))
p                3055 fs/ceph/mds_client.c 		*p += len;
p                3071 fs/ceph/mds_client.c 	void *p = msg->front.iov_base;
p                3072 fs/ceph/mds_client.c 	void *end = p + msg->front.iov_len;
p                3080 fs/ceph/mds_client.c 	ceph_decode_need(&p, end, sizeof(*h), bad);
p                3081 fs/ceph/mds_client.c 	h = p;
p                3082 fs/ceph/mds_client.c 	p += sizeof(*h);
p                3090 fs/ceph/mds_client.c 		if (__decode_session_metadata(&p, end, &blacklisted) < 0)
p                3093 fs/ceph/mds_client.c 		ceph_decode_32_safe(&p, end, len, bad);
p                3094 fs/ceph/mds_client.c 		ceph_decode_64_safe(&p, end, features, bad);
p                3095 fs/ceph/mds_client.c 		p += len - sizeof(features);
p                3213 fs/ceph/mds_client.c 	struct rb_node *p;
p                3231 fs/ceph/mds_client.c 	p = rb_first(&mdsc->request_tree);
p                3232 fs/ceph/mds_client.c 	while (p) {
p                3233 fs/ceph/mds_client.c 		req = rb_entry(p, struct ceph_mds_request, r_node);
p                3234 fs/ceph/mds_client.c 		p = rb_next(p);
p                3505 fs/ceph/mds_client.c 	struct rb_node *p;
p                3520 fs/ceph/mds_client.c 	for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
p                3522 fs/ceph/mds_client.c 		       rb_entry(p, struct ceph_snap_realm, node);
p                4463 fs/ceph/mds_client.c 	void *p = msg->front.iov_base;
p                4464 fs/ceph/mds_client.c 	void *end = p + msg->front.iov_len;
p                4472 fs/ceph/mds_client.c 	ceph_decode_need(&p, end, sizeof(u32), bad);
p                4473 fs/ceph/mds_client.c 	epoch = ceph_decode_32(&p);
p                4477 fs/ceph/mds_client.c 	ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
p                4478 fs/ceph/mds_client.c 	struct_v = ceph_decode_8(&p);
p                4479 fs/ceph/mds_client.c 	struct_cv = ceph_decode_8(&p);
p                4480 fs/ceph/mds_client.c 	map_len = ceph_decode_32(&p);
p                4482 fs/ceph/mds_client.c 	ceph_decode_need(&p, end, sizeof(u32) * 3, bad);
p                4483 fs/ceph/mds_client.c 	p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */
p                4485 fs/ceph/mds_client.c 	num_fs = ceph_decode_32(&p);
p                4492 fs/ceph/mds_client.c 		ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
p                4493 fs/ceph/mds_client.c 		info_v = ceph_decode_8(&p);
p                4494 fs/ceph/mds_client.c 		info_cv = ceph_decode_8(&p);
p                4495 fs/ceph/mds_client.c 		info_len = ceph_decode_32(&p);
p                4496 fs/ceph/mds_client.c 		ceph_decode_need(&p, end, info_len, bad);
p                4497 fs/ceph/mds_client.c 		info_p = p;
p                4498 fs/ceph/mds_client.c 		info_end = p + info_len;
p                4499 fs/ceph/mds_client.c 		p = info_end;
p                4542 fs/ceph/mds_client.c 	void *p = msg->front.iov_base;
p                4543 fs/ceph/mds_client.c 	void *end = p + msg->front.iov_len;
p                4548 fs/ceph/mds_client.c 	ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
p                4549 fs/ceph/mds_client.c 	ceph_decode_copy(&p, &fsid, sizeof(fsid));
p                4552 fs/ceph/mds_client.c 	epoch = ceph_decode_32(&p);
p                4553 fs/ceph/mds_client.c 	maplen = ceph_decode_32(&p);
p                4565 fs/ceph/mds_client.c 	newmap = ceph_mdsmap_decode(&p, end);
p                  45 fs/ceph/mdsmap.c #define __decode_and_drop_type(p, end, type, bad)		\
p                  47 fs/ceph/mdsmap.c 		if (*p + sizeof(type) > end)			\
p                  49 fs/ceph/mdsmap.c 		*p += sizeof(type);				\
p                  52 fs/ceph/mdsmap.c #define __decode_and_drop_set(p, end, type, bad)		\
p                  56 fs/ceph/mdsmap.c 		ceph_decode_32_safe(p, end, n, bad);		\
p                  58 fs/ceph/mdsmap.c 		ceph_decode_need(p, end, need, bad);		\
p                  59 fs/ceph/mdsmap.c 		*p += need;					\
p                  62 fs/ceph/mdsmap.c #define __decode_and_drop_map(p, end, ktype, vtype, bad)	\
p                  66 fs/ceph/mdsmap.c 		ceph_decode_32_safe(p, end, n, bad);		\
p                  68 fs/ceph/mdsmap.c 		ceph_decode_need(p, end, need, bad);		\
p                  69 fs/ceph/mdsmap.c 		*p += need;					\
p                  73 fs/ceph/mdsmap.c static int __decode_and_drop_compat_set(void **p, void* end)
p                  79 fs/ceph/mdsmap.c 		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
p                  81 fs/ceph/mdsmap.c 		*p += sizeof(u64);
p                  83 fs/ceph/mdsmap.c 		n = ceph_decode_32(p);
p                  86 fs/ceph/mdsmap.c 			ceph_decode_need(p, end, sizeof(u64) + sizeof(u32),
p                  88 fs/ceph/mdsmap.c 			*p += sizeof(u64);
p                  89 fs/ceph/mdsmap.c 			len = ceph_decode_32(p);
p                  90 fs/ceph/mdsmap.c 			ceph_decode_need(p, end, len, bad);
p                  91 fs/ceph/mdsmap.c 			*p += len;
p                 105 fs/ceph/mdsmap.c struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
p                 108 fs/ceph/mdsmap.c 	const void *start = *p;
p                 118 fs/ceph/mdsmap.c 	ceph_decode_need(p, end, 1 + 1, bad);
p                 119 fs/ceph/mdsmap.c 	mdsmap_v = ceph_decode_8(p);
p                 120 fs/ceph/mdsmap.c 	mdsmap_cv = ceph_decode_8(p);
p                 123 fs/ceph/mdsmap.c 	       ceph_decode_32_safe(p, end, mdsmap_len, bad);
p                 124 fs/ceph/mdsmap.c 	       if (end < *p + mdsmap_len)
p                 126 fs/ceph/mdsmap.c 	       end = *p + mdsmap_len;
p                 129 fs/ceph/mdsmap.c 	ceph_decode_need(p, end, 8*sizeof(u32) + sizeof(u64), bad);
p                 130 fs/ceph/mdsmap.c 	m->m_epoch = ceph_decode_32(p);
p                 131 fs/ceph/mdsmap.c 	m->m_client_epoch = ceph_decode_32(p);
p                 132 fs/ceph/mdsmap.c 	m->m_last_failure = ceph_decode_32(p);
p                 133 fs/ceph/mdsmap.c 	m->m_root = ceph_decode_32(p);
p                 134 fs/ceph/mdsmap.c 	m->m_session_timeout = ceph_decode_32(p);
p                 135 fs/ceph/mdsmap.c 	m->m_session_autoclose = ceph_decode_32(p);
p                 136 fs/ceph/mdsmap.c 	m->m_max_file_size = ceph_decode_64(p);
p                 137 fs/ceph/mdsmap.c 	m->m_max_mds = ceph_decode_32(p);
p                 145 fs/ceph/mdsmap.c 	n = ceph_decode_32(p);
p                 159 fs/ceph/mdsmap.c 		ceph_decode_need(p, end, sizeof(u64) + 1, bad);
p                 160 fs/ceph/mdsmap.c 		global_id = ceph_decode_64(p);
p                 161 fs/ceph/mdsmap.c 		info_v= ceph_decode_8(p);
p                 165 fs/ceph/mdsmap.c 			ceph_decode_need(p, end, 1 + sizeof(u32), bad);
p                 166 fs/ceph/mdsmap.c 			info_cv = ceph_decode_8(p);
p                 167 fs/ceph/mdsmap.c 			info_len = ceph_decode_32(p);
p                 168 fs/ceph/mdsmap.c 			info_end = *p + info_len;
p                 173 fs/ceph/mdsmap.c 		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
p                 174 fs/ceph/mdsmap.c 		*p += sizeof(u64);
p                 175 fs/ceph/mdsmap.c 		namelen = ceph_decode_32(p);  /* skip mds name */
p                 176 fs/ceph/mdsmap.c 		*p += namelen;
p                 178 fs/ceph/mdsmap.c 		ceph_decode_need(p, end,
p                 182 fs/ceph/mdsmap.c 		mds = ceph_decode_32(p);
p                 183 fs/ceph/mdsmap.c 		inc = ceph_decode_32(p);
p                 184 fs/ceph/mdsmap.c 		state = ceph_decode_32(p);
p                 185 fs/ceph/mdsmap.c 		state_seq = ceph_decode_64(p);
p                 186 fs/ceph/mdsmap.c 		err = ceph_decode_entity_addr(p, end, &addr);
p                 189 fs/ceph/mdsmap.c 		ceph_decode_copy(p, &laggy_since, sizeof(laggy_since));
p                 190 fs/ceph/mdsmap.c 		*p += sizeof(u32);
p                 191 fs/ceph/mdsmap.c 		ceph_decode_32_safe(p, end, namelen, bad);
p                 192 fs/ceph/mdsmap.c 		*p += namelen;
p                 194 fs/ceph/mdsmap.c 			ceph_decode_32_safe(p, end, num_export_targets, bad);
p                 195 fs/ceph/mdsmap.c 			pexport_targets = *p;
p                 196 fs/ceph/mdsmap.c 			*p += num_export_targets * sizeof(u32);
p                 201 fs/ceph/mdsmap.c 		if (info_end && *p != info_end) {
p                 202 fs/ceph/mdsmap.c 			if (*p > info_end)
p                 204 fs/ceph/mdsmap.c 			*p = info_end;
p                 255 fs/ceph/mdsmap.c 	ceph_decode_32_safe(p, end, n, bad);
p                 260 fs/ceph/mdsmap.c 	ceph_decode_need(p, end, sizeof(u64)*(n+1), bad);
p                 262 fs/ceph/mdsmap.c 		m->m_data_pg_pools[i] = ceph_decode_64(p);
p                 263 fs/ceph/mdsmap.c 	m->m_cas_pg_pool = ceph_decode_64(p);
p                 268 fs/ceph/mdsmap.c 		ceph_decode_16_safe(p, end, mdsmap_ev, bad_ext);
p                 271 fs/ceph/mdsmap.c 		if (__decode_and_drop_compat_set(p, end) < 0)
p                 276 fs/ceph/mdsmap.c 		__decode_and_drop_type(p, end, u32, bad_ext);
p                 278 fs/ceph/mdsmap.c 		__decode_and_drop_type(p, end, u64, bad_ext);
p                 282 fs/ceph/mdsmap.c 	__decode_and_drop_type(p, end, struct ceph_timespec, bad_ext);
p                 283 fs/ceph/mdsmap.c 	__decode_and_drop_type(p, end, struct ceph_timespec, bad_ext);
p                 284 fs/ceph/mdsmap.c 	__decode_and_drop_type(p, end, u32, bad_ext);
p                 289 fs/ceph/mdsmap.c 		ceph_decode_32_safe(p, end, n, bad_ext);
p                 290 fs/ceph/mdsmap.c 		ceph_decode_need(p, end, sizeof(u32) * n, bad_ext);
p                 293 fs/ceph/mdsmap.c 			s32 mds = ceph_decode_32(p);
p                 313 fs/ceph/mdsmap.c 	__decode_and_drop_map(p, end, u32, u32, bad_ext);
p                 315 fs/ceph/mdsmap.c 	__decode_and_drop_map(p, end, u32, u64, bad_ext);
p                 317 fs/ceph/mdsmap.c 	__decode_and_drop_set(p, end, u32, bad_ext);
p                 319 fs/ceph/mdsmap.c 	__decode_and_drop_set(p, end, u32, bad_ext);
p                 323 fs/ceph/mdsmap.c 		__decode_and_drop_type(p, end, u32, bad_ext);
p                 327 fs/ceph/mdsmap.c 		__decode_and_drop_type(p, end, u8, bad_ext);
p                 329 fs/ceph/mdsmap.c 		__decode_and_drop_type(p, end, u8, bad_ext);
p                 333 fs/ceph/mdsmap.c 		__decode_and_drop_type(p, end, u8, bad_ext);
p                 338 fs/ceph/mdsmap.c 		ceph_decode_8_safe(p, end, m->m_enabled, bad_ext);
p                 339 fs/ceph/mdsmap.c 		ceph_decode_32_safe(p, end, name_len, bad_ext);
p                 340 fs/ceph/mdsmap.c 		ceph_decode_need(p, end, name_len, bad_ext);
p                 341 fs/ceph/mdsmap.c 		*p += name_len;
p                 346 fs/ceph/mdsmap.c 		ceph_decode_32_safe(p, end, n, bad_ext);
p                 348 fs/ceph/mdsmap.c 		ceph_decode_need(p, end, need, bad_ext);
p                 349 fs/ceph/mdsmap.c 		*p += need;
p                 355 fs/ceph/mdsmap.c 	*p = end;
p                  86 fs/ceph/snap.c 	struct rb_node **p = &root->rb_node;
p                  90 fs/ceph/snap.c 	while (*p) {
p                  91 fs/ceph/snap.c 		parent = *p;
p                  94 fs/ceph/snap.c 			p = &(*p)->rb_left;
p                  96 fs/ceph/snap.c 			p = &(*p)->rb_right;
p                 101 fs/ceph/snap.c 	rb_link_node(&new->node, parent, p);
p                 677 fs/ceph/snap.c 			   void *p, void *e, bool deletion,
p                 691 fs/ceph/snap.c 	ceph_decode_need(&p, e, sizeof(*ri), bad);
p                 692 fs/ceph/snap.c 	ri = p;
p                 693 fs/ceph/snap.c 	p += sizeof(*ri);
p                 694 fs/ceph/snap.c 	ceph_decode_need(&p, e, sizeof(u64)*(le32_to_cpu(ri->num_snaps) +
p                 696 fs/ceph/snap.c 	snaps = p;
p                 697 fs/ceph/snap.c 	p += sizeof(u64) * le32_to_cpu(ri->num_snaps);
p                 698 fs/ceph/snap.c 	prior_parent_snaps = p;
p                 699 fs/ceph/snap.c 	p += sizeof(u64) * le32_to_cpu(ri->num_prior_parent_snaps);
p                 750 fs/ceph/snap.c 	     realm, invalidate, p, e);
p                 753 fs/ceph/snap.c 	if (invalidate && p >= e)
p                 761 fs/ceph/snap.c 	if (p < e)
p                 852 fs/ceph/snap.c 	void *p = msg->front.iov_base;
p                 853 fs/ceph/snap.c 	void *e = p + msg->front.iov_len;
p                 863 fs/ceph/snap.c 	h = p;
p                 870 fs/ceph/snap.c 	p += sizeof(*h);
p                 891 fs/ceph/snap.c 		split_inos = p;
p                 892 fs/ceph/snap.c 		p += sizeof(u64) * num_split_inos;
p                 893 fs/ceph/snap.c 		split_realms = p;
p                 894 fs/ceph/snap.c 		p += sizeof(u64) * num_split_realms;
p                 895 fs/ceph/snap.c 		ceph_decode_need(&p, e, sizeof(*ri), bad);
p                 899 fs/ceph/snap.c 		ri = p;
p                 987 fs/ceph/snap.c 	ceph_update_snap_trace(mdsc, p, e,
p                1014 fs/ceph/snap.c 	struct rb_node **p, *parent;
p                1019 fs/ceph/snap.c 	p = &mdsc->snapid_map_tree.rb_node;
p                1020 fs/ceph/snap.c 	while (*p) {
p                1021 fs/ceph/snap.c 		exist = rb_entry(*p, struct ceph_snapid_map, node);
p                1023 fs/ceph/snap.c 			p = &(*p)->rb_left;
p                1025 fs/ceph/snap.c 			p = &(*p)->rb_right;
p                1055 fs/ceph/snap.c 	p = &mdsc->snapid_map_tree.rb_node;
p                1057 fs/ceph/snap.c 	while (*p) {
p                1058 fs/ceph/snap.c 		parent = *p;
p                1059 fs/ceph/snap.c 		exist = rb_entry(*p, struct ceph_snapid_map, node);
p                1061 fs/ceph/snap.c 			p = &(*p)->rb_left;
p                1063 fs/ceph/snap.c 			p = &(*p)->rb_right;
p                1072 fs/ceph/snap.c 		rb_link_node(&sm->node, parent, p);
p                1138 fs/ceph/snap.c 	struct rb_node *p;
p                1142 fs/ceph/snap.c 	while ((p = rb_first(&mdsc->snapid_map_tree))) {
p                1143 fs/ceph/snap.c 		sm = rb_entry(p, struct ceph_snapid_map, node);
p                1144 fs/ceph/snap.c 		rb_erase(p, &mdsc->snapid_map_tree);
p                1145 fs/ceph/snap.c 		RB_CLEAR_NODE(p);
p                 867 fs/ceph/super.h 				  void *p, void *e, bool deletion,
p                1070 fs/ceph/super.h extern int ceph_encode_inode_release(void **p, struct inode *inode,
p                1072 fs/ceph/super.h extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
p                 439 fs/ceph/xattr.c 	struct rb_node **p;
p                 445 fs/ceph/xattr.c 	p = &ci->i_xattrs.index.rb_node;
p                 446 fs/ceph/xattr.c 	while (*p) {
p                 447 fs/ceph/xattr.c 		parent = *p;
p                 451 fs/ceph/xattr.c 			p = &(*p)->rb_left;
p                 453 fs/ceph/xattr.c 			p = &(*p)->rb_right;
p                 458 fs/ceph/xattr.c 				p = &(*p)->rb_left;
p                 460 fs/ceph/xattr.c 				p = &(*p)->rb_right;
p                 521 fs/ceph/xattr.c 		rb_link_node(&xattr->node, parent, p);
p                 523 fs/ceph/xattr.c 		dout("__set_xattr_val p=%p\n", p);
p                 535 fs/ceph/xattr.c 	struct rb_node **p;
p                 541 fs/ceph/xattr.c 	p = &ci->i_xattrs.index.rb_node;
p                 542 fs/ceph/xattr.c 	while (*p) {
p                 543 fs/ceph/xattr.c 		parent = *p;
p                 549 fs/ceph/xattr.c 			p = &(*p)->rb_left;
p                 551 fs/ceph/xattr.c 			p = &(*p)->rb_right;
p                 600 fs/ceph/xattr.c 	struct rb_node *p;
p                 603 fs/ceph/xattr.c 	p = rb_first(&ci->i_xattrs.index);
p                 606 fs/ceph/xattr.c 	while (p) {
p                 607 fs/ceph/xattr.c 		xattr = rb_entry(p, struct ceph_inode_xattr, node);
p                 615 fs/ceph/xattr.c 		p = rb_next(p);
p                 623 fs/ceph/xattr.c 	struct rb_node *p, *tmp;
p                 626 fs/ceph/xattr.c 	p = rb_first(&ci->i_xattrs.index);
p                 628 fs/ceph/xattr.c 	dout("__ceph_destroy_xattrs p=%p\n", p);
p                 630 fs/ceph/xattr.c 	while (p) {
p                 631 fs/ceph/xattr.c 		xattr = rb_entry(p, struct ceph_inode_xattr, node);
p                 632 fs/ceph/xattr.c 		tmp = p;
p                 633 fs/ceph/xattr.c 		p = rb_next(tmp);
p                 634 fs/ceph/xattr.c 		dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
p                 654 fs/ceph/xattr.c 	void *p, *end;
p                 674 fs/ceph/xattr.c 		p = ci->i_xattrs.blob->vec.iov_base;
p                 675 fs/ceph/xattr.c 		end = p + ci->i_xattrs.blob->vec.iov_len;
p                 676 fs/ceph/xattr.c 		ceph_decode_32_safe(&p, end, numattr, bad);
p                 704 fs/ceph/xattr.c 			ceph_decode_32_safe(&p, end, len, bad);
p                 706 fs/ceph/xattr.c 			name = p;
p                 707 fs/ceph/xattr.c 			p += len;
p                 708 fs/ceph/xattr.c 			ceph_decode_32_safe(&p, end, len, bad);
p                 709 fs/ceph/xattr.c 			val = p;
p                 710 fs/ceph/xattr.c 			p += len;
p                 764 fs/ceph/xattr.c 	struct rb_node *p;
p                 775 fs/ceph/xattr.c 		p = rb_first(&ci->i_xattrs.index);
p                 779 fs/ceph/xattr.c 		while (p) {
p                 780 fs/ceph/xattr.c 			xattr = rb_entry(p, struct ceph_inode_xattr, node);
p                 789 fs/ceph/xattr.c 			p = rb_next(p);
p                 348 fs/char_dev.c  static struct kobject *cdev_get(struct cdev *p)
p                 350 fs/char_dev.c  	struct module *owner = p->owner;
p                 355 fs/char_dev.c  	kobj = kobject_get_unless_zero(&p->kobj);
p                 361 fs/char_dev.c  void cdev_put(struct cdev *p)
p                 363 fs/char_dev.c  	if (p) {
p                 364 fs/char_dev.c  		struct module *owner = p->owner;
p                 365 fs/char_dev.c  		kobject_put(&p->kobj);
p                 376 fs/char_dev.c  	struct cdev *p;
p                 381 fs/char_dev.c  	p = inode->i_cdev;
p                 382 fs/char_dev.c  	if (!p) {
p                 393 fs/char_dev.c  		p = inode->i_cdev;
p                 394 fs/char_dev.c  		if (!p) {
p                 395 fs/char_dev.c  			inode->i_cdev = p = new;
p                 396 fs/char_dev.c  			list_add(&inode->i_devices, &p->list);
p                 398 fs/char_dev.c  		} else if (!cdev_get(p))
p                 400 fs/char_dev.c  	} else if (!cdev_get(p))
p                 408 fs/char_dev.c  	fops = fops_get(p->ops);
p                 422 fs/char_dev.c  	cdev_put(p);
p                 459 fs/char_dev.c  	struct cdev *p = data;
p                 460 fs/char_dev.c  	return &p->kobj;
p                 465 fs/char_dev.c  	struct cdev *p = data;
p                 466 fs/char_dev.c  	return cdev_get(p) ? 0 : -1;
p                 479 fs/char_dev.c  int cdev_add(struct cdev *p, dev_t dev, unsigned count)
p                 483 fs/char_dev.c  	p->dev = dev;
p                 484 fs/char_dev.c  	p->count = count;
p                 487 fs/char_dev.c  			 exact_match, exact_lock, p);
p                 491 fs/char_dev.c  	kobject_get(p->kobj.parent);
p                 505 fs/char_dev.c  void cdev_set_parent(struct cdev *p, struct kobject *kobj)
p                 508 fs/char_dev.c  	p->kobj.parent = kobj;
p                 591 fs/char_dev.c  void cdev_del(struct cdev *p)
p                 593 fs/char_dev.c  	cdev_unmap(p->dev, p->count);
p                 594 fs/char_dev.c  	kobject_put(&p->kobj);
p                 600 fs/char_dev.c  	struct cdev *p = container_of(kobj, struct cdev, kobj);
p                 603 fs/char_dev.c  	cdev_purge(p);
p                 609 fs/char_dev.c  	struct cdev *p = container_of(kobj, struct cdev, kobj);
p                 612 fs/char_dev.c  	cdev_purge(p);
p                 613 fs/char_dev.c  	kfree(p);
p                 632 fs/char_dev.c  	struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
p                 633 fs/char_dev.c  	if (p) {
p                 634 fs/char_dev.c  		INIT_LIST_HEAD(&p->list);
p                 635 fs/char_dev.c  		kobject_init(&p->kobj, &ktype_cdev_dynamic);
p                 637 fs/char_dev.c  	return p;
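cdev_get()/cdev_put() above pair a reference grab with a drop that may free the object; the real code additionally pins the owning module and goes through kobject reference counts. Below is a simplified standalone sketch of the get/put lifetime rule using a plain atomic counter; obj_get()/obj_put() are invented names.

#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>

/*
 * The object lives as long as its reference count is non-zero and is
 * freed by whichever caller drops the last reference.
 */
struct obj {
        atomic_int refcount;
        int id;
};

static struct obj *obj_alloc(int id)
{
        struct obj *p = calloc(1, sizeof(*p));

        if (p) {
                atomic_init(&p->refcount, 1);
                p->id = id;
        }
        return p;
}

static struct obj *obj_get(struct obj *p)
{
        atomic_fetch_add(&p->refcount, 1);
        return p;
}

static void obj_put(struct obj *p)
{
        if (p && atomic_fetch_sub(&p->refcount, 1) == 1) {
                printf("freeing obj %d\n", p->id);
                free(p);
        }
}

int main(void)
{
        struct obj *p = obj_alloc(7);

        obj_get(p);     /* second user */
        obj_put(p);     /* first put: object stays alive */
        obj_put(p);     /* last put: object is freed */
        return 0;
}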
p                 714 fs/cifs/cifsfs.c 	char *s, *p;
p                 729 fs/cifs/cifsfs.c 	p = s = full_path;
p                 751 fs/cifs/cifsfs.c 		p = s++;
p                 756 fs/cifs/cifsfs.c 		child = lookup_one_len_unlocked(p, dentry, s - p);
p                 489 fs/cifs/cifsglob.h 				unsigned long p);
p                1155 fs/cifs/connect.c cifs_demultiplex_thread(void *p)
p                1158 fs/cifs/connect.c 	struct TCP_Server_Info *server = p;
p                5156 fs/cifs/connect.c static void delayed_free(struct rcu_head *p)
p                5158 fs/cifs/connect.c 	struct cifs_sb_info *sbi = container_of(p, struct cifs_sb_info, rcu);
p                  39 fs/cifs/ioctl.c 				  unsigned long p)
p                  69 fs/cifs/ioctl.c 				filep->private_data ? 0 : 1, p);
p                 428 fs/cifs/smb2ops.c 	struct network_interface_info_ioctl_rsp *p;
p                 447 fs/cifs/smb2ops.c 	p = buf;
p                 448 fs/cifs/smb2ops.c 	while (bytes_left >= sizeof(*p)) {
p                 450 fs/cifs/smb2ops.c 		next = le32_to_cpu(p->Next);
p                 452 fs/cifs/smb2ops.c 			bytes_left -= sizeof(*p);
p                 455 fs/cifs/smb2ops.c 		p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
p                 465 fs/cifs/smb2ops.c 	if (bytes_left || p->Next)
p                 481 fs/cifs/smb2ops.c 	p = buf;
p                 482 fs/cifs/smb2ops.c 	while (bytes_left >= sizeof(*p)) {
p                 483 fs/cifs/smb2ops.c 		info->speed = le64_to_cpu(p->LinkSpeed);
p                 484 fs/cifs/smb2ops.c 		info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
p                 485 fs/cifs/smb2ops.c 		info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
p                 490 fs/cifs/smb2ops.c 			 le32_to_cpu(p->Capability));
p                 492 fs/cifs/smb2ops.c 		switch (p->Family) {
p                 500 fs/cifs/smb2ops.c 			p4 = (struct iface_info_ipv4 *)p->Buffer;
p                 512 fs/cifs/smb2ops.c 			p6 = (struct iface_info_ipv6 *)p->Buffer;
p                 534 fs/cifs/smb2ops.c 		next = le32_to_cpu(p->Next);
p                 537 fs/cifs/smb2ops.c 		p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
p                1397 fs/cifs/smb2ops.c 		      unsigned long p)
p                1400 fs/cifs/smb2ops.c 	char __user *arg = (char __user *)p;
p                  54 fs/cifs/smb2transport.c 	struct cifs_secmech *p = &server->secmech;
p                  58 fs/cifs/smb2transport.c 			     &p->hmacsha256,
p                  59 fs/cifs/smb2transport.c 			     &p->sdeschmacsha256);
p                  63 fs/cifs/smb2transport.c 	rc = cifs_alloc_hash("cmac(aes)", &p->cmacaes, &p->sdesccmacaes);
p                  69 fs/cifs/smb2transport.c 	cifs_free_hash(&p->hmacsha256, &p->sdeschmacsha256);
p                  76 fs/cifs/smb2transport.c 	struct cifs_secmech *p = &server->secmech;
p                  80 fs/cifs/smb2transport.c 			     &p->hmacsha256,
p                  81 fs/cifs/smb2transport.c 			     &p->sdeschmacsha256);
p                  85 fs/cifs/smb2transport.c 	rc = cifs_alloc_hash("cmac(aes)", &p->cmacaes, &p->sdesccmacaes);
p                  89 fs/cifs/smb2transport.c 	rc = cifs_alloc_hash("sha512", &p->sha512, &p->sdescsha512);
p                  96 fs/cifs/smb2transport.c 	cifs_free_hash(&p->cmacaes, &p->sdesccmacaes);
p                  97 fs/cifs/smb2transport.c 	cifs_free_hash(&p->hmacsha256, &p->sdeschmacsha256);
p                  29 fs/coda/symlink.c 	char *p = page_address(page);
p                  33 fs/coda/symlink.c 	error = venus_readlink(inode->i_sb, &cii->c_fid, p, &len);
p                 490 fs/compat_ioctl.c 	struct space_resv	__user *p = compat_alloc_user_space(sizeof(*p));
p                 492 fs/compat_ioctl.c 	if (copy_in_user(&p->l_type,	&p32->l_type,	sizeof(s16)) ||
p                 493 fs/compat_ioctl.c 	    copy_in_user(&p->l_whence,	&p32->l_whence, sizeof(s16)) ||
p                 494 fs/compat_ioctl.c 	    copy_in_user(&p->l_start,	&p32->l_start,	sizeof(s64)) ||
p                 495 fs/compat_ioctl.c 	    copy_in_user(&p->l_len,	&p32->l_len,	sizeof(s64)) ||
p                 496 fs/compat_ioctl.c 	    copy_in_user(&p->l_sysid,	&p32->l_sysid,	sizeof(s32)) ||
p                 497 fs/compat_ioctl.c 	    copy_in_user(&p->l_pid,	&p32->l_pid,	sizeof(u32)) ||
p                 498 fs/compat_ioctl.c 	    copy_in_user(&p->l_pad,	&p32->l_pad,	4*sizeof(u32)))
p                 501 fs/compat_ioctl.c 	return ioctl_preallocate(file, p);
p                1079 fs/compat_ioctl.c static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
p                1082 fs/compat_ioctl.c 	a = *(unsigned int *)p;
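init_sys32_ioctl_cmp() above is a comparison callback over unsigned ioctl command numbers, used when sorting and then binary-searching the translation table. A standalone sketch of the same callback shape with qsort()/bsearch() follows; comparing via (a > b) - (a < b) avoids unsigned-subtraction surprises.

#include <stdio.h>
#include <stdlib.h>

static int cmd_cmp(const void *p, const void *q)
{
        unsigned int a = *(const unsigned int *)p;
        unsigned int b = *(const unsigned int *)q;

        return (a > b) - (a < b);
}

int main(void)
{
        unsigned int cmds[] = { 0x5421, 0x125d, 0x894c };
        unsigned int key = 0x894c;
        unsigned int *hit;

        qsort(cmds, 3, sizeof(cmds[0]), cmd_cmp);
        hit = bsearch(&key, cmds, 3, sizeof(cmds[0]), cmd_cmp);
        printf("found: %s\n", hit ? "yes" : "no");
        return 0;
}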
p                 155 fs/configfs/dir.c 	struct configfs_fragment *p;
p                 157 fs/configfs/dir.c 	p = kmalloc(sizeof(struct configfs_fragment), GFP_KERNEL);
p                 158 fs/configfs/dir.c 	if (p) {
p                 159 fs/configfs/dir.c 		atomic_set(&p->frag_count, 1);
p                 160 fs/configfs/dir.c 		init_rwsem(&p->frag_sem);
p                 161 fs/configfs/dir.c 		p->frag_dead = false;
p                 163 fs/configfs/dir.c 	return p;
p                 280 fs/configfs/dir.c 	struct dentry *p = dentry->d_parent;
p                 285 fs/configfs/dir.c 	error = configfs_dirent_exists(p->d_fsdata, dentry->d_name.name);
p                 289 fs/configfs/dir.c 	error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
p                 295 fs/configfs/dir.c 	configfs_set_dir_dirent_depth(p->d_fsdata, dentry->d_fsdata);
p                 307 fs/configfs/dir.c 	inc_nlink(d_inode(p));
p                 359 fs/configfs/dir.c 	struct configfs_dirent *p = parent->d_fsdata;
p                 362 fs/configfs/dir.c 	err = configfs_make_dirent(p, dentry, target, mode, CONFIGFS_ITEM_LINK,
p                 363 fs/configfs/dir.c 			p->s_frag);
p                1083 fs/configfs/dir.c 	struct configfs_dirent *p;
p                1096 fs/configfs/dir.c 	p = target->ci_dentry->d_fsdata;
p                1097 fs/configfs/dir.c 	p->s_dependent_count += 1;
p                1109 fs/configfs/dir.c 	struct configfs_dirent *p;
p                1112 fs/configfs/dir.c 	list_for_each_entry(p, &root_sd->s_children, s_sibling) {
p                1113 fs/configfs/dir.c 		if (p->s_type & CONFIGFS_DIR &&
p                1114 fs/configfs/dir.c 		    p->s_element == subsys_item) {
p                1115 fs/configfs/dir.c 			ret = p;
p                1629 fs/configfs/dir.c 	struct list_head *p, *q = &cursor->s_sibling;
p                1637 fs/configfs/dir.c 	for (p = q->next; p != &parent_sd->s_children; p = p->next) {
p                1643 fs/configfs/dir.c 		next = list_entry(p, struct configfs_dirent, s_sibling);
p                1676 fs/configfs/dir.c 		list_move(q, p);
p                1677 fs/configfs/dir.c 		p = q;
p                1704 fs/configfs/dir.c 			struct list_head *p;
p                1709 fs/configfs/dir.c 			p = sd->s_children.next;
p                1710 fs/configfs/dir.c 			while (n && p != &sd->s_children) {
p                1712 fs/configfs/dir.c 				next = list_entry(p, struct configfs_dirent,
p                1716 fs/configfs/dir.c 				p = p->next;
p                1718 fs/configfs/dir.c 			list_add_tail(&cursor->s_sibling, p);
p                  26 fs/configfs/symlink.c 	struct config_item * p = item;
p                  28 fs/configfs/symlink.c 	do { depth++; } while ((p = p->ci_parent) && !configfs_is_root(p));
p                  34 fs/configfs/symlink.c 	struct config_item * p = item;
p                  37 fs/configfs/symlink.c 		length += strlen(config_item_name(p)) + 1;
p                  38 fs/configfs/symlink.c 		p = p->ci_parent;
p                  39 fs/configfs/symlink.c 	} while (p && !configfs_is_root(p));
p                  45 fs/configfs/symlink.c 	struct config_item * p;
p                  48 fs/configfs/symlink.c 	for (p = item; p && !configfs_is_root(p); p = p->ci_parent) {
p                  49 fs/configfs/symlink.c 		int cur = strlen(config_item_name(p));
p                  53 fs/configfs/symlink.c 		memcpy(buffer + length, config_item_name(p), cur);
p                 365 fs/coredump.c  	struct task_struct *g, *p;
p                 420 fs/coredump.c  		for_each_thread(g, p) {
p                 421 fs/coredump.c  			if (unlikely(!p->mm))
p                 423 fs/coredump.c  			if (unlikely(p->mm == mm)) {
p                 424 fs/coredump.c  				lock_task_sighand(p, &flags);
p                 425 fs/coredump.c  				nr += zap_process(p, exit_code,
p                 427 fs/coredump.c  				unlock_task_sighand(p, &flags);
p                 161 fs/crypto/fname.c 	const char *p;
p                 165 fs/crypto/fname.c 		p = strchr(lookup_table, src[i]);
p                 166 fs/crypto/fname.c 		if (p == NULL || src[i] == 0)
p                 168 fs/crypto/fname.c 		ac += (p - lookup_table) << bits;
p                  42 fs/d_path.c    	char *p;
p                  47 fs/d_path.c    	p = *buffer -= dlen + 1;
p                  48 fs/d_path.c    	*p++ = '/';
p                  53 fs/d_path.c    		*p++ = c;
p                 378 fs/d_path.c    	char *p = NULL;
p                 382 fs/d_path.c    		p = buf + buflen;
p                 383 fs/d_path.c    		if (prepend(&p, &buflen, "//deleted", 10) != 0)
p                 388 fs/d_path.c    	if (!IS_ERR(retval) && p)
p                 389 fs/d_path.c    	*p = '/';	/* restore '/' overridden with '\0' */
p                 304 fs/dcache.c    		struct external_name *p;
p                 305 fs/dcache.c    		p = container_of(name->name.name, struct external_name, name[0]);
p                 306 fs/dcache.c    		if (unlikely(atomic_dec_and_test(&p->u.count)))
p                 307 fs/dcache.c    			kfree_rcu(p, u.head);
p                 340 fs/dcache.c    		struct external_name *p = external_name(dentry);
p                 341 fs/dcache.c    		if (likely(atomic_dec_and_test(&p->u.count))) {
p                1422 fs/dcache.c    	struct dentry *p;
p                1425 fs/dcache.c    	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
p                1427 fs/dcache.c    		spin_lock(&p->d_lock);
p                1428 fs/dcache.c    		if (unlikely(d_unhashed(p))) {
p                1429 fs/dcache.c    			spin_unlock(&p->d_lock);
p                1432 fs/dcache.c    		spin_unlock(&p->d_lock);
p                1704 fs/dcache.c    		struct external_name *p = kmalloc(size + name->len,
p                1707 fs/dcache.c    		if (!p) {
p                1711 fs/dcache.c    		atomic_set(&p->u.count, 1);
p                1712 fs/dcache.c    		dname = p->name;
p                2805 fs/dcache.c    	struct dentry *old_parent, *p;
p                2815 fs/dcache.c    	p = d_ancestor(old_parent, target);
p                2817 fs/dcache.c    		BUG_ON(p);
p                2819 fs/dcache.c    	} else if (!p) {
p                2824 fs/dcache.c    		BUG_ON(p == dentry);
p                2826 fs/dcache.c    		if (p != target)
p                2928 fs/dcache.c    	struct dentry *p;
p                2930 fs/dcache.c    	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
p                2931 fs/dcache.c    		if (p->d_parent == p1)
p                2932 fs/dcache.c    			return p;
p                 107 fs/debugfs/inode.c 	char *p;
p                 111 fs/debugfs/inode.c 	while ((p = strsep(&data, ",")) != NULL) {
p                 112 fs/debugfs/inode.c 		if (!*p)
p                 115 fs/debugfs/inode.c 		token = match_token(p, tokens, args);
p                 248 fs/devpts/inode.c 	char *p;
p                 267 fs/devpts/inode.c 	while ((p = strsep(&data, ",")) != NULL) {
p                 272 fs/devpts/inode.c 		if (!*p)
p                 275 fs/devpts/inode.c 		token = match_token(p, tokens, args);
p                5021 fs/dlm/lock.c  void dlm_receive_buffer(union dlm_packet *p, int nodeid)
p                5023 fs/dlm/lock.c  	struct dlm_header *hd = &p->header;
p                5029 fs/dlm/lock.c  		dlm_message_in(&p->message);
p                5030 fs/dlm/lock.c  		type = p->message.m_type;
p                5033 fs/dlm/lock.c  		dlm_rcom_in(&p->rcom);
p                5034 fs/dlm/lock.c  		type = p->rcom.rc_type;
p                5056 fs/dlm/lock.c  			dlm_send_ls_not_ready(nodeid, &p->rcom);
p                5065 fs/dlm/lock.c  		dlm_receive_message(ls, &p->message, nodeid);
p                5067 fs/dlm/lock.c  		dlm_receive_rcom(ls, &p->rcom, nodeid);
p                  19 fs/dlm/lock.h  void dlm_receive_buffer(union dlm_packet *p, int nodeid);
p                 302 fs/dlm/lockspace.c 	struct task_struct *p;
p                 305 fs/dlm/lockspace.c 	p = kthread_run(dlm_scand, NULL, "dlm_scand");
p                 306 fs/dlm/lockspace.c 	if (IS_ERR(p))
p                 307 fs/dlm/lockspace.c 		error = PTR_ERR(p);
p                 309 fs/dlm/lockspace.c 		scand_task = p;
p                 722 fs/dlm/lockspace.c static int lkb_idr_is_local(int id, void *p, void *data)
p                 724 fs/dlm/lockspace.c 	struct dlm_lkb *lkb = p;
p                 729 fs/dlm/lockspace.c static int lkb_idr_is_any(int id, void *p, void *data)
p                 734 fs/dlm/lockspace.c static int lkb_idr_free(int id, void *p, void *data)
p                 736 fs/dlm/lockspace.c 	struct dlm_lkb *lkb = p;
p                  45 fs/dlm/memory.c 	char *p;
p                  47 fs/dlm/memory.c 	p = kzalloc(ls->ls_lvblen, GFP_NOFS);
p                  48 fs/dlm/memory.c 	return p;
p                  51 fs/dlm/memory.c void dlm_free_lvb(char *p)
p                  53 fs/dlm/memory.c 	kfree(p);
p                  62 fs/dlm/midcomms.c 		union dlm_packet p;
p                  64 fs/dlm/midcomms.c 	union dlm_packet *p = &__tmp.p;
p                  76 fs/dlm/midcomms.c 		copy_from_cb(p, base, offset, sizeof(struct dlm_header),
p                  79 fs/dlm/midcomms.c 		msglen = le16_to_cpu(p->header.h_length);
p                  80 fs/dlm/midcomms.c 		lockspace = p->header.h_lockspace;
p                  85 fs/dlm/midcomms.c 		if (p->header.h_cmd == DLM_MSG) {
p                 112 fs/dlm/midcomms.c 		if (msglen > sizeof(__tmp) && p == &__tmp.p) {
p                 113 fs/dlm/midcomms.c 			p = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS);
p                 114 fs/dlm/midcomms.c 			if (p == NULL)
p                 118 fs/dlm/midcomms.c 		copy_from_cb(p, base, offset, msglen, limit);
p                 120 fs/dlm/midcomms.c 		BUG_ON(lockspace != p->header.h_lockspace);
p                 127 fs/dlm/midcomms.c 		dlm_receive_buffer(p, nodeid);
p                 130 fs/dlm/midcomms.c 	if (p != &__tmp.p)
p                 131 fs/dlm/midcomms.c 		kfree(p);
p                 326 fs/dlm/recoverd.c 	struct task_struct *p;
p                 329 fs/dlm/recoverd.c 	p = kthread_run(dlm_recoverd, ls, "dlm_recoverd");
p                 330 fs/dlm/recoverd.c 	if (IS_ERR(p))
p                 331 fs/dlm/recoverd.c 		error = PTR_ERR(p);
p                 333 fs/dlm/recoverd.c                 ls->ls_recoverd_task = p;
p                  68 fs/ecryptfs/dentry.c 	struct ecryptfs_dentry_info *p = dentry->d_fsdata;
p                  69 fs/ecryptfs/dentry.c 	if (p) {
p                  70 fs/ecryptfs/dentry.c 		path_put(&p->lower_path);
p                  71 fs/ecryptfs/dentry.c 		call_rcu(&p->rcu, ecryptfs_dentry_free_rcu);
p                 245 fs/ecryptfs/main.c 	char *p;
p                 276 fs/ecryptfs/main.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 277 fs/ecryptfs/main.c 		if (!*p)
p                 279 fs/ecryptfs/main.c 		token = match_token(p, tokens, args);
p                 382 fs/ecryptfs/main.c 			       __func__, p);
p                  40 fs/ecryptfs/messaging.c 	struct list_head *p;
p                  52 fs/ecryptfs/messaging.c 	list_for_each(p, &ecryptfs_msg_ctx_free_list) {
p                  53 fs/ecryptfs/messaging.c 		*msg_ctx = list_entry(p, struct ecryptfs_msg_ctx, node);
p                 172 fs/efivarfs/file.c efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p)
p                 174 fs/efivarfs/file.c 	void __user *arg = (void __user *)p;
p                 234 fs/erofs/super.c 	char *p;
p                 240 fs/erofs/super.c 	while ((p = strsep(&options, ","))) {
p                 243 fs/erofs/super.c 		if (!*p)
p                 247 fs/erofs/super.c 		token = match_token(p, erofs_tokens, args);
p                 286 fs/erofs/super.c 			erofs_err(sb, "Unrecognized mount option \"%s\" or missing value", p);
p                 266 fs/erofs/utils.c 	struct list_head *p;
p                 278 fs/erofs/utils.c 	p = erofs_sb_list.next;
p                 279 fs/erofs/utils.c 	while (p != &erofs_sb_list) {
p                 280 fs/erofs/utils.c 		sbi = list_entry(p, struct erofs_sb_info, list);
p                 290 fs/erofs/utils.c 			p = p->next;
p                 301 fs/erofs/utils.c 		p = p->next;
p                 151 fs/erofs/zdata.h 	unsigned long *p, o, v, id;
p                 153 fs/erofs/zdata.h 	p = &page_private(page);
p                 154 fs/erofs/zdata.h 	o = READ_ONCE(*p);
p                 166 fs/erofs/zdata.h 	if (cmpxchg(p, o, v) != o)
p                 340 fs/eventpoll.c static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p)
p                 342 fs/eventpoll.c 	return container_of(p, struct eppoll_entry, wait);
p                 346 fs/eventpoll.c static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p)
p                 348 fs/eventpoll.c 	return container_of(p, struct eppoll_entry, wait)->base;
p                 352 fs/eventpoll.c static inline struct epitem *ep_item_from_epqueue(poll_table *p)
p                 354 fs/eventpoll.c 	return container_of(p, struct ep_pqueue, pt)->epi;
p                 385 fs/eventpoll.c static bool ep_busy_loop_end(void *p, unsigned long start_time)
p                 387 fs/eventpoll.c 	struct eventpoll *ep = p;
p                1341 fs/eventpoll.c 	struct rb_node **p = &ep->rbr.rb_root.rb_node, *parent = NULL;
p                1345 fs/eventpoll.c 	while (*p) {
p                1346 fs/eventpoll.c 		parent = *p;
p                1350 fs/eventpoll.c 			p = &parent->rb_right;
p                1353 fs/eventpoll.c 			p = &parent->rb_left;
p                1355 fs/eventpoll.c 	rb_link_node(&epi->rbn, parent, p);
p                 278 fs/exec.c      	bprm->p = vma->vm_end - sizeof(void *);
p                 342 fs/exec.c      	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
p                 348 fs/exec.c      	return len <= bprm->p;
p                 431 fs/exec.c      			const char __user *p = get_user_arg_ptr(argv, i);
p                 433 fs/exec.c      			if (!p)
p                 436 fs/exec.c      			if (IS_ERR(p))
p                 491 fs/exec.c      	bprm->argmin = bprm->p - limit;
p                 527 fs/exec.c      		pos = bprm->p;
p                 529 fs/exec.c      		bprm->p -= len;
p                 531 fs/exec.c      		if (bprm->p < bprm->argmin)
p                 722 fs/exec.c      	mm->arg_start = bprm->p - stack_shift;
p                 723 fs/exec.c      	bprm->p = vma->vm_end - stack_shift;
p                 734 fs/exec.c      	bprm->p -= stack_shift;
p                 735 fs/exec.c      	mm->arg_start = bprm->p;
p                 793 fs/exec.c      	current->mm->start_stack = bprm->p;
p                 816 fs/exec.c      	stop = bprm->p >> PAGE_SHIFT;
p                 820 fs/exec.c      		unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
p                1486 fs/exec.c      	struct task_struct *p = current, *t;
p                1489 fs/exec.c      	if (p->ptrace)
p                1499 fs/exec.c      	t = p;
p                1501 fs/exec.c      	spin_lock(&p->fs->lock);
p                1503 fs/exec.c      	while_each_thread(p, t) {
p                1504 fs/exec.c      		if (t->fs == p->fs)
p                1509 fs/exec.c      	if (p->fs->users > n_fs)
p                1512 fs/exec.c      		p->fs->in_exec = 1;
p                1513 fs/exec.c      	spin_unlock(&p->fs->lock);
p                1609 fs/exec.c      		offset = bprm->p & ~PAGE_MASK;
p                1610 fs/exec.c      		page = get_arg_page(bprm, bprm->p, 0);
p                1618 fs/exec.c      				offset++, bprm->p++)
p                1625 fs/exec.c      	bprm->p++;
p                1813 fs/exec.c      	bprm->exec = bprm->p;
p                 401 fs/exportfs/expfs.c 	struct dentry *p = NULL;
p                 405 fs/exportfs/expfs.c 		p = dget_parent(dentry);
p                 410 fs/exportfs/expfs.c 		parent = p->d_inode;
p                 414 fs/exportfs/expfs.c 	dput(p);
p                 338 fs/ext2/balloc.c 	struct rb_node ** p = &root->rb_node;
p                 342 fs/ext2/balloc.c 	while (*p)
p                 344 fs/ext2/balloc.c 		parent = *p;
p                 348 fs/ext2/balloc.c 			p = &(*p)->rb_left;
p                 350 fs/ext2/balloc.c 			p = &(*p)->rb_right;
p                 357 fs/ext2/balloc.c 	rb_link_node(node, parent, p);
p                 606 fs/ext2/balloc.c 	char *p, *r;
p                 630 fs/ext2/balloc.c 	p = ((char *)bh->b_data) + (here >> 3);
p                 631 fs/ext2/balloc.c 	r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
p                 124 fs/ext2/dir.c  	ext2_dirent *p;
p                 135 fs/ext2/dir.c  		p = (ext2_dirent *)(kaddr + offs);
p                 136 fs/ext2/dir.c  		rec_len = ext2_rec_len_from_disk(p->rec_len);
p                 142 fs/ext2/dir.c  		if (unlikely(rec_len < EXT2_DIR_REC_LEN(p->name_len)))
p                 146 fs/ext2/dir.c  		if (unlikely(le32_to_cpu(p->inode) > max_inumber))
p                 182 fs/ext2/dir.c  			(unsigned long) le32_to_cpu(p->inode),
p                 183 fs/ext2/dir.c  			rec_len, p->name_len);
p                 187 fs/ext2/dir.c  		p = (ext2_dirent *)(kaddr + offs);
p                 192 fs/ext2/dir.c  			(unsigned long) le32_to_cpu(p->inode));
p                 236 fs/ext2/dir.c  static inline ext2_dirent *ext2_next_entry(ext2_dirent *p)
p                 238 fs/ext2/dir.c  	return (ext2_dirent *)((char *)p +
p                 239 fs/ext2/dir.c  			ext2_rec_len_from_disk(p->rec_len));
p                 246 fs/ext2/dir.c  	ext2_dirent *p = (ext2_dirent*)(base + (offset&mask));
p                 247 fs/ext2/dir.c  	while ((char*)p < (char*)de) {
p                 248 fs/ext2/dir.c  		if (p->rec_len == 0)
p                 250 fs/ext2/dir.c  		p = ext2_next_entry(p);
p                 252 fs/ext2/dir.c  	return (char *)p - base;
p                 405 fs/ext2/dir.c  struct ext2_dir_entry_2 * ext2_dotdot (struct inode *dir, struct page **p)
p                 412 fs/ext2/dir.c  		*p = page;
p                 115 fs/ext2/inode.c 	__le32	*p;
p                 120 fs/ext2/inode.c static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
p                 122 fs/ext2/inode.c 	p->key = *(p->p = v);
p                 123 fs/ext2/inode.c 	p->bh = bh;
p                 128 fs/ext2/inode.c 	while (from <= to && from->key == *from->p)
p                 241 fs/ext2/inode.c 	Indirect *p = chain;
p                 247 fs/ext2/inode.c 	if (!p->key)
p                 250 fs/ext2/inode.c 		bh = sb_bread(sb, le32_to_cpu(p->key));
p                 254 fs/ext2/inode.c 		if (!verify_chain(chain, p))
p                 256 fs/ext2/inode.c 		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
p                 258 fs/ext2/inode.c 		if (!p->key)
p                 271 fs/ext2/inode.c 	return p;
p                 298 fs/ext2/inode.c 	__le32 *p;
p                 303 fs/ext2/inode.c 	for (p = ind->p - 1; p >= start; p--)
p                 304 fs/ext2/inode.c 		if (*p)
p                 305 fs/ext2/inode.c 			return le32_to_cpu(*p);
p                 382 fs/ext2/inode.c 		&& le32_to_cpu(*(branch[0].p + count)) == 0) {
p                 513 fs/ext2/inode.c 		branch[n].p = (__le32 *) bh->b_data + offsets[n];
p                 515 fs/ext2/inode.c 		*branch[n].p = branch[n].key;
p                 524 fs/ext2/inode.c 				*(branch[n].p + i) = cpu_to_le32(++current_block);
p                 572 fs/ext2/inode.c 	*where->p = where->key;
p                 581 fs/ext2/inode.c 			*(where->p + i ) = cpu_to_le32(current_block++);
p                 668 fs/ext2/inode.c 			blk = le32_to_cpu(*(chain[depth-1].p + count));
p                1006 fs/ext2/inode.c static inline int all_zeroes(__le32 *p, __le32 *q)
p                1008 fs/ext2/inode.c 	while (p < q)
p                1009 fs/ext2/inode.c 		if (*p++)
p                1054 fs/ext2/inode.c 	Indirect *partial, *p;
p                1068 fs/ext2/inode.c 	if (!partial->key && *partial->p) {
p                1072 fs/ext2/inode.c 	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
p                1080 fs/ext2/inode.c 	if (p == chain + k - 1 && p > chain) {
p                1081 fs/ext2/inode.c 		p->p--;
p                1083 fs/ext2/inode.c 		*top = *p->p;
p                1084 fs/ext2/inode.c 		*p->p = 0;
p                1088 fs/ext2/inode.c 	while(partial > p)
p                1107 fs/ext2/inode.c static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
p                1112 fs/ext2/inode.c 	for ( ; p < q ; p++) {
p                1113 fs/ext2/inode.c 		nr = le32_to_cpu(*p);
p                1115 fs/ext2/inode.c 			*p = 0;
p                1147 fs/ext2/inode.c static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
p                1154 fs/ext2/inode.c 		for ( ; p < q ; p++) {
p                1155 fs/ext2/inode.c 			nr = le32_to_cpu(*p);
p                1158 fs/ext2/inode.c 			*p = 0;
p                1179 fs/ext2/inode.c 		ext2_free_data(inode, p, q);
p                1230 fs/ext2/inode.c 				   partial->p + 1,
p                1329 fs/ext2/inode.c 					struct buffer_head **p)
p                1337 fs/ext2/inode.c 	*p = NULL;
p                1355 fs/ext2/inode.c 	*p = bh;
p                 478 fs/ext2/super.c 	char *p;
p                 487 fs/ext2/super.c 	while ((p = strsep (&options, ",")) != NULL) {
p                 489 fs/ext2/super.c 		if (!*p)
p                 492 fs/ext2/super.c 		token = match_token(p, tokens, args);
p                 362 fs/ext4/block_validity.c 			struct inode *inode, __le32 *p, unsigned int max)
p                 365 fs/ext4/block_validity.c 	__le32 *bref = p;
p                 373 fs/ext4/block_validity.c 	while (bref < p+max) {
p                 421 fs/ext4/dir.c  	struct dir_private_info *p;
p                 423 fs/ext4/dir.c  	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 424 fs/ext4/dir.c  	if (!p)
p                 426 fs/ext4/dir.c  	p->curr_hash = pos2maj_hash(filp, pos);
p                 427 fs/ext4/dir.c  	p->curr_minor_hash = pos2min_hash(filp, pos);
p                 428 fs/ext4/dir.c  	return p;
p                 431 fs/ext4/dir.c  void ext4_htree_free_dir_info(struct dir_private_info *p)
p                 433 fs/ext4/dir.c  	free_rb_tree_fname(&p->root);
p                 434 fs/ext4/dir.c  	kfree(p);
p                 449 fs/ext4/dir.c  	struct rb_node **p, *parent = NULL;
p                 455 fs/ext4/dir.c  	p = &info->root.rb_node;
p                 470 fs/ext4/dir.c  	while (*p) {
p                 471 fs/ext4/dir.c  		parent = *p;
p                 486 fs/ext4/dir.c  			p = &(*p)->rb_left;
p                 488 fs/ext4/dir.c  			p = &(*p)->rb_right;
p                 490 fs/ext4/dir.c  			p = &(*p)->rb_left;
p                 492 fs/ext4/dir.c  			p = &(*p)->rb_right;
p                 495 fs/ext4/dir.c  	rb_link_node(&new_fn->rb_hash, parent, p);
p                2144 fs/ext4/ext4.h #define fname_name(p) ((p)->disk_name.name)
p                2145 fs/ext4/ext4.h #define fname_len(p)  ((p)->disk_name.len)
p                2487 fs/ext4/ext4.h extern void ext4_htree_free_dir_info(struct dir_private_info *p);
p                 751 fs/ext4/extents_status.c 	struct rb_node **p = &tree->root.rb_node;
p                 755 fs/ext4/extents_status.c 	while (*p) {
p                 756 fs/ext4/extents_status.c 		parent = *p;
p                 774 fs/ext4/extents_status.c 			p = &(*p)->rb_left;
p                 781 fs/ext4/extents_status.c 			p = &(*p)->rb_right;
p                 792 fs/ext4/extents_status.c 	rb_link_node(&es->rb_node, parent, p);
p                1854 fs/ext4/extents_status.c 	struct rb_node **p = &tree->root.rb_node;
p                1862 fs/ext4/extents_status.c 	while (*p) {
p                1863 fs/ext4/extents_status.c 		parent = *p;
p                1867 fs/ext4/extents_status.c 			p = &(*p)->rb_left;
p                1869 fs/ext4/extents_status.c 			p = &(*p)->rb_right;
p                1883 fs/ext4/extents_status.c 	rb_link_node(&pr->rb_node, parent, p);
p                 192 fs/ext4/fsmap.c 	struct ext4_fsmap *p;
p                 221 fs/ext4/fsmap.c 	list_for_each_entry_safe(p, tmp, &info->gfi_meta_list, fmr_list) {
p                 222 fs/ext4/fsmap.c 		if (p->fmr_physical + p->fmr_length <= info->gfi_next_fsblk) {
p                 223 fs/ext4/fsmap.c 			list_del(&p->fmr_list);
p                 224 fs/ext4/fsmap.c 			kfree(p);
p                 225 fs/ext4/fsmap.c 		} else if (p->fmr_physical < fsb) {
p                 226 fs/ext4/fsmap.c 			error = ext4_getfsmap_helper(sb, info, p);
p                 230 fs/ext4/fsmap.c 			list_del(&p->fmr_list);
p                 231 fs/ext4/fsmap.c 			kfree(p);
p                 372 fs/ext4/fsmap.c 	struct ext4_fsmap *p;
p                 376 fs/ext4/fsmap.c 	list_for_each_entry_safe(p, tmp, meta_list, fmr_list) {
p                 378 fs/ext4/fsmap.c 			prev = p;
p                 382 fs/ext4/fsmap.c 		if (prev->fmr_owner == p->fmr_owner &&
p                 383 fs/ext4/fsmap.c 		    prev->fmr_physical + prev->fmr_length == p->fmr_physical) {
p                 384 fs/ext4/fsmap.c 			prev->fmr_length += p->fmr_length;
p                 385 fs/ext4/fsmap.c 			list_del(&p->fmr_list);
p                 386 fs/ext4/fsmap.c 			kfree(p);
p                 388 fs/ext4/fsmap.c 			prev = p;
p                 395 fs/ext4/fsmap.c 	struct ext4_fsmap *p;
p                 398 fs/ext4/fsmap.c 	list_for_each_entry_safe(p, tmp, meta_list, fmr_list) {
p                 399 fs/ext4/fsmap.c 		list_del(&p->fmr_list);
p                 400 fs/ext4/fsmap.c 		kfree(p);
p                 205 fs/ext4/hash.c 	const char	*p;
p                 238 fs/ext4/hash.c 		p = name;
p                 240 fs/ext4/hash.c 			(*str2hashbuf)(p, len, in, 8);
p                 243 fs/ext4/hash.c 			p += 32;
p                 252 fs/ext4/hash.c 		p = name;
p                 254 fs/ext4/hash.c 			(*str2hashbuf)(p, len, in, 4);
p                 257 fs/ext4/hash.c 			p += 16;
p                 785 fs/ext4/ialloc.c 		struct posix_acl *p = get_acl(dir, ACL_TYPE_DEFAULT);
p                 787 fs/ext4/ialloc.c 		if (IS_ERR(p))
p                 788 fs/ext4/ialloc.c 			return ERR_CAST(p);
p                 789 fs/ext4/ialloc.c 		if (p) {
p                 790 fs/ext4/ialloc.c 			int acl_size = p->a_count * sizeof(ext4_acl_entry);
p                 796 fs/ext4/ialloc.c 			posix_acl_release(p);
p                  32 fs/ext4/indirect.c 	__le32	*p;
p                  37 fs/ext4/indirect.c static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
p                  39 fs/ext4/indirect.c 	p->key = *(p->p = v);
p                  40 fs/ext4/indirect.c 	p->bh = bh;
p                 149 fs/ext4/indirect.c 	Indirect *p = chain;
p                 156 fs/ext4/indirect.c 	if (!p->key)
p                 159 fs/ext4/indirect.c 		bh = sb_getblk(sb, le32_to_cpu(p->key));
p                 177 fs/ext4/indirect.c 		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
p                 179 fs/ext4/indirect.c 		if (!p->key)
p                 187 fs/ext4/indirect.c 	return p;
p                 214 fs/ext4/indirect.c 	__le32 *p;
p                 217 fs/ext4/indirect.c 	for (p = ind->p - 1; p >= start; p--) {
p                 218 fs/ext4/indirect.c 		if (*p)
p                 219 fs/ext4/indirect.c 			return le32_to_cpu(*p);
p                 290 fs/ext4/indirect.c 		le32_to_cpu(*(branch[0].p + count)) == 0) {
p                 328 fs/ext4/indirect.c 	__le32				*p;
p                 361 fs/ext4/indirect.c 		p = branch[i].p = (__le32 *) bh->b_data + offsets[i];
p                 367 fs/ext4/indirect.c 			*p++ = cpu_to_le32(b++);
p                 428 fs/ext4/indirect.c 	*where->p = where->key;
p                 437 fs/ext4/indirect.c 			*(where->p + i) = cpu_to_le32(current_block++);
p                 543 fs/ext4/indirect.c 			blk = le32_to_cpu(*(chain[depth-1].p + count));
p                 720 fs/ext4/indirect.c static inline int all_zeroes(__le32 *p, __le32 *q)
p                 722 fs/ext4/indirect.c 	while (p < q)
p                 723 fs/ext4/indirect.c 		if (*p++)
p                 767 fs/ext4/indirect.c 	Indirect *partial, *p;
p                 782 fs/ext4/indirect.c 	if (!partial->key && *partial->p)
p                 785 fs/ext4/indirect.c 	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
p                 793 fs/ext4/indirect.c 	if (p == chain + k - 1 && p > chain) {
p                 794 fs/ext4/indirect.c 		p->p--;
p                 796 fs/ext4/indirect.c 		*top = *p->p;
p                 799 fs/ext4/indirect.c 		*p->p = 0;
p                 804 fs/ext4/indirect.c 	while (partial > p) {
p                 829 fs/ext4/indirect.c 	__le32 *p;
p                 869 fs/ext4/indirect.c 	for (p = first; p < last; p++)
p                 870 fs/ext4/indirect.c 		*p = 0;
p                 908 fs/ext4/indirect.c 	__le32 *p;			    /* Pointer into inode/ind
p                 921 fs/ext4/indirect.c 	for (p = first; p < last; p++) {
p                 922 fs/ext4/indirect.c 		nr = le32_to_cpu(*p);
p                 927 fs/ext4/indirect.c 				block_to_free_p = p;
p                 934 fs/ext4/indirect.c 						        block_to_free_p, p);
p                 938 fs/ext4/indirect.c 				block_to_free_p = p;
p                 946 fs/ext4/indirect.c 					count, block_to_free_p, p);
p                 988 fs/ext4/indirect.c 	__le32 *p;
p                 996 fs/ext4/indirect.c 		p = last;
p                 997 fs/ext4/indirect.c 		while (--p >= first) {
p                 998 fs/ext4/indirect.c 			nr = le32_to_cpu(*p);
p                1079 fs/ext4/indirect.c 					*p = 0;
p                1149 fs/ext4/indirect.c 			*partial->p = 0;
p                1158 fs/ext4/indirect.c 					partial->p,
p                1159 fs/ext4/indirect.c 					partial->p+1, (chain+n-1) - partial);
p                1164 fs/ext4/indirect.c 		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
p                1219 fs/ext4/indirect.c 	Indirect *p = NULL, *p2 = NULL;
p                1261 fs/ext4/indirect.c 		partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
p                1267 fs/ext4/indirect.c 				*partial->p = 0;
p                1272 fs/ext4/indirect.c 					partial->p,
p                1273 fs/ext4/indirect.c 					partial->p+1, (chain+n-1) - partial);
p                1283 fs/ext4/indirect.c 				partial->p + 1,
p                1308 fs/ext4/indirect.c 			partial2->p++;
p                1318 fs/ext4/indirect.c 					   partial2->p,
p                1326 fs/ext4/indirect.c 	partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
p                1348 fs/ext4/indirect.c 				*partial->p = 0;
p                1353 fs/ext4/indirect.c 						   partial->p,
p                1354 fs/ext4/indirect.c 						   partial->p+1,
p                1367 fs/ext4/indirect.c 		partial2->p++;
p                1381 fs/ext4/indirect.c 					   partial->p + 1,
p                1382 fs/ext4/indirect.c 					   partial2->p,
p                1396 fs/ext4/indirect.c 					   partial->p + 1,
p                1404 fs/ext4/indirect.c 					   partial2->p,
p                1411 fs/ext4/indirect.c 	while (p && p > chain) {
p                1412 fs/ext4/indirect.c 		BUFFER_TRACE(p->bh, "call brelse");
p                1413 fs/ext4/indirect.c 		brelse(p->bh);
p                1414 fs/ext4/indirect.c 		p--;
p                1968 fs/ext4/inline.c 			void *p = (void *) ext4_raw_inode(&is.iloc)->i_block;
p                1969 fs/ext4/inline.c 			memset(p + i_size, 0,
p                 521 fs/ext4/namei.c ext4_next_entry(struct ext4_dir_entry_2 *p, unsigned long blocksize)
p                 523 fs/ext4/namei.c 	return (struct ext4_dir_entry_2 *)((char *)p +
p                 524 fs/ext4/namei.c 		ext4_rec_len_from_disk(p->rec_len, blocksize));
p                 754 fs/ext4/namei.c 	struct dx_entry *at, *entries, *p, *q, *m;
p                 823 fs/ext4/namei.c 		p = entries + 1;
p                 825 fs/ext4/namei.c 		while (p <= q) {
p                 826 fs/ext4/namei.c 			m = p + (q - p) / 2;
p                 831 fs/ext4/namei.c 				p = m + 1;
p                 846 fs/ext4/namei.c 			assert (at == p - 1);
p                 849 fs/ext4/namei.c 		at = p - 1;
p                 927 fs/ext4/namei.c 	struct dx_frame *p;
p                 932 fs/ext4/namei.c 	p = frame;
p                 941 fs/ext4/namei.c 		if (++(p->at) < p->entries + dx_get_count(p->entries))
p                 943 fs/ext4/namei.c 		if (p == frames)
p                 946 fs/ext4/namei.c 		p--;
p                 956 fs/ext4/namei.c 	bhash = dx_get_hash(p->at);
p                 968 fs/ext4/namei.c 		bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
p                 971 fs/ext4/namei.c 		p++;
p                 972 fs/ext4/namei.c 		brelse(p->bh);
p                 973 fs/ext4/namei.c 		p->bh = bh;
p                 974 fs/ext4/namei.c 		p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
p                1235 fs/ext4/namei.c 	struct dx_map_entry *p, *q, *top = map + count - 1;
p                1242 fs/ext4/namei.c 		for (p = top, q = p - count; q >= map; p--, q--)
p                1243 fs/ext4/namei.c 			if (p->hash < q->hash)
p                1244 fs/ext4/namei.c 				swap(*p, *q);
p                 772 fs/ext4/resize.c 	__le32 *p = (__le32 *)primary->b_data;
p                 776 fs/ext4/resize.c 		if (le32_to_cpu(*p++) !=
p                2073 fs/ext4/super.c 	char *p, __maybe_unused *usr_qf_name, __maybe_unused *grp_qf_name;
p                2080 fs/ext4/super.c 	while ((p = strsep(&options, ",")) != NULL) {
p                2081 fs/ext4/super.c 		if (!*p)
p                2088 fs/ext4/super.c 		token = match_token(p, tokens, args);
p                2089 fs/ext4/super.c 		if (handle_mount_opt(sb, p, token, args, journal_devnum,
p                 557 fs/ext4/xattr.c 			void *p = bh->b_data + offset;
p                 559 fs/ext4/xattr.c 			if (unlikely(p + size > end))
p                 561 fs/ext4/xattr.c 			memcpy(buffer, p, size);
p                 612 fs/ext4/xattr.c 			void *p = (void *)IFIRST(header) + offset;
p                 614 fs/ext4/xattr.c 			if (unlikely(p + size > end))
p                 616 fs/ext4/xattr.c 			memcpy(buffer, p, size);
p                 336 fs/f2fs/acl.c  	struct posix_acl *p;
p                 346 fs/f2fs/acl.c  	p = __f2fs_get_acl(dir, ACL_TYPE_DEFAULT, dpage);
p                 347 fs/f2fs/acl.c  	if (!p || p == ERR_PTR(-EOPNOTSUPP)) {
p                 351 fs/f2fs/acl.c  	if (IS_ERR(p))
p                 352 fs/f2fs/acl.c  		return PTR_ERR(p);
p                 354 fs/f2fs/acl.c  	clone = f2fs_acl_clone(p, GFP_NOFS);
p                 370 fs/f2fs/acl.c  		posix_acl_release(p);
p                 372 fs/f2fs/acl.c  		*default_acl = p;
p                 379 fs/f2fs/acl.c  	posix_acl_release(p);
p                 382 fs/f2fs/dir.c  struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
p                 386 fs/f2fs/dir.c  	return f2fs_find_entry(dir, &dotdot, p);
p                  66 fs/f2fs/extent_cache.c 	struct rb_node **p = &root->rb_root.rb_node;
p                  69 fs/f2fs/extent_cache.c 	while (*p) {
p                  70 fs/f2fs/extent_cache.c 		*parent = *p;
p                  74 fs/f2fs/extent_cache.c 			p = &(*p)->rb_left;
p                  76 fs/f2fs/extent_cache.c 			p = &(*p)->rb_right;
p                  83 fs/f2fs/extent_cache.c 	return p;
p                 204 fs/f2fs/extent_cache.c 				struct rb_node *parent, struct rb_node **p,
p                 217 fs/f2fs/extent_cache.c 	rb_link_node(&en->rb_node, parent, p);
p                 287 fs/f2fs/extent_cache.c 	struct rb_node **p = &et->root.rb_root.rb_node;
p                 290 fs/f2fs/extent_cache.c 	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
p                 467 fs/f2fs/extent_cache.c 	struct rb_node **p;
p                 473 fs/f2fs/extent_cache.c 		p = insert_p;
p                 479 fs/f2fs/extent_cache.c 	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
p                 482 fs/f2fs/extent_cache.c 	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
p                 960 fs/f2fs/f2fs.h #define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
p                2265 fs/f2fs/f2fs.h #define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)
p                2269 fs/f2fs/f2fs.h 	struct f2fs_node *p = F2FS_NODE(page);
p                2271 fs/f2fs/f2fs.h 	return RAW_IS_INODE(p);
p                2989 fs/f2fs/f2fs.h struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
p                 179 fs/f2fs/gc.c   			int type, struct victim_sel_policy *p)
p                 183 fs/f2fs/gc.c   	if (p->alloc_mode == SSR) {
p                 184 fs/f2fs/gc.c   		p->gc_mode = GC_GREEDY;
p                 185 fs/f2fs/gc.c   		p->dirty_segmap = dirty_i->dirty_segmap[type];
p                 186 fs/f2fs/gc.c   		p->max_search = dirty_i->nr_dirty[type];
p                 187 fs/f2fs/gc.c   		p->ofs_unit = 1;
p                 189 fs/f2fs/gc.c   		p->gc_mode = select_gc_type(sbi, gc_type);
p                 190 fs/f2fs/gc.c   		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
p                 191 fs/f2fs/gc.c   		p->max_search = dirty_i->nr_dirty[DIRTY];
p                 192 fs/f2fs/gc.c   		p->ofs_unit = sbi->segs_per_sec;
p                 198 fs/f2fs/gc.c   			p->max_search > sbi->max_victim_search)
p                 199 fs/f2fs/gc.c   		p->max_search = sbi->max_victim_search;
p                 204 fs/f2fs/gc.c   		p->offset = 0;
p                 206 fs/f2fs/gc.c   		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
p                 210 fs/f2fs/gc.c   				struct victim_sel_policy *p)
p                 213 fs/f2fs/gc.c   	if (p->alloc_mode == SSR)
p                 215 fs/f2fs/gc.c   	if (p->gc_mode == GC_GREEDY)
p                 216 fs/f2fs/gc.c   		return 2 * sbi->blocks_per_seg * p->ofs_unit;
p                 217 fs/f2fs/gc.c   	else if (p->gc_mode == GC_CB)
p                 275 fs/f2fs/gc.c   			unsigned int segno, struct victim_sel_policy *p)
p                 277 fs/f2fs/gc.c   	if (p->alloc_mode == SSR)
p                 281 fs/f2fs/gc.c   	if (p->gc_mode == GC_GREEDY)
p                 312 fs/f2fs/gc.c   	struct victim_sel_policy p;
p                 320 fs/f2fs/gc.c   	p.alloc_mode = alloc_mode;
p                 321 fs/f2fs/gc.c   	select_policy(sbi, gc_type, type, &p);
p                 323 fs/f2fs/gc.c   	p.min_segno = NULL_SEGNO;
p                 324 fs/f2fs/gc.c   	p.min_cost = get_max_cost(sbi, &p);
p                 329 fs/f2fs/gc.c   			p.min_segno = *result;
p                 333 fs/f2fs/gc.c   	if (p.max_search == 0)
p                 336 fs/f2fs/gc.c   	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
p                 338 fs/f2fs/gc.c   			p.min_segno = sbi->next_victim_seg[BG_GC];
p                 339 fs/f2fs/gc.c   			*result = p.min_segno;
p                 345 fs/f2fs/gc.c   			p.min_segno = sbi->next_victim_seg[FG_GC];
p                 346 fs/f2fs/gc.c   			*result = p.min_segno;
p                 352 fs/f2fs/gc.c   	last_victim = sm->last_victim[p.gc_mode];
p                 353 fs/f2fs/gc.c   	if (p.alloc_mode == LFS && gc_type == FG_GC) {
p                 354 fs/f2fs/gc.c   		p.min_segno = check_bg_victims(sbi);
p                 355 fs/f2fs/gc.c   		if (p.min_segno != NULL_SEGNO)
p                 363 fs/f2fs/gc.c   		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
p                 365 fs/f2fs/gc.c   			if (sm->last_victim[p.gc_mode]) {
p                 367 fs/f2fs/gc.c   					sm->last_victim[p.gc_mode];
p                 368 fs/f2fs/gc.c   				sm->last_victim[p.gc_mode] = 0;
p                 369 fs/f2fs/gc.c   				p.offset = 0;
p                 375 fs/f2fs/gc.c   		p.offset = segno + p.ofs_unit;
p                 376 fs/f2fs/gc.c   		if (p.ofs_unit > 1) {
p                 377 fs/f2fs/gc.c   			p.offset -= segno % p.ofs_unit;
p                 378 fs/f2fs/gc.c   			nsearched += count_bits(p.dirty_segmap,
p                 379 fs/f2fs/gc.c   						p.offset - p.ofs_unit,
p                 380 fs/f2fs/gc.c   						p.ofs_unit);
p                 402 fs/f2fs/gc.c   					p.alloc_mode != SSR))
p                 407 fs/f2fs/gc.c   		cost = get_gc_cost(sbi, segno, &p);
p                 409 fs/f2fs/gc.c   		if (p.min_cost > cost) {
p                 410 fs/f2fs/gc.c   			p.min_segno = segno;
p                 411 fs/f2fs/gc.c   			p.min_cost = cost;
p                 414 fs/f2fs/gc.c   		if (nsearched >= p.max_search) {
p                 415 fs/f2fs/gc.c   			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
p                 416 fs/f2fs/gc.c   				sm->last_victim[p.gc_mode] = last_victim + 1;
p                 418 fs/f2fs/gc.c   				sm->last_victim[p.gc_mode] = segno + 1;
p                 419 fs/f2fs/gc.c   			sm->last_victim[p.gc_mode] %=
p                 424 fs/f2fs/gc.c   	if (p.min_segno != NULL_SEGNO) {
p                 426 fs/f2fs/gc.c   		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
p                 428 fs/f2fs/gc.c   		if (p.alloc_mode == LFS) {
p                 429 fs/f2fs/gc.c   			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
p                 438 fs/f2fs/gc.c   	if (p.min_segno != NULL_SEGNO)
p                 439 fs/f2fs/gc.c   		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
p                 444 fs/f2fs/gc.c   	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
p                  76 fs/f2fs/hash.c 	const unsigned char *p;
p                  94 fs/f2fs/hash.c 	p = name;
p                  96 fs/f2fs/hash.c 		str2hashbuf(p, len, in, 4);
p                  98 fs/f2fs/hash.c 		p += 16;
p                 360 fs/f2fs/node.h static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
p                 362 fs/f2fs/node.h 	struct f2fs_node *rn = F2FS_NODE(p);
p                 364 fs/f2fs/node.h 	f2fs_wait_on_page_writeback(p, NODE, true, true);
p                 370 fs/f2fs/node.h 	return set_page_dirty(p);
p                 373 fs/f2fs/node.h static inline nid_t get_nid(struct page *p, int off, bool i)
p                 375 fs/f2fs/node.h 	struct f2fs_node *rn = F2FS_NODE(p);
p                  99 fs/f2fs/segment.c 	const unsigned long *p = addr + BIT_WORD(offset);
p                 110 fs/f2fs/segment.c 		if (*p == 0)
p                 113 fs/f2fs/segment.c 		tmp = __reverse_ulong((unsigned char *)p);
p                 125 fs/f2fs/segment.c 		p++;
p                 135 fs/f2fs/segment.c 	const unsigned long *p = addr + BIT_WORD(offset);
p                 146 fs/f2fs/segment.c 		if (*p == ~0UL)
p                 149 fs/f2fs/segment.c 		tmp = __reverse_ulong((unsigned char *)p);
p                 162 fs/f2fs/segment.c 		p++;
p                 969 fs/f2fs/segment.c 				struct rb_node *parent, struct rb_node **p,
p                 977 fs/f2fs/segment.c 	rb_link_node(&dc->rb_node, parent, p);
p                1227 fs/f2fs/segment.c 	struct rb_node **p;
p                1234 fs/f2fs/segment.c 		p = insert_p;
p                1238 fs/f2fs/segment.c 	p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent,
p                1242 fs/f2fs/segment.c 								p, leftmost);
p                  43 fs/f2fs/shrinker.c 	struct list_head *p;
p                  47 fs/f2fs/shrinker.c 	p = f2fs_list.next;
p                  48 fs/f2fs/shrinker.c 	while (p != &f2fs_list) {
p                  49 fs/f2fs/shrinker.c 		sbi = list_entry(p, struct f2fs_sb_info, s_list);
p                  53 fs/f2fs/shrinker.c 			p = p->next;
p                  68 fs/f2fs/shrinker.c 		p = p->next;
p                  80 fs/f2fs/shrinker.c 	struct list_head *p;
p                  88 fs/f2fs/shrinker.c 	p = f2fs_list.next;
p                  89 fs/f2fs/shrinker.c 	while (p != &f2fs_list) {
p                  90 fs/f2fs/shrinker.c 		sbi = list_entry(p, struct f2fs_sb_info, s_list);
p                  97 fs/f2fs/shrinker.c 			p = p->next;
p                 116 fs/f2fs/shrinker.c 		p = p->next;
p                 394 fs/f2fs/super.c 	char *p, *name;
p                 405 fs/f2fs/super.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 407 fs/f2fs/super.c 		if (!*p)
p                 414 fs/f2fs/super.c 		token = match_token(p, f2fs_tokens, args);
p                 815 fs/f2fs/super.c 				 p);
p                  57 fs/f2fs/trace.c 	void *p;
p                  66 fs/f2fs/trace.c 	p = radix_tree_lookup(&pids, pid);
p                  67 fs/f2fs/trace.c 	if (p == current)
p                  69 fs/f2fs/trace.c 	if (p)
p                  37 fs/f2fs/trace.h #define f2fs_trace_pid(p)
p                  86 fs/fat/cache.c 	struct fat_cache *hit = &nohit, *p;
p                  90 fs/fat/cache.c 	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
p                  92 fs/fat/cache.c 		if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
p                  93 fs/fat/cache.c 			hit = p;
p                 120 fs/fat/cache.c 	struct fat_cache *p;
p                 122 fs/fat/cache.c 	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
p                 124 fs/fat/cache.c 		if (p->fcluster == new->fcluster) {
p                 125 fs/fat/cache.c 			BUG_ON(p->dcluster != new->dcluster);
p                 126 fs/fat/cache.c 			if (new->nr_contig > p->nr_contig)
p                 127 fs/fat/cache.c 				p->nr_contig = new->nr_contig;
p                 128 fs/fat/cache.c 			return p;
p                 169 fs/fat/cache.c 			struct list_head *p = MSDOS_I(inode)->cache_lru.prev;
p                 170 fs/fat/cache.c 			cache = list_entry(p, struct fat_cache, cache_list);
p                 721 fs/fat/inode.c static void delayed_free(struct rcu_head *p)
p                 723 fs/fat/inode.c 	struct msdos_sb_info *sbi = container_of(p, struct msdos_sb_info, rcu);
p                1130 fs/fat/inode.c 	char *p;
p                1165 fs/fat/inode.c 	while ((p = strsep(&options, ",")) != NULL) {
p                1167 fs/fat/inode.c 		if (!*p)
p                1170 fs/fat/inode.c 		token = match_token(p, fat_tokens, args);
p                1173 fs/fat/inode.c 				token = match_token(p, vfat_tokens, args);
p                1175 fs/fat/inode.c 				token = match_token(p, msdos_tokens, args);
p                1349 fs/fat/inode.c 			       "not supported now", p);
p                1356 fs/fat/inode.c 				       "or missing value", p);
p                 333 fs/fat/namei_vfat.c 	unsigned char base[9], ext[4], buf[5], *p;
p                 382 fs/fat/namei_vfat.c 	for (baselen = i = 0, p = base, ip = uname; i < sz; i++, ip++) {
p                 393 fs/fat/namei_vfat.c 			*p++ = charbuf[chi];
p                 410 fs/fat/namei_vfat.c 		for (p = ext, ip = ext_start; extlen < 3 && ip < end; ip++) {
p                 421 fs/fat/namei_vfat.c 				*p++ = charbuf[chi];
p                 708 fs/fcntl.c     static inline int sigio_perm(struct task_struct *p,
p                 715 fs/fcntl.c     	cred = __task_cred(p);
p                 719 fs/fcntl.c     	       !security_file_send_sigiotask(p, fown, sig));
p                 724 fs/fcntl.c     static void send_sigio_to_task(struct task_struct *p,
p                 734 fs/fcntl.c     	if (!sigio_perm(p, fown, signum))
p                 770 fs/fcntl.c     			if (!do_send_sig_info(signum, &si, p, type))
p                 774 fs/fcntl.c     			do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, type);
p                 780 fs/fcntl.c     	struct task_struct *p;
p                 793 fs/fcntl.c     		p = pid_task(pid, PIDTYPE_PID);
p                 794 fs/fcntl.c     		if (p)
p                 795 fs/fcntl.c     			send_sigio_to_task(p, fown, fd, band, type);
p                 799 fs/fcntl.c     		do_each_pid_task(pid, type, p) {
p                 800 fs/fcntl.c     			send_sigio_to_task(p, fown, fd, band, type);
p                 801 fs/fcntl.c     		} while_each_pid_task(pid, type, p);
p                 808 fs/fcntl.c     static void send_sigurg_to_task(struct task_struct *p,
p                 811 fs/fcntl.c     	if (sigio_perm(p, fown, SIGURG))
p                 812 fs/fcntl.c     		do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, type);
p                 817 fs/fcntl.c     	struct task_struct *p;
p                 833 fs/fcntl.c     		p = pid_task(pid, PIDTYPE_PID);
p                 834 fs/fcntl.c     		if (p)
p                 835 fs/fcntl.c     			send_sigurg_to_task(p, fown, type);
p                 839 fs/fcntl.c     		do_each_pid_task(pid, type, p) {
p                 840 fs/fcntl.c     			send_sigurg_to_task(p, fown, type);
p                 841 fs/fcntl.c     		} while_each_pid_task(pid, type, p);
p                 998 fs/file.c      		const void *p)
p                1010 fs/file.c      		res = f(p, file, n);
p                  51 fs/filesystems.c 	struct file_system_type **p;
p                  52 fs/filesystems.c 	for (p = &file_systems; *p; p = &(*p)->next)
p                  53 fs/filesystems.c 		if (strncmp((*p)->name, name, len) == 0 &&
p                  54 fs/filesystems.c 		    !(*p)->name[len])
p                  56 fs/filesystems.c 	return p;
p                  75 fs/filesystems.c 	struct file_system_type ** p;
p                  84 fs/filesystems.c 	p = find_filesystem(fs->name, strlen(fs->name));
p                  85 fs/filesystems.c 	if (*p)
p                  88 fs/filesystems.c 		*p = fs;
p                 392 fs/fs_context.c 	const char *p;
p                 399 fs/fs_context.c 		p = fmt;
p                 403 fs/fs_context.c 		p = va_arg(va, const char *);
p                 415 fs/fs_context.c 	if ((unsigned long)p >= (unsigned long)__start_rodata &&
p                 416 fs/fs_context.c 	    (unsigned long)p <  (unsigned long)__end_rodata)
p                 418 fs/fs_context.c 	if (log && within_module_core((unsigned long)p, log->owner))
p                 420 fs/fs_context.c 	q = kstrdup(p, GFP_KERNEL);
p                 424 fs/fs_context.c 	p = store_failure;
p                 426 fs/fs_context.c 	q = (char *)p;
p                  49 fs/fs_parser.c 	const struct fs_parameter_spec *p;
p                  54 fs/fs_parser.c 	for (p = desc->specs; p->name; p++)
p                  55 fs/fs_parser.c 		if (strcmp(p->name, name) == 0)
p                  56 fs/fs_parser.c 			return p;
p                  84 fs/fs_parser.c 	const struct fs_parameter_spec *p;
p                  92 fs/fs_parser.c 	p = fs_lookup_key(desc, param->key);
p                  93 fs/fs_parser.c 	if (!p) {
p                 103 fs/fs_parser.c 		p = fs_lookup_key(desc, param->key + 2);
p                 104 fs/fs_parser.c 		if (!p)
p                 106 fs/fs_parser.c 		if (!(p->flags & fs_param_neg_with_no))
p                 112 fs/fs_parser.c 	if (p->flags & fs_param_deprecated)
p                 120 fs/fs_parser.c 	switch (p->type) {
p                 133 fs/fs_parser.c 			if (p->flags & fs_param_v_optional)
p                 145 fs/fs_parser.c 	switch (p->type) {
p                 191 fs/fs_parser.c 			if (e->opt == p->opt &&
p                 237 fs/fs_parser.c 	return p->opt;
p                  30 fs/fs_pin.c    void pin_kill(struct fs_pin *p)
p                  34 fs/fs_pin.c    	if (!p) {
p                  39 fs/fs_pin.c    	spin_lock_irq(&p->wait.lock);
p                  40 fs/fs_pin.c    	if (likely(!p->done)) {
p                  41 fs/fs_pin.c    		p->done = -1;
p                  42 fs/fs_pin.c    		spin_unlock_irq(&p->wait.lock);
p                  44 fs/fs_pin.c    		p->kill(p);
p                  47 fs/fs_pin.c    	if (p->done > 0) {
p                  48 fs/fs_pin.c    		spin_unlock_irq(&p->wait.lock);
p                  52 fs/fs_pin.c    	__add_wait_queue(&p->wait, &wait);
p                  55 fs/fs_pin.c    		spin_unlock_irq(&p->wait.lock);
p                  62 fs/fs_pin.c    		spin_lock_irq(&p->wait.lock);
p                  63 fs/fs_pin.c    		if (p->done > 0) {
p                  64 fs/fs_pin.c    			spin_unlock_irq(&p->wait.lock);
p                  74 fs/fs_pin.c    		struct hlist_node *p;
p                  76 fs/fs_pin.c    		p = READ_ONCE(m->mnt_pins.first);
p                  77 fs/fs_pin.c    		if (!p) {
p                  81 fs/fs_pin.c    		pin_kill(hlist_entry(p, struct fs_pin, m_list));
p                  85 fs/fs_pin.c    void group_pin_kill(struct hlist_head *p)
p                  90 fs/fs_pin.c    		q = READ_ONCE(p->first);
p                  50 fs/fs_struct.c static inline int replace_path(struct path *p, const struct path *old, const struct path *new)
p                  52 fs/fs_struct.c 	if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
p                  54 fs/fs_struct.c 	*p = *new;
p                  60 fs/fs_struct.c 	struct task_struct *g, *p;
p                  65 fs/fs_struct.c 	do_each_thread(g, p) {
p                  66 fs/fs_struct.c 		task_lock(p);
p                  67 fs/fs_struct.c 		fs = p->fs;
p                  81 fs/fs_struct.c 		task_unlock(p);
p                  82 fs/fs_struct.c 	} while_each_thread(g, p);
p                 199 fs/fscache/cookie.c 	struct hlist_bl_node *p;
p                 206 fs/fscache/cookie.c 	hlist_bl_for_each_entry(cursor, p, h, hash_link) {
p                 554 fs/fscache/cookie.c 	struct fscache_object *p;
p                 567 fs/fscache/cookie.c 	hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
p                 568 fs/fscache/cookie.c 		if (p->cache == object->cache) {
p                 569 fs/fscache/cookie.c 			if (fscache_object_is_dying(p))
p                 577 fs/fscache/cookie.c 	hlist_for_each_entry(p, &cookie->parent->backing_objects,
p                 579 fs/fscache/cookie.c 		if (p->cache == object->cache) {
p                 580 fs/fscache/cookie.c 			if (fscache_object_is_dying(p)) {
p                 585 fs/fscache/cookie.c 			object->parent = p;
p                 586 fs/fscache/cookie.c 			spin_lock(&p->lock);
p                 587 fs/fscache/cookie.c 			p->n_children++;
p                 588 fs/fscache/cookie.c 			spin_unlock(&p->lock);
p                 354 fs/fscache/internal.h 	void *p;
p                 359 fs/fscache/internal.h 		p = cookie->inline_aux;
p                 361 fs/fscache/internal.h 		p = cookie->aux;
p                 363 fs/fscache/internal.h 	if (memcmp(p, aux_data, cookie->aux_len) != 0) {
p                 364 fs/fscache/internal.h 		memcpy(p, aux_data, cookie->aux_len);
p                  45 fs/fscache/object-list.c 	struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL;
p                  51 fs/fscache/object-list.c 	while (*p) {
p                  52 fs/fscache/object-list.c 		parent = *p;
p                  56 fs/fscache/object-list.c 			p = &(*p)->rb_left;
p                  58 fs/fscache/object-list.c 			p = &(*p)->rb_right;
p                  63 fs/fscache/object-list.c 	rb_link_node(&obj->objlist_link, parent, p);
p                  91 fs/fscache/object-list.c 	struct rb_node *p;
p                 106 fs/fscache/object-list.c 	p = fscache_object_list.rb_node;
p                 107 fs/fscache/object-list.c 	while (p) {
p                 108 fs/fscache/object-list.c 		obj = rb_entry(p, struct fscache_object, objlist_link);
p                 112 fs/fscache/object-list.c 			p = p->rb_left;
p                 114 fs/fscache/object-list.c 			p = p->rb_right;
p                 167 fs/fscache/object-list.c 	u8 *p;
p                 279 fs/fscache/object-list.c 			p = keylen <= sizeof(cookie->inline_key) ?
p                 282 fs/fscache/object-list.c 				seq_printf(m, "%02x", *p++);
p                 286 fs/fscache/object-list.c 				p = auxlen <= sizeof(cookie->inline_aux) ?
p                 289 fs/fscache/object-list.c 					seq_printf(m, "%02x", *p++);
p                 120 fs/fscache/operation.c 	struct fscache_operation *p;
p                 137 fs/fscache/operation.c 		list_for_each_entry(p, &object->pending_ops, pend_link) {
p                 138 fs/fscache/operation.c 			ASSERTCMP(p->object, ==, object);
p                  31 fs/fsopen.c    	char *p;
p                  45 fs/fsopen.c    	p = log->buffer[index];
p                  53 fs/fsopen.c    	n = strlen(p);
p                  57 fs/fsopen.c    	if (copy_to_user(_buf, p, n) != 0)
p                  63 fs/fsopen.c    		kfree(p);
p                 219 fs/fuse/cuse.c 	char *p = *pp;
p                 222 fs/fuse/cuse.c 	while (p < end && *p == '\0')
p                 223 fs/fuse/cuse.c 		p++;
p                 224 fs/fuse/cuse.c 	if (p == end)
p                 232 fs/fuse/cuse.c 	key = val = p;
p                 233 fs/fuse/cuse.c 	p += strlen(p);
p                 249 fs/fuse/cuse.c 	*pp = p;
p                 270 fs/fuse/cuse.c static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo)
p                 272 fs/fuse/cuse.c 	char *end = p + len;
p                 277 fs/fuse/cuse.c 		rc = cuse_parse_one(&p, end, &key, &val);
p                2057 fs/fuse/dev.c  	struct rb_node *p;
p                2059 fs/fuse/dev.c  	p = rb_first(&fc->polled_files);
p                2061 fs/fuse/dev.c  	while (p) {
p                2063 fs/fuse/dev.c  		ff = rb_entry(p, struct fuse_file, polled_node);
p                2066 fs/fuse/dev.c  		p = rb_next(p);
p                 560 fs/gfs2/aops.c 	void *p;
p                 569 fs/gfs2/aops.c 		p = kmap_atomic(page);
p                 570 fs/gfs2/aops.c 		memcpy(buf + copied, p + offset, amt);
p                 571 fs/gfs2/aops.c 		kunmap_atomic(p);
p                 280 fs/gfs2/bmap.c 	__be64 *p = metaptr1(height, mp);
p                 281 fs/gfs2/bmap.c 	return p + mp->mp_list[height];
p                1501 fs/gfs2/bmap.c 	__be64 *p;
p                1519 fs/gfs2/bmap.c 	for (p = start; p < end; p++) {
p                1520 fs/gfs2/bmap.c 		if (!*p)
p                1522 fs/gfs2/bmap.c 		bn = be64_to_cpu(*p);
p                1599 fs/gfs2/bmap.c 		*p = 0;
p                  82 fs/gfs2/dir.c  #define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1))
p                  28 fs/gfs2/export.c static int gfs2_encode_fh(struct inode *inode, __u32 *p, int *len,
p                  31 fs/gfs2/export.c 	__be32 *fh = (__force __be32 *)p;
p                1356 fs/gfs2/glock.c 		     struct gfs2_holder **p)
p                1362 fs/gfs2/glock.c 		p[x] = &ghs[x];
p                1364 fs/gfs2/glock.c 	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
p                1367 fs/gfs2/glock.c 		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
p                1369 fs/gfs2/glock.c 		error = gfs2_glock_nq(p[x]);
p                1372 fs/gfs2/glock.c 				gfs2_glock_dq(p[x]);
p                1773 fs/gfs2/glock.c 	char *p = buf;
p                1775 fs/gfs2/glock.c 		*p++ = 't';
p                1777 fs/gfs2/glock.c 		*p++ = 'T';
p                1779 fs/gfs2/glock.c 		*p++ = 'e';
p                1781 fs/gfs2/glock.c 		*p++ = 'A';
p                1783 fs/gfs2/glock.c 		*p++ = 'p';
p                1785 fs/gfs2/glock.c 		*p++ = 'a';
p                1787 fs/gfs2/glock.c 		*p++ = 'E';
p                1789 fs/gfs2/glock.c 		*p++ = 'c';
p                1791 fs/gfs2/glock.c 		*p++ = 'H';
p                1793 fs/gfs2/glock.c 		*p++ = 'W';
p                1795 fs/gfs2/glock.c 		*p++ = 'F';
p                1796 fs/gfs2/glock.c 	*p = 0;
p                1830 fs/gfs2/glock.c 	char *p = buf;
p                1833 fs/gfs2/glock.c 		*p++ = 'l';
p                1835 fs/gfs2/glock.c 		*p++ = 'D';
p                1837 fs/gfs2/glock.c 		*p++ = 'd';
p                1839 fs/gfs2/glock.c 		*p++ = 'p';
p                1841 fs/gfs2/glock.c 		*p++ = 'y';
p                1843 fs/gfs2/glock.c 		*p++ = 'f';
p                1845 fs/gfs2/glock.c 		*p++ = 'i';
p                1847 fs/gfs2/glock.c 		*p++ = 'r';
p                1849 fs/gfs2/glock.c 		*p++ = 'I';
p                1851 fs/gfs2/glock.c 		*p++ = 'F';
p                1853 fs/gfs2/glock.c 		*p++ = 'q';
p                1855 fs/gfs2/glock.c 		*p++ = 'L';
p                1857 fs/gfs2/glock.c 		*p++ = 'o';
p                1859 fs/gfs2/glock.c 		*p++ = 'b';
p                1860 fs/gfs2/glock.c 	*p = 0;
p                 239 fs/gfs2/ops_fstype.c 	struct gfs2_sb *p;
p                 266 fs/gfs2/ops_fstype.c 	p = kmap(page);
p                 267 fs/gfs2/ops_fstype.c 	gfs2_sb_in(sdp, p);
p                 123 fs/gfs2/super.c 	struct task_struct *p;
p                 126 fs/gfs2/super.c 	p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
p                 127 fs/gfs2/super.c 	if (IS_ERR(p)) {
p                 128 fs/gfs2/super.c 		error = PTR_ERR(p);
p                 132 fs/gfs2/super.c 	sdp->sd_logd_process = p;
p                 134 fs/gfs2/super.c 	p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
p                 135 fs/gfs2/super.c 	if (IS_ERR(p)) {
p                 136 fs/gfs2/super.c 		error = PTR_ERR(p);
p                 140 fs/gfs2/super.c 	sdp->sd_quotad_process = p;
p                 158 fs/gfs2/util.h 					   unsigned int *p)
p                 162 fs/gfs2/util.h 	x = *p;
p                 299 fs/hfs/bnode.c 	struct hfs_bnode **p;
p                 303 fs/hfs/bnode.c 	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
p                 304 fs/hfs/bnode.c 	     *p && *p != node; p = &(*p)->next_hash)
p                 306 fs/hfs/bnode.c 	BUG_ON(!*p);
p                 307 fs/hfs/bnode.c 	*p = node->next_hash;
p                  74 fs/hfs/part_tbl.c 		struct old_pmap_entry *p;
p                  77 fs/hfs/part_tbl.c 		p = pm->pdEntry;
p                  79 fs/hfs/part_tbl.c 		for (i = 0; i < size; p++, i++) {
p                  80 fs/hfs/part_tbl.c 			if (p->pdStart && p->pdSize &&
p                  81 fs/hfs/part_tbl.c 			    p->pdFSID == cpu_to_be32(0x54465331)/*"TFS1"*/ &&
p                  83 fs/hfs/part_tbl.c 				*part_start += be32_to_cpu(p->pdStart);
p                  84 fs/hfs/part_tbl.c 				*part_size = be32_to_cpu(p->pdSize);
p                 226 fs/hfs/super.c 	char *p;
p                 243 fs/hfs/super.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 244 fs/hfs/super.c 		if (!*p)
p                 247 fs/hfs/super.c 		token = match_token(p, tokens, args);
p                 325 fs/hfs/super.c 			p = match_strdup(&args[0]);
p                 326 fs/hfs/super.c 			if (p)
p                 327 fs/hfs/super.c 				hsb->nls_disk = load_nls(p);
p                 329 fs/hfs/super.c 				pr_err("unable to load codepage \"%s\"\n", p);
p                 330 fs/hfs/super.c 				kfree(p);
p                 333 fs/hfs/super.c 			kfree(p);
p                 340 fs/hfs/super.c 			p = match_strdup(&args[0]);
p                 341 fs/hfs/super.c 			if (p)
p                 342 fs/hfs/super.c 				hsb->nls_io = load_nls(p);
p                 344 fs/hfs/super.c 				pr_err("unable to load iocharset \"%s\"\n", p);
p                 345 fs/hfs/super.c 				kfree(p);
p                 348 fs/hfs/super.c 			kfree(p);
p                 469 fs/hfs/super.c static void hfs_init_once(void *p)
p                 471 fs/hfs/super.c 	struct hfs_inode_info *i = p;
p                 465 fs/hfsplus/bnode.c 	struct hfs_bnode **p;
p                 469 fs/hfsplus/bnode.c 	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
p                 470 fs/hfsplus/bnode.c 	     *p && *p != node; p = &(*p)->next_hash)
p                 472 fs/hfsplus/bnode.c 	BUG_ON(!*p);
p                 473 fs/hfsplus/bnode.c 	*p = node->next_hash;
p                  74 fs/hfsplus/options.c 	char *p;
p                  81 fs/hfsplus/options.c 	while ((p = strsep(&input, ",")) != NULL) {
p                  82 fs/hfsplus/options.c 		if (!*p)
p                  85 fs/hfsplus/options.c 		token = match_token(p, tokens, args);
p                 102 fs/hfsplus/options.c 	char *p;
p                 109 fs/hfsplus/options.c 	while ((p = strsep(&input, ",")) != NULL) {
p                 110 fs/hfsplus/options.c 		if (!*p)
p                 113 fs/hfsplus/options.c 		token = match_token(p, tokens, args);
p                 173 fs/hfsplus/options.c 			p = match_strdup(&args[0]);
p                 174 fs/hfsplus/options.c 			if (p)
p                 175 fs/hfsplus/options.c 				sbi->nls = load_nls(p);
p                 178 fs/hfsplus/options.c 				       p);
p                 179 fs/hfsplus/options.c 				kfree(p);
p                 182 fs/hfsplus/options.c 			kfree(p);
p                  77 fs/hfsplus/part_tbl.c 		struct old_pmap_entry *p = &pm->pdEntry[i];
p                  79 fs/hfsplus/part_tbl.c 		if (p->pdStart && p->pdSize &&
p                  80 fs/hfsplus/part_tbl.c 		    p->pdFSID == cpu_to_be32(0x54465331)/*"TFS1"*/ &&
p                  82 fs/hfsplus/part_tbl.c 			*part_start += be32_to_cpu(p->pdStart);
p                  83 fs/hfsplus/part_tbl.c 			*part_size = be32_to_cpu(p->pdSize);
p                 654 fs/hfsplus/super.c static void hfsplus_init_once(void *p)
p                 656 fs/hfsplus/super.c 	struct hfsplus_inode_info *i = p;
p                 102 fs/hfsplus/unicode.c static u16 *hfsplus_compose_lookup(u16 *p, u16 cc)
p                 107 fs/hfsplus/unicode.c 	e = p[1];
p                 108 fs/hfsplus/unicode.c 	if (!e || cc < p[s * 2] || cc > p[e * 2])
p                 112 fs/hfsplus/unicode.c 		if (cc > p[i * 2])
p                 114 fs/hfsplus/unicode.c 		else if (cc < p[i * 2])
p                 117 fs/hfsplus/unicode.c 			return hfsplus_compose_table + p[i * 2 + 1];
p                  66 fs/hostfs/hostfs.h extern int stat_file(const char *path, struct hostfs_stat *p, int fd);
p                  86 fs/hostfs/hostfs_kern.c 	char *p = dentry_path_raw(dentry, name, PATH_MAX);
p                  92 fs/hostfs/hostfs_kern.c 	if (IS_ERR(p)) {
p                 101 fs/hostfs/hostfs_kern.c 	BUG_ON(p + strlen(p) + 1 != name + PATH_MAX);
p                 104 fs/hostfs/hostfs_kern.c 	if (len > p - name) {
p                 109 fs/hostfs/hostfs_kern.c 	if (p > name + len)
p                 110 fs/hostfs/hostfs_kern.c 		strcpy(name + len, p);
p                  21 fs/hostfs/hostfs_user.c static void stat64_to_hostfs(const struct stat64 *buf, struct hostfs_stat *p)
p                  23 fs/hostfs/hostfs_user.c 	p->ino = buf->st_ino;
p                  24 fs/hostfs/hostfs_user.c 	p->mode = buf->st_mode;
p                  25 fs/hostfs/hostfs_user.c 	p->nlink = buf->st_nlink;
p                  26 fs/hostfs/hostfs_user.c 	p->uid = buf->st_uid;
p                  27 fs/hostfs/hostfs_user.c 	p->gid = buf->st_gid;
p                  28 fs/hostfs/hostfs_user.c 	p->size = buf->st_size;
p                  29 fs/hostfs/hostfs_user.c 	p->atime.tv_sec = buf->st_atime;
p                  30 fs/hostfs/hostfs_user.c 	p->atime.tv_nsec = 0;
p                  31 fs/hostfs/hostfs_user.c 	p->ctime.tv_sec = buf->st_ctime;
p                  32 fs/hostfs/hostfs_user.c 	p->ctime.tv_nsec = 0;
p                  33 fs/hostfs/hostfs_user.c 	p->mtime.tv_sec = buf->st_mtime;
p                  34 fs/hostfs/hostfs_user.c 	p->mtime.tv_nsec = 0;
p                  35 fs/hostfs/hostfs_user.c 	p->blksize = buf->st_blksize;
p                  36 fs/hostfs/hostfs_user.c 	p->blocks = buf->st_blocks;
p                  37 fs/hostfs/hostfs_user.c 	p->maj = os_major(buf->st_rdev);
p                  38 fs/hostfs/hostfs_user.c 	p->min = os_minor(buf->st_rdev);
p                  41 fs/hostfs/hostfs_user.c int stat_file(const char *path, struct hostfs_stat *p, int fd)
p                  51 fs/hostfs/hostfs_user.c 	stat64_to_hostfs(&buf, p);
p                  86 fs/hpfs/dnode.c static void hpfs_pos_subst(loff_t *p, loff_t f, loff_t t)
p                  88 fs/hpfs/dnode.c 	if (*p == f) *p = t;
p                  96 fs/hpfs/dnode.c static void hpfs_pos_ins(loff_t *p, loff_t d, loff_t c)
p                  98 fs/hpfs/dnode.c 	if ((*p & ~0x3f) == (d & ~0x3f) && (*p & 0x3f) >= (d & 0x3f)) {
p                  99 fs/hpfs/dnode.c 		int n = (*p & 0x3f) + c;
p                 102 fs/hpfs/dnode.c 				__func__, (int)*p, (int)c >> 8);
p                 104 fs/hpfs/dnode.c 			*p = (*p & ~0x3f) | n;
p                 108 fs/hpfs/dnode.c static void hpfs_pos_del(loff_t *p, loff_t d, loff_t c)
p                 110 fs/hpfs/dnode.c 	if ((*p & ~0x3f) == (d & ~0x3f) && (*p & 0x3f) >= (d & 0x3f)) {
p                 111 fs/hpfs/dnode.c 		int n = (*p & 0x3f) - c;
p                 114 fs/hpfs/dnode.c 				__func__, (int)*p, (int)c >> 8);
p                 116 fs/hpfs/dnode.c 			*p = (*p & ~0x3f) | n;
p                 524 fs/hpfs/dnode.c 	int p;
p                 574 fs/hpfs/dnode.c 		p = 1;
p                 576 fs/hpfs/dnode.c 		for (de = dnode_first_de(dnode); de < de_end; de = de_next_de(de), p++)
p                 581 fs/hpfs/dnode.c 		for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, ((loff_t)up << 4) | p);
p                 618 fs/hpfs/dnode.c 		for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, 4);
p                 619 fs/hpfs/dnode.c 		for_all_poss(i, hpfs_pos_del, ((loff_t)up << 4) | p, 1);
p                 690 fs/hpfs/dnode.c 		for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | (p - 1), 4);
p                 691 fs/hpfs/dnode.c 		for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, ((loff_t)up << 4) | (p - 1));
p                 476 fs/hpfs/hpfs.h static inline bool fnode_in_anode(struct fnode *p)
p                 478 fs/hpfs/hpfs.h 	return (p->flags & FNODE_anode) != 0;
p                 481 fs/hpfs/hpfs.h static inline bool fnode_is_dir(struct fnode *p)
p                 483 fs/hpfs/hpfs.h 	return (p->flags & FNODE_dir) != 0;
p                 269 fs/hpfs/map.c  			unsigned p, pp = 0;
p                 284 fs/hpfs/map.c  			for (p = 20; p < le32_to_cpu(dnode->first_free); p += d[p] + (d[p+1] << 8)) {
p                 285 fs/hpfs/map.c  				struct hpfs_dirent *de = (struct hpfs_dirent *)((char *)dnode + p);
p                 286 fs/hpfs/map.c  				if (le16_to_cpu(de->length) > 292 || (le16_to_cpu(de->length) < 32) || (le16_to_cpu(de->length) & 3) || p + le16_to_cpu(de->length) > 2048) {
p                 287 fs/hpfs/map.c  					hpfs_error(s, "bad dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
p                 292 fs/hpfs/map.c  					hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
p                 298 fs/hpfs/map.c  					hpfs_error(s, "bad down pointer in dnode %08x, dirent %03x, last %03x", secno, p, pp);
p                 301 fs/hpfs/map.c  				pp = p;
p                 304 fs/hpfs/map.c  			if (p != le32_to_cpu(dnode->first_free)) {
p                 318 fs/hpfs/super.c 	char *p;
p                 326 fs/hpfs/super.c 	while ((p = strsep(&opts, ",")) != NULL) {
p                 329 fs/hpfs/super.c 		if (!*p)
p                 332 fs/hpfs/super.c 		token = match_token(p, tokens, args);
p                1034 fs/hugetlbfs/inode.c 	struct hugetlbfs_inode_info *p;
p                1038 fs/hugetlbfs/inode.c 	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
p                1039 fs/hugetlbfs/inode.c 	if (unlikely(!p)) {
p                1053 fs/hugetlbfs/inode.c 	mpol_shared_policy_init(&p->policy, NULL);
p                1055 fs/hugetlbfs/inode.c 	return &p->vfs_inode;
p                 893 fs/inode.c     	unsigned int *p = &get_cpu_var(last_ino);
p                 894 fs/inode.c     	unsigned int res = *p;
p                 909 fs/inode.c     	*p = res;
p                 176 fs/internal.h  extern void group_pin_kill(struct hlist_head *p);
p                 396 fs/io_uring.c  static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
p                 411 fs/io_uring.c  	ctx->flags = p->flags;
p                1863 fs/io_uring.c  			       struct poll_table_struct *p)
p                1865 fs/io_uring.c  	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
p                3237 fs/io_uring.c  			       struct io_uring_params *p)
p                3249 fs/io_uring.c  		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
p                3253 fs/io_uring.c  		if (p->flags & IORING_SETUP_SQ_AFF) {
p                3254 fs/io_uring.c  			int cpu = p->sq_thread_cpu;
p                3275 fs/io_uring.c  	} else if (p->flags & IORING_SETUP_SQ_AFF) {
p                3801 fs/io_uring.c  				  struct io_uring_params *p)
p                3806 fs/io_uring.c  	size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
p                3816 fs/io_uring.c  	rings->sq_ring_mask = p->sq_entries - 1;
p                3817 fs/io_uring.c  	rings->cq_ring_mask = p->cq_entries - 1;
p                3818 fs/io_uring.c  	rings->sq_ring_entries = p->sq_entries;
p                3819 fs/io_uring.c  	rings->cq_ring_entries = p->cq_entries;
p                3825 fs/io_uring.c  	size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
p                3886 fs/io_uring.c  static int io_uring_create(unsigned entries, struct io_uring_params *p)
p                3902 fs/io_uring.c  	p->sq_entries = roundup_pow_of_two(entries);
p                3903 fs/io_uring.c  	p->cq_entries = 2 * p->sq_entries;
p                3910 fs/io_uring.c  				ring_pages(p->sq_entries, p->cq_entries));
p                3917 fs/io_uring.c  	ctx = io_ring_ctx_alloc(p);
p                3920 fs/io_uring.c  			io_unaccount_mem(user, ring_pages(p->sq_entries,
p                3921 fs/io_uring.c  								p->cq_entries));
p                3935 fs/io_uring.c  	ret = io_allocate_scq_urings(ctx, p);
p                3939 fs/io_uring.c  	ret = io_sq_offload_start(ctx, p);
p                3943 fs/io_uring.c  	memset(&p->sq_off, 0, sizeof(p->sq_off));
p                3944 fs/io_uring.c  	p->sq_off.head = offsetof(struct io_rings, sq.head);
p                3945 fs/io_uring.c  	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
p                3946 fs/io_uring.c  	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
p                3947 fs/io_uring.c  	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
p                3948 fs/io_uring.c  	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
p                3949 fs/io_uring.c  	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
p                3950 fs/io_uring.c  	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
p                3952 fs/io_uring.c  	memset(&p->cq_off, 0, sizeof(p->cq_off));
p                3953 fs/io_uring.c  	p->cq_off.head = offsetof(struct io_rings, cq.head);
p                3954 fs/io_uring.c  	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
p                3955 fs/io_uring.c  	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
p                3956 fs/io_uring.c  	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
p                3957 fs/io_uring.c  	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
p                3958 fs/io_uring.c  	p->cq_off.cqes = offsetof(struct io_rings, cqes);
p                3968 fs/io_uring.c  	p->features = IORING_FEAT_SINGLE_MMAP;
p                3982 fs/io_uring.c  	struct io_uring_params p;
p                3986 fs/io_uring.c  	if (copy_from_user(&p, params, sizeof(p)))
p                3988 fs/io_uring.c  	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
p                3989 fs/io_uring.c  		if (p.resv[i])
p                3993 fs/io_uring.c  	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
p                3997 fs/io_uring.c  	ret = io_uring_create(entries, &p);
p                4001 fs/io_uring.c  	if (copy_to_user(params, &p, sizeof(p)))
p                  55 fs/ioctl.c     static int ioctl_fibmap(struct file *filp, int __user *p)
p                  65 fs/ioctl.c     	res = get_user(block, p);
p                  69 fs/ioctl.c     	return put_user(res, p);
p                 498 fs/ioctl.c     	int __user *p = (int __user *)arg;
p                 502 fs/ioctl.c     		return ioctl_fibmap(filp, p);
p                 504 fs/ioctl.c     		return put_user(i_size_read(inode) - filp->f_pos, p);
p                 507 fs/ioctl.c     		return ioctl_preallocate(filp, p);
p                  94 fs/isofs/dir.c 	char *p = NULL;		/* Quiet GCC */
p                 209 fs/isofs/dir.c 				p = tmpname;
p                 217 fs/isofs/dir.c 				p = tmpname;
p                 222 fs/isofs/dir.c 				p = tmpname;
p                 226 fs/isofs/dir.c 				p = tmpname;
p                 228 fs/isofs/dir.c 				p = de->name;
p                 233 fs/isofs/dir.c 			if (!dir_emit(ctx, p, len, inode_number, DT_UNKNOWN))
p                 340 fs/isofs/inode.c 	char *p;
p                 365 fs/isofs/inode.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 370 fs/isofs/inode.c 		if (!*p)
p                 373 fs/isofs/inode.c 		token = match_token(p, tokens, args);
p                 871 fs/isofs/inode.c 		char *p = opt.iocharset ? opt.iocharset : CONFIG_NLS_DEFAULT;
p                 872 fs/isofs/inode.c 		sbi->s_nls_iocharset = load_nls(p);
p                  76 fs/isofs/isofs.h static inline int isonum_711(u8 *p)
p                  78 fs/isofs/isofs.h 	return *p;
p                  80 fs/isofs/isofs.h static inline int isonum_712(s8 *p)
p                  82 fs/isofs/isofs.h 	return *p;
p                  84 fs/isofs/isofs.h static inline unsigned int isonum_721(u8 *p)
p                  86 fs/isofs/isofs.h 	return get_unaligned_le16(p);
p                  88 fs/isofs/isofs.h static inline unsigned int isonum_722(u8 *p)
p                  90 fs/isofs/isofs.h 	return get_unaligned_be16(p);
p                  92 fs/isofs/isofs.h static inline unsigned int isonum_723(u8 *p)
p                  95 fs/isofs/isofs.h 	return get_unaligned_le16(p);
p                  97 fs/isofs/isofs.h static inline unsigned int isonum_731(u8 *p)
p                  99 fs/isofs/isofs.h 	return get_unaligned_le32(p);
p                 101 fs/isofs/isofs.h static inline unsigned int isonum_732(u8 *p)
p                 103 fs/isofs/isofs.h 	return get_unaligned_be32(p);
p                 105 fs/isofs/isofs.h static inline unsigned int isonum_733(u8 *p)
p                 108 fs/isofs/isofs.h 	return get_unaligned_le32(p);
p                 207 fs/isofs/rock.c 	char *p;
p                 278 fs/isofs/rock.c 			p = memchr(rr->u.NM.name, '\0', len);
p                 279 fs/isofs/rock.c 			if (unlikely(p))
p                 280 fs/isofs/rock.c 				len = p - rr->u.NM.name;
p                 379 fs/isofs/rock.c 				int p;
p                 380 fs/isofs/rock.c 				for (p = 0; p < rr->u.ER.len_id; p++)
p                 381 fs/isofs/rock.c 					printk(KERN_CONT "%c", rr->u.ER.data[p]);
p                  19 fs/isofs/util.c int iso_date(u8 *p, int flag)
p                  24 fs/isofs/util.c 	year = p[0];
p                  25 fs/isofs/util.c 	month = p[1];
p                  26 fs/isofs/util.c 	day = p[2];
p                  27 fs/isofs/util.c 	hour = p[3];
p                  28 fs/isofs/util.c 	minute = p[4];
p                  29 fs/isofs/util.c 	second = p[5];
p                  30 fs/isofs/util.c 	if (flag == 0) tz = p[6]; /* High sierra has no time zone */
p                1236 fs/jbd2/journal.c 	char *p;
p                1258 fs/jbd2/journal.c 	p = strreplace(journal->j_devname, '/', '!');
p                1259 fs/jbd2/journal.c 	sprintf(p, "-%lu", journal->j_inode->i_ino);
p                  38 fs/jffs2/compr_rubin.c 	unsigned long p;
p                  93 fs/jffs2/compr_rubin.c 	rs->p = (long) (2 * UPPER_BIT_RUBIN);
p                 109 fs/jffs2/compr_rubin.c 	       ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) {
p                 117 fs/jffs2/compr_rubin.c 		rs->p <<= 1;
p                 119 fs/jffs2/compr_rubin.c 	i0 = A * rs->p / (A + B);
p                 123 fs/jffs2/compr_rubin.c 	if (i0 >= rs->p)
p                 124 fs/jffs2/compr_rubin.c 		i0 = rs->p - 1;
p                 126 fs/jffs2/compr_rubin.c 	i1 = rs->p - i0;
p                 129 fs/jffs2/compr_rubin.c 		rs->p = i0;
p                 131 fs/jffs2/compr_rubin.c 		rs->p = i1;
p                 163 fs/jffs2/compr_rubin.c static void __do_decode(struct rubin_state *rs, unsigned long p,
p                 179 fs/jffs2/compr_rubin.c 		p <<= 1;
p                 180 fs/jffs2/compr_rubin.c 	} while ((q >= UPPER_BIT_RUBIN) || ((p + q) <= UPPER_BIT_RUBIN));
p                 182 fs/jffs2/compr_rubin.c 	rs->p = p;
p                 202 fs/jffs2/compr_rubin.c 	unsigned long p = rs->p, q = rs->q;
p                 206 fs/jffs2/compr_rubin.c 	if (q >= UPPER_BIT_RUBIN || ((p + q) <= UPPER_BIT_RUBIN))
p                 207 fs/jffs2/compr_rubin.c 		__do_decode(rs, p, q);
p                 209 fs/jffs2/compr_rubin.c 	i0 = A * rs->p / (A + B);
p                 213 fs/jffs2/compr_rubin.c 	if (i0 >= rs->p)
p                 214 fs/jffs2/compr_rubin.c 		i0 = rs->p - 1;
p                 220 fs/jffs2/compr_rubin.c 		i0 = rs->p - i0;
p                 223 fs/jffs2/compr_rubin.c 	rs->p = i0;
p                 204 fs/jffs2/malloc.c 	struct jffs2_raw_node_ref **p, *ref;
p                 209 fs/jffs2/malloc.c 	p = &jeb->last_node;
p                 210 fs/jffs2/malloc.c 	ref = *p;
p                 220 fs/jffs2/malloc.c 			dbg_memalloc("Allocating new refblock linked from %p\n", p);
p                 221 fs/jffs2/malloc.c 			ref = *p = jffs2_alloc_refblock();
p                 226 fs/jffs2/malloc.c 			p = &ref->next_in_ino;
p                 227 fs/jffs2/malloc.c 			ref = *p;
p                 805 fs/jffs2/nodemgmt.c 		struct jffs2_raw_node_ref **p;
p                 810 fs/jffs2/nodemgmt.c 		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
p                 813 fs/jffs2/nodemgmt.c 		*p = ref->next_in_ino;
p                 489 fs/jffs2/wbuf.c 			struct jffs2_raw_node_ref **p = &ic->nodes;
p                 492 fs/jffs2/wbuf.c 			while (*p && *p != (void *)ic) {
p                 493 fs/jffs2/wbuf.c 				if (*p == raw) {
p                 494 fs/jffs2/wbuf.c 					(*p) = (raw->next_in_ino);
p                 498 fs/jffs2/wbuf.c 				p = &((*p)->next_in_ino);
p                  29 fs/jfs/jfs_debug.h #define assert(p) do {	\
p                  30 fs/jfs/jfs_debug.h 	if (!(p)) {	\
p                  32 fs/jfs/jfs_debug.h 		       __FILE__, __LINE__, #p);			\
p                  42 fs/jfs/jfs_debug.h #define ASSERT(p) assert(p)
p                  83 fs/jfs/jfs_debug.h #define ASSERT(p) do {} while (0)
p                3377 fs/jfs/jfs_dmap.c 	s64 p;
p                3454 fs/jfs/jfs_dmap.c 	p = BMAPBLKNO + nbperpage;	/* L2 page */
p                3455 fs/jfs/jfs_dmap.c 	l2mp = read_metapage(ipbmap, p, PSIZE, 0);
p                3465 fs/jfs/jfs_dmap.c 	p = BLKTOL1(blkno, sbi->l2nbperpage);	/* L1 page */
p                3470 fs/jfs/jfs_dmap.c 	for (; k < LPERCTL; k++, p += nbperpage) {
p                3474 fs/jfs/jfs_dmap.c 			l1mp = read_metapage(ipbmap, p, PSIZE, 0);
p                3482 fs/jfs/jfs_dmap.c 			p = BLKTOL0(blkno, sbi->l2nbperpage);
p                3486 fs/jfs/jfs_dmap.c 			l1mp = get_metapage(ipbmap, p, PSIZE, 0);
p                3495 fs/jfs/jfs_dmap.c 			p += nbperpage;	/* 1st L0 of L1.k */
p                3506 fs/jfs/jfs_dmap.c 				l0mp = read_metapage(ipbmap, p, PSIZE, 0);
p                3515 fs/jfs/jfs_dmap.c 				p = BLKTODMAP(blkno,
p                3520 fs/jfs/jfs_dmap.c 				l0mp = get_metapage(ipbmap, p, PSIZE, 0);
p                3529 fs/jfs/jfs_dmap.c 				p += nbperpage;	/* 1st dmap of L0.j */
p                3542 fs/jfs/jfs_dmap.c 					mp = read_metapage(ipbmap, p,
p                3549 fs/jfs/jfs_dmap.c 					mp = read_metapage(ipbmap, p,
p                3567 fs/jfs/jfs_dmap.c 				p += nbperpage;
p                 154 fs/jfs/jfs_dtree.c static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p);
p                 161 fs/jfs/jfs_dtree.c static int dtCompare(struct component_name * key, dtpage_t * p, int si);
p                 163 fs/jfs/jfs_dtree.c static int ciCompare(struct component_name * key, dtpage_t * p, int si,
p                 166 fs/jfs/jfs_dtree.c static void dtGetKey(dtpage_t * p, int i, struct component_name * key,
p                 172 fs/jfs/jfs_dtree.c static void dtInsertEntry(dtpage_t * p, int index, struct component_name * key,
p                 179 fs/jfs/jfs_dtree.c static void dtDeleteEntry(dtpage_t * p, int fi, struct dt_lock ** dtlock);
p                 181 fs/jfs/jfs_dtree.c static void dtTruncateEntry(dtpage_t * p, int ti, struct dt_lock ** dtlock);
p                 183 fs/jfs/jfs_dtree.c static void dtLinelockFreelist(dtpage_t * p, int m, struct dt_lock ** dtlock);
p                 574 fs/jfs/jfs_dtree.c 	dtpage_t *p;
p                 623 fs/jfs/jfs_dtree.c 		DT_GETPAGE(ip, bn, mp, psize, p, rc);
p                 628 fs/jfs/jfs_dtree.c 		stbl = DT_GETSTBL(p);
p                 633 fs/jfs/jfs_dtree.c 		for (base = 0, lim = p->header.nextindex; lim; lim >>= 1) {
p                 636 fs/jfs/jfs_dtree.c 			if (p->header.flag & BT_LEAF) {
p                 639 fs/jfs/jfs_dtree.c 				    ciCompare(&ciKey, p, stbl[index],
p                 644 fs/jfs/jfs_dtree.c 				cmp = dtCompare(&ciKey, p, stbl[index]);
p                 655 fs/jfs/jfs_dtree.c 				if (p->header.flag & BT_LEAF) {
p                 657 fs/jfs/jfs_dtree.c 			((struct ldtentry *) & p->slot[stbl[index]])->inumber);
p                 725 fs/jfs/jfs_dtree.c 		if (p->header.flag & BT_LEAF) {
p                 778 fs/jfs/jfs_dtree.c 		pxd = (pxd_t *) & p->slot[stbl[index]];
p                 814 fs/jfs/jfs_dtree.c 	dtpage_t *p;		/* base B+-tree index page */
p                 831 fs/jfs/jfs_dtree.c 	DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);
p                 857 fs/jfs/jfs_dtree.c 	if (n > p->header.freecnt) {
p                 886 fs/jfs/jfs_dtree.c 	dtInsertEntry(p, index, name, &data, &dtlck);
p                 889 fs/jfs/jfs_dtree.c 	if (!(p->header.flag & BT_ROOT)) {
p                 894 fs/jfs/jfs_dtree.c 		lv->offset = p->header.stblindex + n;
p                 896 fs/jfs/jfs_dtree.c 		    ((p->header.nextindex - 1) >> L2DTSLOTSIZE) - n + 1;
p                1328 fs/jfs/jfs_dtree.c 	dtpage_t *p;
p                1463 fs/jfs/jfs_dtree.c 		DT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
p                1484 fs/jfs/jfs_dtree.c 		p->header.prev = cpu_to_le64(rbn);
p                2076 fs/jfs/jfs_dtree.c 	dtpage_t *p;
p                2098 fs/jfs/jfs_dtree.c 	DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
p                2106 fs/jfs/jfs_dtree.c 		stbl = DT_GETSTBL(p);
p                2107 fs/jfs/jfs_dtree.c 		ldtentry = (struct ldtentry *) & p->slot[stbl[index]];
p                2109 fs/jfs/jfs_dtree.c 		if (index == (p->header.nextindex - 1)) {
p                2113 fs/jfs/jfs_dtree.c 			if ((p->header.flag & BT_ROOT)
p                2114 fs/jfs/jfs_dtree.c 			    || (p->header.next == 0))
p                2118 fs/jfs/jfs_dtree.c 				DT_GETPAGE(ip, le64_to_cpu(p->header.next),
p                2134 fs/jfs/jfs_dtree.c 			    (struct ldtentry *) & p->slot[stbl[index + 1]];
p                2142 fs/jfs/jfs_dtree.c 	if (p->header.nextindex == 1) {
p                2144 fs/jfs/jfs_dtree.c 		rc = dtDeleteUp(tid, ip, mp, p, &btstack);
p                2174 fs/jfs/jfs_dtree.c 		if (!(p->header.flag & BT_ROOT)) {
p                2179 fs/jfs/jfs_dtree.c 			lv->offset = p->header.stblindex + i;
p                2181 fs/jfs/jfs_dtree.c 			    ((p->header.nextindex - 1) >> L2DTSLOTSIZE) -
p                2187 fs/jfs/jfs_dtree.c 		dtDeleteEntry(p, index, &dtlck);
p                2192 fs/jfs/jfs_dtree.c 		if (DO_INDEX(ip) && index < p->header.nextindex) {
p                2196 fs/jfs/jfs_dtree.c 			stbl = DT_GETSTBL(p);
p                2197 fs/jfs/jfs_dtree.c 			for (i = index; i < p->header.nextindex; i++) {
p                2199 fs/jfs/jfs_dtree.c 				    (struct ldtentry *) & p->slot[stbl[i]];
p                2230 fs/jfs/jfs_dtree.c 	dtpage_t *p;
p                2296 fs/jfs/jfs_dtree.c 		DT_GETPAGE(ip, parent->bn, mp, PSIZE, p, rc);
p                2308 fs/jfs/jfs_dtree.c 		nextindex = p->header.nextindex;
p                2319 fs/jfs/jfs_dtree.c 			if (p->header.flag & BT_ROOT) {
p                2345 fs/jfs/jfs_dtree.c 				pxdlock->pxd = p->header.self;
p                2349 fs/jfs/jfs_dtree.c 				if ((rc = dtRelink(tid, ip, p))) {
p                2354 fs/jfs/jfs_dtree.c 				xlen = lengthPXD(&p->header.self);
p                2390 fs/jfs/jfs_dtree.c 		if (!(p->header.flag & BT_ROOT)) {
p                2398 fs/jfs/jfs_dtree.c 			lv->offset = p->header.stblindex + i;
p                2400 fs/jfs/jfs_dtree.c 			    ((p->header.nextindex - 1) >> L2DTSLOTSIZE) -
p                2406 fs/jfs/jfs_dtree.c 		dtDeleteEntry(p, index, &dtlck);
p                2410 fs/jfs/jfs_dtree.c 		    ((p->header.flag & BT_ROOT) || p->header.prev == 0))
p                2411 fs/jfs/jfs_dtree.c 			dtTruncateEntry(p, 0, &dtlck);
p                2438 fs/jfs/jfs_dtree.c 	dtpage_t *p, *pp, *rp = 0, *lp= 0;
p                2474 fs/jfs/jfs_dtree.c 	DT_GETPAGE(ip, oxaddr, mp, PSIZE, p, rc);
p                2485 fs/jfs/jfs_dtree.c 	if (p->header.next) {
p                2486 fs/jfs/jfs_dtree.c 		nextbn = le64_to_cpu(p->header.next);
p                2496 fs/jfs/jfs_dtree.c 	if (p->header.prev) {
p                2497 fs/jfs/jfs_dtree.c 		prevbn = le64_to_cpu(p->header.prev);
p                2557 fs/jfs/jfs_dtree.c 	pxd = &p->header.self;
p                2564 fs/jfs/jfs_dtree.c 	lv->length = p->header.maxslot;
p                2636 fs/jfs/jfs_dtree.c 	dtpage_t *p;
p                2652 fs/jfs/jfs_dtree.c 		DT_GETPAGE(ip, bn, mp, psize, p, rc);
p                2659 fs/jfs/jfs_dtree.c 		if (p->header.flag & BT_ROOT) {
p                2662 fs/jfs/jfs_dtree.c 		} else if (addressPXD(&p->header.self) == lmxaddr)
p                2668 fs/jfs/jfs_dtree.c 		if (p->header.flag & BT_LEAF) {
p                2674 fs/jfs/jfs_dtree.c 		stbl = DT_GETSTBL(p);
p                2675 fs/jfs/jfs_dtree.c 		pxd = (pxd_t *) & p->slot[stbl[0]];
p                2688 fs/jfs/jfs_dtree.c 	stbl = DT_GETSTBL(p);
p                2689 fs/jfs/jfs_dtree.c 	for (i = 0; i < p->header.nextindex; i++) {
p                2690 fs/jfs/jfs_dtree.c 		pxd = (pxd_t *) & p->slot[stbl[i]];
p                2705 fs/jfs/jfs_dtree.c 	if (p->header.next)
p                2706 fs/jfs/jfs_dtree.c 		bn = le64_to_cpu(p->header.next);
p                2716 fs/jfs/jfs_dtree.c 	DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                2735 fs/jfs/jfs_dtree.c static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p)
p                2744 fs/jfs/jfs_dtree.c 	nextbn = le64_to_cpu(p->header.next);
p                2745 fs/jfs/jfs_dtree.c 	prevbn = le64_to_cpu(p->header.prev);
p                2749 fs/jfs/jfs_dtree.c 		DT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
p                2772 fs/jfs/jfs_dtree.c 		p->header.prev = cpu_to_le64(prevbn);
p                2778 fs/jfs/jfs_dtree.c 		DT_GETPAGE(ip, prevbn, mp, PSIZE, p, rc);
p                2801 fs/jfs/jfs_dtree.c 		p->header.next = cpu_to_le64(nextbn);
p                2817 fs/jfs/jfs_dtree.c 	dtroot_t *p;
p                2875 fs/jfs/jfs_dtree.c 	p = &jfs_ip->i_dtroot;
p                2877 fs/jfs/jfs_dtree.c 	p->header.flag = DXD_INDEX | BT_ROOT | BT_LEAF;
p                2879 fs/jfs/jfs_dtree.c 	p->header.nextindex = 0;
p                2883 fs/jfs/jfs_dtree.c 	f = &p->slot[fsi];
p                2890 fs/jfs/jfs_dtree.c 	p->header.freelist = 1;
p                2891 fs/jfs/jfs_dtree.c 	p->header.freecnt = 8;
p                2894 fs/jfs/jfs_dtree.c 	p->header.idotdot = cpu_to_le32(idotdot);
p                2914 fs/jfs/jfs_dtree.c 	dtpage_t *p;
p                2922 fs/jfs/jfs_dtree.c 	DT_GETPAGE(inode, bn, mp, PSIZE, p, rc);
p                2930 fs/jfs/jfs_dtree.c 	ASSERT(p->header.flag & BT_LEAF);
p                2938 fs/jfs/jfs_dtree.c 	stbl = DT_GETSTBL(p);
p                2939 fs/jfs/jfs_dtree.c 	for (i = 0; i < p->header.nextindex; i++) {
p                2940 fs/jfs/jfs_dtree.c 		d = (struct ldtentry *) &p->slot[stbl[i]];
p                3006 fs/jfs/jfs_dtree.c 	dtpage_t *p;
p                3076 fs/jfs/jfs_dtree.c 			DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                3081 fs/jfs/jfs_dtree.c 			if (p->header.flag & BT_INTERNAL) {
p                3114 fs/jfs/jfs_dtree.c 			DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
p                3160 fs/jfs/jfs_dtree.c 		DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
p                3182 fs/jfs/jfs_dtree.c 		stbl = DT_GETSTBL(p);
p                3184 fs/jfs/jfs_dtree.c 		for (i = index; i < p->header.nextindex; i++) {
p                3185 fs/jfs/jfs_dtree.c 			d = (struct ldtentry *) & p->slot[stbl[i]];
p                3245 fs/jfs/jfs_dtree.c 				t = (struct dtslot *) & p->slot[next];
p                3274 fs/jfs/jfs_dtree.c 			if (p->header.flag & BT_ROOT)
p                3277 fs/jfs/jfs_dtree.c 				bn = le64_to_cpu(p->header.next);
p                3311 fs/jfs/jfs_dtree.c 		DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                3336 fs/jfs/jfs_dtree.c 	dtpage_t *p;
p                3349 fs/jfs/jfs_dtree.c 		DT_GETPAGE(ip, bn, mp, psize, p, rc);
p                3356 fs/jfs/jfs_dtree.c 		if (p->header.flag & BT_LEAF) {
p                3379 fs/jfs/jfs_dtree.c 		stbl = DT_GETSTBL(p);
p                3380 fs/jfs/jfs_dtree.c 		xd = (pxd_t *) & p->slot[stbl[0]];
p                3413 fs/jfs/jfs_dtree.c 	dtpage_t *p;
p                3427 fs/jfs/jfs_dtree.c 	DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);
p                3436 fs/jfs/jfs_dtree.c 		if (index < p->header.nextindex)
p                3439 fs/jfs/jfs_dtree.c 		if (p->header.flag & BT_ROOT) {
p                3451 fs/jfs/jfs_dtree.c 	if (p->header.flag & BT_ROOT) {
p                3462 fs/jfs/jfs_dtree.c 	bn = le64_to_cpu(p->header.next);
p                3486 fs/jfs/jfs_dtree.c 	DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                3491 fs/jfs/jfs_dtree.c 	while (pn >= p->header.nextindex) {
p                3492 fs/jfs/jfs_dtree.c 		pn -= p->header.nextindex;
p                3495 fs/jfs/jfs_dtree.c 		bn = le64_to_cpu(p->header.next);
p                3507 fs/jfs/jfs_dtree.c 		DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                3516 fs/jfs/jfs_dtree.c 	stbl = DT_GETSTBL(p);
p                3517 fs/jfs/jfs_dtree.c 	xd = (pxd_t *) & p->slot[stbl[pn]];
p                3527 fs/jfs/jfs_dtree.c 	DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                3535 fs/jfs/jfs_dtree.c 	if (index >= p->header.nextindex) {
p                3536 fs/jfs/jfs_dtree.c 		bn = le64_to_cpu(p->header.next);
p                3548 fs/jfs/jfs_dtree.c 		DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                3579 fs/jfs/jfs_dtree.c 		     dtpage_t * p,	/* directory page */
p                3607 fs/jfs/jfs_dtree.c 	ih = (struct idtentry *) & p->slot[si];
p                3625 fs/jfs/jfs_dtree.c 		t = (struct dtslot *) & p->slot[si];
p                3655 fs/jfs/jfs_dtree.c 		     dtpage_t * p,	/* directory page */
p                3689 fs/jfs/jfs_dtree.c 	if (p->header.flag & BT_LEAF) {
p                3690 fs/jfs/jfs_dtree.c 		lh = (struct ldtentry *) & p->slot[si];
p                3703 fs/jfs/jfs_dtree.c 		ih = (struct idtentry *) & p->slot[si];
p                3728 fs/jfs/jfs_dtree.c 		t = (struct dtslot *) & p->slot[si];
p                3829 fs/jfs/jfs_dtree.c static void dtGetKey(dtpage_t * p, int i,	/* entry index */
p                3842 fs/jfs/jfs_dtree.c 	stbl = DT_GETSTBL(p);
p                3844 fs/jfs/jfs_dtree.c 	if (p->header.flag & BT_LEAF) {
p                3845 fs/jfs/jfs_dtree.c 		lh = (struct ldtentry *) & p->slot[si];
p                3854 fs/jfs/jfs_dtree.c 		ih = (struct idtentry *) & p->slot[si];
p                3874 fs/jfs/jfs_dtree.c 		t = &p->slot[si];
p                3893 fs/jfs/jfs_dtree.c static void dtInsertEntry(dtpage_t * p, int index, struct component_name * key,
p                3914 fs/jfs/jfs_dtree.c 	hsi = fsi = p->header.freelist;
p                3915 fs/jfs/jfs_dtree.c 	h = &p->slot[fsi];
p                3916 fs/jfs/jfs_dtree.c 	p->header.freelist = h->next;
p                3917 fs/jfs/jfs_dtree.c 	--p->header.freecnt;
p                3927 fs/jfs/jfs_dtree.c 	if (p->header.flag & BT_LEAF) {
p                3935 fs/jfs/jfs_dtree.c 			if (!(p->header.flag & BT_ROOT))
p                3936 fs/jfs/jfs_dtree.c 				bn = addressPXD(&p->header.self);
p                3962 fs/jfs/jfs_dtree.c 		fsi = p->header.freelist;
p                3963 fs/jfs/jfs_dtree.c 		t = &p->slot[fsi];
p                3964 fs/jfs/jfs_dtree.c 		p->header.freelist = t->next;
p                3965 fs/jfs/jfs_dtree.c 		--p->header.freecnt;
p                4003 fs/jfs/jfs_dtree.c 		if (p->header.flag & BT_LEAF)
p                4012 fs/jfs/jfs_dtree.c 	stbl = DT_GETSTBL(p);
p                4013 fs/jfs/jfs_dtree.c 	nextindex = p->header.nextindex;
p                4017 fs/jfs/jfs_dtree.c 		if ((p->header.flag & BT_LEAF) && data->leaf.ip) {
p                4026 fs/jfs/jfs_dtree.c 				lh = (struct ldtentry *) & (p->slot[stbl[n]]);
p                4039 fs/jfs/jfs_dtree.c 	++p->header.nextindex;
p                4252 fs/jfs/jfs_dtree.c static void dtDeleteEntry(dtpage_t * p, int fi, struct dt_lock ** dtlock)
p                4263 fs/jfs/jfs_dtree.c 	stbl = DT_GETSTBL(p);
p                4274 fs/jfs/jfs_dtree.c 	t = &p->slot[fsi];
p                4275 fs/jfs/jfs_dtree.c 	if (p->header.flag & BT_LEAF)
p                4309 fs/jfs/jfs_dtree.c 		t = &p->slot[si];
p                4321 fs/jfs/jfs_dtree.c 	t->next = p->header.freelist;
p                4322 fs/jfs/jfs_dtree.c 	p->header.freelist = fsi;
p                4323 fs/jfs/jfs_dtree.c 	p->header.freecnt += freecnt;
p                4328 fs/jfs/jfs_dtree.c 	si = p->header.nextindex;
p                4332 fs/jfs/jfs_dtree.c 	p->header.nextindex--;
p                4347 fs/jfs/jfs_dtree.c static void dtTruncateEntry(dtpage_t * p, int ti, struct dt_lock ** dtlock)
p                4358 fs/jfs/jfs_dtree.c 	stbl = DT_GETSTBL(p);
p                4369 fs/jfs/jfs_dtree.c 	t = &p->slot[tsi];
p                4370 fs/jfs/jfs_dtree.c 	ASSERT(p->header.flag & BT_INTERNAL);
p                4404 fs/jfs/jfs_dtree.c 		t = &p->slot[si];
p                4418 fs/jfs/jfs_dtree.c 	t->next = p->header.freelist;
p                4419 fs/jfs/jfs_dtree.c 	p->header.freelist = fsi;
p                4420 fs/jfs/jfs_dtree.c 	p->header.freecnt += freecnt;
p                4427 fs/jfs/jfs_dtree.c static void dtLinelockFreelist(dtpage_t * p,	/* directory page */
p                4439 fs/jfs/jfs_dtree.c 	fsi = p->header.freelist;
p                4451 fs/jfs/jfs_dtree.c 	t = &p->slot[fsi];
p                4477 fs/jfs/jfs_dtree.c 		t = &p->slot[si];
p                4513 fs/jfs/jfs_dtree.c 	dtpage_t *p;
p                4532 fs/jfs/jfs_dtree.c 	DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
p                4542 fs/jfs/jfs_dtree.c 	stbl = DT_GETSTBL(p);
p                4553 fs/jfs/jfs_dtree.c 	entry = (struct ldtentry *) & p->slot[entry_si];
p                 221 fs/jfs/jfs_dtree.h #define DT_GETSTBL(p) ( ((p)->header.flag & BT_ROOT) ?\
p                 222 fs/jfs/jfs_dtree.h 	((dtroot_t *)(p))->header.stbl : \
p                 223 fs/jfs/jfs_dtree.h 	(s8 *)&(p)->slot[(p)->header.stblindex] )
p                 674 fs/jfs/jfs_imap.c 		xtpage_t *p, *xp;
p                 687 fs/jfs/jfs_imap.c 		p = &jfs_ip->i_xtroot;
p                 691 fs/jfs/jfs_imap.c 			memcpy(&xp->xad[lv->offset], &p->xad[lv->offset],
p                 717 fs/jfs/jfs_imap.c 		xtpage_t *p, *xp;
p                 723 fs/jfs/jfs_imap.c 		p = &jfs_ip->i_xtroot;
p                 727 fs/jfs/jfs_imap.c 			memcpy(&xp->xad[lv->offset], &p->xad[lv->offset],
p                 742 fs/jfs/jfs_imap.c 		dtpage_t *p, *xp;
p                 747 fs/jfs/jfs_imap.c 		p = (dtpage_t *) &jfs_ip->i_dtroot;
p                 751 fs/jfs/jfs_imap.c 			memcpy(&xp->slot[lv->offset], &p->slot[lv->offset],
p                 353 fs/jfs/jfs_logmgr.c 	caddr_t p;		/* src meta-data page */
p                 380 fs/jfs/jfs_logmgr.c 		p = (caddr_t) (tlck->mp->data);
p                 386 fs/jfs/jfs_logmgr.c 			p = (caddr_t) &JFS_IP(tlck->ip)->i_dtroot;
p                 388 fs/jfs/jfs_logmgr.c 			p = (caddr_t) &JFS_IP(tlck->ip)->i_xtroot;
p                 395 fs/jfs/jfs_logmgr.c 		p = (caddr_t) & inlinelock->pxd;
p                 426 fs/jfs/jfs_logmgr.c 		src = (u8 *) p + (lv->offset << l2linesize);
p                 587 fs/jfs/jfs_txnmgr.c 	xtpage_t *p;
p                 779 fs/jfs/jfs_txnmgr.c 				p = (xtpage_t *) mp->data;
p                 781 fs/jfs/jfs_txnmgr.c 				p = &jfs_ip->i_xtroot;
p                 783 fs/jfs/jfs_txnmgr.c 			    le16_to_cpu(p->header.nextindex);
p                1692 fs/jfs/jfs_txnmgr.c 	xtpage_t *p;
p                1711 fs/jfs/jfs_txnmgr.c 		p = &JFS_IP(ip)->i_xtroot;
p                1716 fs/jfs/jfs_txnmgr.c 		p = (xtpage_t *) mp->data;
p                1717 fs/jfs/jfs_txnmgr.c 	next = le16_to_cpu(p->header.nextindex);
p                1773 fs/jfs/jfs_txnmgr.c 				PXDaddress(pxd, addressXAD(&p->xad[lwm + i]));
p                1774 fs/jfs/jfs_txnmgr.c 				PXDlength(pxd, lengthXAD(&p->xad[lwm + i]));
p                1775 fs/jfs/jfs_txnmgr.c 				p->xad[lwm + i].flag &=
p                1785 fs/jfs/jfs_txnmgr.c 			xadlock->xdlist = &p->xad[lwm];
p                1877 fs/jfs/jfs_txnmgr.c 					addressXAD(&p->xad[XTENTRYSTART + i]));
p                1879 fs/jfs/jfs_txnmgr.c 					lengthXAD(&p->xad[XTENTRYSTART + i]));
p                1888 fs/jfs/jfs_txnmgr.c 			xadlock->xdlist = &p->xad[XTENTRYSTART];
p                2012 fs/jfs/jfs_txnmgr.c 			xadlock->xdlist = &p->xad[lwm];
p                2052 fs/jfs/jfs_txnmgr.c 			xadlock->xdlist = &p->xad[next];
p                 141 fs/jfs/jfs_xtree.c 	xtpage_t *p;
p                 175 fs/jfs/jfs_xtree.c 	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
p                 190 fs/jfs/jfs_xtree.c 	xad = &p->xad[index];
p                 234 fs/jfs/jfs_xtree.c 	xtpage_t *p;		/* page */
p                 266 fs/jfs/jfs_xtree.c 		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                 278 fs/jfs/jfs_xtree.c 		    (p->header.flag & BT_LEAF) &&
p                 280 fs/jfs/jfs_xtree.c 		    le16_to_cpu(p->header.nextindex)) {
p                 281 fs/jfs/jfs_xtree.c 			xad = &p->xad[index];
p                 296 fs/jfs/jfs_xtree.c 				    le16_to_cpu(p->header.nextindex)) {
p                 337 fs/jfs/jfs_xtree.c 				if (p->header.nextindex ==	/* little-endian */
p                 338 fs/jfs/jfs_xtree.c 				    p->header.maxentry)
p                 363 fs/jfs/jfs_xtree.c 		lim = le16_to_cpu(p->header.nextindex) - XTENTRYSTART;
p                 371 fs/jfs/jfs_xtree.c 			XT_CMP(cmp, xoff, &p->xad[index], t64);
p                 379 fs/jfs/jfs_xtree.c 				if (p->header.flag & BT_LEAF) {
p                 384 fs/jfs/jfs_xtree.c 						if (p->header.nextindex ==
p                 385 fs/jfs/jfs_xtree.c 						    p->header.maxentry)
p                 412 fs/jfs/jfs_xtree.c 				if (index < le16_to_cpu(p->header.nextindex)-1)
p                 413 fs/jfs/jfs_xtree.c 					next = offsetXAD(&p->xad[index + 1]);
p                 429 fs/jfs/jfs_xtree.c 		if (base < le16_to_cpu(p->header.nextindex))
p                 430 fs/jfs/jfs_xtree.c 			next = offsetXAD(&p->xad[base]);
p                 437 fs/jfs/jfs_xtree.c 		if (p->header.flag & BT_LEAF) {
p                 442 fs/jfs/jfs_xtree.c 				if (p->header.nextindex ==
p                 443 fs/jfs/jfs_xtree.c 				    p->header.maxentry)
p                 483 fs/jfs/jfs_xtree.c 		if (p->header.nextindex == p->header.maxentry)
p                 497 fs/jfs/jfs_xtree.c 		bn = addressXAD(&p->xad[index]);
p                 531 fs/jfs/jfs_xtree.c 	xtpage_t *p;		/* base B+-tree index page */
p                 556 fs/jfs/jfs_xtree.c 	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
p                 572 fs/jfs/jfs_xtree.c 			xad = &p->xad[index - 1];
p                 595 fs/jfs/jfs_xtree.c 	nextindex = le16_to_cpu(p->header.nextindex);
p                 596 fs/jfs/jfs_xtree.c 	if (nextindex == le16_to_cpu(p->header.maxentry)) {
p                 629 fs/jfs/jfs_xtree.c 		memmove(&p->xad[index + 1], &p->xad[index],
p                 633 fs/jfs/jfs_xtree.c 	xad = &p->xad[index];
p                 637 fs/jfs/jfs_xtree.c 	le16_add_cpu(&p->header.nextindex, 1);
p                 647 fs/jfs/jfs_xtree.c 		    le16_to_cpu(p->header.nextindex) - xtlck->lwm.offset;
p                 955 fs/jfs/jfs_xtree.c 	xtpage_t *p;
p                1076 fs/jfs/jfs_xtree.c 		XT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
p                1091 fs/jfs/jfs_xtree.c 		p->header.prev = cpu_to_le64(rbn);
p                1355 fs/jfs/jfs_xtree.c 	xtpage_t *p;		/* base B+-tree index page */
p                1372 fs/jfs/jfs_xtree.c 	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
p                1381 fs/jfs/jfs_xtree.c 	xad = &p->xad[index];
p                1410 fs/jfs/jfs_xtree.c 	nextindex = le16_to_cpu(p->header.nextindex);
p                1418 fs/jfs/jfs_xtree.c 	if (nextindex == le16_to_cpu(p->header.maxentry)) {
p                1431 fs/jfs/jfs_xtree.c 		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                1439 fs/jfs/jfs_xtree.c 		if (p->header.flag & BT_INTERNAL) {
p                1440 fs/jfs/jfs_xtree.c 			ASSERT(p->header.nextindex ==
p                1442 fs/jfs/jfs_xtree.c 			xad = &p->xad[XTENTRYSTART];
p                1447 fs/jfs/jfs_xtree.c 			XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                1463 fs/jfs/jfs_xtree.c 		xad = &p->xad[index + 1];
p                1467 fs/jfs/jfs_xtree.c 		le16_add_cpu(&p->header.nextindex, 1);
p                1471 fs/jfs/jfs_xtree.c 	xad = &p->xad[index];
p                1487 fs/jfs/jfs_xtree.c 		    le16_to_cpu(p->header.nextindex) - xtlck->lwm.offset;
p                1518 fs/jfs/jfs_xtree.c 	xtpage_t *p;		/* base B+-tree index page */
p                1539 fs/jfs/jfs_xtree.c 	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
p                1548 fs/jfs/jfs_xtree.c 	nextindex = le16_to_cpu(p->header.nextindex);
p                1565 fs/jfs/jfs_xtree.c 	xad = &p->xad[index];
p                1583 fs/jfs/jfs_xtree.c 	if (nextindex == le16_to_cpu(p->header.maxentry)) {
p                1596 fs/jfs/jfs_xtree.c 		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                1604 fs/jfs/jfs_xtree.c 		if (p->header.flag & BT_INTERNAL) {
p                1605 fs/jfs/jfs_xtree.c 			ASSERT(p->header.nextindex ==
p                1607 fs/jfs/jfs_xtree.c 			xad = &p->xad[XTENTRYSTART];
p                1612 fs/jfs/jfs_xtree.c 			XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                1628 fs/jfs/jfs_xtree.c 		xad = &p->xad[index + 1];
p                1632 fs/jfs/jfs_xtree.c 		le16_add_cpu(&p->header.nextindex, 1);
p                1636 fs/jfs/jfs_xtree.c 	xad = &p->xad[index];
p                1668 fs/jfs/jfs_xtree.c 		xtlck->lwm.length = le16_to_cpu(p->header.nextindex) -
p                1697 fs/jfs/jfs_xtree.c 	xtpage_t *p;		/* base B+-tree index page */
p                1720 fs/jfs/jfs_xtree.c 	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index0);
p                1737 fs/jfs/jfs_xtree.c 	xad = &p->xad[index0];
p                1754 fs/jfs/jfs_xtree.c 	nextindex = le16_to_cpu(p->header.nextindex);
p                1787 fs/jfs/jfs_xtree.c 	lxad = &p->xad[index - 1];
p                1815 fs/jfs/jfs_xtree.c 				memmove(&p->xad[index], &p->xad[index + 1],
p                1819 fs/jfs/jfs_xtree.c 			p->header.nextindex =
p                1820 fs/jfs/jfs_xtree.c 			    cpu_to_le16(le16_to_cpu(p->header.nextindex) -
p                1825 fs/jfs/jfs_xtree.c 			nextindex = le16_to_cpu(p->header.nextindex);
p                1858 fs/jfs/jfs_xtree.c 	rxad = &p->xad[index + 1];
p                1883 fs/jfs/jfs_xtree.c 			memmove(&p->xad[index], &p->xad[index + 1],
p                1886 fs/jfs/jfs_xtree.c 			p->header.nextindex =
p                1887 fs/jfs/jfs_xtree.c 			    cpu_to_le16(le16_to_cpu(p->header.nextindex) -
p                1911 fs/jfs/jfs_xtree.c 	xad = &p->xad[index];
p                1915 fs/jfs/jfs_xtree.c 	if (nextindex == le16_to_cpu(p->header.maxentry)) {
p                1929 fs/jfs/jfs_xtree.c 		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                1937 fs/jfs/jfs_xtree.c 		if (p->header.flag & BT_INTERNAL) {
p                1938 fs/jfs/jfs_xtree.c 			ASSERT(p->header.nextindex ==
p                1940 fs/jfs/jfs_xtree.c 			xad = &p->xad[XTENTRYSTART];
p                1945 fs/jfs/jfs_xtree.c 			XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                1957 fs/jfs/jfs_xtree.c 			    (le16_to_cpu(p->header.maxentry) >> 1)) {
p                1960 fs/jfs/jfs_xtree.c 				    le16_to_cpu(p->header.nextindex) +
p                1968 fs/jfs/jfs_xtree.c 			memmove(&p->xad[newindex + 1], &p->xad[newindex],
p                1972 fs/jfs/jfs_xtree.c 		xad = &p->xad[newindex];
p                1977 fs/jfs/jfs_xtree.c 		p->header.nextindex =
p                1978 fs/jfs/jfs_xtree.c 		    cpu_to_le16(le16_to_cpu(p->header.nextindex) + 1);
p                1998 fs/jfs/jfs_xtree.c 			    le16_to_cpu(p->header.nextindex) -
p                2002 fs/jfs/jfs_xtree.c 		bn = le64_to_cpu(p->header.next);
p                2006 fs/jfs/jfs_xtree.c 		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                2021 fs/jfs/jfs_xtree.c 	nextindex = le16_to_cpu(p->header.nextindex);
p                2027 fs/jfs/jfs_xtree.c 	if (nextindex == le16_to_cpu(p->header.maxentry)) {
p                2034 fs/jfs/jfs_xtree.c 		XT_GETSEARCH(ip, btstack.top, bn, mp, p, index0);
p                2058 fs/jfs/jfs_xtree.c 	xad = &p->xad[index];
p                2066 fs/jfs/jfs_xtree.c 	if (nextindex == le16_to_cpu(p->header.maxentry)) {
p                2082 fs/jfs/jfs_xtree.c 		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                2091 fs/jfs/jfs_xtree.c 		if (p->header.flag & BT_INTERNAL) {
p                2092 fs/jfs/jfs_xtree.c 			ASSERT(p->header.nextindex ==
p                2094 fs/jfs/jfs_xtree.c 			xad = &p->xad[XTENTRYSTART];
p                2099 fs/jfs/jfs_xtree.c 			XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                2112 fs/jfs/jfs_xtree.c 			memmove(&p->xad[newindex + 1], &p->xad[newindex],
p                2116 fs/jfs/jfs_xtree.c 		xad = &p->xad[newindex];
p                2120 fs/jfs/jfs_xtree.c 		p->header.nextindex =
p                2121 fs/jfs/jfs_xtree.c 		    cpu_to_le16(le16_to_cpu(p->header.nextindex) + 1);
p                2128 fs/jfs/jfs_xtree.c 		xtlck->lwm.length = le16_to_cpu(p->header.nextindex) -
p                2164 fs/jfs/jfs_xtree.c 	xtpage_t *p;		/* base B+-tree index page */
p                2195 fs/jfs/jfs_xtree.c 	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
p                2216 fs/jfs/jfs_xtree.c 	nextindex = le16_to_cpu(p->header.nextindex);
p                2217 fs/jfs/jfs_xtree.c 	if (nextindex < le16_to_cpu(p->header.maxentry))
p                2288 fs/jfs/jfs_xtree.c 	xad = &p->xad[index];
p                2292 fs/jfs/jfs_xtree.c 	le16_add_cpu(&p->header.nextindex, 1);
p                2296 fs/jfs/jfs_xtree.c 	xtlck->lwm.length = le16_to_cpu(p->header.nextindex) -
p                2333 fs/jfs/jfs_xtree.c 	xtpage_t *p;
p                2344 fs/jfs/jfs_xtree.c 	XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
p                2354 fs/jfs/jfs_xtree.c 	nextindex = le16_to_cpu(p->header.nextindex);
p                2355 fs/jfs/jfs_xtree.c 	le16_add_cpu(&p->header.nextindex, -1);
p                2360 fs/jfs/jfs_xtree.c 	if (p->header.nextindex == cpu_to_le16(XTENTRYSTART))
p                2361 fs/jfs/jfs_xtree.c 		return (xtDeleteUp(tid, ip, mp, p, &btstack));
p                2376 fs/jfs/jfs_xtree.c 		memmove(&p->xad[index], &p->xad[index + 1],
p                2402 fs/jfs/jfs_xtree.c 	xtpage_t *p;
p                2450 fs/jfs/jfs_xtree.c 		XT_GETPAGE(ip, parent->bn, mp, PSIZE, p, rc);
p                2458 fs/jfs/jfs_xtree.c 		nextindex = le16_to_cpu(p->header.nextindex);
p                2465 fs/jfs/jfs_xtree.c 			if (p->header.flag & BT_ROOT) {
p                2467 fs/jfs/jfs_xtree.c 				p->header.flag &= ~BT_INTERNAL;
p                2468 fs/jfs/jfs_xtree.c 				p->header.flag |= BT_LEAF;
p                2469 fs/jfs/jfs_xtree.c 				p->header.nextindex =
p                2477 fs/jfs/jfs_xtree.c 				if ((rc = xtRelink(tid, ip, p)))
p                2480 fs/jfs/jfs_xtree.c 				xaddr = addressPXD(&p->header.self);
p                2514 fs/jfs/jfs_xtree.c 				memmove(&p->xad[index], &p->xad[index + 1],
p                2518 fs/jfs/jfs_xtree.c 			le16_add_cpu(&p->header.nextindex, -1);
p                2555 fs/jfs/jfs_xtree.c 	xtpage_t *p, *pp, *rp, *lp;	/* base B+-tree index page */
p                2706 fs/jfs/jfs_xtree.c 		XT_GETPAGE(ip, oxaddr, mp, PSIZE, p, rc);
p                2716 fs/jfs/jfs_xtree.c 		if (p->header.next) {
p                2717 fs/jfs/jfs_xtree.c 			nextbn = le64_to_cpu(p->header.next);
p                2727 fs/jfs/jfs_xtree.c 		if (p->header.prev) {
p                2728 fs/jfs/jfs_xtree.c 			prevbn = le64_to_cpu(p->header.prev);
p                2785 fs/jfs/jfs_xtree.c 		pxd = &p->header.self;
p                2790 fs/jfs/jfs_xtree.c 		    le16_to_cpu(p->header.nextindex) - xtlck->lwm.offset;
p                2887 fs/jfs/jfs_xtree.c 	xtpage_t *p;		/* page */
p                2916 fs/jfs/jfs_xtree.c 		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                2919 fs/jfs/jfs_xtree.c 		if (p->header.flag & BT_LEAF) {
p                2924 fs/jfs/jfs_xtree.c 		lim = le16_to_cpu(p->header.nextindex) - XTENTRYSTART;
p                2932 fs/jfs/jfs_xtree.c 			XT_CMP(cmp, xoff, &p->xad[index], t64);
p                2939 fs/jfs/jfs_xtree.c 				if (xaddr == addressXAD(&p->xad[index]) &&
p                2940 fs/jfs/jfs_xtree.c 				    xoff == offsetXAD(&p->xad[index])) {
p                2977 fs/jfs/jfs_xtree.c 		bn = addressXAD(&p->xad[index]);
p                2998 fs/jfs/jfs_xtree.c static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * p)
p                3005 fs/jfs/jfs_xtree.c 	nextbn = le64_to_cpu(p->header.next);
p                3006 fs/jfs/jfs_xtree.c 	prevbn = le64_to_cpu(p->header.prev);
p                3010 fs/jfs/jfs_xtree.c 		XT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
p                3024 fs/jfs/jfs_xtree.c 		p->header.prev = cpu_to_le64(prevbn);
p                3031 fs/jfs/jfs_xtree.c 		XT_GETPAGE(ip, prevbn, mp, PSIZE, p, rc);
p                3045 fs/jfs/jfs_xtree.c 		p->header.next = le64_to_cpu(nextbn);
p                3062 fs/jfs/jfs_xtree.c 	xtpage_t *p;
p                3071 fs/jfs/jfs_xtree.c 	p = &JFS_IP(ip)->i_xtroot;
p                3073 fs/jfs/jfs_xtree.c 	p->header.flag = DXD_INDEX | BT_ROOT | BT_LEAF;
p                3074 fs/jfs/jfs_xtree.c 	p->header.nextindex = cpu_to_le16(XTENTRYSTART);
p                3077 fs/jfs/jfs_xtree.c 		p->header.maxentry = cpu_to_le16(XTROOTINITSLOT_DIR);
p                3079 fs/jfs/jfs_xtree.c 		p->header.maxentry = cpu_to_le16(XTROOTINITSLOT);
p                3154 fs/jfs/jfs_xtree.c 	xtpage_t *p;
p                3232 fs/jfs/jfs_xtree.c 	XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                3237 fs/jfs/jfs_xtree.c 	index = le16_to_cpu(p->header.nextindex) - 1;
p                3244 fs/jfs/jfs_xtree.c 	if (p->header.next) {
p                3253 fs/jfs/jfs_xtree.c 		p->header.next = 0;
p                3256 fs/jfs/jfs_xtree.c 	if (p->header.flag & BT_INTERNAL)
p                3265 fs/jfs/jfs_xtree.c 	xad = &p->xad[index];
p                3287 fs/jfs/jfs_xtree.c 		xtlck->hwm.offset = le16_to_cpu(p->header.nextindex) - 1;
p                3295 fs/jfs/jfs_xtree.c 		xad = &p->xad[index];
p                3392 fs/jfs/jfs_xtree.c 		if (nextindex < le16_to_cpu(p->header.nextindex)) {
p                3394 fs/jfs/jfs_xtree.c 				xadlock.xdlist = &p->xad[nextindex];
p                3396 fs/jfs/jfs_xtree.c 				    le16_to_cpu(p->header.nextindex) -
p                3401 fs/jfs/jfs_xtree.c 			p->header.nextindex = cpu_to_le16(nextindex);
p                3425 fs/jfs/jfs_xtree.c 		xadlock.xdlist = &p->xad[XTENTRYSTART];
p                3427 fs/jfs/jfs_xtree.c 		    le16_to_cpu(p->header.nextindex) - XTENTRYSTART;
p                3431 fs/jfs/jfs_xtree.c 	if (p->header.flag & BT_ROOT) {
p                3432 fs/jfs/jfs_xtree.c 		p->header.flag &= ~BT_INTERNAL;
p                3433 fs/jfs/jfs_xtree.c 		p->header.flag |= BT_LEAF;
p                3434 fs/jfs/jfs_xtree.c 		p->header.nextindex = cpu_to_le16(XTENTRYSTART);
p                3470 fs/jfs/jfs_xtree.c 	XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                3481 fs/jfs/jfs_xtree.c 		if (index < le16_to_cpu(p->header.nextindex) - 1) {
p                3491 fs/jfs/jfs_xtree.c 					    le16_to_cpu(p->header.
p                3499 fs/jfs/jfs_xtree.c 				xadlock.xdlist = &p->xad[index + 1];
p                3501 fs/jfs/jfs_xtree.c 				    le16_to_cpu(p->header.nextindex) -
p                3508 fs/jfs/jfs_xtree.c 			p->header.nextindex = cpu_to_le16(index + 1);
p                3517 fs/jfs/jfs_xtree.c 	nfreed += lengthXAD(&p->xad[index]);
p                3566 fs/jfs/jfs_xtree.c 			    le16_to_cpu(p->header.nextindex) - 1;
p                3571 fs/jfs/jfs_xtree.c 			xadlock.xdlist = &p->xad[XTENTRYSTART];
p                3573 fs/jfs/jfs_xtree.c 			    le16_to_cpu(p->header.nextindex) -
p                3580 fs/jfs/jfs_xtree.c 		if (p->header.flag & BT_ROOT) {
p                3581 fs/jfs/jfs_xtree.c 			p->header.flag &= ~BT_INTERNAL;
p                3582 fs/jfs/jfs_xtree.c 			p->header.flag |= BT_LEAF;
p                3583 fs/jfs/jfs_xtree.c 			p->header.nextindex = cpu_to_le16(XTENTRYSTART);
p                3584 fs/jfs/jfs_xtree.c 			if (le16_to_cpu(p->header.maxentry) == XTROOTMAXSLOT) {
p                3589 fs/jfs/jfs_xtree.c 				p->header.maxentry =
p                3646 fs/jfs/jfs_xtree.c 	xad = &p->xad[index];
p                3713 fs/jfs/jfs_xtree.c 	xtpage_t *p;
p                3736 fs/jfs/jfs_xtree.c 		XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
p                3755 fs/jfs/jfs_xtree.c 		XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                3760 fs/jfs/jfs_xtree.c 		index = le16_to_cpu(p->header.nextindex) - 1;
p                3762 fs/jfs/jfs_xtree.c 		if (p->header.flag & BT_INTERNAL)
p                3775 fs/jfs/jfs_xtree.c 		xad = &p->xad[index];
p                3800 fs/jfs/jfs_xtree.c 	XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
p                3816 fs/jfs/jfs_xtree.c 		xtlck->hwm.offset = le16_to_cpu(p->header.nextindex) - 1;
p                3821 fs/jfs/jfs_xtree.c 		if (p->header.flag & BT_ROOT) {
p                3846 fs/jfs/jfs_xtree.c 	xad = &p->xad[index];
p                 237 fs/jfs/super.c 	char *p;
p                 245 fs/jfs/super.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 248 fs/jfs/super.c 		if (!*p)
p                 251 fs/jfs/super.c 		token = match_token(p, tokens, args);
p                 412 fs/jfs/super.c 			       p);
p                 869 fs/kernfs/dir.c 	char *p, *name;
p                 883 fs/kernfs/dir.c 	p = kernfs_pr_cont_buf;
p                 885 fs/kernfs/dir.c 	while ((name = strsep(&p, "/")) && parent) {
p                  99 fs/libfs.c     					struct list_head *p,
p                 106 fs/libfs.c     	while ((p = p->next) != &dentry->d_subdirs) {
p                 107 fs/libfs.c     		struct dentry *d = list_entry(p, struct dentry, d_child);
p                 121 fs/libfs.c     			list_move(&cursor->d_child, p);
p                 122 fs/libfs.c     			p = &cursor->d_child;
p                 190 fs/libfs.c     	struct list_head *p;
p                 196 fs/libfs.c     		p = anchor;
p                 198 fs/libfs.c     		p = &cursor->d_child;
p                 202 fs/libfs.c     	while ((next = scan_positives(cursor, p, 1, next)) != NULL) {
p                 207 fs/libfs.c     		p = &next->d_child;
p                1128 fs/libfs.c     void kfree_link(void *p)
p                1130 fs/libfs.c     	kfree(p);
p                  91 fs/lockd/clnt4xdr.c 	__be32 *p;
p                  93 fs/lockd/clnt4xdr.c 	p = xdr_reserve_space(xdr, 4);
p                  94 fs/lockd/clnt4xdr.c 	*p = value ? xdr_one : xdr_zero;
p                  99 fs/lockd/clnt4xdr.c 	__be32 *p;
p                 101 fs/lockd/clnt4xdr.c 	p = xdr_reserve_space(xdr, 4);
p                 102 fs/lockd/clnt4xdr.c 	*p = cpu_to_be32(value);
p                 111 fs/lockd/clnt4xdr.c 	__be32 *p;
p                 113 fs/lockd/clnt4xdr.c 	p = xdr_reserve_space(xdr, 4 + length);
p                 114 fs/lockd/clnt4xdr.c 	xdr_encode_opaque(p, data, length);
p                 143 fs/lockd/clnt4xdr.c 	__be32 *p;
p                 145 fs/lockd/clnt4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 146 fs/lockd/clnt4xdr.c 	if (unlikely(p == NULL))
p                 148 fs/lockd/clnt4xdr.c 	length = be32_to_cpup(p++);
p                 154 fs/lockd/clnt4xdr.c 	p = xdr_inline_decode(xdr, length);
p                 155 fs/lockd/clnt4xdr.c 	if (unlikely(p == NULL))
p                 158 fs/lockd/clnt4xdr.c 	memcpy(cookie->data, p, length);
p                 204 fs/lockd/clnt4xdr.c 	__be32 *p;
p                 207 fs/lockd/clnt4xdr.c 	p = xdr_reserve_space(xdr, 4);
p                 208 fs/lockd/clnt4xdr.c 	*p = stat;
p                 213 fs/lockd/clnt4xdr.c 	__be32 *p;
p                 215 fs/lockd/clnt4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 216 fs/lockd/clnt4xdr.c 	if (unlikely(p == NULL))
p                 218 fs/lockd/clnt4xdr.c 	if (unlikely(ntohl(*p) > ntohl(nlm4_failed)))
p                 220 fs/lockd/clnt4xdr.c 	*stat = *p;
p                 224 fs/lockd/clnt4xdr.c 			__func__, be32_to_cpup(p));
p                 244 fs/lockd/clnt4xdr.c 	__be32 *p;
p                 250 fs/lockd/clnt4xdr.c 	p = xdr_reserve_space(xdr, 4 + 4);
p                 252 fs/lockd/clnt4xdr.c 	p = xdr_encode_hyper(p, l_offset);
p                 253 fs/lockd/clnt4xdr.c 	xdr_encode_hyper(p, l_len);
p                 263 fs/lockd/clnt4xdr.c 	__be32 *p;
p                 269 fs/lockd/clnt4xdr.c 	p = xdr_inline_decode(xdr, 4 + 4);
p                 270 fs/lockd/clnt4xdr.c 	if (unlikely(p == NULL))
p                 272 fs/lockd/clnt4xdr.c 	exclusive = be32_to_cpup(p++);
p                 273 fs/lockd/clnt4xdr.c 	lock->svid = be32_to_cpup(p);
p                 280 fs/lockd/clnt4xdr.c 	p = xdr_inline_decode(xdr, 8 + 8);
p                 281 fs/lockd/clnt4xdr.c 	if (unlikely(p == NULL))
p                 286 fs/lockd/clnt4xdr.c 	p = xdr_decode_hyper(p, &l_offset);
p                 287 fs/lockd/clnt4xdr.c 	xdr_decode_hyper(p, &l_len);
p                 309 fs/lockd/clnt4xdr.c 	__be32 *p;
p                 311 fs/lockd/clnt4xdr.c 	p = xdr_reserve_space(xdr, 4 + length);
p                 312 fs/lockd/clnt4xdr.c 	xdr_encode_opaque(p, name, length);
p                 329 fs/lockd/clnt4xdr.c 	__be32 *p;
p                 335 fs/lockd/clnt4xdr.c 	p = xdr_reserve_space(xdr, 4 + 8 + 8);
p                 336 fs/lockd/clnt4xdr.c 	*p++ = cpu_to_be32(lock->svid);
p                 339 fs/lockd/clnt4xdr.c 	p = xdr_encode_hyper(p, l_offset);
p                 340 fs/lockd/clnt4xdr.c 	xdr_encode_hyper(p, l_len);
p                  88 fs/lockd/clntxdr.c 	__be32 *p;
p                  90 fs/lockd/clntxdr.c 	p = xdr_reserve_space(xdr, 4);
p                  91 fs/lockd/clntxdr.c 	*p = value ? xdr_one : xdr_zero;
p                  96 fs/lockd/clntxdr.c 	__be32 *p;
p                  98 fs/lockd/clntxdr.c 	p = xdr_reserve_space(xdr, 4);
p                  99 fs/lockd/clntxdr.c 	*p = cpu_to_be32(value);
p                 108 fs/lockd/clntxdr.c 	__be32 *p;
p                 110 fs/lockd/clntxdr.c 	p = xdr_reserve_space(xdr, 4 + length);
p                 111 fs/lockd/clntxdr.c 	xdr_encode_opaque(p, data, length);
p                 140 fs/lockd/clntxdr.c 	__be32 *p;
p                 142 fs/lockd/clntxdr.c 	p = xdr_inline_decode(xdr, 4);
p                 143 fs/lockd/clntxdr.c 	if (unlikely(p == NULL))
p                 145 fs/lockd/clntxdr.c 	length = be32_to_cpup(p++);
p                 151 fs/lockd/clntxdr.c 	p = xdr_inline_decode(xdr, length);
p                 152 fs/lockd/clntxdr.c 	if (unlikely(p == NULL))
p                 155 fs/lockd/clntxdr.c 	memcpy(cookie->data, p, length);
p                 198 fs/lockd/clntxdr.c 	__be32 *p;
p                 201 fs/lockd/clntxdr.c 	p = xdr_reserve_space(xdr, 4);
p                 202 fs/lockd/clntxdr.c 	*p = stat;
p                 208 fs/lockd/clntxdr.c 	__be32 *p;
p                 210 fs/lockd/clntxdr.c 	p = xdr_inline_decode(xdr, 4);
p                 211 fs/lockd/clntxdr.c 	if (unlikely(p == NULL))
p                 213 fs/lockd/clntxdr.c 	if (unlikely(ntohl(*p) > ntohl(nlm_lck_denied_grace_period)))
p                 215 fs/lockd/clntxdr.c 	*stat = *p;
p                 219 fs/lockd/clntxdr.c 		__func__, be32_to_cpup(p));
p                 239 fs/lockd/clntxdr.c 	__be32 *p;
p                 245 fs/lockd/clntxdr.c 	p = xdr_reserve_space(xdr, 4 + 4);
p                 247 fs/lockd/clntxdr.c 	*p++ = cpu_to_be32(l_offset);
p                 248 fs/lockd/clntxdr.c 	*p   = cpu_to_be32(l_len);
p                 257 fs/lockd/clntxdr.c 	__be32 *p;
p                 263 fs/lockd/clntxdr.c 	p = xdr_inline_decode(xdr, 4 + 4);
p                 264 fs/lockd/clntxdr.c 	if (unlikely(p == NULL))
p                 266 fs/lockd/clntxdr.c 	exclusive = be32_to_cpup(p++);
p                 267 fs/lockd/clntxdr.c 	lock->svid = be32_to_cpup(p);
p                 274 fs/lockd/clntxdr.c 	p = xdr_inline_decode(xdr, 4 + 4);
p                 275 fs/lockd/clntxdr.c 	if (unlikely(p == NULL))
p                 280 fs/lockd/clntxdr.c 	l_offset = be32_to_cpup(p++);
p                 281 fs/lockd/clntxdr.c 	l_len = be32_to_cpup(p);
p                 303 fs/lockd/clntxdr.c 	__be32 *p;
p                 305 fs/lockd/clntxdr.c 	p = xdr_reserve_space(xdr, 4 + length);
p                 306 fs/lockd/clntxdr.c 	xdr_encode_opaque(p, name, length);
p                 323 fs/lockd/clntxdr.c 	__be32 *p;
p                 329 fs/lockd/clntxdr.c 	p = xdr_reserve_space(xdr, 4 + 4 + 4);
p                 330 fs/lockd/clntxdr.c 	*p++ = cpu_to_be32(lock->svid);
p                 333 fs/lockd/clntxdr.c 	*p++ = cpu_to_be32(l_offset);
p                 334 fs/lockd/clntxdr.c 	*p   = cpu_to_be32(l_len);
p                 264 fs/lockd/mon.c 	u64 *p = (u64 *)&nsm->sm_priv.data;
p                 268 fs/lockd/mon.c 	put_unaligned(ns, p);
p                 269 fs/lockd/mon.c 	put_unaligned((unsigned long)nsm, p + 1);
p                 427 fs/lockd/mon.c 	__be32 *p;
p                 429 fs/lockd/mon.c 	p = xdr_reserve_space(xdr, 4 + len);
p                 430 fs/lockd/mon.c 	xdr_encode_opaque(p, string, len);
p                 449 fs/lockd/mon.c 	__be32 *p;
p                 452 fs/lockd/mon.c 	p = xdr_reserve_space(xdr, 4 + 4 + 4);
p                 453 fs/lockd/mon.c 	*p++ = cpu_to_be32(argp->prog);
p                 454 fs/lockd/mon.c 	*p++ = cpu_to_be32(argp->vers);
p                 455 fs/lockd/mon.c 	*p = cpu_to_be32(argp->proc);
p                 475 fs/lockd/mon.c 	__be32 *p;
p                 477 fs/lockd/mon.c 	p = xdr_reserve_space(xdr, SM_PRIV_SIZE);
p                 478 fs/lockd/mon.c 	xdr_encode_opaque_fixed(p, argp->priv->data, SM_PRIV_SIZE);
p                 499 fs/lockd/mon.c 	__be32 *p;
p                 501 fs/lockd/mon.c 	p = xdr_inline_decode(xdr, 4 + 4);
p                 502 fs/lockd/mon.c 	if (unlikely(p == NULL))
p                 504 fs/lockd/mon.c 	resp->status = be32_to_cpup(p++);
p                 505 fs/lockd/mon.c 	resp->state = be32_to_cpup(p);
p                 517 fs/lockd/mon.c 	__be32 *p;
p                 519 fs/lockd/mon.c 	p = xdr_inline_decode(xdr, 4);
p                 520 fs/lockd/mon.c 	if (unlikely(p == NULL))
p                 522 fs/lockd/mon.c 	resp->state = be32_to_cpup(p);
p                  66 fs/lockd/svclock.c 	char *p = buf;
p                  73 fs/lockd/svclock.c 			strcpy(p-3, "...");
p                  76 fs/lockd/svclock.c 		sprintf(p, "%02x", cookie->data[i]);
p                  77 fs/lockd/svclock.c 		p += 2;
p                  80 fs/lockd/svclock.c 	*p = '\0';
p                  47 fs/lockd/xdr.c static __be32 *nlm_decode_cookie(__be32 *p, struct nlm_cookie *c)
p                  51 fs/lockd/xdr.c 	len = ntohl(*p++);
p                  61 fs/lockd/xdr.c 		memcpy(c->data, p, len);
p                  62 fs/lockd/xdr.c 		p+=XDR_QUADLEN(len);
p                  71 fs/lockd/xdr.c 	return p;
p                  75 fs/lockd/xdr.c nlm_encode_cookie(__be32 *p, struct nlm_cookie *c)
p                  77 fs/lockd/xdr.c 	*p++ = htonl(c->len);
p                  78 fs/lockd/xdr.c 	memcpy(p, c->data, c->len);
p                  79 fs/lockd/xdr.c 	p+=XDR_QUADLEN(c->len);
p                  80 fs/lockd/xdr.c 	return p;
p                  84 fs/lockd/xdr.c nlm_decode_fh(__be32 *p, struct nfs_fh *f)
p                  88 fs/lockd/xdr.c 	if ((len = ntohl(*p++)) != NFS2_FHSIZE) {
p                  95 fs/lockd/xdr.c 	memcpy(f->data, p, NFS2_FHSIZE);
p                  96 fs/lockd/xdr.c 	return p + XDR_QUADLEN(NFS2_FHSIZE);
p                 103 fs/lockd/xdr.c nlm_decode_oh(__be32 *p, struct xdr_netobj *oh)
p                 105 fs/lockd/xdr.c 	return xdr_decode_netobj(p, oh);
p                 109 fs/lockd/xdr.c nlm_encode_oh(__be32 *p, struct xdr_netobj *oh)
p                 111 fs/lockd/xdr.c 	return xdr_encode_netobj(p, oh);
p                 115 fs/lockd/xdr.c nlm_decode_lock(__be32 *p, struct nlm_lock *lock)
p                 120 fs/lockd/xdr.c 	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
p                 123 fs/lockd/xdr.c 	 || !(p = nlm_decode_fh(p, &lock->fh))
p                 124 fs/lockd/xdr.c 	 || !(p = nlm_decode_oh(p, &lock->oh)))
p                 126 fs/lockd/xdr.c 	lock->svid  = ntohl(*p++);
p                 131 fs/lockd/xdr.c 	start = ntohl(*p++);
p                 132 fs/lockd/xdr.c 	len = ntohl(*p++);
p                 141 fs/lockd/xdr.c 	return p;
p                 148 fs/lockd/xdr.c nlm_encode_testres(__be32 *p, struct nlm_res *resp)
p                 152 fs/lockd/xdr.c 	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
p                 154 fs/lockd/xdr.c 	*p++ = resp->status;
p                 159 fs/lockd/xdr.c 		*p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one;
p                 160 fs/lockd/xdr.c 		*p++ = htonl(resp->lock.svid);
p                 163 fs/lockd/xdr.c 		if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
p                 172 fs/lockd/xdr.c 		*p++ = htonl(start);
p                 173 fs/lockd/xdr.c 		*p++ = htonl(len);
p                 176 fs/lockd/xdr.c 	return p;
p                 184 fs/lockd/xdr.c nlmsvc_decode_testargs(struct svc_rqst *rqstp, __be32 *p)
p                 189 fs/lockd/xdr.c 	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
p                 192 fs/lockd/xdr.c 	exclusive = ntohl(*p++);
p                 193 fs/lockd/xdr.c 	if (!(p = nlm_decode_lock(p, &argp->lock)))
p                 198 fs/lockd/xdr.c 	return xdr_argsize_check(rqstp, p);
p                 202 fs/lockd/xdr.c nlmsvc_encode_testres(struct svc_rqst *rqstp, __be32 *p)
p                 206 fs/lockd/xdr.c 	if (!(p = nlm_encode_testres(p, resp)))
p                 208 fs/lockd/xdr.c 	return xdr_ressize_check(rqstp, p);
p                 212 fs/lockd/xdr.c nlmsvc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p)
p                 217 fs/lockd/xdr.c 	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
p                 219 fs/lockd/xdr.c 	argp->block  = ntohl(*p++);
p                 220 fs/lockd/xdr.c 	exclusive    = ntohl(*p++);
p                 221 fs/lockd/xdr.c 	if (!(p = nlm_decode_lock(p, &argp->lock)))
p                 225 fs/lockd/xdr.c 	argp->reclaim = ntohl(*p++);
p                 226 fs/lockd/xdr.c 	argp->state   = ntohl(*p++);
p                 229 fs/lockd/xdr.c 	return xdr_argsize_check(rqstp, p);
p                 233 fs/lockd/xdr.c nlmsvc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p)
p                 238 fs/lockd/xdr.c 	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
p                 240 fs/lockd/xdr.c 	argp->block = ntohl(*p++);
p                 241 fs/lockd/xdr.c 	exclusive = ntohl(*p++);
p                 242 fs/lockd/xdr.c 	if (!(p = nlm_decode_lock(p, &argp->lock)))
p                 246 fs/lockd/xdr.c 	return xdr_argsize_check(rqstp, p);
p                 250 fs/lockd/xdr.c nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p)
p                 254 fs/lockd/xdr.c 	if (!(p = nlm_decode_cookie(p, &argp->cookie))
p                 255 fs/lockd/xdr.c 	 || !(p = nlm_decode_lock(p, &argp->lock)))
p                 258 fs/lockd/xdr.c 	return xdr_argsize_check(rqstp, p);
p                 262 fs/lockd/xdr.c nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
p                 271 fs/lockd/xdr.c 	if (!(p = nlm_decode_cookie(p, &argp->cookie))
p                 272 fs/lockd/xdr.c 	 || !(p = xdr_decode_string_inplace(p, &lock->caller,
p                 274 fs/lockd/xdr.c 	 || !(p = nlm_decode_fh(p, &lock->fh))
p                 275 fs/lockd/xdr.c 	 || !(p = nlm_decode_oh(p, &lock->oh)))
p                 277 fs/lockd/xdr.c 	argp->fsm_mode = ntohl(*p++);
p                 278 fs/lockd/xdr.c 	argp->fsm_access = ntohl(*p++);
p                 279 fs/lockd/xdr.c 	return xdr_argsize_check(rqstp, p);
p                 283 fs/lockd/xdr.c nlmsvc_encode_shareres(struct svc_rqst *rqstp, __be32 *p)
p                 287 fs/lockd/xdr.c 	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
p                 289 fs/lockd/xdr.c 	*p++ = resp->status;
p                 290 fs/lockd/xdr.c 	*p++ = xdr_zero;		/* sequence argument */
p                 291 fs/lockd/xdr.c 	return xdr_ressize_check(rqstp, p);
p                 295 fs/lockd/xdr.c nlmsvc_encode_res(struct svc_rqst *rqstp, __be32 *p)
p                 299 fs/lockd/xdr.c 	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
p                 301 fs/lockd/xdr.c 	*p++ = resp->status;
p                 302 fs/lockd/xdr.c 	return xdr_ressize_check(rqstp, p);
p                 306 fs/lockd/xdr.c nlmsvc_decode_notify(struct svc_rqst *rqstp, __be32 *p)
p                 311 fs/lockd/xdr.c 	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
p                 314 fs/lockd/xdr.c 	argp->state = ntohl(*p++);
p                 315 fs/lockd/xdr.c 	return xdr_argsize_check(rqstp, p);
p                 319 fs/lockd/xdr.c nlmsvc_decode_reboot(struct svc_rqst *rqstp, __be32 *p)
p                 323 fs/lockd/xdr.c 	if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
p                 325 fs/lockd/xdr.c 	argp->state = ntohl(*p++);
p                 326 fs/lockd/xdr.c 	memcpy(&argp->priv.data, p, sizeof(argp->priv.data));
p                 327 fs/lockd/xdr.c 	p += XDR_QUADLEN(SM_PRIV_SIZE);
p                 328 fs/lockd/xdr.c 	return xdr_argsize_check(rqstp, p);
p                 332 fs/lockd/xdr.c nlmsvc_decode_res(struct svc_rqst *rqstp, __be32 *p)
p                 336 fs/lockd/xdr.c 	if (!(p = nlm_decode_cookie(p, &resp->cookie)))
p                 338 fs/lockd/xdr.c 	resp->status = *p++;
p                 339 fs/lockd/xdr.c 	return xdr_argsize_check(rqstp, p);
p                 343 fs/lockd/xdr.c nlmsvc_decode_void(struct svc_rqst *rqstp, __be32 *p)
p                 345 fs/lockd/xdr.c 	return xdr_argsize_check(rqstp, p);
p                 349 fs/lockd/xdr.c nlmsvc_encode_void(struct svc_rqst *rqstp, __be32 *p)
p                 351 fs/lockd/xdr.c 	return xdr_ressize_check(rqstp, p);
p                  47 fs/lockd/xdr4.c nlm4_decode_cookie(__be32 *p, struct nlm_cookie *c)
p                  51 fs/lockd/xdr4.c 	len = ntohl(*p++);
p                  61 fs/lockd/xdr4.c 		memcpy(c->data, p, len);
p                  62 fs/lockd/xdr4.c 		p+=XDR_QUADLEN(len);
p                  71 fs/lockd/xdr4.c 	return p;
p                  75 fs/lockd/xdr4.c nlm4_encode_cookie(__be32 *p, struct nlm_cookie *c)
p                  77 fs/lockd/xdr4.c 	*p++ = htonl(c->len);
p                  78 fs/lockd/xdr4.c 	memcpy(p, c->data, c->len);
p                  79 fs/lockd/xdr4.c 	p+=XDR_QUADLEN(c->len);
p                  80 fs/lockd/xdr4.c 	return p;
p                  84 fs/lockd/xdr4.c nlm4_decode_fh(__be32 *p, struct nfs_fh *f)
p                  87 fs/lockd/xdr4.c 	f->size = ntohl(*p++);
p                  93 fs/lockd/xdr4.c       	memcpy(f->data, p, f->size);
p                  94 fs/lockd/xdr4.c 	return p + XDR_QUADLEN(f->size);
p                 101 fs/lockd/xdr4.c nlm4_decode_oh(__be32 *p, struct xdr_netobj *oh)
p                 103 fs/lockd/xdr4.c 	return xdr_decode_netobj(p, oh);
p                 107 fs/lockd/xdr4.c nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
p                 113 fs/lockd/xdr4.c 	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
p                 115 fs/lockd/xdr4.c 	 || !(p = nlm4_decode_fh(p, &lock->fh))
p                 116 fs/lockd/xdr4.c 	 || !(p = nlm4_decode_oh(p, &lock->oh)))
p                 118 fs/lockd/xdr4.c 	lock->svid  = ntohl(*p++);
p                 123 fs/lockd/xdr4.c 	p = xdr_decode_hyper(p, &start);
p                 124 fs/lockd/xdr4.c 	p = xdr_decode_hyper(p, &len);
p                 133 fs/lockd/xdr4.c 	return p;
p                 140 fs/lockd/xdr4.c nlm4_encode_testres(__be32 *p, struct nlm_res *resp)
p                 144 fs/lockd/xdr4.c 	dprintk("xdr: before encode_testres (p %p resp %p)\n", p, resp);
p                 145 fs/lockd/xdr4.c 	if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
p                 147 fs/lockd/xdr4.c 	*p++ = resp->status;
p                 152 fs/lockd/xdr4.c 		*p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one;
p                 153 fs/lockd/xdr4.c 		*p++ = htonl(resp->lock.svid);
p                 156 fs/lockd/xdr4.c 		if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
p                 165 fs/lockd/xdr4.c 		p = xdr_encode_hyper(p, start);
p                 166 fs/lockd/xdr4.c 		p = xdr_encode_hyper(p, len);
p                 172 fs/lockd/xdr4.c 	dprintk("xdr: after encode_testres (p %p resp %p)\n", p, resp);
p                 173 fs/lockd/xdr4.c 	return p;
p                 181 fs/lockd/xdr4.c nlm4svc_decode_testargs(struct svc_rqst *rqstp, __be32 *p)
p                 186 fs/lockd/xdr4.c 	if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
p                 189 fs/lockd/xdr4.c 	exclusive = ntohl(*p++);
p                 190 fs/lockd/xdr4.c 	if (!(p = nlm4_decode_lock(p, &argp->lock)))
p                 195 fs/lockd/xdr4.c 	return xdr_argsize_check(rqstp, p);
p                 199 fs/lockd/xdr4.c nlm4svc_encode_testres(struct svc_rqst *rqstp, __be32 *p)
p                 203 fs/lockd/xdr4.c 	if (!(p = nlm4_encode_testres(p, resp)))
p                 205 fs/lockd/xdr4.c 	return xdr_ressize_check(rqstp, p);
p                 209 fs/lockd/xdr4.c nlm4svc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p)
p                 214 fs/lockd/xdr4.c 	if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
p                 216 fs/lockd/xdr4.c 	argp->block  = ntohl(*p++);
p                 217 fs/lockd/xdr4.c 	exclusive    = ntohl(*p++);
p                 218 fs/lockd/xdr4.c 	if (!(p = nlm4_decode_lock(p, &argp->lock)))
p                 222 fs/lockd/xdr4.c 	argp->reclaim = ntohl(*p++);
p                 223 fs/lockd/xdr4.c 	argp->state   = ntohl(*p++);
p                 226 fs/lockd/xdr4.c 	return xdr_argsize_check(rqstp, p);
p                 230 fs/lockd/xdr4.c nlm4svc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p)
p                 235 fs/lockd/xdr4.c 	if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
p                 237 fs/lockd/xdr4.c 	argp->block = ntohl(*p++);
p                 238 fs/lockd/xdr4.c 	exclusive = ntohl(*p++);
p                 239 fs/lockd/xdr4.c 	if (!(p = nlm4_decode_lock(p, &argp->lock)))
p                 243 fs/lockd/xdr4.c 	return xdr_argsize_check(rqstp, p);
p                 247 fs/lockd/xdr4.c nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p)
p                 251 fs/lockd/xdr4.c 	if (!(p = nlm4_decode_cookie(p, &argp->cookie))
p                 252 fs/lockd/xdr4.c 	 || !(p = nlm4_decode_lock(p, &argp->lock)))
p                 255 fs/lockd/xdr4.c 	return xdr_argsize_check(rqstp, p);
p                 259 fs/lockd/xdr4.c nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
p                 268 fs/lockd/xdr4.c 	if (!(p = nlm4_decode_cookie(p, &argp->cookie))
p                 269 fs/lockd/xdr4.c 	 || !(p = xdr_decode_string_inplace(p, &lock->caller,
p                 271 fs/lockd/xdr4.c 	 || !(p = nlm4_decode_fh(p, &lock->fh))
p                 272 fs/lockd/xdr4.c 	 || !(p = nlm4_decode_oh(p, &lock->oh)))
p                 274 fs/lockd/xdr4.c 	argp->fsm_mode = ntohl(*p++);
p                 275 fs/lockd/xdr4.c 	argp->fsm_access = ntohl(*p++);
p                 276 fs/lockd/xdr4.c 	return xdr_argsize_check(rqstp, p);
p                 280 fs/lockd/xdr4.c nlm4svc_encode_shareres(struct svc_rqst *rqstp, __be32 *p)
p                 284 fs/lockd/xdr4.c 	if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
p                 286 fs/lockd/xdr4.c 	*p++ = resp->status;
p                 287 fs/lockd/xdr4.c 	*p++ = xdr_zero;		/* sequence argument */
p                 288 fs/lockd/xdr4.c 	return xdr_ressize_check(rqstp, p);
p                 292 fs/lockd/xdr4.c nlm4svc_encode_res(struct svc_rqst *rqstp, __be32 *p)
p                 296 fs/lockd/xdr4.c 	if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
p                 298 fs/lockd/xdr4.c 	*p++ = resp->status;
p                 299 fs/lockd/xdr4.c 	return xdr_ressize_check(rqstp, p);
p                 303 fs/lockd/xdr4.c nlm4svc_decode_notify(struct svc_rqst *rqstp, __be32 *p)
p                 308 fs/lockd/xdr4.c 	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
p                 311 fs/lockd/xdr4.c 	argp->state = ntohl(*p++);
p                 312 fs/lockd/xdr4.c 	return xdr_argsize_check(rqstp, p);
p                 316 fs/lockd/xdr4.c nlm4svc_decode_reboot(struct svc_rqst *rqstp, __be32 *p)
p                 320 fs/lockd/xdr4.c 	if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
p                 322 fs/lockd/xdr4.c 	argp->state = ntohl(*p++);
p                 323 fs/lockd/xdr4.c 	memcpy(&argp->priv.data, p, sizeof(argp->priv.data));
p                 324 fs/lockd/xdr4.c 	p += XDR_QUADLEN(SM_PRIV_SIZE);
p                 325 fs/lockd/xdr4.c 	return xdr_argsize_check(rqstp, p);
p                 329 fs/lockd/xdr4.c nlm4svc_decode_res(struct svc_rqst *rqstp, __be32 *p)
p                 333 fs/lockd/xdr4.c 	if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
p                 335 fs/lockd/xdr4.c 	resp->status = *p++;
p                 336 fs/lockd/xdr4.c 	return xdr_argsize_check(rqstp, p);
p                 340 fs/lockd/xdr4.c nlm4svc_decode_void(struct svc_rqst *rqstp, __be32 *p)
p                 342 fs/lockd/xdr4.c 	return xdr_argsize_check(rqstp, p);
p                 346 fs/lockd/xdr4.c nlm4svc_encode_void(struct svc_rqst *rqstp, __be32 *p)
p                 348 fs/lockd/xdr4.c 	return xdr_ressize_check(rqstp, p);
p                  34 fs/minix/bitmap.c 		__u16 *p = (__u16 *)(*map++)->b_data;
p                  36 fs/minix/bitmap.c 			sum += 16 - hweight16(*p++);
p                 111 fs/minix/bitmap.c 	struct minix_inode *p;
p                 126 fs/minix/bitmap.c 	p = (void *)(*bh)->b_data;
p                 127 fs/minix/bitmap.c 	return p + ino % MINIX_INODES_PER_BLOCK;
p                 135 fs/minix/bitmap.c 	struct minix2_inode *p;
p                 152 fs/minix/bitmap.c 	p = (void *)(*bh)->b_data;
p                 153 fs/minix/bitmap.c 	return p + ino % minix2_inodes_per_block;
p                 100 fs/minix/dir.c 		char *p, *kaddr, *limit;
p                 106 fs/minix/dir.c 		p = kaddr+offset;
p                 108 fs/minix/dir.c 		for ( ; p <= limit; p = minix_next_entry(p, sbi)) {
p                 112 fs/minix/dir.c 				minix3_dirent *de3 = (minix3_dirent *)p;
p                 116 fs/minix/dir.c 				minix_dirent *de = (minix_dirent *)p;
p                 161 fs/minix/dir.c 	char *p;
p                 176 fs/minix/dir.c 		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
p                 178 fs/minix/dir.c 				minix3_dirent *de3 = (minix3_dirent *)p;
p                 182 fs/minix/dir.c 				minix_dirent *de = (minix_dirent *)p;
p                 197 fs/minix/dir.c 	return (minix_dirent *)p;
p                 210 fs/minix/dir.c 	char *kaddr, *p;
p                 234 fs/minix/dir.c 		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
p                 235 fs/minix/dir.c 			de = (minix_dirent *)p;
p                 236 fs/minix/dir.c 			de3 = (minix3_dirent *)p;
p                 244 fs/minix/dir.c 			if (p == dir_end) {
p                 265 fs/minix/dir.c 	pos = page_offset(page) + p - (char *)page_address(page);
p                 370 fs/minix/dir.c 		char *p, *kaddr, *limit;
p                 378 fs/minix/dir.c 		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
p                 380 fs/minix/dir.c 				minix3_dirent *de3 = (minix3_dirent *)p;
p                 384 fs/minix/dir.c 				minix_dirent *de = (minix_dirent *)p;
p                 438 fs/minix/dir.c struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
p                 446 fs/minix/dir.c 		*p = page;
p                   5 fs/minix/itree_common.c 	block_t	*p;
p                  12 fs/minix/itree_common.c static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v)
p                  14 fs/minix/itree_common.c 	p->key = *(p->p = v);
p                  15 fs/minix/itree_common.c 	p->bh = bh;
p                  20 fs/minix/itree_common.c 	while (from <= to && from->key == *from->p)
p                  37 fs/minix/itree_common.c 	Indirect *p = chain;
p                  43 fs/minix/itree_common.c 	if (!p->key)
p                  46 fs/minix/itree_common.c 		bh = sb_bread(sb, block_to_cpu(p->key));
p                  50 fs/minix/itree_common.c 		if (!verify_chain(chain, p))
p                  52 fs/minix/itree_common.c 		add_chain(++p, bh, (block_t *)bh->b_data + *++offsets);
p                  54 fs/minix/itree_common.c 		if (!p->key)
p                  67 fs/minix/itree_common.c 	return p;
p                  91 fs/minix/itree_common.c 		branch[n].p = (block_t*) bh->b_data + offsets[n];
p                  92 fs/minix/itree_common.c 		*branch[n].p = branch[n].key;
p                 119 fs/minix/itree_common.c 	if (!verify_chain(chain, where-1) || *where->p)
p                 122 fs/minix/itree_common.c 	*where->p = where->key;
p                 209 fs/minix/itree_common.c static inline int all_zeroes(block_t *p, block_t *q)
p                 211 fs/minix/itree_common.c 	while (p < q)
p                 212 fs/minix/itree_common.c 		if (*p++)
p                 223 fs/minix/itree_common.c 	Indirect *partial, *p;
p                 234 fs/minix/itree_common.c 	if (!partial->key && *partial->p) {
p                 238 fs/minix/itree_common.c 	for (p=partial;p>chain && all_zeroes((block_t*)p->bh->b_data,p->p);p--)
p                 240 fs/minix/itree_common.c 	if (p == chain + k - 1 && p > chain) {
p                 241 fs/minix/itree_common.c 		p->p--;
p                 243 fs/minix/itree_common.c 		*top = *p->p;
p                 244 fs/minix/itree_common.c 		*p->p = 0;
p                 248 fs/minix/itree_common.c 	while(partial > p)
p                 257 fs/minix/itree_common.c static inline void free_data(struct inode *inode, block_t *p, block_t *q)
p                 261 fs/minix/itree_common.c 	for ( ; p < q ; p++) {
p                 262 fs/minix/itree_common.c 		nr = block_to_cpu(*p);
p                 264 fs/minix/itree_common.c 			*p = 0;
p                 270 fs/minix/itree_common.c static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
p                 276 fs/minix/itree_common.c 		for ( ; p < q ; p++) {
p                 277 fs/minix/itree_common.c 			nr = block_to_cpu(*p);
p                 280 fs/minix/itree_common.c 			*p = 0;
p                 291 fs/minix/itree_common.c 		free_data(inode, p, q);
p                 330 fs/minix/itree_common.c 		free_branches(inode, partial->p + 1, block_end(partial->bh),
p                 127 fs/minix/minix.h 	const unsigned short *p = vaddr, *addr = vaddr;
p                 134 fs/minix/minix.h 	while (*p++ == 0xffff) {
p                 136 fs/minix/minix.h 			return (p - addr) << 4;
p                 139 fs/minix/minix.h 	num = *--p;
p                 140 fs/minix/minix.h 	return ((p - addr) << 4) + ffz(num);
p                 152 fs/minix/minix.h 	const unsigned short *p = vaddr;
p                 153 fs/minix/minix.h 	return (p[nr >> 4] & (1U << (nr & 15))) != 0;
p                 511 fs/namei.c     static void set_nameidata(struct nameidata *p, int dfd, struct filename *name)
p                 514 fs/namei.c     	p->stack = p->internal;
p                 515 fs/namei.c     	p->dfd = dfd;
p                 516 fs/namei.c     	p->name = name;
p                 517 fs/namei.c     	p->total_link_count = old ? old->total_link_count : 0;
p                 518 fs/namei.c     	p->saved = old;
p                 519 fs/namei.c     	current->nameidata = p;
p                 535 fs/namei.c     	struct saved *p;
p                 538 fs/namei.c     		p= kmalloc_array(MAXSYMLINKS, sizeof(struct saved),
p                 540 fs/namei.c     		if (unlikely(!p))
p                 543 fs/namei.c     		p= kmalloc_array(MAXSYMLINKS, sizeof(struct saved),
p                 545 fs/namei.c     		if (unlikely(!p))
p                 548 fs/namei.c     	memcpy(p, nd->internal, sizeof(nd->internal));
p                 549 fs/namei.c     	nd->stack = p;
p                2850 fs/namei.c     	struct dentry *p;
p                2859 fs/namei.c     	p = d_ancestor(p2, p1);
p                2860 fs/namei.c     	if (p) {
p                2863 fs/namei.c     		return p;
p                2866 fs/namei.c     	p = d_ancestor(p1, p2);
p                2867 fs/namei.c     	if (p) {
p                2870 fs/namei.c     		return p;
p                 611 fs/namespace.c 	struct mount *p;
p                 613 fs/namespace.c 	hlist_for_each_entry_rcu(p, head, mnt_hash)
p                 614 fs/namespace.c 		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
p                 615 fs/namespace.c 			return p;
p                 898 fs/namespace.c static struct mount *next_mnt(struct mount *p, struct mount *root)
p                 900 fs/namespace.c 	struct list_head *next = p->mnt_mounts.next;
p                 901 fs/namespace.c 	if (next == &p->mnt_mounts) {
p                 903 fs/namespace.c 			if (p == root)
p                 905 fs/namespace.c 			next = p->mnt_child.next;
p                 906 fs/namespace.c 			if (next != &p->mnt_parent->mnt_mounts)
p                 908 fs/namespace.c 			p = p->mnt_parent;
p                 914 fs/namespace.c static struct mount *skip_mnt_tree(struct mount *p)
p                 916 fs/namespace.c 	struct list_head *prev = p->mnt_mounts.prev;
p                 917 fs/namespace.c 	while (prev != &p->mnt_mounts) {
p                 918 fs/namespace.c 		p = list_entry(prev, struct mount, mnt_child);
p                 919 fs/namespace.c 		prev = p->mnt_mounts.prev;
p                 921 fs/namespace.c 	return p;
p                1084 fs/namespace.c 	struct hlist_node *p;
p                1096 fs/namespace.c 	hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
p                1165 fs/namespace.c 		struct mount *p, *tmp;
p                1166 fs/namespace.c 		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts,  mnt_child) {
p                1167 fs/namespace.c 			__put_mountpoint(unhash_mnt(p), &list);
p                1168 fs/namespace.c 			hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
p                1239 fs/namespace.c 	struct mount *p;
p                1240 fs/namespace.c 	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
p                1241 fs/namespace.c 	if (IS_ERR(p))
p                1242 fs/namespace.c 		return ERR_CAST(p);
p                1243 fs/namespace.c 	p->mnt.mnt_flags |= MNT_INTERNAL;
p                1244 fs/namespace.c 	return &p->mnt;
p                1251 fs/namespace.c 	struct proc_mounts *p = m->private;
p                1254 fs/namespace.c 	if (p->cached_event == p->ns->event) {
p                1255 fs/namespace.c 		void *v = p->cached_mount;
p                1256 fs/namespace.c 		if (*pos == p->cached_index)
p                1258 fs/namespace.c 		if (*pos == p->cached_index + 1) {
p                1259 fs/namespace.c 			v = seq_list_next(v, &p->ns->list, &p->cached_index);
p                1260 fs/namespace.c 			return p->cached_mount = v;
p                1264 fs/namespace.c 	p->cached_event = p->ns->event;
p                1265 fs/namespace.c 	p->cached_mount = seq_list_start(&p->ns->list, *pos);
p                1266 fs/namespace.c 	p->cached_index = *pos;
p                1267 fs/namespace.c 	return p->cached_mount;
p                1272 fs/namespace.c 	struct proc_mounts *p = m->private;
p                1274 fs/namespace.c 	p->cached_mount = seq_list_next(v, &p->ns->list, pos);
p                1275 fs/namespace.c 	p->cached_index = *pos;
p                1276 fs/namespace.c 	return p->cached_mount;
p                1286 fs/namespace.c 	struct proc_mounts *p = m->private;
p                1288 fs/namespace.c 	return p->show(m, &r->mnt);
p                1312 fs/namespace.c 	struct mount *p;
p                1317 fs/namespace.c 	for (p = mnt; p; p = next_mnt(p, mnt)) {
p                1318 fs/namespace.c 		actual_refs += mnt_get_count(p);
p                1361 fs/namespace.c 	struct hlist_node *p;
p                1377 fs/namespace.c 	hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
p                1430 fs/namespace.c 	struct mount *p;
p                1436 fs/namespace.c 	for (p = mnt; p; p = next_mnt(p, mnt)) {
p                1437 fs/namespace.c 		p->mnt.mnt_flags |= MNT_UMOUNT;
p                1438 fs/namespace.c 		list_move(&p->mnt_list, &tmp_list);
p                1442 fs/namespace.c 	list_for_each_entry(p, &tmp_list, mnt_list) {
p                1443 fs/namespace.c 		list_del_init(&p->mnt_child);
p                1453 fs/namespace.c 		p = list_first_entry(&tmp_list, struct mount, mnt_list);
p                1454 fs/namespace.c 		list_del_init(&p->mnt_expire);
p                1455 fs/namespace.c 		list_del_init(&p->mnt_list);
p                1456 fs/namespace.c 		ns = p->mnt_ns;
p                1461 fs/namespace.c 		p->mnt_ns = NULL;
p                1463 fs/namespace.c 			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
p                1465 fs/namespace.c 		disconnect = disconnect_mount(p, how);
p                1466 fs/namespace.c 		if (mnt_has_parent(p)) {
p                1467 fs/namespace.c 			mnt_add_count(p->mnt_parent, -1);
p                1470 fs/namespace.c 				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
p                1472 fs/namespace.c 				umount_mnt(p);
p                1475 fs/namespace.c 		change_mnt_propagation(p, MS_PRIVATE);
p                1477 fs/namespace.c 			hlist_add_head(&p->mnt_umount, &unmounted);
p                1752 fs/namespace.c 	struct mount *res, *p, *q, *r, *parent;
p                1766 fs/namespace.c 	p = mnt;
p                1789 fs/namespace.c 			while (p != s->mnt_parent) {
p                1790 fs/namespace.c 				p = p->mnt_parent;
p                1793 fs/namespace.c 			p = s;
p                1795 fs/namespace.c 			q = clone_mnt(p, p->mnt.mnt_root, flag);
p                1800 fs/namespace.c 			attach_mnt(q, parent, p->mnt_mp);
p                1903 fs/namespace.c 	struct mount *p;
p                1905 fs/namespace.c 	for (p = mnt; p; p = next_mnt(p, mnt)) {
p                1906 fs/namespace.c 		int flags = p->mnt.mnt_flags;
p                1922 fs/namespace.c 		if (list_empty(&p->mnt_expire))
p                1924 fs/namespace.c 		p->mnt.mnt_flags = flags;
p                1930 fs/namespace.c 	struct mount *p;
p                1932 fs/namespace.c 	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
p                1933 fs/namespace.c 		if (p->mnt_group_id && !IS_MNT_SHARED(p))
p                1934 fs/namespace.c 			mnt_release_group_id(p);
p                1940 fs/namespace.c 	struct mount *p;
p                1942 fs/namespace.c 	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
p                1943 fs/namespace.c 		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
p                1944 fs/namespace.c 			int err = mnt_alloc_group_id(p);
p                1946 fs/namespace.c 				cleanup_group_ids(mnt, p);
p                1959 fs/namespace.c 	struct mount *p;
p                1961 fs/namespace.c 	for (p = mnt; p; p = next_mnt(p, mnt))
p                2049 fs/namespace.c 	struct mount *child, *p;
p                2075 fs/namespace.c 		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
p                2076 fs/namespace.c 			set_mnt_shared(p);
p                2170 fs/namespace.c static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
p                2179 fs/namespace.c 	return attach_recursive_mnt(mnt, p, mp, false);
p                2324 fs/namespace.c 	struct mount *mnt, *p;
p                2339 fs/namespace.c 	for (p = mnt; p; p = next_mnt(p, mnt)) {
p                2340 fs/namespace.c 		p->mnt_ns = ns;
p                2568 fs/namespace.c 	struct mount *p;
p                2569 fs/namespace.c 	for (p = mnt; p; p = next_mnt(p, mnt)) {
p                2570 fs/namespace.c 		if (IS_MNT_UNBINDABLE(p))
p                2584 fs/namespace.c 	struct mount *p;
p                2588 fs/namespace.c 	for (p = subtree; p; p = next_mnt(p, subtree))
p                2589 fs/namespace.c 		if (mnt_ns_loop(p->mnt.mnt_root))
p                2601 fs/namespace.c 	struct mount *p;
p                2613 fs/namespace.c 	p = real_mount(new_path->mnt);
p                2621 fs/namespace.c 	if (!check_mnt(p))
p                2650 fs/namespace.c 	if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
p                2655 fs/namespace.c 	for (; mnt_has_parent(p); p = p->mnt_parent)
p                2656 fs/namespace.c 		if (p == old)
p                3217 fs/namespace.c 	struct mount *p, *q;
p                3259 fs/namespace.c 	p = old;
p                3261 fs/namespace.c 	while (p) {
p                3265 fs/namespace.c 			if (&p->mnt == new_fs->root.mnt) {
p                3267 fs/namespace.c 				rootmnt = &p->mnt;
p                3269 fs/namespace.c 			if (&p->mnt == new_fs->pwd.mnt) {
p                3271 fs/namespace.c 				pwdmnt = &p->mnt;
p                3274 fs/namespace.c 		p = next_mnt(p, old);
p                3278 fs/namespace.c 		while (p->mnt.mnt_root != q->mnt.mnt_root)
p                3279 fs/namespace.c 			p = next_mnt(p, old);
p                  85 fs/nfs/blocklayout/blocklayout.c static inline void get_parallel(struct parallel_io *p)
p                  87 fs/nfs/blocklayout/blocklayout.c 	kref_get(&p->refcnt);
p                  92 fs/nfs/blocklayout/blocklayout.c 	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);
p                  95 fs/nfs/blocklayout/blocklayout.c 	p->pnfs_callback(p->data);
p                  96 fs/nfs/blocklayout/blocklayout.c 	kfree(p);
p                  99 fs/nfs/blocklayout/blocklayout.c static inline void put_parallel(struct parallel_io *p)
p                 101 fs/nfs/blocklayout/blocklayout.c 	kref_put(&p->refcnt, destroy_parallel);
p                 618 fs/nfs/blocklayout/blocklayout.c 	__be32 *p;
p                 620 fs/nfs/blocklayout/blocklayout.c 	p = xdr_inline_decode(xdr, 28 + NFS4_DEVICEID4_SIZE);
p                 621 fs/nfs/blocklayout/blocklayout.c 	if (!p)
p                 628 fs/nfs/blocklayout/blocklayout.c 	memcpy(&id, p, NFS4_DEVICEID4_SIZE);
p                 629 fs/nfs/blocklayout/blocklayout.c 	p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
p                 643 fs/nfs/blocklayout/blocklayout.c 	if (decode_sector_number(&p, &be->be_f_offset) < 0)
p                 645 fs/nfs/blocklayout/blocklayout.c 	if (decode_sector_number(&p, &be->be_length) < 0)
p                 647 fs/nfs/blocklayout/blocklayout.c 	if (decode_sector_number(&p, &be->be_v_offset) < 0)
p                 649 fs/nfs/blocklayout/blocklayout.c 	be->be_state = be32_to_cpup(p++);
p                 684 fs/nfs/blocklayout/blocklayout.c 	__be32 *p;
p                 703 fs/nfs/blocklayout/blocklayout.c 	p = xdr_inline_decode(&xdr, 4);
p                 704 fs/nfs/blocklayout/blocklayout.c 	if (unlikely(!p))
p                 707 fs/nfs/blocklayout/blocklayout.c 	count = be32_to_cpup(p++);
p                  55 fs/nfs/blocklayout/dev.c 	__be32 *p;
p                  58 fs/nfs/blocklayout/dev.c 	p = xdr_inline_decode(xdr, 4);
p                  59 fs/nfs/blocklayout/dev.c 	if (!p)
p                  61 fs/nfs/blocklayout/dev.c 	b->type = be32_to_cpup(p++);
p                  65 fs/nfs/blocklayout/dev.c 		p = xdr_inline_decode(xdr, 4);
p                  66 fs/nfs/blocklayout/dev.c 		if (!p)
p                  68 fs/nfs/blocklayout/dev.c 		b->simple.nr_sigs = be32_to_cpup(p++);
p                  76 fs/nfs/blocklayout/dev.c 			p = xdr_inline_decode(xdr, 8 + 4);
p                  77 fs/nfs/blocklayout/dev.c 			if (!p)
p                  79 fs/nfs/blocklayout/dev.c 			p = xdr_decode_hyper(p, &b->simple.sigs[i].offset);
p                  80 fs/nfs/blocklayout/dev.c 			b->simple.sigs[i].sig_len = be32_to_cpup(p++);
p                  87 fs/nfs/blocklayout/dev.c 			p = xdr_inline_decode(xdr, b->simple.sigs[i].sig_len);
p                  88 fs/nfs/blocklayout/dev.c 			if (!p)
p                  90 fs/nfs/blocklayout/dev.c 			memcpy(&b->simple.sigs[i].sig, p,
p                  98 fs/nfs/blocklayout/dev.c 		p = xdr_inline_decode(xdr, 8 + 8 + 4);
p                  99 fs/nfs/blocklayout/dev.c 		if (!p)
p                 101 fs/nfs/blocklayout/dev.c 		p = xdr_decode_hyper(p, &b->slice.start);
p                 102 fs/nfs/blocklayout/dev.c 		p = xdr_decode_hyper(p, &b->slice.len);
p                 103 fs/nfs/blocklayout/dev.c 		b->slice.volume = be32_to_cpup(p++);
p                 106 fs/nfs/blocklayout/dev.c 		p = xdr_inline_decode(xdr, 4);
p                 107 fs/nfs/blocklayout/dev.c 		if (!p)
p                 110 fs/nfs/blocklayout/dev.c 		b->concat.volumes_count = be32_to_cpup(p++);
p                 116 fs/nfs/blocklayout/dev.c 		p = xdr_inline_decode(xdr, b->concat.volumes_count * 4);
p                 117 fs/nfs/blocklayout/dev.c 		if (!p)
p                 120 fs/nfs/blocklayout/dev.c 			b->concat.volumes[i] = be32_to_cpup(p++);
p                 123 fs/nfs/blocklayout/dev.c 		p = xdr_inline_decode(xdr, 8 + 4);
p                 124 fs/nfs/blocklayout/dev.c 		if (!p)
p                 127 fs/nfs/blocklayout/dev.c 		p = xdr_decode_hyper(p, &b->stripe.chunk_size);
p                 128 fs/nfs/blocklayout/dev.c 		b->stripe.volumes_count = be32_to_cpup(p++);
p                 134 fs/nfs/blocklayout/dev.c 		p = xdr_inline_decode(xdr, b->stripe.volumes_count * 4);
p                 135 fs/nfs/blocklayout/dev.c 		if (!p)
p                 138 fs/nfs/blocklayout/dev.c 			b->stripe.volumes[i] = be32_to_cpup(p++);
p                 141 fs/nfs/blocklayout/dev.c 		p = xdr_inline_decode(xdr, 4 + 4 + 4);
p                 142 fs/nfs/blocklayout/dev.c 		if (!p)
p                 144 fs/nfs/blocklayout/dev.c 		b->scsi.code_set = be32_to_cpup(p++);
p                 145 fs/nfs/blocklayout/dev.c 		b->scsi.designator_type = be32_to_cpup(p++);
p                 146 fs/nfs/blocklayout/dev.c 		b->scsi.designator_len = be32_to_cpup(p++);
p                 147 fs/nfs/blocklayout/dev.c 		p = xdr_inline_decode(xdr, b->scsi.designator_len);
p                 148 fs/nfs/blocklayout/dev.c 		if (!p)
p                 152 fs/nfs/blocklayout/dev.c 		memcpy(&b->scsi.designator, p, b->scsi.designator_len);
p                 153 fs/nfs/blocklayout/dev.c 		p = xdr_inline_decode(xdr, 8);
p                 154 fs/nfs/blocklayout/dev.c 		if (!p)
p                 156 fs/nfs/blocklayout/dev.c 		p = xdr_decode_hyper(p, &b->scsi.pr_key);
p                 506 fs/nfs/blocklayout/dev.c 	__be32 *p;
p                 515 fs/nfs/blocklayout/dev.c 	p = xdr_inline_decode(&xdr, sizeof(__be32));
p                 516 fs/nfs/blocklayout/dev.c 	if (!p)
p                 518 fs/nfs/blocklayout/dev.c 	nr_volumes = be32_to_cpup(p++);
p                 139 fs/nfs/blocklayout/extent_tree.c 	struct rb_node **p = &root->rb_node, *parent = NULL;
p                 142 fs/nfs/blocklayout/extent_tree.c 	while (*p) {
p                 143 fs/nfs/blocklayout/extent_tree.c 		parent = *p;
p                 155 fs/nfs/blocklayout/extent_tree.c 			p = &(*p)->rb_left;
p                 162 fs/nfs/blocklayout/extent_tree.c 			p = &(*p)->rb_right;
p                 168 fs/nfs/blocklayout/extent_tree.c 	rb_link_node(&new->be_node, parent, p);
p                 506 fs/nfs/blocklayout/extent_tree.c static __be32 *encode_block_extent(struct pnfs_block_extent *be, __be32 *p)
p                 508 fs/nfs/blocklayout/extent_tree.c 	p = xdr_encode_opaque_fixed(p, be->be_device->deviceid.data,
p                 510 fs/nfs/blocklayout/extent_tree.c 	p = xdr_encode_hyper(p, be->be_f_offset << SECTOR_SHIFT);
p                 511 fs/nfs/blocklayout/extent_tree.c 	p = xdr_encode_hyper(p, be->be_length << SECTOR_SHIFT);
p                 512 fs/nfs/blocklayout/extent_tree.c 	p = xdr_encode_hyper(p, 0LL);
p                 513 fs/nfs/blocklayout/extent_tree.c 	*p++ = cpu_to_be32(PNFS_BLOCK_READWRITE_DATA);
p                 514 fs/nfs/blocklayout/extent_tree.c 	return p;
p                 517 fs/nfs/blocklayout/extent_tree.c static __be32 *encode_scsi_range(struct pnfs_block_extent *be, __be32 *p)
p                 519 fs/nfs/blocklayout/extent_tree.c 	p = xdr_encode_hyper(p, be->be_f_offset << SECTOR_SHIFT);
p                 520 fs/nfs/blocklayout/extent_tree.c 	return xdr_encode_hyper(p, be->be_length << SECTOR_SHIFT);
p                 523 fs/nfs/blocklayout/extent_tree.c static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
p                 543 fs/nfs/blocklayout/extent_tree.c 			p = encode_scsi_range(be, p);
p                 545 fs/nfs/blocklayout/extent_tree.c 			p = encode_block_extent(be, p);
p                 598 fs/nfs/blocklayout/extent_tree.c 		void *p = start_p, *end = p + arg->layoutupdate_len;
p                 603 fs/nfs/blocklayout/extent_tree.c 		for ( ; p < end; p += PAGE_SIZE) {
p                 604 fs/nfs/blocklayout/extent_tree.c 			page = vmalloc_to_page(p);
p                  38 fs/nfs/blocklayout/rpc_pipefs.c nfs4_encode_simple(__be32 *p, struct pnfs_block_volume *b)
p                  42 fs/nfs/blocklayout/rpc_pipefs.c 	*p++ = cpu_to_be32(1);
p                  43 fs/nfs/blocklayout/rpc_pipefs.c 	*p++ = cpu_to_be32(b->type);
p                  44 fs/nfs/blocklayout/rpc_pipefs.c 	*p++ = cpu_to_be32(b->simple.nr_sigs);
p                  46 fs/nfs/blocklayout/rpc_pipefs.c 		p = xdr_encode_hyper(p, b->simple.sigs[i].offset);
p                  47 fs/nfs/blocklayout/rpc_pipefs.c 		p = xdr_encode_opaque(p, b->simple.sigs[i].sig,
p                 384 fs/nfs/callback.c 	char *p = rqstp->rq_cred.cr_principal;
p                 396 fs/nfs/callback.c 	if (p == NULL)
p                 404 fs/nfs/callback.c 		return !strcmp(p, clp->cl_acceptor);
p                 413 fs/nfs/callback.c 	if (memcmp(p, "nfs@", 4) != 0)
p                 415 fs/nfs/callback.c 	p += 4;
p                 416 fs/nfs/callback.c 	if (strcmp(p, clp->cl_hostname) != 0)
p                  65 fs/nfs/callback_xdr.c static int nfs4_decode_void(struct svc_rqst *rqstp, __be32 *p)
p                  67 fs/nfs/callback_xdr.c 	return xdr_argsize_check(rqstp, p);
p                  70 fs/nfs/callback_xdr.c static int nfs4_encode_void(struct svc_rqst *rqstp, __be32 *p)
p                  72 fs/nfs/callback_xdr.c 	return xdr_ressize_check(rqstp, p);
p                  89 fs/nfs/callback_xdr.c 	__be32 *p;
p                  91 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, 4);
p                  92 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL))
p                  94 fs/nfs/callback_xdr.c 	fh->size = ntohl(*p);
p                  97 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, fh->size);
p                  98 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL))
p                 100 fs/nfs/callback_xdr.c 	memcpy(&fh->data[0], p, fh->size);
p                 107 fs/nfs/callback_xdr.c 	__be32 *p;
p                 110 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 111 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL))
p                 113 fs/nfs/callback_xdr.c 	attrlen = ntohl(*p);
p                 114 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, attrlen << 2);
p                 115 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL))
p                 118 fs/nfs/callback_xdr.c 		bitmap[0] = ntohl(*p++);
p                 120 fs/nfs/callback_xdr.c 		bitmap[1] = ntohl(*p);
p                 126 fs/nfs/callback_xdr.c 	__be32 *p;
p                 128 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
p                 129 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL))
p                 131 fs/nfs/callback_xdr.c 	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
p                 143 fs/nfs/callback_xdr.c 	__be32 *p;
p                 149 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, 12);
p                 150 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL))
p                 152 fs/nfs/callback_xdr.c 	hdr->minorversion = ntohl(*p++);
p                 155 fs/nfs/callback_xdr.c 		hdr->cb_ident = ntohl(*p++); /* ignored by v4.1 and v4.2 */
p                 162 fs/nfs/callback_xdr.c 	hdr->nops = ntohl(*p);
p                 168 fs/nfs/callback_xdr.c 	__be32 *p;
p                 169 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 170 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL))
p                 172 fs/nfs/callback_xdr.c 	*op = ntohl(*p);
p                 192 fs/nfs/callback_xdr.c 	__be32 *p;
p                 198 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 199 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL))
p                 201 fs/nfs/callback_xdr.c 	args->truncate = ntohl(*p);
p                 216 fs/nfs/callback_xdr.c 	__be32 *p;
p                 220 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, 4 * sizeof(uint32_t));
p                 221 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL))
p                 224 fs/nfs/callback_xdr.c 	args->cbl_layout_type = ntohl(*p++);
p                 228 fs/nfs/callback_xdr.c 	iomode = ntohl(*p++);
p                 229 fs/nfs/callback_xdr.c 	args->cbl_layoutchanged = ntohl(*p++);
p                 230 fs/nfs/callback_xdr.c 	args->cbl_recall_type = ntohl(*p++);
p                 238 fs/nfs/callback_xdr.c 		p = xdr_inline_decode(xdr, 2 * sizeof(uint64_t));
p                 239 fs/nfs/callback_xdr.c 		if (unlikely(p == NULL))
p                 241 fs/nfs/callback_xdr.c 		p = xdr_decode_hyper(p, &args->cbl_range.offset);
p                 242 fs/nfs/callback_xdr.c 		p = xdr_decode_hyper(p, &args->cbl_range.length);
p                 245 fs/nfs/callback_xdr.c 		p = xdr_inline_decode(xdr, 2 * sizeof(uint64_t));
p                 246 fs/nfs/callback_xdr.c 		if (unlikely(p == NULL))
p                 248 fs/nfs/callback_xdr.c 		p = xdr_decode_hyper(p, &args->cbl_fsid.major);
p                 249 fs/nfs/callback_xdr.c 		p = xdr_decode_hyper(p, &args->cbl_fsid.minor);
p                 261 fs/nfs/callback_xdr.c 	__be32 *p;
p                 268 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, sizeof(uint32_t));
p                 269 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL)) {
p                 273 fs/nfs/callback_xdr.c 	n = ntohl(*p++);
p                 291 fs/nfs/callback_xdr.c 		p = xdr_inline_decode(xdr, (4 * sizeof(uint32_t)) +
p                 293 fs/nfs/callback_xdr.c 		if (unlikely(p == NULL)) {
p                 298 fs/nfs/callback_xdr.c 		tmp = ntohl(*p++);	/* bitmap size */
p                 303 fs/nfs/callback_xdr.c 		dev->cbd_notify_type = ntohl(*p++);
p                 310 fs/nfs/callback_xdr.c 		tmp = ntohl(*p++);	/* opaque size */
p                 318 fs/nfs/callback_xdr.c 		dev->cbd_layout_type = ntohl(*p++);
p                 319 fs/nfs/callback_xdr.c 		memcpy(dev->cbd_dev_id.data, p, NFS4_DEVICEID4_SIZE);
p                 320 fs/nfs/callback_xdr.c 		p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
p                 323 fs/nfs/callback_xdr.c 			p = xdr_inline_decode(xdr, sizeof(uint32_t));
p                 324 fs/nfs/callback_xdr.c 			if (unlikely(p == NULL)) {
p                 328 fs/nfs/callback_xdr.c 			dev->cbd_immediate = ntohl(*p++);
p                 351 fs/nfs/callback_xdr.c 	__be32 *p;
p                 353 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN);
p                 354 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL))
p                 357 fs/nfs/callback_xdr.c 	memcpy(sid->data, p, NFS4_MAX_SESSIONID_LEN);
p                 364 fs/nfs/callback_xdr.c 	__be32 *p;
p                 373 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, sizeof(uint32_t));
p                 374 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL))
p                 377 fs/nfs/callback_xdr.c 	rc_list->rcl_nrefcalls = ntohl(*p++);
p                 379 fs/nfs/callback_xdr.c 		p = xdr_inline_decode(xdr,
p                 381 fs/nfs/callback_xdr.c 		if (unlikely(p == NULL))
p                 389 fs/nfs/callback_xdr.c 			rc_list->rcl_refcalls[i].rc_sequenceid = ntohl(*p++);
p                 390 fs/nfs/callback_xdr.c 			rc_list->rcl_refcalls[i].rc_slotid = ntohl(*p++);
p                 404 fs/nfs/callback_xdr.c 	__be32 *p;
p                 412 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, 5 * sizeof(uint32_t));
p                 413 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL))
p                 417 fs/nfs/callback_xdr.c 	args->csa_sequenceid = ntohl(*p++);
p                 418 fs/nfs/callback_xdr.c 	args->csa_slotid = ntohl(*p++);
p                 419 fs/nfs/callback_xdr.c 	args->csa_highestslotid = ntohl(*p++);
p                 420 fs/nfs/callback_xdr.c 	args->csa_cachethis = ntohl(*p++);
p                 421 fs/nfs/callback_xdr.c 	args->csa_nrclists = ntohl(*p++);
p                 453 fs/nfs/callback_xdr.c 	__be32 *p, status;
p                 455 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 456 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL))
p                 458 fs/nfs/callback_xdr.c 	args->craa_objs_to_keep = ntohl(*p++);
p                 472 fs/nfs/callback_xdr.c 	__be32 *p;
p                 474 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 475 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL))
p                 477 fs/nfs/callback_xdr.c 	args->crsa_target_highest_slotid = ntohl(*p++);
p                 483 fs/nfs/callback_xdr.c 	__be32		*p;
p                 486 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, 12);
p                 487 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL))
p                 490 fs/nfs/callback_xdr.c 	p = xdr_decode_hyper(p, &args->cbnl_owner.clientid);
p                 491 fs/nfs/callback_xdr.c 	len = be32_to_cpu(*p);
p                 493 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, len);
p                 494 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL))
p                 499 fs/nfs/callback_xdr.c 		p += 2;	/* skip "lock id:" */
p                 500 fs/nfs/callback_xdr.c 		args->cbnl_owner.s_dev = be32_to_cpu(*p++);
p                 501 fs/nfs/callback_xdr.c 		xdr_decode_hyper(p, &args->cbnl_owner.id);
p                 528 fs/nfs/callback_xdr.c 	__be32 *p;
p                 531 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 532 fs/nfs/callback_xdr.c 	if (unlikely(!p))
p                 534 fs/nfs/callback_xdr.c 	p++;
p                 537 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, 8 + 4);
p                 538 fs/nfs/callback_xdr.c 	if (unlikely(!p))
p                 540 fs/nfs/callback_xdr.c 	p = xdr_decode_hyper(p, &args->wr_count);
p                 541 fs/nfs/callback_xdr.c 	args->wr_writeverf.committed = be32_to_cpup(p);
p                 542 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, NFS4_VERIFIER_SIZE);
p                 543 fs/nfs/callback_xdr.c 	if (likely(p)) {
p                 544 fs/nfs/callback_xdr.c 		memcpy(&args->wr_writeverf.verifier.data[0], p,
p                 557 fs/nfs/callback_xdr.c 	__be32 *p;
p                 571 fs/nfs/callback_xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 572 fs/nfs/callback_xdr.c 	if (unlikely(!p))
p                 574 fs/nfs/callback_xdr.c 	args->error = ntohl(*p++);
p                 580 fs/nfs/callback_xdr.c 		p = xdr_inline_decode(xdr, 8);
p                 581 fs/nfs/callback_xdr.c 		if (unlikely(!p))
p                 583 fs/nfs/callback_xdr.c 		p = xdr_decode_hyper(p, &args->wr_count);
p                 606 fs/nfs/callback_xdr.c 	__be32 *p;
p                 610 fs/nfs/callback_xdr.c 	p = xdr_reserve_space(xdr, 8);
p                 611 fs/nfs/callback_xdr.c 	if (unlikely(!p))
p                 613 fs/nfs/callback_xdr.c 	p = xdr_encode_hyper(p, change);
p                 619 fs/nfs/callback_xdr.c 	__be32 *p;
p                 623 fs/nfs/callback_xdr.c 	p = xdr_reserve_space(xdr, 8);
p                 624 fs/nfs/callback_xdr.c 	if (unlikely(!p))
p                 626 fs/nfs/callback_xdr.c 	p = xdr_encode_hyper(p, size);
p                 632 fs/nfs/callback_xdr.c 	__be32 *p;
p                 634 fs/nfs/callback_xdr.c 	p = xdr_reserve_space(xdr, 12);
p                 635 fs/nfs/callback_xdr.c 	if (unlikely(!p))
p                 637 fs/nfs/callback_xdr.c 	p = xdr_encode_hyper(p, time->tv_sec);
p                 638 fs/nfs/callback_xdr.c 	*p = htonl(time->tv_nsec);
p                 674 fs/nfs/callback_xdr.c 	__be32 *p;
p                 676 fs/nfs/callback_xdr.c 	p = xdr_reserve_space(xdr, 8);
p                 677 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL))
p                 679 fs/nfs/callback_xdr.c 	*p++ = htonl(op);
p                 680 fs/nfs/callback_xdr.c 	*p = res;
p                 710 fs/nfs/callback_xdr.c 	*savep = htonl((unsigned int)((char *)xdr->p - (char *)(savep+1)));
p                 720 fs/nfs/callback_xdr.c 	__be32 *p;
p                 722 fs/nfs/callback_xdr.c 	p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN);
p                 723 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL))
p                 726 fs/nfs/callback_xdr.c 	memcpy(p, sid, NFS4_MAX_SESSIONID_LEN);
p                 735 fs/nfs/callback_xdr.c 	__be32 *p;
p                 745 fs/nfs/callback_xdr.c 	p = xdr_reserve_space(xdr, 4 * sizeof(uint32_t));
p                 746 fs/nfs/callback_xdr.c 	if (unlikely(p == NULL))
p                 749 fs/nfs/callback_xdr.c 	*p++ = htonl(res->csr_sequenceid);
p                 750 fs/nfs/callback_xdr.c 	*p++ = htonl(res->csr_slotid);
p                 751 fs/nfs/callback_xdr.c 	*p++ = htonl(res->csr_highestslotid);
p                 752 fs/nfs/callback_xdr.c 	*p++ = htonl(res->csr_target_highestslotid);
p                 904 fs/nfs/callback_xdr.c 	maxlen = xdr_out->end - xdr_out->p;
p                 929 fs/nfs/callback_xdr.c 	__be32 *p, status;
p                 940 fs/nfs/callback_xdr.c 	p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
p                 941 fs/nfs/callback_xdr.c 	xdr_init_encode(&xdr_out, &rqstp->rq_res, p, NULL);
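The fs/nfs/callback_xdr.c entries above all share one guard pattern: ask the XDR stream for a fixed number of inline bytes, bail out if the buffer is short (NULL return), then pull big-endian words through `p`. The following is a minimal userspace sketch of that shape only; the `toy_*` names are hypothetical and are not the kernel xdr_inline_decode() API.

	#include <stdint.h>
	#include <stddef.h>
	#include <arpa/inet.h>	/* ntohl() */

	struct toy_buf { const uint32_t *cur, *end; };	/* word-aligned XDR data */
	struct toy_hdr { uint32_t minorversion, cb_ident, nops; };

	/* Return a pointer to nbytes of readable data, or NULL if the buffer is short. */
	static const uint32_t *toy_inline_decode(struct toy_buf *b, size_t nbytes)
	{
		size_t nwords = (nbytes + 3) / 4;

		if ((size_t)(b->end - b->cur) < nwords)
			return NULL;
		b->cur += nwords;
		return b->cur - nwords;
	}

	/* Same shape as the compound-header decode above: 12 bytes = three XDR words. */
	static int toy_decode_hdr(struct toy_buf *b, struct toy_hdr *hdr)
	{
		const uint32_t *p = toy_inline_decode(b, 12);

		if (p == NULL)
			return -1;		/* the kernel code returns an NFS error status here */
		hdr->minorversion = ntohl(*p++);
		hdr->cb_ident = ntohl(*p++);	/* ignored by v4.1 and v4.2, as the entry notes */
		hdr->nops = ntohl(*p);
		return 0;
	}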
p                1095 fs/nfs/client.c static void *nfs_server_list_start(struct seq_file *p, loff_t *pos);
p                1096 fs/nfs/client.c static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos);
p                1097 fs/nfs/client.c static void nfs_server_list_stop(struct seq_file *p, void *v);
p                1107 fs/nfs/client.c static void *nfs_volume_list_start(struct seq_file *p, loff_t *pos);
p                1108 fs/nfs/client.c static void *nfs_volume_list_next(struct seq_file *p, void *v, loff_t *pos);
p                1109 fs/nfs/client.c static void nfs_volume_list_stop(struct seq_file *p, void *v);
p                1135 fs/nfs/client.c static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos)
p                1137 fs/nfs/client.c 	struct nfs_net *nn = net_generic(seq_file_net(p), nfs_net_id);
p                1145 fs/nfs/client.c static void nfs_server_list_stop(struct seq_file *p, void *v)
p                1148 fs/nfs/client.c 	struct nfs_net *nn = net_generic(seq_file_net(p), nfs_net_id);
p                1202 fs/nfs/client.c static void *nfs_volume_list_next(struct seq_file *p, void *v, loff_t *pos)
p                1204 fs/nfs/client.c 	struct nfs_net *nn = net_generic(seq_file_net(p), nfs_net_id);
p                1212 fs/nfs/client.c static void nfs_volume_list_stop(struct seq_file *p, void *v)
p                1215 fs/nfs/client.c 	struct nfs_net *nn = net_generic(seq_file_net(p), nfs_net_id);
p                1264 fs/nfs/client.c 	struct proc_dir_entry *p;
p                1271 fs/nfs/client.c 	p = proc_create_net("servers", S_IFREG|S_IRUGO, nn->proc_nfsfs,
p                1273 fs/nfs/client.c 	if (!p)
p                1277 fs/nfs/client.c 	p = proc_create_net("volumes", S_IFREG|S_IRUGO, nn->proc_nfsfs,
p                1279 fs/nfs/client.c 	if (!p)
p                2395 fs/nfs/dir.c   	struct rb_node **p = &root_node->rb_node;
p                2401 fs/nfs/dir.c   	while (*p != NULL) {
p                2402 fs/nfs/dir.c   		parent = *p;
p                2407 fs/nfs/dir.c   			p = &parent->rb_left;
p                2409 fs/nfs/dir.c   			p = &parent->rb_right;
p                2413 fs/nfs/dir.c   	rb_link_node(&set->rb_node, parent, p);
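The fs/nfs/dir.c entries show the usual rbtree descent: `p` holds the address of the link slot (`struct rb_node **`) so the new node can be hooked in exactly where the search fell off. A toy plain binary-search-tree analogue, since <linux/rbtree.h> is not available outside the kernel; `toy_*` names are hypothetical.

	#include <stdlib.h>

	struct toy_node {
		unsigned long key;
		struct toy_node *left, *right;
	};

	/* Walk down keeping a pointer to the link slot, then attach the new node there. */
	static void toy_insert(struct toy_node **root, struct toy_node *new_node)
	{
		struct toy_node **p = root;

		while (*p != NULL) {
			struct toy_node *parent = *p;

			if (new_node->key < parent->key)
				p = &parent->left;
			else
				p = &parent->right;
		}
		new_node->left = new_node->right = NULL;
		*p = new_node;	/* rb_link_node() + rb_insert_color() in the kernel version */
	}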
p                  25 fs/nfs/export.c static struct nfs_fh *nfs_exp_embedfh(__u32 *p)
p                  27 fs/nfs/export.c 	return (struct nfs_fh *)(p + EMBED_FH_OFF);
p                  35 fs/nfs/export.c nfs_encode_fh(struct inode *inode, __u32 *p, int *max_len, struct inode *parent)
p                  38 fs/nfs/export.c 	struct nfs_fh *clnt_fh = nfs_exp_embedfh(p);
p                  52 fs/nfs/export.c 	p[FILEID_HIGH_OFF] = NFS_FILEID(inode) >> 32;
p                  53 fs/nfs/export.c 	p[FILEID_LOW_OFF] = NFS_FILEID(inode);
p                  54 fs/nfs/export.c 	p[FILE_I_TYPE_OFF] = inode->i_mode & S_IFMT;
p                  55 fs/nfs/export.c 	p[len - 1] = 0; /* Padding */
p                  75 fs/nfs/export.c 	u32 *p = fid->raw;
p                  88 fs/nfs/export.c 	fattr->fileid = ((u64)p[FILEID_HIGH_OFF] << 32) + p[FILEID_LOW_OFF];
p                  89 fs/nfs/export.c 	fattr->mode = p[FILE_I_TYPE_OFF];
p                 657 fs/nfs/filelayout/filelayout.c 	__be32 *p;
p                 672 fs/nfs/filelayout/filelayout.c 	p = xdr_inline_decode(&stream, NFS4_DEVICEID4_SIZE + 20);
p                 673 fs/nfs/filelayout/filelayout.c 	if (unlikely(!p))
p                 676 fs/nfs/filelayout/filelayout.c 	memcpy(&fl->deviceid, p, sizeof(fl->deviceid));
p                 677 fs/nfs/filelayout/filelayout.c 	p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
p                 680 fs/nfs/filelayout/filelayout.c 	nfl_util = be32_to_cpup(p++);
p                 689 fs/nfs/filelayout/filelayout.c 	fl->first_stripe_index = be32_to_cpup(p++);
p                 690 fs/nfs/filelayout/filelayout.c 	p = xdr_decode_hyper(p, &fl->pattern_offset);
p                 691 fs/nfs/filelayout/filelayout.c 	fl->num_fh = be32_to_cpup(p++);
p                 716 fs/nfs/filelayout/filelayout.c 		p = xdr_inline_decode(&stream, 4);
p                 717 fs/nfs/filelayout/filelayout.c 		if (unlikely(!p))
p                 719 fs/nfs/filelayout/filelayout.c 		fl->fh_array[i]->size = be32_to_cpup(p++);
p                 726 fs/nfs/filelayout/filelayout.c 		p = xdr_inline_decode(&stream, fl->fh_array[i]->size);
p                 727 fs/nfs/filelayout/filelayout.c 		if (unlikely(!p))
p                 729 fs/nfs/filelayout/filelayout.c 		memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size);
p                  69 fs/nfs/filelayout/filelayoutdev.c 	__be32 *p;
p                  88 fs/nfs/filelayout/filelayoutdev.c 	p = xdr_inline_decode(&stream, 4);
p                  89 fs/nfs/filelayout/filelayoutdev.c 	if (unlikely(!p))
p                  92 fs/nfs/filelayout/filelayoutdev.c 	cnt = be32_to_cpup(p);
p                 106 fs/nfs/filelayout/filelayoutdev.c 	p = xdr_inline_decode(&stream, cnt << 2);
p                 107 fs/nfs/filelayout/filelayoutdev.c 	if (unlikely(!p))
p                 113 fs/nfs/filelayout/filelayoutdev.c 		*indexp = be32_to_cpup(p++);
p                 119 fs/nfs/filelayout/filelayoutdev.c 	p = xdr_inline_decode(&stream, 4);
p                 120 fs/nfs/filelayout/filelayoutdev.c 	if (unlikely(!p))
p                 123 fs/nfs/filelayout/filelayoutdev.c 	num = be32_to_cpup(p);
p                 157 fs/nfs/filelayout/filelayoutdev.c 		p = xdr_inline_decode(&stream, 4);
p                 158 fs/nfs/filelayout/filelayoutdev.c 		if (unlikely(!p))
p                 161 fs/nfs/filelayout/filelayoutdev.c 		mp_count = be32_to_cpup(p); /* multipath count */
p                  74 fs/nfs/flexfilelayout/flexfilelayout.c 	__be32 *p;
p                  76 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
p                  77 fs/nfs/flexfilelayout/flexfilelayout.c 	if (unlikely(p == NULL))
p                  80 fs/nfs/flexfilelayout/flexfilelayout.c 	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
p                  82 fs/nfs/flexfilelayout/flexfilelayout.c 		p[0], p[1], p[2], p[3]);
p                  88 fs/nfs/flexfilelayout/flexfilelayout.c 	__be32 *p;
p                  90 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
p                  91 fs/nfs/flexfilelayout/flexfilelayout.c 	if (unlikely(!p))
p                  93 fs/nfs/flexfilelayout/flexfilelayout.c 	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
p                 100 fs/nfs/flexfilelayout/flexfilelayout.c 	__be32 *p;
p                 102 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_inline_decode(xdr, 4);
p                 103 fs/nfs/flexfilelayout/flexfilelayout.c 	if (unlikely(!p))
p                 105 fs/nfs/flexfilelayout/flexfilelayout.c 	fh->size = be32_to_cpup(p++);
p                 112 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_inline_decode(xdr, fh->size);
p                 113 fs/nfs/flexfilelayout/flexfilelayout.c 	if (unlikely(!p))
p                 115 fs/nfs/flexfilelayout/flexfilelayout.c 	memcpy(&fh->data, p, fh->size);
p                 132 fs/nfs/flexfilelayout/flexfilelayout.c 	__be32 *p;
p                 136 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_inline_decode(xdr, 4);
p                 137 fs/nfs/flexfilelayout/flexfilelayout.c 	if (unlikely(!p))
p                 139 fs/nfs/flexfilelayout/flexfilelayout.c 	len = be32_to_cpup(p++);
p                 146 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_inline_decode(xdr, len);
p                 147 fs/nfs/flexfilelayout/flexfilelayout.c 	if (unlikely(!p))
p                 150 fs/nfs/flexfilelayout/flexfilelayout.c 	if (!nfs_map_string_to_numeric((char *)p, len, id))
p                 375 fs/nfs/flexfilelayout/flexfilelayout.c 	__be32 *p;
p                 389 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_inline_decode(&stream, 8 + 4);
p                 390 fs/nfs/flexfilelayout/flexfilelayout.c 	if (!p)
p                 393 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_decode_hyper(p, &stripe_unit);
p                 394 fs/nfs/flexfilelayout/flexfilelayout.c 	mirror_array_cnt = be32_to_cpup(p++);
p                 424 fs/nfs/flexfilelayout/flexfilelayout.c 		p = xdr_inline_decode(&stream, 4);
p                 425 fs/nfs/flexfilelayout/flexfilelayout.c 		if (!p)
p                 427 fs/nfs/flexfilelayout/flexfilelayout.c 		ds_count = be32_to_cpup(p);
p                 448 fs/nfs/flexfilelayout/flexfilelayout.c 		p = xdr_inline_decode(&stream, 4);
p                 449 fs/nfs/flexfilelayout/flexfilelayout.c 		if (!p)
p                 451 fs/nfs/flexfilelayout/flexfilelayout.c 		fls->mirror_array[i]->efficiency = be32_to_cpup(p);
p                 460 fs/nfs/flexfilelayout/flexfilelayout.c 		p = xdr_inline_decode(&stream, 4);
p                 461 fs/nfs/flexfilelayout/flexfilelayout.c 		if (!p)
p                 463 fs/nfs/flexfilelayout/flexfilelayout.c 		fh_count = be32_to_cpup(p);
p                 535 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_inline_decode(&stream, 4);
p                 536 fs/nfs/flexfilelayout/flexfilelayout.c 	if (!p)
p                 538 fs/nfs/flexfilelayout/flexfilelayout.c 	fls->flags = be32_to_cpup(p);
p                 540 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_inline_decode(&stream, 4);
p                 541 fs/nfs/flexfilelayout/flexfilelayout.c 	if (!p)
p                 544 fs/nfs/flexfilelayout/flexfilelayout.c 		fls->mirror_array[i]->report_interval = be32_to_cpup(p);
p                2037 fs/nfs/flexfilelayout/flexfilelayout.c 	__be32 *p;
p                2039 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_reserve_space(xdr, 8 + 8);
p                2040 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_encode_hyper(p, devinfo->offset);
p                2041 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_encode_hyper(p, devinfo->length);
p                2043 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_reserve_space(xdr, 4*8);
p                2044 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_encode_hyper(p, devinfo->read_count);
p                2045 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_encode_hyper(p, devinfo->read_bytes);
p                2046 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_encode_hyper(p, devinfo->write_count);
p                2047 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_encode_hyper(p, devinfo->write_bytes);
p                2066 fs/nfs/flexfilelayout/flexfilelayout.c 	__be32 *p;
p                2069 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_reserve_space(xdr, 4);
p                2070 fs/nfs/flexfilelayout/flexfilelayout.c 	*p = cpu_to_be32(ff_args->num_dev);
p                2293 fs/nfs/flexfilelayout/flexfilelayout.c 	__be32 *p;
p                2319 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_reserve_space(xdr, 4 + netid_len);
p                2320 fs/nfs/flexfilelayout/flexfilelayout.c 	xdr_encode_opaque(p, netid, netid_len);
p                2322 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_reserve_space(xdr, 4 + len);
p                2323 fs/nfs/flexfilelayout/flexfilelayout.c 	xdr_encode_opaque(p, addrbuf, len);
p                2331 fs/nfs/flexfilelayout/flexfilelayout.c 	__be32 *p;
p                2333 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_reserve_space(xdr, 12);
p                2335 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_encode_hyper(p, ts.tv_sec);
p                2336 fs/nfs/flexfilelayout/flexfilelayout.c 	*p++ = cpu_to_be32(ts.tv_nsec);
p                2343 fs/nfs/flexfilelayout/flexfilelayout.c 	__be32 *p;
p                2345 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_reserve_space(xdr, 5 * 8);
p                2346 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_encode_hyper(p, stat->ops_requested);
p                2347 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_encode_hyper(p, stat->bytes_requested);
p                2348 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_encode_hyper(p, stat->ops_completed);
p                2349 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_encode_hyper(p, stat->bytes_completed);
p                2350 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
p                2363 fs/nfs/flexfilelayout/flexfilelayout.c 	__be32 *p;
p                2371 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_reserve_space(xdr, 4 + fh->size);
p                2372 fs/nfs/flexfilelayout/flexfilelayout.c 	xdr_encode_opaque(p, fh->data, fh->size);
p                2382 fs/nfs/flexfilelayout/flexfilelayout.c 	p = xdr_reserve_space(xdr, 4);
p                2383 fs/nfs/flexfilelayout/flexfilelayout.c 	*p = cpu_to_be32(false);
p                2398 fs/nfs/flexfilelayout/flexfilelayout.c 	*start = cpu_to_be32((xdr->p - start - 1) * 4);
p                  54 fs/nfs/flexfilelayout/flexfilelayoutdev.c 	__be32 *p;
p                  75 fs/nfs/flexfilelayout/flexfilelayoutdev.c 	p = xdr_inline_decode(&stream, 4);
p                  76 fs/nfs/flexfilelayout/flexfilelayoutdev.c 	if (unlikely(!p))
p                  78 fs/nfs/flexfilelayout/flexfilelayoutdev.c 	mp_count = be32_to_cpup(p);
p                  96 fs/nfs/flexfilelayout/flexfilelayoutdev.c 	p = xdr_inline_decode(&stream, 4);
p                  97 fs/nfs/flexfilelayout/flexfilelayoutdev.c 	if (unlikely(!p))
p                  99 fs/nfs/flexfilelayout/flexfilelayoutdev.c 	version_count = be32_to_cpup(p);
p                 111 fs/nfs/flexfilelayout/flexfilelayoutdev.c 		p = xdr_inline_decode(&stream, 20);
p                 112 fs/nfs/flexfilelayout/flexfilelayoutdev.c 		if (unlikely(!p))
p                 114 fs/nfs/flexfilelayout/flexfilelayoutdev.c 		ds_versions[i].version = be32_to_cpup(p++);
p                 115 fs/nfs/flexfilelayout/flexfilelayoutdev.c 		ds_versions[i].minor_version = be32_to_cpup(p++);
p                 116 fs/nfs/flexfilelayout/flexfilelayoutdev.c 		ds_versions[i].rsize = nfs_block_size(be32_to_cpup(p++), NULL);
p                 117 fs/nfs/flexfilelayout/flexfilelayoutdev.c 		ds_versions[i].wsize = nfs_block_size(be32_to_cpup(p++), NULL);
p                 118 fs/nfs/flexfilelayout/flexfilelayoutdev.c 		ds_versions[i].tightly_coupled = be32_to_cpup(p);
p                 476 fs/nfs/flexfilelayout/flexfilelayoutdev.c 	__be32 *p;
p                 483 fs/nfs/flexfilelayout/flexfilelayoutdev.c 		p = xdr_reserve_space(xdr,
p                 485 fs/nfs/flexfilelayout/flexfilelayoutdev.c 		if (unlikely(!p))
p                 487 fs/nfs/flexfilelayout/flexfilelayoutdev.c 		p = xdr_encode_hyper(p, err->offset);
p                 488 fs/nfs/flexfilelayout/flexfilelayoutdev.c 		p = xdr_encode_hyper(p, err->length);
p                 489 fs/nfs/flexfilelayout/flexfilelayoutdev.c 		p = xdr_encode_opaque_fixed(p, &err->stateid,
p                 492 fs/nfs/flexfilelayout/flexfilelayoutdev.c 		*p++ = cpu_to_be32(1);
p                 493 fs/nfs/flexfilelayout/flexfilelayoutdev.c 		p = xdr_encode_opaque_fixed(p, &err->deviceid,
p                 495 fs/nfs/flexfilelayout/flexfilelayoutdev.c 		*p++ = cpu_to_be32(err->status);
p                 496 fs/nfs/flexfilelayout/flexfilelayoutdev.c 		*p++ = cpu_to_be32(err->opnum);
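The flexfilelayout encode entries above are the mirror image of the decode pattern: reserve a fixed-size chunk with xdr_reserve_space(), then fill it through `p` with xdr_encode_hyper()/cpu_to_be32(). A self-contained userspace sketch of that shape; the helper names and the four-counter layout are illustrative assumptions, not the kernel API.

	#include <stdint.h>
	#include <stddef.h>
	#include <arpa/inet.h>	/* htonl() */

	struct toy_stream { uint32_t *cur, *end; };

	static uint32_t *toy_reserve_space(struct toy_stream *s, size_t nbytes)
	{
		size_t nwords = (nbytes + 3) / 4;

		if ((size_t)(s->end - s->cur) < nwords)
			return NULL;
		s->cur += nwords;
		return s->cur - nwords;
	}

	/* Split a 64-bit value into two big-endian XDR words, like xdr_encode_hyper(). */
	static uint32_t *toy_encode_hyper(uint32_t *p, uint64_t v)
	{
		*p++ = htonl((uint32_t)(v >> 32));
		*p++ = htonl((uint32_t)v);
		return p;
	}

	/* Shape of the layoutstats encode above: offset + length, then four 64-bit counters. */
	static int toy_encode_io_stats(struct toy_stream *s, uint64_t offset,
				       uint64_t length, const uint64_t counters[4])
	{
		uint32_t *p = toy_reserve_space(s, 8 + 8);

		if (p == NULL)
			return -1;
		p = toy_encode_hyper(p, offset);
		p = toy_encode_hyper(p, length);

		p = toy_reserve_space(s, 4 * 8);
		if (p == NULL)
			return -1;
		for (int i = 0; i < 4; i++)
			p = toy_encode_hyper(p, counters[i]);
		return 0;
	}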
p                 116 fs/nfs/fscache.c 	struct rb_node **p, *parent;
p                 148 fs/nfs/fscache.c 	p = &nfs_fscache_keys.rb_node;
p                 150 fs/nfs/fscache.c 	while (*p) {
p                 151 fs/nfs/fscache.c 		parent = *p;
p                 177 fs/nfs/fscache.c 		p = &(*p)->rb_left;
p                 180 fs/nfs/fscache.c 		p = &(*p)->rb_right;
p                 183 fs/nfs/fscache.c 	rb_link_node(&key->node, parent, p);
p                 386 fs/nfs/internal.h extern int nfs_wait_atomic_killable(atomic_t *p, unsigned int mode);
p                 430 fs/nfs/internal.h extern char *nfs_path(char **p, struct dentry *dentry,
p                 470 fs/nfs/internal.h extern void nfs_commit_free(struct nfs_commit_data *p);
p                 304 fs/nfs/mount_clnt.c 	__be32 *p;
p                 306 fs/nfs/mount_clnt.c 	p = xdr_reserve_space(xdr, 4 + pathname_len);
p                 307 fs/nfs/mount_clnt.c 	xdr_encode_opaque(p, pathname, pathname_len);
p                 329 fs/nfs/mount_clnt.c 	__be32 *p;
p                 331 fs/nfs/mount_clnt.c 	p = xdr_inline_decode(xdr, 4);
p                 332 fs/nfs/mount_clnt.c 	if (unlikely(p == NULL))
p                 334 fs/nfs/mount_clnt.c 	status = be32_to_cpup(p);
p                 351 fs/nfs/mount_clnt.c 	__be32 *p;
p                 353 fs/nfs/mount_clnt.c 	p = xdr_inline_decode(xdr, NFS2_FHSIZE);
p                 354 fs/nfs/mount_clnt.c 	if (unlikely(p == NULL))
p                 358 fs/nfs/mount_clnt.c 	memcpy(fh->data, p, NFS2_FHSIZE);
p                 379 fs/nfs/mount_clnt.c 	__be32 *p;
p                 381 fs/nfs/mount_clnt.c 	p = xdr_inline_decode(xdr, 4);
p                 382 fs/nfs/mount_clnt.c 	if (unlikely(p == NULL))
p                 384 fs/nfs/mount_clnt.c 	status = be32_to_cpup(p);
p                 402 fs/nfs/mount_clnt.c 	__be32 *p;
p                 404 fs/nfs/mount_clnt.c 	p = xdr_inline_decode(xdr, 4);
p                 405 fs/nfs/mount_clnt.c 	if (unlikely(p == NULL))
p                 408 fs/nfs/mount_clnt.c 	size = be32_to_cpup(p);
p                 412 fs/nfs/mount_clnt.c 	p = xdr_inline_decode(xdr, size);
p                 413 fs/nfs/mount_clnt.c 	if (unlikely(p == NULL))
p                 417 fs/nfs/mount_clnt.c 	memcpy(fh->data, p, size);
p                 426 fs/nfs/mount_clnt.c 	__be32 *p;
p                 431 fs/nfs/mount_clnt.c 	p = xdr_inline_decode(xdr, 4);
p                 432 fs/nfs/mount_clnt.c 	if (unlikely(p == NULL))
p                 434 fs/nfs/mount_clnt.c 	entries = be32_to_cpup(p);
p                 439 fs/nfs/mount_clnt.c 	p = xdr_inline_decode(xdr, 4 * entries);
p                 440 fs/nfs/mount_clnt.c 	if (unlikely(p == NULL))
p                 447 fs/nfs/mount_clnt.c 		flavors[i] = be32_to_cpup(p++);
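The fs/nfs/mount_clnt.c entries show the two-step decode of a variable-length opaque: read the 4-byte length word, sanity-check it against a maximum, then pull exactly that many payload bytes. A hedged userspace sketch of the same control flow; TOY_FHSIZE_MAX and the `toy_*` names are stand-ins, not the NFS constants or helpers.

	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>
	#include <arpa/inet.h>

	#define TOY_FHSIZE_MAX 64	/* stand-in for a protocol maximum such as NFS3_FHSIZE */

	struct toy_rdbuf { const uint32_t *cur, *end; };
	struct toy_fh { uint32_t size; uint8_t data[TOY_FHSIZE_MAX]; };

	static const uint32_t *toy_inline_decode(struct toy_rdbuf *b, size_t nbytes)
	{
		size_t nwords = (nbytes + 3) / 4;

		if ((size_t)(b->end - b->cur) < nwords)
			return NULL;
		b->cur += nwords;
		return b->cur - nwords;
	}

	/* Length word first, reject oversized handles, then copy the payload. */
	static int toy_decode_fhandle(struct toy_rdbuf *b, struct toy_fh *fh)
	{
		const uint32_t *p = toy_inline_decode(b, 4);
		uint32_t size;

		if (p == NULL)
			return -1;
		size = ntohl(*p);
		if (size > TOY_FHSIZE_MAX)
			return -1;

		p = toy_inline_decode(b, size);
		if (p == NULL)
			return -1;
		fh->size = size;
		memcpy(fh->data, p, size);
		return 0;
	}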
p                  51 fs/nfs/namespace.c char *nfs_path(char **p, struct dentry *dentry, char *buffer, ssize_t buflen,
p                  93 fs/nfs/namespace.c 	*p = end;
p                  99 fs/nfs/nfs2xdr.c 	__be32 *p;
p                 101 fs/nfs/nfs2xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 102 fs/nfs/nfs2xdr.c 	if (unlikely(!p))
p                 104 fs/nfs/nfs2xdr.c 	count = be32_to_cpup(p);
p                 143 fs/nfs/nfs2xdr.c 	__be32 *p;
p                 145 fs/nfs/nfs2xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 146 fs/nfs/nfs2xdr.c 	if (unlikely(!p))
p                 148 fs/nfs/nfs2xdr.c 	if (unlikely(*p != cpu_to_be32(NFS_OK)))
p                 153 fs/nfs/nfs2xdr.c 	*status = be32_to_cpup(p);
p                 171 fs/nfs/nfs2xdr.c static __be32 *xdr_decode_ftype(__be32 *p, u32 *type)
p                 173 fs/nfs/nfs2xdr.c 	*type = be32_to_cpup(p++);
p                 176 fs/nfs/nfs2xdr.c 	return p;
p                 186 fs/nfs/nfs2xdr.c 	__be32 *p;
p                 188 fs/nfs/nfs2xdr.c 	p = xdr_reserve_space(xdr, NFS2_FHSIZE);
p                 189 fs/nfs/nfs2xdr.c 	memcpy(p, fh->data, NFS2_FHSIZE);
p                 194 fs/nfs/nfs2xdr.c 	__be32 *p;
p                 196 fs/nfs/nfs2xdr.c 	p = xdr_inline_decode(xdr, NFS2_FHSIZE);
p                 197 fs/nfs/nfs2xdr.c 	if (unlikely(!p))
p                 200 fs/nfs/nfs2xdr.c 	memcpy(fh->data, p, NFS2_FHSIZE);
p                 212 fs/nfs/nfs2xdr.c static __be32 *xdr_encode_time(__be32 *p, const struct timespec *timep)
p                 214 fs/nfs/nfs2xdr.c 	*p++ = cpu_to_be32(timep->tv_sec);
p                 216 fs/nfs/nfs2xdr.c 		*p++ = cpu_to_be32(timep->tv_nsec / NSEC_PER_USEC);
p                 218 fs/nfs/nfs2xdr.c 		*p++ = cpu_to_be32(0);
p                 219 fs/nfs/nfs2xdr.c 	return p;
p                 229 fs/nfs/nfs2xdr.c static __be32 *xdr_encode_current_server_time(__be32 *p,
p                 232 fs/nfs/nfs2xdr.c 	*p++ = cpu_to_be32(timep->tv_sec);
p                 233 fs/nfs/nfs2xdr.c 	*p++ = cpu_to_be32(1000000);
p                 234 fs/nfs/nfs2xdr.c 	return p;
p                 237 fs/nfs/nfs2xdr.c static __be32 *xdr_decode_time(__be32 *p, struct timespec *timep)
p                 239 fs/nfs/nfs2xdr.c 	timep->tv_sec = be32_to_cpup(p++);
p                 240 fs/nfs/nfs2xdr.c 	timep->tv_nsec = be32_to_cpup(p++) * NSEC_PER_USEC;
p                 241 fs/nfs/nfs2xdr.c 	return p;
p                 269 fs/nfs/nfs2xdr.c 	__be32 *p;
p                 271 fs/nfs/nfs2xdr.c 	p = xdr_inline_decode(xdr, NFS_fattr_sz << 2);
p                 272 fs/nfs/nfs2xdr.c 	if (unlikely(!p))
p                 277 fs/nfs/nfs2xdr.c 	p = xdr_decode_ftype(p, &type);
p                 279 fs/nfs/nfs2xdr.c 	fattr->mode = be32_to_cpup(p++);
p                 280 fs/nfs/nfs2xdr.c 	fattr->nlink = be32_to_cpup(p++);
p                 281 fs/nfs/nfs2xdr.c 	fattr->uid = make_kuid(userns, be32_to_cpup(p++));
p                 284 fs/nfs/nfs2xdr.c 	fattr->gid = make_kgid(userns, be32_to_cpup(p++));
p                 288 fs/nfs/nfs2xdr.c 	fattr->size = be32_to_cpup(p++);
p                 289 fs/nfs/nfs2xdr.c 	fattr->du.nfs2.blocksize = be32_to_cpup(p++);
p                 291 fs/nfs/nfs2xdr.c 	rdev = be32_to_cpup(p++);
p                 298 fs/nfs/nfs2xdr.c 	fattr->du.nfs2.blocks = be32_to_cpup(p++);
p                 299 fs/nfs/nfs2xdr.c 	fattr->fsid.major = be32_to_cpup(p++);
p                 301 fs/nfs/nfs2xdr.c 	fattr->fileid = be32_to_cpup(p++);
p                 303 fs/nfs/nfs2xdr.c 	p = xdr_decode_time(p, &fattr->atime);
p                 304 fs/nfs/nfs2xdr.c 	p = xdr_decode_time(p, &fattr->mtime);
p                 305 fs/nfs/nfs2xdr.c 	xdr_decode_time(p, &fattr->ctime);
p                 332 fs/nfs/nfs2xdr.c static __be32 *xdr_time_not_set(__be32 *p)
p                 334 fs/nfs/nfs2xdr.c 	*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
p                 335 fs/nfs/nfs2xdr.c 	*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
p                 336 fs/nfs/nfs2xdr.c 	return p;
p                 343 fs/nfs/nfs2xdr.c 	__be32 *p;
p                 345 fs/nfs/nfs2xdr.c 	p = xdr_reserve_space(xdr, NFS_sattr_sz << 2);
p                 348 fs/nfs/nfs2xdr.c 		*p++ = cpu_to_be32(attr->ia_mode);
p                 350 fs/nfs/nfs2xdr.c 		*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
p                 352 fs/nfs/nfs2xdr.c 		*p++ = cpu_to_be32(from_kuid_munged(userns, attr->ia_uid));
p                 354 fs/nfs/nfs2xdr.c 		*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
p                 356 fs/nfs/nfs2xdr.c 		*p++ = cpu_to_be32(from_kgid_munged(userns, attr->ia_gid));
p                 358 fs/nfs/nfs2xdr.c 		*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
p                 360 fs/nfs/nfs2xdr.c 		*p++ = cpu_to_be32((u32)attr->ia_size);
p                 362 fs/nfs/nfs2xdr.c 		*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
p                 366 fs/nfs/nfs2xdr.c 		p = xdr_encode_time(p, &ts);
p                 369 fs/nfs/nfs2xdr.c 		p = xdr_encode_current_server_time(p, &ts);
p                 371 fs/nfs/nfs2xdr.c 		p = xdr_time_not_set(p);
p                 374 fs/nfs/nfs2xdr.c 		xdr_encode_time(p, &ts);
p                 377 fs/nfs/nfs2xdr.c 		xdr_encode_current_server_time(p, &ts);
p                 379 fs/nfs/nfs2xdr.c 		xdr_time_not_set(p);
p                 390 fs/nfs/nfs2xdr.c 	__be32 *p;
p                 393 fs/nfs/nfs2xdr.c 	p = xdr_reserve_space(xdr, 4 + length);
p                 394 fs/nfs/nfs2xdr.c 	xdr_encode_opaque(p, name, length);
p                 400 fs/nfs/nfs2xdr.c 	__be32 *p;
p                 403 fs/nfs/nfs2xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 404 fs/nfs/nfs2xdr.c 	if (unlikely(!p))
p                 406 fs/nfs/nfs2xdr.c 	count = be32_to_cpup(p);
p                 409 fs/nfs/nfs2xdr.c 	p = xdr_inline_decode(xdr, count);
p                 410 fs/nfs/nfs2xdr.c 	if (unlikely(!p))
p                 412 fs/nfs/nfs2xdr.c 	*name = (const char *)p;
p                 427 fs/nfs/nfs2xdr.c 	__be32 *p;
p                 429 fs/nfs/nfs2xdr.c 	p = xdr_reserve_space(xdr, 4);
p                 430 fs/nfs/nfs2xdr.c 	*p = cpu_to_be32(length);
p                 437 fs/nfs/nfs2xdr.c 	__be32 *p;
p                 439 fs/nfs/nfs2xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 440 fs/nfs/nfs2xdr.c 	if (unlikely(!p))
p                 442 fs/nfs/nfs2xdr.c 	length = be32_to_cpup(p);
p                 619 fs/nfs/nfs2xdr.c 	__be32 *p;
p                 623 fs/nfs/nfs2xdr.c 	p = xdr_reserve_space(xdr, 4 + 4 + 4);
p                 624 fs/nfs/nfs2xdr.c 	*p++ = cpu_to_be32(offset);
p                 625 fs/nfs/nfs2xdr.c 	*p++ = cpu_to_be32(count);
p                 626 fs/nfs/nfs2xdr.c 	*p = cpu_to_be32(count);
p                 657 fs/nfs/nfs2xdr.c 	__be32 *p;
p                 661 fs/nfs/nfs2xdr.c 	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
p                 662 fs/nfs/nfs2xdr.c 	*p++ = cpu_to_be32(offset);
p                 663 fs/nfs/nfs2xdr.c 	*p++ = cpu_to_be32(offset);
p                 664 fs/nfs/nfs2xdr.c 	*p++ = cpu_to_be32(count);
p                 667 fs/nfs/nfs2xdr.c 	*p = cpu_to_be32(count);
p                 778 fs/nfs/nfs2xdr.c 	__be32 *p;
p                 782 fs/nfs/nfs2xdr.c 	p = xdr_reserve_space(xdr, 4 + 4);
p                 783 fs/nfs/nfs2xdr.c 	*p++ = cpu_to_be32(args->cookie);
p                 784 fs/nfs/nfs2xdr.c 	*p = cpu_to_be32(args->count);
p                 933 fs/nfs/nfs2xdr.c 	__be32 *p;
p                 936 fs/nfs/nfs2xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 937 fs/nfs/nfs2xdr.c 	if (unlikely(!p))
p                 939 fs/nfs/nfs2xdr.c 	if (*p++ == xdr_zero) {
p                 940 fs/nfs/nfs2xdr.c 		p = xdr_inline_decode(xdr, 4);
p                 941 fs/nfs/nfs2xdr.c 		if (unlikely(!p))
p                 943 fs/nfs/nfs2xdr.c 		if (*p++ == xdr_zero)
p                 949 fs/nfs/nfs2xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 950 fs/nfs/nfs2xdr.c 	if (unlikely(!p))
p                 952 fs/nfs/nfs2xdr.c 	entry->ino = be32_to_cpup(p);
p                 963 fs/nfs/nfs2xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 964 fs/nfs/nfs2xdr.c 	if (unlikely(!p))
p                 966 fs/nfs/nfs2xdr.c 	entry->cookie = be32_to_cpup(p);
p                1031 fs/nfs/nfs2xdr.c 	__be32 *p;
p                1033 fs/nfs/nfs2xdr.c 	p = xdr_inline_decode(xdr, NFS_info_sz << 2);
p                1034 fs/nfs/nfs2xdr.c 	if (unlikely(!p))
p                1036 fs/nfs/nfs2xdr.c 	result->tsize  = be32_to_cpup(p++);
p                1037 fs/nfs/nfs2xdr.c 	result->bsize  = be32_to_cpup(p++);
p                1038 fs/nfs/nfs2xdr.c 	result->blocks = be32_to_cpup(p++);
p                1039 fs/nfs/nfs2xdr.c 	result->bfree  = be32_to_cpup(p++);
p                1040 fs/nfs/nfs2xdr.c 	result->bavail = be32_to_cpup(p);
p                  20 fs/nfs/nfs3acl.c static void nfs3_prepare_get_acl(struct posix_acl **p)
p                  24 fs/nfs/nfs3acl.c 	if (cmpxchg(p, ACL_NOT_CACHED, sentinel) != ACL_NOT_CACHED) {
p                  29 fs/nfs/nfs3acl.c static void nfs3_complete_get_acl(struct posix_acl **p, struct posix_acl *acl)
p                  35 fs/nfs/nfs3acl.c 	if (cmpxchg(p, sentinel, acl) != sentinel)
p                  39 fs/nfs/nfs3acl.c static void nfs3_abort_get_acl(struct posix_acl **p)
p                  44 fs/nfs/nfs3acl.c 	cmpxchg(p, sentinel, ACL_NOT_CACHED);
p                 308 fs/nfs/nfs3acl.c 	char *p = data + *result;
p                 323 fs/nfs/nfs3acl.c 	strcpy(p, name);
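The fs/nfs/nfs3acl.c entries above use a sentinel-based cmpxchg protocol: claim the cache slot with a sentinel, later publish the real value only if the sentinel is still installed, and fall back to "not cached" on the error path. The following is a loose userspace sketch of that idea using C11 atomics; the `toy_*` names and TOY_NOT_CACHED are assumptions, and the kernel's cmpxchg()/ACL_NOT_CACHED semantics are only approximated here.

	#include <stdatomic.h>
	#include <stddef.h>

	#define TOY_NOT_CACHED ((void *)0)	/* stand-in for ACL_NOT_CACHED */

	/* Claim the cache slot by swapping in a caller-chosen sentinel value. */
	static int toy_prepare_get(_Atomic(void *) *slot, void *sentinel)
	{
		void *expected = TOY_NOT_CACHED;

		return atomic_compare_exchange_strong(slot, &expected, sentinel);
	}

	/* Publish the result only if our sentinel is still installed; if another
	 * thread raced in and replaced it, the caller keeps (and frees) its copy. */
	static int toy_complete_get(_Atomic(void *) *slot, void *sentinel, void *value)
	{
		void *expected = sentinel;

		return atomic_compare_exchange_strong(slot, &expected, value);
	}

	/* Error path: drop the slot back to "not cached" if we still own it. */
	static void toy_abort_get(_Atomic(void *) *slot, void *sentinel)
	{
		void *expected = sentinel;

		atomic_compare_exchange_strong(slot, &expected, TOY_NOT_CACHED);
	}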
p                 134 fs/nfs/nfs3xdr.c 	__be32 *p = xdr_reserve_space(xdr, 4);
p                 135 fs/nfs/nfs3xdr.c 	*p = cpu_to_be32(value);
p                 140 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 142 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 143 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                 145 fs/nfs/nfs3xdr.c 	*value = be32_to_cpup(p);
p                 151 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 153 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, 8);
p                 154 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                 156 fs/nfs/nfs3xdr.c 	xdr_decode_hyper(p, value);
p                 165 fs/nfs/nfs3xdr.c static __be32 *xdr_decode_fileid3(__be32 *p, u64 *fileid)
p                 167 fs/nfs/nfs3xdr.c 	return xdr_decode_hyper(p, fileid);
p                 183 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 186 fs/nfs/nfs3xdr.c 	p = xdr_reserve_space(xdr, 4 + length);
p                 187 fs/nfs/nfs3xdr.c 	xdr_encode_opaque(p, name, length);
p                 193 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 196 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 197 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                 199 fs/nfs/nfs3xdr.c 	count = be32_to_cpup(p);
p                 202 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, count);
p                 203 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                 205 fs/nfs/nfs3xdr.c 	*name = (const char *)p;
p                 229 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 231 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 232 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                 234 fs/nfs/nfs3xdr.c 	count = be32_to_cpup(p);
p                 257 fs/nfs/nfs3xdr.c static __be32 *xdr_encode_cookie3(__be32 *p, u64 cookie)
p                 259 fs/nfs/nfs3xdr.c 	return xdr_encode_hyper(p, cookie);
p                 272 fs/nfs/nfs3xdr.c static __be32 *xdr_encode_cookieverf3(__be32 *p, const __be32 *verifier)
p                 274 fs/nfs/nfs3xdr.c 	memcpy(p, verifier, NFS3_COOKIEVERFSIZE);
p                 275 fs/nfs/nfs3xdr.c 	return p + XDR_QUADLEN(NFS3_COOKIEVERFSIZE);
p                 280 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 282 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, NFS3_COOKIEVERFSIZE);
p                 283 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                 285 fs/nfs/nfs3xdr.c 	memcpy(verifier, p, NFS3_COOKIEVERFSIZE);
p                 296 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 298 fs/nfs/nfs3xdr.c 	p = xdr_reserve_space(xdr, NFS3_CREATEVERFSIZE);
p                 299 fs/nfs/nfs3xdr.c 	memcpy(p, verifier, NFS3_CREATEVERFSIZE);
p                 304 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 306 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, NFS3_WRITEVERFSIZE);
p                 307 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                 309 fs/nfs/nfs3xdr.c 	memcpy(verifier->data, p, NFS3_WRITEVERFSIZE);
p                 318 fs/nfs/nfs3xdr.c static __be32 *xdr_decode_size3(__be32 *p, u64 *size)
p                 320 fs/nfs/nfs3xdr.c 	return xdr_decode_hyper(p, size);
p                 335 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 337 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 338 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                 340 fs/nfs/nfs3xdr.c 	if (unlikely(*p != cpu_to_be32(NFS3_OK)))
p                 345 fs/nfs/nfs3xdr.c 	*status = be32_to_cpup(p);
p                 368 fs/nfs/nfs3xdr.c static __be32 *xdr_decode_ftype3(__be32 *p, umode_t *mode)
p                 372 fs/nfs/nfs3xdr.c 	type = be32_to_cpup(p++);
p                 376 fs/nfs/nfs3xdr.c 	return p;
p                 389 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 391 fs/nfs/nfs3xdr.c 	p = xdr_reserve_space(xdr, 8);
p                 392 fs/nfs/nfs3xdr.c 	*p++ = cpu_to_be32(MAJOR(rdev));
p                 393 fs/nfs/nfs3xdr.c 	*p = cpu_to_be32(MINOR(rdev));
p                 396 fs/nfs/nfs3xdr.c static __be32 *xdr_decode_specdata3(__be32 *p, dev_t *rdev)
p                 400 fs/nfs/nfs3xdr.c 	major = be32_to_cpup(p++);
p                 401 fs/nfs/nfs3xdr.c 	minor = be32_to_cpup(p++);
p                 405 fs/nfs/nfs3xdr.c 	return p;
p                 417 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 420 fs/nfs/nfs3xdr.c 	p = xdr_reserve_space(xdr, 4 + fh->size);
p                 421 fs/nfs/nfs3xdr.c 	xdr_encode_opaque(p, fh->data, fh->size);
p                 427 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 429 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 430 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                 432 fs/nfs/nfs3xdr.c 	length = be32_to_cpup(p++);
p                 435 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, length);
p                 436 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                 439 fs/nfs/nfs3xdr.c 	memcpy(fh->data, p, length);
p                 459 fs/nfs/nfs3xdr.c static __be32 *xdr_encode_nfstime3(__be32 *p, const struct timespec *timep)
p                 461 fs/nfs/nfs3xdr.c 	*p++ = cpu_to_be32(timep->tv_sec);
p                 462 fs/nfs/nfs3xdr.c 	*p++ = cpu_to_be32(timep->tv_nsec);
p                 463 fs/nfs/nfs3xdr.c 	return p;
p                 466 fs/nfs/nfs3xdr.c static __be32 *xdr_decode_nfstime3(__be32 *p, struct timespec *timep)
p                 468 fs/nfs/nfs3xdr.c 	timep->tv_sec = be32_to_cpup(p++);
p                 469 fs/nfs/nfs3xdr.c 	timep->tv_nsec = be32_to_cpup(p++);
p                 470 fs/nfs/nfs3xdr.c 	return p;
p                 538 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 559 fs/nfs/nfs3xdr.c 	p = xdr_reserve_space(xdr, nbytes);
p                 562 fs/nfs/nfs3xdr.c 		*p++ = xdr_one;
p                 563 fs/nfs/nfs3xdr.c 		*p++ = cpu_to_be32(attr->ia_mode & S_IALLUGO);
p                 565 fs/nfs/nfs3xdr.c 		*p++ = xdr_zero;
p                 568 fs/nfs/nfs3xdr.c 		*p++ = xdr_one;
p                 569 fs/nfs/nfs3xdr.c 		*p++ = cpu_to_be32(from_kuid_munged(userns, attr->ia_uid));
p                 571 fs/nfs/nfs3xdr.c 		*p++ = xdr_zero;
p                 574 fs/nfs/nfs3xdr.c 		*p++ = xdr_one;
p                 575 fs/nfs/nfs3xdr.c 		*p++ = cpu_to_be32(from_kgid_munged(userns, attr->ia_gid));
p                 577 fs/nfs/nfs3xdr.c 		*p++ = xdr_zero;
p                 580 fs/nfs/nfs3xdr.c 		*p++ = xdr_one;
p                 581 fs/nfs/nfs3xdr.c 		p = xdr_encode_hyper(p, (u64)attr->ia_size);
p                 583 fs/nfs/nfs3xdr.c 		*p++ = xdr_zero;
p                 587 fs/nfs/nfs3xdr.c 		*p++ = xdr_two;
p                 589 fs/nfs/nfs3xdr.c 		p = xdr_encode_nfstime3(p, &ts);
p                 591 fs/nfs/nfs3xdr.c 		*p++ = xdr_one;
p                 593 fs/nfs/nfs3xdr.c 		*p++ = xdr_zero;
p                 596 fs/nfs/nfs3xdr.c 		*p++ = xdr_two;
p                 598 fs/nfs/nfs3xdr.c 		xdr_encode_nfstime3(p, &ts);
p                 600 fs/nfs/nfs3xdr.c 		*p = xdr_one;
p                 602 fs/nfs/nfs3xdr.c 		*p = xdr_zero;
p                 628 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 630 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, NFS3_fattr_sz << 2);
p                 631 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                 634 fs/nfs/nfs3xdr.c 	p = xdr_decode_ftype3(p, &fmode);
p                 636 fs/nfs/nfs3xdr.c 	fattr->mode = (be32_to_cpup(p++) & ~S_IFMT) | fmode;
p                 637 fs/nfs/nfs3xdr.c 	fattr->nlink = be32_to_cpup(p++);
p                 638 fs/nfs/nfs3xdr.c 	fattr->uid = make_kuid(userns, be32_to_cpup(p++));
p                 641 fs/nfs/nfs3xdr.c 	fattr->gid = make_kgid(userns, be32_to_cpup(p++));
p                 645 fs/nfs/nfs3xdr.c 	p = xdr_decode_size3(p, &fattr->size);
p                 646 fs/nfs/nfs3xdr.c 	p = xdr_decode_size3(p, &fattr->du.nfs3.used);
p                 647 fs/nfs/nfs3xdr.c 	p = xdr_decode_specdata3(p, &fattr->rdev);
p                 649 fs/nfs/nfs3xdr.c 	p = xdr_decode_hyper(p, &fattr->fsid.major);
p                 652 fs/nfs/nfs3xdr.c 	p = xdr_decode_fileid3(p, &fattr->fileid);
p                 653 fs/nfs/nfs3xdr.c 	p = xdr_decode_nfstime3(p, &fattr->atime);
p                 654 fs/nfs/nfs3xdr.c 	p = xdr_decode_nfstime3(p, &fattr->mtime);
p                 655 fs/nfs/nfs3xdr.c 	xdr_decode_nfstime3(p, &fattr->ctime);
p                 681 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 683 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 684 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                 686 fs/nfs/nfs3xdr.c 	if (*p != xdr_zero)
p                 701 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 703 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, NFS3_wcc_attr_sz << 2);
p                 704 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                 712 fs/nfs/nfs3xdr.c 	p = xdr_decode_size3(p, &fattr->pre_size);
p                 713 fs/nfs/nfs3xdr.c 	p = xdr_decode_nfstime3(p, &fattr->pre_mtime);
p                 714 fs/nfs/nfs3xdr.c 	xdr_decode_nfstime3(p, &fattr->pre_ctime);
p                 738 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 740 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 741 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                 743 fs/nfs/nfs3xdr.c 	if (*p != xdr_zero)
p                 773 fs/nfs/nfs3xdr.c 	__be32 *p = xdr_inline_decode(xdr, 4);
p                 774 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                 776 fs/nfs/nfs3xdr.c 	if (*p != xdr_zero)
p                 840 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 843 fs/nfs/nfs3xdr.c 		p = xdr_reserve_space(xdr, 4 + 8);
p                 844 fs/nfs/nfs3xdr.c 		*p++ = xdr_one;
p                 845 fs/nfs/nfs3xdr.c 		xdr_encode_nfstime3(p, &args->guardtime);
p                 847 fs/nfs/nfs3xdr.c 		p = xdr_reserve_space(xdr, 4);
p                 848 fs/nfs/nfs3xdr.c 		*p = xdr_zero;
p                 932 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 936 fs/nfs/nfs3xdr.c 	p = xdr_reserve_space(xdr, 8 + 4);
p                 937 fs/nfs/nfs3xdr.c 	p = xdr_encode_hyper(p, args->offset);
p                 938 fs/nfs/nfs3xdr.c 	*p = cpu_to_be32(args->count);
p                 974 fs/nfs/nfs3xdr.c 	__be32 *p;
p                 978 fs/nfs/nfs3xdr.c 	p = xdr_reserve_space(xdr, 8 + 4 + 4 + 4);
p                 979 fs/nfs/nfs3xdr.c 	p = xdr_encode_hyper(p, args->offset);
p                 980 fs/nfs/nfs3xdr.c 	*p++ = cpu_to_be32(args->count);
p                 981 fs/nfs/nfs3xdr.c 	*p++ = cpu_to_be32(args->stable);
p                 982 fs/nfs/nfs3xdr.c 	*p = cpu_to_be32(args->count);
p                1229 fs/nfs/nfs3xdr.c 	__be32 *p;
p                1233 fs/nfs/nfs3xdr.c 	p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4);
p                1234 fs/nfs/nfs3xdr.c 	p = xdr_encode_cookie3(p, args->cookie);
p                1235 fs/nfs/nfs3xdr.c 	p = xdr_encode_cookieverf3(p, args->verf);
p                1236 fs/nfs/nfs3xdr.c 	*p = cpu_to_be32(args->count);
p                1264 fs/nfs/nfs3xdr.c 	__be32 *p;
p                1268 fs/nfs/nfs3xdr.c 	p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4 + 4);
p                1269 fs/nfs/nfs3xdr.c 	p = xdr_encode_cookie3(p, args->cookie);
p                1270 fs/nfs/nfs3xdr.c 	p = xdr_encode_cookieverf3(p, args->verf);
p                1276 fs/nfs/nfs3xdr.c 	*p++ = cpu_to_be32(args->count >> 3);
p                1278 fs/nfs/nfs3xdr.c 	*p = cpu_to_be32(args->count);
p                1304 fs/nfs/nfs3xdr.c 	__be32 *p;
p                1308 fs/nfs/nfs3xdr.c 	p = xdr_reserve_space(xdr, 8 + 4);
p                1309 fs/nfs/nfs3xdr.c 	p = xdr_encode_hyper(p, args->offset);
p                1310 fs/nfs/nfs3xdr.c 	*p = cpu_to_be32(args->count);
p                1608 fs/nfs/nfs3xdr.c 	__be32 *p;
p                1610 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, 4 + 4 + 4);
p                1611 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                1613 fs/nfs/nfs3xdr.c 	count = be32_to_cpup(p++);
p                1614 fs/nfs/nfs3xdr.c 	eof = be32_to_cpup(p++);
p                1615 fs/nfs/nfs3xdr.c 	ocount = be32_to_cpup(p++);
p                1693 fs/nfs/nfs3xdr.c 	__be32 *p;
p                1695 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, 4 + 4);
p                1696 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                1698 fs/nfs/nfs3xdr.c 	result->count = be32_to_cpup(p++);
p                1699 fs/nfs/nfs3xdr.c 	result->verf->committed = be32_to_cpup(p++);
p                1971 fs/nfs/nfs3xdr.c 	__be32 *p;
p                1975 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, 4);
p                1976 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                1978 fs/nfs/nfs3xdr.c 	if (*p == xdr_zero) {
p                1979 fs/nfs/nfs3xdr.c 		p = xdr_inline_decode(xdr, 4);
p                1980 fs/nfs/nfs3xdr.c 		if (unlikely(!p))
p                1982 fs/nfs/nfs3xdr.c 		if (*p == xdr_zero)
p                2016 fs/nfs/nfs3xdr.c 		p = xdr_inline_decode(xdr, 4);
p                2017 fs/nfs/nfs3xdr.c 		if (unlikely(!p))
p                2019 fs/nfs/nfs3xdr.c 		if (*p != xdr_zero) {
p                2144 fs/nfs/nfs3xdr.c 	__be32 *p;
p                2146 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, 8 * 6 + 4);
p                2147 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                2149 fs/nfs/nfs3xdr.c 	p = xdr_decode_size3(p, &result->tbytes);
p                2150 fs/nfs/nfs3xdr.c 	p = xdr_decode_size3(p, &result->fbytes);
p                2151 fs/nfs/nfs3xdr.c 	p = xdr_decode_size3(p, &result->abytes);
p                2152 fs/nfs/nfs3xdr.c 	p = xdr_decode_size3(p, &result->tfiles);
p                2153 fs/nfs/nfs3xdr.c 	p = xdr_decode_size3(p, &result->ffiles);
p                2154 fs/nfs/nfs3xdr.c 	xdr_decode_size3(p, &result->afiles);
p                2213 fs/nfs/nfs3xdr.c 	__be32 *p;
p                2215 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, 4 * 7 + 8 + 8 + 4);
p                2216 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                2218 fs/nfs/nfs3xdr.c 	result->rtmax  = be32_to_cpup(p++);
p                2219 fs/nfs/nfs3xdr.c 	result->rtpref = be32_to_cpup(p++);
p                2220 fs/nfs/nfs3xdr.c 	result->rtmult = be32_to_cpup(p++);
p                2221 fs/nfs/nfs3xdr.c 	result->wtmax  = be32_to_cpup(p++);
p                2222 fs/nfs/nfs3xdr.c 	result->wtpref = be32_to_cpup(p++);
p                2223 fs/nfs/nfs3xdr.c 	result->wtmult = be32_to_cpup(p++);
p                2224 fs/nfs/nfs3xdr.c 	result->dtpref = be32_to_cpup(p++);
p                2225 fs/nfs/nfs3xdr.c 	p = xdr_decode_size3(p, &result->maxfilesize);
p                2226 fs/nfs/nfs3xdr.c 	xdr_decode_nfstime3(p, &result->time_delta);
p                2283 fs/nfs/nfs3xdr.c 	__be32 *p;
p                2285 fs/nfs/nfs3xdr.c 	p = xdr_inline_decode(xdr, 4 * 6);
p                2286 fs/nfs/nfs3xdr.c 	if (unlikely(!p))
p                2288 fs/nfs/nfs3xdr.c 	result->max_link = be32_to_cpup(p++);
p                2289 fs/nfs/nfs3xdr.c 	result->max_namelen = be32_to_cpup(p);
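Among the fs/nfs/nfs3xdr.c entries, xdr_encode_nfstime3()/xdr_decode_nfstime3() pack a timestamp into two big-endian XDR words (seconds, then nanoseconds). A small self-contained round-trip sketch of that wire layout; the `toy_*` names are illustrative, and the 32-bit second field deliberately mirrors the protocol's truncation.

	#include <stdint.h>
	#include <time.h>
	#include <assert.h>
	#include <arpa/inet.h>

	/* Two XDR words per timestamp: seconds then nanoseconds, both big-endian. */
	static uint32_t *toy_encode_nfstime(uint32_t *p, const struct timespec *t)
	{
		*p++ = htonl((uint32_t)t->tv_sec);	/* truncates to 32 bits, as NFSv3 does */
		*p++ = htonl((uint32_t)t->tv_nsec);
		return p;
	}

	static const uint32_t *toy_decode_nfstime(const uint32_t *p, struct timespec *t)
	{
		t->tv_sec = ntohl(*p++);
		t->tv_nsec = ntohl(*p++);
		return p;
	}

	int main(void)
	{
		uint32_t wire[2];
		struct timespec in = { .tv_sec = 1234567, .tv_nsec = 987654321 }, out;

		toy_encode_nfstime(wire, &in);
		toy_decode_nfstime(wire, &out);
		assert(out.tv_sec == in.tv_sec && out.tv_nsec == in.tv_nsec);
		return 0;
	}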
p                 217 fs/nfs/nfs42xdr.c 	__be32 *p;
p                 220 fs/nfs/nfs42xdr.c 	p = reserve_space(xdr, 8 + 8);
p                 221 fs/nfs/nfs42xdr.c 	p = xdr_encode_hyper(p, devinfo->offset);
p                 222 fs/nfs/nfs42xdr.c 	p = xdr_encode_hyper(p, devinfo->length);
p                 224 fs/nfs/nfs42xdr.c 	p = reserve_space(xdr, 4*8 + NFS4_DEVICEID4_SIZE + 4);
p                 225 fs/nfs/nfs42xdr.c 	p = xdr_encode_hyper(p, devinfo->read_count);
p                 226 fs/nfs/nfs42xdr.c 	p = xdr_encode_hyper(p, devinfo->read_bytes);
p                 227 fs/nfs/nfs42xdr.c 	p = xdr_encode_hyper(p, devinfo->write_count);
p                 228 fs/nfs/nfs42xdr.c 	p = xdr_encode_hyper(p, devinfo->write_bytes);
p                 229 fs/nfs/nfs42xdr.c 	p = xdr_encode_opaque_fixed(p, devinfo->dev_id.data,
p                 232 fs/nfs/nfs42xdr.c 	*p++ = cpu_to_be32(devinfo->layout_type);
p                 244 fs/nfs/nfs42xdr.c 	__be32 *p;
p                 249 fs/nfs/nfs42xdr.c 	p = reserve_space(xdr, 3*8);
p                 250 fs/nfs/nfs42xdr.c 	p = xdr_encode_hyper(p, args->src_offset);
p                 251 fs/nfs/nfs42xdr.c 	p = xdr_encode_hyper(p, args->dst_offset);
p                 252 fs/nfs/nfs42xdr.c 	xdr_encode_hyper(p, args->count);
p                 258 fs/nfs/nfs42xdr.c 	__be32 *p;
p                 260 fs/nfs/nfs42xdr.c 	p = reserve_space(xdr, NFS4_DEVICEID4_SIZE + 2*4);
p                 261 fs/nfs/nfs42xdr.c 	p = xdr_encode_opaque_fixed(p, error->dev_id.data,
p                 263 fs/nfs/nfs42xdr.c 	*p++ = cpu_to_be32(error->status);
p                 264 fs/nfs/nfs42xdr.c 	*p = cpu_to_be32(error->opnum);
p                 271 fs/nfs/nfs42xdr.c 	__be32 *p;
p                 274 fs/nfs/nfs42xdr.c 	p = reserve_space(xdr, 8 + 8);
p                 275 fs/nfs/nfs42xdr.c 	p = xdr_encode_hyper(p, args->offset);
p                 276 fs/nfs/nfs42xdr.c 	p = xdr_encode_hyper(p, args->length);
p                 278 fs/nfs/nfs42xdr.c 	p = reserve_space(xdr, 4);
p                 279 fs/nfs/nfs42xdr.c 	*p = cpu_to_be32(1);
p                 307 fs/nfs/nfs42xdr.c 	__be32 *p;
p                 310 fs/nfs/nfs42xdr.c 	p = reserve_space(xdr, 12);
p                 311 fs/nfs/nfs42xdr.c 	p = xdr_encode_hyper(p, args->dst_pos);
p                 312 fs/nfs/nfs42xdr.c 	*p = cpu_to_be32(args->count);
p                 470 fs/nfs/nfs42xdr.c 	__be32 *p;
p                 473 fs/nfs/nfs42xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 474 fs/nfs/nfs42xdr.c 	if (unlikely(!p))
p                 476 fs/nfs/nfs42xdr.c 	count = be32_to_cpup(p);
p                 485 fs/nfs/nfs42xdr.c 	p = xdr_inline_decode(xdr, 8 + 4);
p                 486 fs/nfs/nfs42xdr.c 	if (unlikely(!p))
p                 488 fs/nfs/nfs42xdr.c 	p = xdr_decode_hyper(p, &res->count);
p                 489 fs/nfs/nfs42xdr.c 	res->verifier.committed = be32_to_cpup(p);
p                 495 fs/nfs/nfs42xdr.c 	__be32 *p;
p                 497 fs/nfs/nfs42xdr.c 	p = xdr_inline_decode(xdr, 4 + 4);
p                 498 fs/nfs/nfs42xdr.c 	if (unlikely(!p))
p                 501 fs/nfs/nfs42xdr.c 	res->consecutive = be32_to_cpup(p++);
p                 502 fs/nfs/nfs42xdr.c 	res->synchronous = be32_to_cpup(p++);
p                 540 fs/nfs/nfs42xdr.c 	__be32 *p;
p                 546 fs/nfs/nfs42xdr.c 	p = xdr_inline_decode(xdr, 4 + 8);
p                 547 fs/nfs/nfs42xdr.c 	if (unlikely(!p))
p                 550 fs/nfs/nfs42xdr.c 	res->sr_eof = be32_to_cpup(p++);
p                 551 fs/nfs/nfs42xdr.c 	p = xdr_decode_hyper(p, &res->sr_offset);
p                  65 fs/nfs/nfs4namespace.c 	char *p;
p                  69 fs/nfs/nfs4namespace.c 		p = strchr(nfspath, ']');
p                  70 fs/nfs/nfs4namespace.c 		if (p != NULL && ++p < end && *p == ':')
p                  71 fs/nfs/nfs4namespace.c 			return p + 1;
p                  74 fs/nfs/nfs4namespace.c 		p = strchr(nfspath, ':');
p                  75 fs/nfs/nfs4namespace.c 		if (p != NULL && p < end)
p                  76 fs/nfs/nfs4namespace.c 			return p + 1;
p                 309 fs/nfs/nfs4proc.c 	__be32 *start, *p;
p                 329 fs/nfs/nfs4proc.c 	start = p = kmap_atomic(*readdir->pages);
p                 332 fs/nfs/nfs4proc.c 		*p++ = xdr_one;                                  /* next */
p                 333 fs/nfs/nfs4proc.c 		*p++ = xdr_zero;                   /* cookie, first word */
p                 334 fs/nfs/nfs4proc.c 		*p++ = xdr_one;                   /* cookie, second word */
p                 335 fs/nfs/nfs4proc.c 		*p++ = xdr_one;                             /* entry len */
p                 336 fs/nfs/nfs4proc.c 		memcpy(p, ".\0\0\0", 4);                        /* entry */
p                 337 fs/nfs/nfs4proc.c 		p++;
p                 338 fs/nfs/nfs4proc.c 		*p++ = xdr_one;                         /* bitmap length */
p                 339 fs/nfs/nfs4proc.c 		*p++ = htonl(attrs);                           /* bitmap */
p                 340 fs/nfs/nfs4proc.c 		*p++ = htonl(12);             /* attribute buffer length */
p                 341 fs/nfs/nfs4proc.c 		*p++ = htonl(NF4DIR);
p                 342 fs/nfs/nfs4proc.c 		p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
p                 345 fs/nfs/nfs4proc.c 	*p++ = xdr_one;                                  /* next */
p                 346 fs/nfs/nfs4proc.c 	*p++ = xdr_zero;                   /* cookie, first word */
p                 347 fs/nfs/nfs4proc.c 	*p++ = xdr_two;                   /* cookie, second word */
p                 348 fs/nfs/nfs4proc.c 	*p++ = xdr_two;                             /* entry len */
p                 349 fs/nfs/nfs4proc.c 	memcpy(p, "..\0\0", 4);                         /* entry */
p                 350 fs/nfs/nfs4proc.c 	p++;
p                 351 fs/nfs/nfs4proc.c 	*p++ = xdr_one;                         /* bitmap length */
p                 352 fs/nfs/nfs4proc.c 	*p++ = htonl(attrs);                           /* bitmap */
p                 353 fs/nfs/nfs4proc.c 	*p++ = htonl(12);             /* attribute buffer length */
p                 354 fs/nfs/nfs4proc.c 	*p++ = htonl(NF4DIR);
p                 355 fs/nfs/nfs4proc.c 	p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
p                 357 fs/nfs/nfs4proc.c 	readdir->pgbase = (char *)p - (char *)start;
p                1255 fs/nfs/nfs4proc.c static void nfs4_init_opendata_res(struct nfs4_opendata *p)
p                1257 fs/nfs/nfs4proc.c 	p->o_res.f_attr = &p->f_attr;
p                1258 fs/nfs/nfs4proc.c 	p->o_res.f_label = p->f_label;
p                1259 fs/nfs/nfs4proc.c 	p->o_res.seqid = p->o_arg.seqid;
p                1260 fs/nfs/nfs4proc.c 	p->c_res.seqid = p->c_arg.seqid;
p                1261 fs/nfs/nfs4proc.c 	p->o_res.server = p->o_arg.server;
p                1262 fs/nfs/nfs4proc.c 	p->o_res.access_request = p->o_arg.access;
p                1263 fs/nfs/nfs4proc.c 	nfs_fattr_init(&p->f_attr);
p                1264 fs/nfs/nfs4proc.c 	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
p                1278 fs/nfs/nfs4proc.c 	struct nfs4_opendata *p;
p                1280 fs/nfs/nfs4proc.c 	p = kzalloc(sizeof(*p), gfp_mask);
p                1281 fs/nfs/nfs4proc.c 	if (p == NULL)
p                1284 fs/nfs/nfs4proc.c 	p->f_label = nfs4_label_alloc(server, gfp_mask);
p                1285 fs/nfs/nfs4proc.c 	if (IS_ERR(p->f_label))
p                1288 fs/nfs/nfs4proc.c 	p->a_label = nfs4_label_alloc(server, gfp_mask);
p                1289 fs/nfs/nfs4proc.c 	if (IS_ERR(p->a_label))
p                1293 fs/nfs/nfs4proc.c 	p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
p                1294 fs/nfs/nfs4proc.c 	if (IS_ERR(p->o_arg.seqid))
p                1297 fs/nfs/nfs4proc.c 	p->dentry = dget(dentry);
p                1298 fs/nfs/nfs4proc.c 	p->dir = parent;
p                1299 fs/nfs/nfs4proc.c 	p->owner = sp;
p                1301 fs/nfs/nfs4proc.c 	p->o_arg.open_flags = flags;
p                1302 fs/nfs/nfs4proc.c 	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
p                1303 fs/nfs/nfs4proc.c 	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
p                1304 fs/nfs/nfs4proc.c 	p->o_arg.share_access = nfs4_map_atomic_open_share(server,
p                1307 fs/nfs/nfs4proc.c 		p->o_arg.umask = current_umask();
p                1308 fs/nfs/nfs4proc.c 		p->o_arg.label = nfs4_label_copy(p->a_label, label);
p                1310 fs/nfs/nfs4proc.c 			p->o_arg.u.attrs = &p->attrs;
p                1311 fs/nfs/nfs4proc.c 			memcpy(&p->attrs, c->sattr, sizeof(p->attrs));
p                1313 fs/nfs/nfs4proc.c 			memcpy(p->o_arg.u.verifier.data, c->verf,
p                1314 fs/nfs/nfs4proc.c 					sizeof(p->o_arg.u.verifier.data));
p                1322 fs/nfs/nfs4proc.c 		switch (p->o_arg.claim) {
p                1327 fs/nfs/nfs4proc.c 			p->o_arg.access = NFS4_ACCESS_READ |
p                1333 fs/nfs/nfs4proc.c 	p->o_arg.clientid = server->nfs_client->cl_clientid;
p                1334 fs/nfs/nfs4proc.c 	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
p                1335 fs/nfs/nfs4proc.c 	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
p                1336 fs/nfs/nfs4proc.c 	p->o_arg.name = &dentry->d_name;
p                1337 fs/nfs/nfs4proc.c 	p->o_arg.server = server;
p                1338 fs/nfs/nfs4proc.c 	p->o_arg.bitmask = nfs4_bitmask(server, label);
p                1339 fs/nfs/nfs4proc.c 	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
p                1340 fs/nfs/nfs4proc.c 	switch (p->o_arg.claim) {
p                1344 fs/nfs/nfs4proc.c 		p->o_arg.fh = NFS_FH(dir);
p                1350 fs/nfs/nfs4proc.c 		p->o_arg.fh = NFS_FH(d_inode(dentry));
p                1352 fs/nfs/nfs4proc.c 	p->c_arg.fh = &p->o_res.fh;
p                1353 fs/nfs/nfs4proc.c 	p->c_arg.stateid = &p->o_res.stateid;
p                1354 fs/nfs/nfs4proc.c 	p->c_arg.seqid = p->o_arg.seqid;
p                1355 fs/nfs/nfs4proc.c 	nfs4_init_opendata_res(p);
p                1356 fs/nfs/nfs4proc.c 	kref_init(&p->kref);
p                1357 fs/nfs/nfs4proc.c 	return p;
p                1360 fs/nfs/nfs4proc.c 	nfs4_label_free(p->a_label);
p                1362 fs/nfs/nfs4proc.c 	nfs4_label_free(p->f_label);
p                1364 fs/nfs/nfs4proc.c 	kfree(p);
p                1372 fs/nfs/nfs4proc.c 	struct nfs4_opendata *p = container_of(kref,
p                1374 fs/nfs/nfs4proc.c 	struct super_block *sb = p->dentry->d_sb;
p                1376 fs/nfs/nfs4proc.c 	nfs4_lgopen_release(p->lgp);
p                1377 fs/nfs/nfs4proc.c 	nfs_free_seqid(p->o_arg.seqid);
p                1378 fs/nfs/nfs4proc.c 	nfs4_sequence_free_slot(&p->o_res.seq_res);
p                1379 fs/nfs/nfs4proc.c 	if (p->state != NULL)
p                1380 fs/nfs/nfs4proc.c 		nfs4_put_open_state(p->state);
p                1381 fs/nfs/nfs4proc.c 	nfs4_put_state_owner(p->owner);
p                1383 fs/nfs/nfs4proc.c 	nfs4_label_free(p->a_label);
p                1384 fs/nfs/nfs4proc.c 	nfs4_label_free(p->f_label);
p                1386 fs/nfs/nfs4proc.c 	dput(p->dir);
p                1387 fs/nfs/nfs4proc.c 	dput(p->dentry);
p                1389 fs/nfs/nfs4proc.c 	nfs_fattr_free_names(&p->f_attr);
p                1390 fs/nfs/nfs4proc.c 	kfree(p->f_attr.mdsthreshold);
p                1391 fs/nfs/nfs4proc.c 	kfree(p);
p                1394 fs/nfs/nfs4proc.c static void nfs4_opendata_put(struct nfs4_opendata *p)
p                1396 fs/nfs/nfs4proc.c 	if (p != NULL)
p                1397 fs/nfs/nfs4proc.c 		kref_put(&p->kref, nfs4_opendata_free);
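
The nfs4_opendata entries above show the usual kref lifetime pattern: kref_init() when the object is created and kref_put() with a release callback on every drop path. A generic sketch of that pattern (my_data and its helpers are hypothetical, not the NFS structures) is:

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct my_data {			/* hypothetical example structure */
		struct kref kref;
		/* ... payload ... */
	};

	static void my_data_free(struct kref *kref)
	{
		struct my_data *p = container_of(kref, struct my_data, kref);

		kfree(p);			/* runs only when the last reference drops */
	}

	static struct my_data *my_data_alloc(gfp_t gfp)
	{
		struct my_data *p = kzalloc(sizeof(*p), gfp);

		if (p)
			kref_init(&p->kref);	/* reference count starts at 1 */
		return p;
	}

	static void my_data_put(struct my_data *p)
	{
		if (p)
			kref_put(&p->kref, my_data_free);
	}
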
p                6494 fs/nfs/nfs4proc.c 	struct nfs4_unlockdata *p;
p                6498 fs/nfs/nfs4proc.c 	p = kzalloc(sizeof(*p), GFP_NOFS);
p                6499 fs/nfs/nfs4proc.c 	if (p == NULL)
p                6501 fs/nfs/nfs4proc.c 	p->arg.fh = NFS_FH(inode);
p                6502 fs/nfs/nfs4proc.c 	p->arg.fl = &p->fl;
p                6503 fs/nfs/nfs4proc.c 	p->arg.seqid = seqid;
p                6504 fs/nfs/nfs4proc.c 	p->res.seqid = seqid;
p                6505 fs/nfs/nfs4proc.c 	p->lsp = lsp;
p                6507 fs/nfs/nfs4proc.c 	p->ctx = get_nfs_open_context(ctx);
p                6508 fs/nfs/nfs4proc.c 	p->l_ctx = nfs_get_lock_context(ctx);
p                6509 fs/nfs/nfs4proc.c 	locks_init_lock(&p->fl);
p                6510 fs/nfs/nfs4proc.c 	locks_copy_lock(&p->fl, fl);
p                6511 fs/nfs/nfs4proc.c 	p->server = NFS_SERVER(inode);
p                6513 fs/nfs/nfs4proc.c 	nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid);
p                6515 fs/nfs/nfs4proc.c 	return p;
p                6712 fs/nfs/nfs4proc.c 	struct nfs4_lockdata *p;
p                6717 fs/nfs/nfs4proc.c 	p = kzalloc(sizeof(*p), gfp_mask);
p                6718 fs/nfs/nfs4proc.c 	if (p == NULL)
p                6721 fs/nfs/nfs4proc.c 	p->arg.fh = NFS_FH(inode);
p                6722 fs/nfs/nfs4proc.c 	p->arg.fl = &p->fl;
p                6723 fs/nfs/nfs4proc.c 	p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
p                6724 fs/nfs/nfs4proc.c 	if (IS_ERR(p->arg.open_seqid))
p                6727 fs/nfs/nfs4proc.c 	p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
p                6728 fs/nfs/nfs4proc.c 	if (IS_ERR(p->arg.lock_seqid))
p                6730 fs/nfs/nfs4proc.c 	p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
p                6731 fs/nfs/nfs4proc.c 	p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
p                6732 fs/nfs/nfs4proc.c 	p->arg.lock_owner.s_dev = server->s_dev;
p                6733 fs/nfs/nfs4proc.c 	p->res.lock_seqid = p->arg.lock_seqid;
p                6734 fs/nfs/nfs4proc.c 	p->lsp = lsp;
p                6735 fs/nfs/nfs4proc.c 	p->server = server;
p                6736 fs/nfs/nfs4proc.c 	p->ctx = get_nfs_open_context(ctx);
p                6737 fs/nfs/nfs4proc.c 	locks_init_lock(&p->fl);
p                6738 fs/nfs/nfs4proc.c 	locks_copy_lock(&p->fl, fl);
p                6739 fs/nfs/nfs4proc.c 	return p;
p                6741 fs/nfs/nfs4proc.c 	nfs_free_seqid(p->arg.open_seqid);
p                6743 fs/nfs/nfs4proc.c 	kfree(p);
p                7957 fs/nfs/nfs4proc.c 	struct rpc_bind_conn_calldata *p = calldata;
p                7959 fs/nfs/nfs4proc.c 	return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred);
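
The lockdata/unlockdata constructors above follow the common allocate-then-fill style: kzalloc() the container, acquire each sub-resource in turn, and unwind in reverse order through goto labels on failure. A compact sketch of that shape, with purely hypothetical resources, is:

	#include <linux/slab.h>

	struct demo {				/* hypothetical container */
		void *a;
		void *b;
	};

	/* Each failure label releases only what was already acquired. */
	static struct demo *demo_alloc(gfp_t gfp)
	{
		struct demo *p = kzalloc(sizeof(*p), gfp);

		if (!p)
			return NULL;
		p->a = kzalloc(16, gfp);
		if (!p->a)
			goto out_free;
		p->b = kzalloc(16, gfp);
		if (!p->b)
			goto out_free_a;
		return p;
	out_free_a:
		kfree(p->a);
	out_free:
		kfree(p);
		return NULL;
	}
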
p                  41 fs/nfs/nfs4session.c 	struct nfs4_slot **p;
p                  45 fs/nfs/nfs4session.c 	p = &tbl->slots;
p                  47 fs/nfs/nfs4session.c 		p = &(*p)->next;
p                  48 fs/nfs/nfs4session.c 	while (*p) {
p                  49 fs/nfs/nfs4session.c 		struct nfs4_slot *slot = *p;
p                  51 fs/nfs/nfs4session.c 		*p = slot->next;
p                 123 fs/nfs/nfs4session.c 	struct nfs4_slot **p, *slot;
p                 125 fs/nfs/nfs4session.c 	p = &tbl->slots;
p                 127 fs/nfs/nfs4session.c 		if (*p == NULL) {
p                 128 fs/nfs/nfs4session.c 			*p = nfs4_new_slot(tbl, tbl->max_slots,
p                 130 fs/nfs/nfs4session.c 			if (*p == NULL)
p                 134 fs/nfs/nfs4session.c 		slot = *p;
p                 137 fs/nfs/nfs4session.c 		p = &slot->next;
p                 276 fs/nfs/nfs4session.c 	struct nfs4_slot **p;
p                 279 fs/nfs/nfs4session.c 	p = &tbl->slots;
p                 280 fs/nfs/nfs4session.c 	while (*p) {
p                 281 fs/nfs/nfs4session.c 		(*p)->seq_nr = ivalue;
p                 282 fs/nfs/nfs4session.c 		(*p)->seq_nr_highest_sent = ivalue;
p                 283 fs/nfs/nfs4session.c 		(*p)->seq_nr_last_acked = ivalue - 1;
p                 284 fs/nfs/nfs4session.c 		p = &(*p)->next;
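
The fs/nfs/nfs4session.c slot-table entries above all walk the singly linked slot list through a pointer-to-pointer, struct nfs4_slot **p, so a node can be unlinked or appended without keeping a separate previous pointer. A generic sketch of the idiom (node and remove_matching are hypothetical):

	#include <linux/slab.h>

	struct node {
		struct node *next;
		int key;
	};

	/* *p is the link currently being examined; to delete, rewrite that
	 * link in place instead of tracking a "prev" node. */
	static void remove_matching(struct node **head, int key)
	{
		struct node **p = head;

		while (*p) {
			struct node *n = *p;

			if (n->key == key) {
				*p = n->next;	/* unlink without a prev pointer */
				kfree(n);
				continue;
			}
			p = &n->next;
		}
	}
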
p                 403 fs/nfs/nfs4state.c 	struct rb_node **p = &server->state_owners.rb_node,
p                 408 fs/nfs/nfs4state.c 	while (*p != NULL) {
p                 409 fs/nfs/nfs4state.c 		parent = *p;
p                 414 fs/nfs/nfs4state.c 			p = &parent->rb_left;
p                 416 fs/nfs/nfs4state.c 			p = &parent->rb_right;
p                 431 fs/nfs/nfs4state.c 	struct rb_node **p = &server->state_owners.rb_node,
p                 436 fs/nfs/nfs4state.c 	while (*p != NULL) {
p                 437 fs/nfs/nfs4state.c 		parent = *p;
p                 442 fs/nfs/nfs4state.c 			p = &parent->rb_left;
p                 444 fs/nfs/nfs4state.c 			p = &parent->rb_right;
p                 452 fs/nfs/nfs4state.c 	rb_link_node(&new->so_server_node, parent, p);
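
The fs/nfs/nfs4state.c entries above are the canonical red-black tree descent: walk struct rb_node **p down to the correct child link, remember the parent, then rb_link_node() the new node in and rebalance. A minimal sketch of that insert pattern (item and its key are hypothetical, and the duplicate-key handling done by the real state-owner code is omitted):

	#include <linux/types.h>
	#include <linux/rbtree.h>

	struct item {				/* hypothetical keyed item */
		struct rb_node node;
		u64 key;
	};

	static void item_insert(struct rb_root *root, struct item *new)
	{
		struct rb_node **p = &root->rb_node, *parent = NULL;

		while (*p) {
			struct item *cur = rb_entry(*p, struct item, node);

			parent = *p;
			if (new->key < cur->key)
				p = &parent->rb_left;
			else
				p = &parent->rb_right;
		}
		rb_link_node(&new->node, parent, p);	/* splice into the tree */
		rb_insert_color(&new->node, root);	/* restore rb invariants */
	}
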
p                 162 fs/nfs/nfs4super.c 	struct nfs_referral_count *p;
p                 164 fs/nfs/nfs4super.c 	list_for_each_entry(p, &nfs_referral_count_list, list) {
p                 165 fs/nfs/nfs4super.c 		if (p->task == current)
p                 166 fs/nfs/nfs4super.c 			return p;
p                 175 fs/nfs/nfs4super.c 	struct nfs_referral_count *p, *new;
p                 186 fs/nfs/nfs4super.c 	p = nfs_find_referral_count();
p                 187 fs/nfs/nfs4super.c 	if (p != NULL) {
p                 188 fs/nfs/nfs4super.c 		if (p->referral_count >= NFS_MAX_NESTED_REFERRALS)
p                 191 fs/nfs/nfs4super.c 			p->referral_count++;
p                 204 fs/nfs/nfs4super.c 	struct nfs_referral_count *p;
p                 207 fs/nfs/nfs4super.c 	p = nfs_find_referral_count();
p                 208 fs/nfs/nfs4super.c 	p->referral_count--;
p                 209 fs/nfs/nfs4super.c 	if (p->referral_count == 0)
p                 210 fs/nfs/nfs4super.c 		list_del(&p->list);
p                 212 fs/nfs/nfs4super.c 		p = NULL;
p                 214 fs/nfs/nfs4super.c 	kfree(p);
p                 954 fs/nfs/nfs4xdr.c 	__be32 *p = xdr_reserve_space(xdr, nbytes);
p                 955 fs/nfs/nfs4xdr.c 	BUG_ON(!p);
p                 956 fs/nfs/nfs4xdr.c 	return p;
p                1021 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1030 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 8);
p                1031 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(hdr->minorversion);
p                1032 fs/nfs/nfs4xdr.c 	hdr->nops_p = p;
p                1033 fs/nfs/nfs4xdr.c 	*p = cpu_to_be32(hdr->nops);
p                1062 fs/nfs/nfs4xdr.c xdr_encode_nfstime4(__be32 *p, const struct timespec *t)
p                1064 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, (__s64)t->tv_sec);
p                1065 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(t->tv_nsec);
p                1066 fs/nfs/nfs4xdr.c 	return p;
p                1080 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1151 fs/nfs/nfs4xdr.c 	xdr_stream_encode_opaque_inline(xdr, (void **)&p, len);
p                1154 fs/nfs/nfs4xdr.c 		p = xdr_encode_hyper(p, iap->ia_size);
p                1156 fs/nfs/nfs4xdr.c 		*p++ = cpu_to_be32(iap->ia_mode & S_IALLUGO);
p                1158 fs/nfs/nfs4xdr.c 		p = xdr_encode_opaque(p, owner_name, owner_namelen);
p                1160 fs/nfs/nfs4xdr.c 		p = xdr_encode_opaque(p, owner_group, owner_grouplen);
p                1163 fs/nfs/nfs4xdr.c 			*p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
p                1165 fs/nfs/nfs4xdr.c 			p = xdr_encode_nfstime4(p, &ts);
p                1167 fs/nfs/nfs4xdr.c 			*p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
p                1171 fs/nfs/nfs4xdr.c 			*p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
p                1173 fs/nfs/nfs4xdr.c 			p = xdr_encode_nfstime4(p, &ts);
p                1175 fs/nfs/nfs4xdr.c 			*p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
p                1178 fs/nfs/nfs4xdr.c 		*p++ = cpu_to_be32(label->lfs);
p                1179 fs/nfs/nfs4xdr.c 		*p++ = cpu_to_be32(label->pi);
p                1180 fs/nfs/nfs4xdr.c 		*p++ = cpu_to_be32(label->len);
p                1181 fs/nfs/nfs4xdr.c 		p = xdr_encode_opaque_fixed(p, label->label, label->len);
p                1184 fs/nfs/nfs4xdr.c 		*p++ = cpu_to_be32(iap->ia_mode & S_IALLUGO);
p                1185 fs/nfs/nfs4xdr.c 		*p++ = cpu_to_be32(*umask);
p                1206 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1209 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 12);
p                1210 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, args->offset);
p                1211 fs/nfs/nfs4xdr.c 	*p = cpu_to_be32(args->count);
p                1216 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1223 fs/nfs/nfs4xdr.c 		p = reserve_space(xdr, 4);
p                1224 fs/nfs/nfs4xdr.c 		*p = cpu_to_be32(create->u.symlink.len);
p                1231 fs/nfs/nfs4xdr.c 		p = reserve_space(xdr, 8);
p                1232 fs/nfs/nfs4xdr.c 		*p++ = cpu_to_be32(create->u.device.specdata1);
p                1233 fs/nfs/nfs4xdr.c 		*p = cpu_to_be32(create->u.device.specdata2);
p                1313 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1315 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 32);
p                1316 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, lowner->clientid);
p                1317 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(20);
p                1318 fs/nfs/nfs4xdr.c 	p = xdr_encode_opaque_fixed(p, "lock id:", 8);
p                1319 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(lowner->s_dev);
p                1320 fs/nfs/nfs4xdr.c 	xdr_encode_hyper(p, lowner->id);
p                1329 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1332 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 28);
p                1333 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(nfs4_lock_type(args->fl, args->block));
p                1334 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->reclaim);
p                1335 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, args->fl->fl_start);
p                1336 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, nfs4_lock_length(args->fl));
p                1337 fs/nfs/nfs4xdr.c 	*p = cpu_to_be32(args->new_lock_owner);
p                1352 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1355 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 20);
p                1356 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0));
p                1357 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, args->fl->fl_start);
p                1358 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, nfs4_lock_length(args->fl));
p                1364 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1370 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 16);
p                1371 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, args->fl->fl_start);
p                1372 fs/nfs/nfs4xdr.c 	xdr_encode_hyper(p, nfs4_lock_length(args->fl));
p                1394 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1396 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 8);
p                1397 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(share_access);
p                1398 fs/nfs/nfs4xdr.c 	*p = cpu_to_be32(0);		/* for linux, share_deny = 0 always */
p                1403 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1410 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 36);
p                1411 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, arg->clientid);
p                1412 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(24);
p                1413 fs/nfs/nfs4xdr.c 	p = xdr_encode_opaque_fixed(p, "open id:", 8);
p                1414 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(arg->server->s_dev);
p                1415 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(arg->id.uniquifier);
p                1416 fs/nfs/nfs4xdr.c 	xdr_encode_hyper(p, arg->id.create_time);
p                1421 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1423 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 4);
p                1426 fs/nfs/nfs4xdr.c 		*p = cpu_to_be32(NFS4_CREATE_UNCHECKED);
p                1431 fs/nfs/nfs4xdr.c 		*p = cpu_to_be32(NFS4_CREATE_GUARDED);
p                1436 fs/nfs/nfs4xdr.c 		*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE);
p                1440 fs/nfs/nfs4xdr.c 		*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE4_1);
p                1449 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1451 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 4);
p                1454 fs/nfs/nfs4xdr.c 		*p = cpu_to_be32(NFS4_OPEN_NOCREATE);
p                1457 fs/nfs/nfs4xdr.c 		*p = cpu_to_be32(NFS4_OPEN_CREATE);
p                1464 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1466 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 4);
p                1469 fs/nfs/nfs4xdr.c 		*p = cpu_to_be32(NFS4_OPEN_DELEGATE_NONE);
p                1472 fs/nfs/nfs4xdr.c 		*p = cpu_to_be32(NFS4_OPEN_DELEGATE_READ);
p                1475 fs/nfs/nfs4xdr.c 		*p = cpu_to_be32(NFS4_OPEN_DELEGATE_WRITE);
p                1484 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1486 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 4);
p                1487 fs/nfs/nfs4xdr.c 	*p = cpu_to_be32(NFS4_OPEN_CLAIM_NULL);
p                1493 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1495 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 4);
p                1496 fs/nfs/nfs4xdr.c 	*p = cpu_to_be32(NFS4_OPEN_CLAIM_PREVIOUS);
p                1502 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1504 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 4);
p                1505 fs/nfs/nfs4xdr.c 	*p = cpu_to_be32(NFS4_OPEN_CLAIM_DELEGATE_CUR);
p                1512 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1514 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 4);
p                1515 fs/nfs/nfs4xdr.c 	*p = cpu_to_be32(NFS4_OPEN_CLAIM_FH);
p                1520 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1522 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 4);
p                1523 fs/nfs/nfs4xdr.c 	*p = cpu_to_be32(NFS4_OPEN_CLAIM_DELEG_CUR_FH);
p                1583 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1588 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 12);
p                1589 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, args->offset);
p                1590 fs/nfs/nfs4xdr.c 	*p = cpu_to_be32(args->count);
p                1600 fs/nfs/nfs4xdr.c 	__be32 *p, verf[2];
p                1626 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 12 + (attrlen << 2));
p                1627 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(dircount);
p                1628 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(readdir->count);
p                1629 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(attrlen);
p                1631 fs/nfs/nfs4xdr.c 		*p++ = cpu_to_be32(attrs[i]);
p                1678 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1682 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 2*4);
p                1683 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(1);
p                1684 fs/nfs/nfs4xdr.c 	*p = cpu_to_be32(FATTR4_WORD0_ACL);
p                1685 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 4);
p                1686 fs/nfs/nfs4xdr.c 	*p = cpu_to_be32(arg->acl_len);
p                1706 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1713 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 4);
p                1714 fs/nfs/nfs4xdr.c 	*p = cpu_to_be32(setclientid->sc_prog);
p                1717 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 4);
p                1718 fs/nfs/nfs4xdr.c 	*p = cpu_to_be32(setclientid->sc_clnt->cl_cb_ident);
p                1732 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1737 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 16);
p                1738 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, args->offset);
p                1739 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->stable);
p                1740 fs/nfs/nfs4xdr.c 	*p = cpu_to_be32(args->count);
p                1763 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1768 fs/nfs/nfs4xdr.c 	p = xdr_reserve_space(xdr, 8);
p                1769 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->dir);
p                1770 fs/nfs/nfs4xdr.c 	*p = (args->use_conn_in_rdma_mode) ? cpu_to_be32(1) : cpu_to_be32(0);
p                1785 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1826 fs/nfs/nfs4xdr.c 		p = reserve_space(xdr, 12);
p                1827 fs/nfs/nfs4xdr.c 		p = xdr_encode_hyper(p, 0);
p                1828 fs/nfs/nfs4xdr.c 		*p = cpu_to_be32(0);
p                1837 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1851 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12);
p                1852 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, args->clientid);
p                1853 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->seqid);			/*Sequence id */
p                1854 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->flags);			/*flags */
p                1857 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(0);				/* header padding size */
p                1858 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz);	/* max req size */
p                1859 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->fc_attrs.max_resp_sz);	/* max resp size */
p                1860 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(max_resp_sz_cached);		/* Max resp sz cached */
p                1861 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->fc_attrs.max_ops);	/* max operations */
p                1862 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->fc_attrs.max_reqs);	/* max requests */
p                1863 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(0);				/* rdmachannel_attrs */
p                1866 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(0);				/* header padding size */
p                1867 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->bc_attrs.max_rqst_sz);	/* max req size */
p                1868 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->bc_attrs.max_resp_sz);	/* max resp size */
p                1869 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->bc_attrs.max_resp_sz_cached);	/* Max resp sz cached */
p                1870 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->bc_attrs.max_ops);	/* max operations */
p                1871 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->bc_attrs.max_reqs);	/* max requests */
p                1872 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(0);				/* rdmachannel_attrs */
p                1874 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->cb_program);		/* cb_program */
p                1875 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(1);
p                1876 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(RPC_AUTH_UNIX);			/* auth_sys */
p                1879 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(ktime_to_ns(nn->boot_time));	/* stamp */
p                1880 fs/nfs/nfs4xdr.c 	p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen);
p                1881 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(0);				/* UID */
p                1882 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(0);				/* GID */
p                1883 fs/nfs/nfs4xdr.c 	*p = cpu_to_be32(0);				/* No more gids */
p                1919 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1940 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 16);
p                1941 fs/nfs/nfs4xdr.c 	p = xdr_encode_opaque_fixed(p, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
p                1942 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(slot->seq_nr);
p                1943 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(slot->slot_nr);
p                1944 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(tp->highest_used_slotid);
p                1945 fs/nfs/nfs4xdr.c 	*p = cpu_to_be32(args->sa_cache_this);
p                1955 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1958 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, NFS4_DEVICEID4_SIZE + 4 + 4);
p                1959 fs/nfs/nfs4xdr.c 	p = xdr_encode_opaque_fixed(p, args->pdev->dev_id.data,
p                1961 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->pdev->layout_type);
p                1962 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->pdev->maxcount);	/* gdia_maxcount */
p                1964 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 4 + 4);
p                1965 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(1);			/* bitmap length */
p                1966 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->notify_types);
p                1974 fs/nfs/nfs4xdr.c 	__be32 *p;
p                1977 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 36);
p                1978 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(0);     /* Signal layout available */
p                1979 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->type);
p                1980 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->range.iomode);
p                1981 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, args->range.offset);
p                1982 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, args->range.length);
p                1983 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, args->minlength);
p                2002 fs/nfs/nfs4xdr.c 	__be32 *p;
p                2008 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 20);
p                2010 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, 0); /* offset */
p                2011 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, args->lastbytewritten + 1);	/* length */
p                2012 fs/nfs/nfs4xdr.c 	*p = cpu_to_be32(0); /* reclaim */
p                2015 fs/nfs/nfs4xdr.c 		p = reserve_space(xdr, 20);
p                2016 fs/nfs/nfs4xdr.c 		*p++ = cpu_to_be32(1); /* newoffset = TRUE */
p                2017 fs/nfs/nfs4xdr.c 		p = xdr_encode_hyper(p, args->lastbytewritten);
p                2019 fs/nfs/nfs4xdr.c 		p = reserve_space(xdr, 12);
p                2020 fs/nfs/nfs4xdr.c 		*p++ = cpu_to_be32(0); /* newoffset = FALSE */
p                2022 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(0); /* Never send time_modify_changed */
p                2023 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(NFS_SERVER(args->inode)->pnfs_curr_ld->id);/* type */
p                2038 fs/nfs/nfs4xdr.c 	__be32 *p;
p                2041 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 16);
p                2042 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(0);		/* reclaim. always 0 for now */
p                2043 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->layout_type);
p                2044 fs/nfs/nfs4xdr.c 	*p++ = cpu_to_be32(args->range.iomode);
p                2045 fs/nfs/nfs4xdr.c 	*p = cpu_to_be32(RETURN_FILE);
p                2046 fs/nfs/nfs4xdr.c 	p = reserve_space(xdr, 16);
p                2047 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, args->range.offset);
p                2048 fs/nfs/nfs4xdr.c 	p = xdr_encode_hyper(p, args->range.length);
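
The fs/nfs/nfs4xdr.c encode helpers above all share one shape: reserve exactly the number of bytes the operation needs, then write big-endian words and hypers through p. The reserve_space() shown at source line 954 is a local wrapper around xdr_reserve_space() that BUG()s on failure; a sketch of the same pattern using the public API and a hypothetical argument struct:

	#include <linux/types.h>
	#include <linux/errno.h>
	#include <linux/sunrpc/xdr.h>

	struct demo_args {			/* hypothetical operation arguments */
		u64 offset;
		u32 count;
	};

	/* Reserve the exact byte count up front, then emit big-endian words. */
	static int encode_demo_args(struct xdr_stream *xdr,
				    const struct demo_args *args)
	{
		__be32 *p = xdr_reserve_space(xdr, 12);	/* 8-byte hyper + 4-byte word */

		if (!p)
			return -EMSGSIZE;
		p = xdr_encode_hyper(p, args->offset);
		*p = cpu_to_be32(args->count);
		return 0;
	}
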
p                3158 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3160 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 8);
p                3161 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                3163 fs/nfs/nfs4xdr.c 	hdr->status = be32_to_cpup(p++);
p                3164 fs/nfs/nfs4xdr.c 	hdr->taglen = be32_to_cpup(p);
p                3166 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, hdr->taglen + 4);
p                3167 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                3169 fs/nfs/nfs4xdr.c 	hdr->tag = (char *)p;
p                3170 fs/nfs/nfs4xdr.c 	p += XDR_QUADLEN(hdr->taglen);
p                3171 fs/nfs/nfs4xdr.c 	hdr->nops = be32_to_cpup(p);
p                3180 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3184 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 8);
p                3185 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                3187 fs/nfs/nfs4xdr.c 	opnum = be32_to_cpup(p++);
p                3190 fs/nfs/nfs4xdr.c 	if (unlikely(*p != cpu_to_be32(NFS_OK)))
p                3195 fs/nfs/nfs4xdr.c 	nfserr = be32_to_cpup(p);
p                3221 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3225 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 12);
p                3226 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                3253 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3255 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                3256 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                3258 fs/nfs/nfs4xdr.c 	*attrlen = be32_to_cpup(p);
p                3280 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3287 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                3288 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3290 fs/nfs/nfs4xdr.c 		*type = be32_to_cpup(p);
p                3305 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3311 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                3312 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3314 fs/nfs/nfs4xdr.c 		*type = be32_to_cpup(p);
p                3323 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3330 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 8);
p                3331 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3333 fs/nfs/nfs4xdr.c 		xdr_decode_hyper(p, change);
p                3344 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3351 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 8);
p                3352 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3354 fs/nfs/nfs4xdr.c 		xdr_decode_hyper(p, size);
p                3364 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3370 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                3371 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3373 fs/nfs/nfs4xdr.c 		*res = be32_to_cpup(p);
p                3382 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3388 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                3389 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3391 fs/nfs/nfs4xdr.c 		*res = be32_to_cpup(p);
p                3400 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3408 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 16);
p                3409 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3411 fs/nfs/nfs4xdr.c 		p = xdr_decode_hyper(p, &fsid->major);
p                3412 fs/nfs/nfs4xdr.c 		xdr_decode_hyper(p, &fsid->minor);
p                3424 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3430 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                3431 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3433 fs/nfs/nfs4xdr.c 		*res = be32_to_cpup(p);
p                3442 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3447 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                3448 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3451 fs/nfs/nfs4xdr.c 		*res = -be32_to_cpup(p);
p                3474 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3483 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                3484 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3486 fs/nfs/nfs4xdr.c 		len = be32_to_cpup(p);
p                3489 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, len);
p                3490 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3493 fs/nfs/nfs4xdr.c 			memcpy(fh->data, p, len);
p                3503 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3509 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                3510 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3512 fs/nfs/nfs4xdr.c 		*res = be32_to_cpup(p);
p                3521 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3528 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 8);
p                3529 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3531 fs/nfs/nfs4xdr.c 		xdr_decode_hyper(p, fileid);
p                3541 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3548 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 8);
p                3549 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3551 fs/nfs/nfs4xdr.c 		xdr_decode_hyper(p, fileid);
p                3561 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3568 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 8);
p                3569 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3571 fs/nfs/nfs4xdr.c 		xdr_decode_hyper(p, res);
p                3580 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3587 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 8);
p                3588 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3590 fs/nfs/nfs4xdr.c 		xdr_decode_hyper(p, res);
p                3599 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3606 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 8);
p                3607 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3609 fs/nfs/nfs4xdr.c 		xdr_decode_hyper(p, res);
p                3619 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3622 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                3623 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                3625 fs/nfs/nfs4xdr.c 	n = be32_to_cpup(p);
p                3661 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3678 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                3679 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                3681 fs/nfs/nfs4xdr.c 	n = be32_to_cpup(p);
p                3691 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                3692 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3694 fs/nfs/nfs4xdr.c 		m = be32_to_cpup(p);
p                3738 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3745 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 8);
p                3746 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3748 fs/nfs/nfs4xdr.c 		xdr_decode_hyper(p, res);
p                3757 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3764 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                3765 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3767 fs/nfs/nfs4xdr.c 		*maxlink = be32_to_cpup(p);
p                3776 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3783 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                3784 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3786 fs/nfs/nfs4xdr.c 		*maxname = be32_to_cpup(p);
p                3795 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3803 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 8);
p                3804 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3806 fs/nfs/nfs4xdr.c 		xdr_decode_hyper(p, &maxread);
p                3818 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3826 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 8);
p                3827 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3829 fs/nfs/nfs4xdr.c 		xdr_decode_hyper(p, &maxwrite);
p                3842 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3849 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                3850 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3852 fs/nfs/nfs4xdr.c 		tmp = be32_to_cpup(p);
p                3863 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3870 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                3871 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3873 fs/nfs/nfs4xdr.c 		*nlink = be32_to_cpup(p);
p                3899 fs/nfs/nfs4xdr.c 	char *p;
p                3915 fs/nfs/nfs4xdr.c 		len = xdr_stream_decode_opaque_inline(xdr, (void **)&p,
p                3917 fs/nfs/nfs4xdr.c 		if (len <= 0 || nfs_map_name_to_uid(server, p, len, uid) != 0)
p                3933 fs/nfs/nfs4xdr.c 	char *p;
p                3949 fs/nfs/nfs4xdr.c 		len = xdr_stream_decode_opaque_inline(xdr, (void **)&p,
p                3951 fs/nfs/nfs4xdr.c 		if (len <= 0 || nfs_map_group_to_gid(server, p, len, gid) != 0)
p                3965 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3974 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 8);
p                3975 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                3977 fs/nfs/nfs4xdr.c 		major = be32_to_cpup(p++);
p                3978 fs/nfs/nfs4xdr.c 		minor = be32_to_cpup(p);
p                3991 fs/nfs/nfs4xdr.c 	__be32 *p;
p                3998 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 8);
p                3999 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                4001 fs/nfs/nfs4xdr.c 		xdr_decode_hyper(p, res);
p                4010 fs/nfs/nfs4xdr.c 	__be32 *p;
p                4017 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 8);
p                4018 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                4020 fs/nfs/nfs4xdr.c 		xdr_decode_hyper(p, res);
p                4029 fs/nfs/nfs4xdr.c 	__be32 *p;
p                4036 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 8);
p                4037 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                4039 fs/nfs/nfs4xdr.c 		xdr_decode_hyper(p, res);
p                4048 fs/nfs/nfs4xdr.c 	__be32 *p;
p                4055 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 8);
p                4056 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                4058 fs/nfs/nfs4xdr.c 		xdr_decode_hyper(p, used);
p                4068 fs/nfs/nfs4xdr.c xdr_decode_nfstime4(__be32 *p, struct timespec *t)
p                4072 fs/nfs/nfs4xdr.c 	p = xdr_decode_hyper(p, &sec);
p                4074 fs/nfs/nfs4xdr.c 	t->tv_nsec = be32_to_cpup(p++);
p                4075 fs/nfs/nfs4xdr.c 	return p;
p                4080 fs/nfs/nfs4xdr.c 	__be32 *p;
p                4082 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, nfstime4_maxsz << 2);
p                4083 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                4085 fs/nfs/nfs4xdr.c 	xdr_decode_nfstime4(p, time);
p                4149 fs/nfs/nfs4xdr.c 	__be32 *p;
p                4155 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                4156 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                4158 fs/nfs/nfs4xdr.c 		lfs = be32_to_cpup(p++);
p                4159 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                4160 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                4162 fs/nfs/nfs4xdr.c 		pi = be32_to_cpup(p++);
p                4163 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                4164 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                4166 fs/nfs/nfs4xdr.c 		len = be32_to_cpup(p++);
p                4167 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, len);
p                4168 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                4172 fs/nfs/nfs4xdr.c 				memcpy(label->label, p, len);
p                4226 fs/nfs/nfs4xdr.c 	__be32 *p;
p                4228 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 20);
p                4229 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                4231 fs/nfs/nfs4xdr.c 	cinfo->atomic = be32_to_cpup(p++);
p                4232 fs/nfs/nfs4xdr.c 	p = xdr_decode_hyper(p, &cinfo->before);
p                4233 fs/nfs/nfs4xdr.c 	xdr_decode_hyper(p, &cinfo->after);
p                4239 fs/nfs/nfs4xdr.c 	__be32 *p;
p                4246 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 8);
p                4247 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                4249 fs/nfs/nfs4xdr.c 	supp = be32_to_cpup(p++);
p                4250 fs/nfs/nfs4xdr.c 	acc = be32_to_cpup(p);
p                4332 fs/nfs/nfs4xdr.c 	__be32 *p;
p                4341 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                4342 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                4344 fs/nfs/nfs4xdr.c 	bmlen = be32_to_cpup(p);
p                4345 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, bmlen << 2);
p                4346 fs/nfs/nfs4xdr.c 	if (likely(p))
p                4449 fs/nfs/nfs4xdr.c 	__be32 *p;
p                4453 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 8);
p                4454 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                4456 fs/nfs/nfs4xdr.c 		xdr_decode_hyper(p, res);
p                4464 fs/nfs/nfs4xdr.c 	__be32 *p;
p                4470 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                4471 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                4473 fs/nfs/nfs4xdr.c 	res->l_type = be32_to_cpup(p);
p                4518 fs/nfs/nfs4xdr.c 	__be32 *p;
p                4528 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                4529 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                4531 fs/nfs/nfs4xdr.c 		num = be32_to_cpup(p);
p                4725 fs/nfs/nfs4xdr.c 	__be32 *p;
p                4728 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                4729 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                4731 fs/nfs/nfs4xdr.c 	fsinfo->nlayouttypes = be32_to_cpup(p);
p                4738 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, fsinfo->nlayouttypes * 4);
p                4739 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                4750 fs/nfs/nfs4xdr.c 		fsinfo->layouttype[i] = be32_to_cpup(p++);
p                4779 fs/nfs/nfs4xdr.c 	__be32 *p;
p                4784 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                4785 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                4787 fs/nfs/nfs4xdr.c 		*res = be32_to_cpup(p);
p                4799 fs/nfs/nfs4xdr.c 	__be32 *p;
p                4804 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                4805 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                4807 fs/nfs/nfs4xdr.c 		*res = be32_to_cpup(p);
p                4869 fs/nfs/nfs4xdr.c 	__be32 *p;
p                4880 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                4881 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                4883 fs/nfs/nfs4xdr.c 	len = be32_to_cpup(p);
p                4887 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, len);
p                4888 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                4890 fs/nfs/nfs4xdr.c 	memcpy(fh->data, p, len);
p                4910 fs/nfs/nfs4xdr.c 	__be32 *p;
p                4913 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 32); /* read 32 bytes */
p                4914 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                4916 fs/nfs/nfs4xdr.c 	p = xdr_decode_hyper(p, &offset); /* read 2 8-byte long words */
p                4917 fs/nfs/nfs4xdr.c 	p = xdr_decode_hyper(p, &length);
p                4918 fs/nfs/nfs4xdr.c 	type = be32_to_cpup(p++); /* 4 byte read */
p                4929 fs/nfs/nfs4xdr.c 	p = xdr_decode_hyper(p, &clientid); /* read 8 bytes */
p                4930 fs/nfs/nfs4xdr.c 	namelen = be32_to_cpup(p); /* read 4 bytes */  /* have read all 32 bytes now */
p                4931 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, namelen); /* variable size field */
p                4932 fs/nfs/nfs4xdr.c 	if (likely(!p))
p                4997 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5001 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 12);
p                5002 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5004 fs/nfs/nfs4xdr.c 	limit_type = be32_to_cpup(p++);
p                5007 fs/nfs/nfs4xdr.c 		xdr_decode_hyper(p, &maxsize);
p                5010 fs/nfs/nfs4xdr.c 		nblocks = be32_to_cpup(p++);
p                5011 fs/nfs/nfs4xdr.c 		blocksize = be32_to_cpup(p);
p                5023 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5029 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                5030 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5032 fs/nfs/nfs4xdr.c 	res->do_recall = be32_to_cpup(p);
p                5048 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5051 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                5052 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5054 fs/nfs/nfs4xdr.c 	why_no_delegation = be32_to_cpup(p);
p                5066 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5069 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                5070 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5072 fs/nfs/nfs4xdr.c 	delegation_type = be32_to_cpup(p);
p                5088 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5103 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 8);
p                5104 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5106 fs/nfs/nfs4xdr.c 	res->rflags = be32_to_cpup(p++);
p                5107 fs/nfs/nfs4xdr.c 	bmlen = be32_to_cpup(p);
p                5111 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, bmlen << 2);
p                5112 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5116 fs/nfs/nfs4xdr.c 		res->attrset[i] = be32_to_cpup(p++);
p                5163 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5170 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 8);
p                5171 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5173 fs/nfs/nfs4xdr.c 	eof = be32_to_cpup(p++);
p                5174 fs/nfs/nfs4xdr.c 	count = be32_to_cpup(p);
p                5207 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5215 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                5216 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5218 fs/nfs/nfs4xdr.c 	len = be32_to_cpup(p);
p                5345 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5349 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 8);
p                5350 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5352 fs/nfs/nfs4xdr.c 	opnum = be32_to_cpup(p++);
p                5358 fs/nfs/nfs4xdr.c 	nfserr = be32_to_cpup(p);
p                5360 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 8 + NFS4_VERIFIER_SIZE);
p                5361 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                5363 fs/nfs/nfs4xdr.c 		p = xdr_decode_hyper(p, &res->clientid);
p                5364 fs/nfs/nfs4xdr.c 		memcpy(res->confirm.data, p, NFS4_VERIFIER_SIZE);
p                5369 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                5370 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                5372 fs/nfs/nfs4xdr.c 		len = be32_to_cpup(p);
p                5373 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, len);
p                5374 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                5378 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                5379 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                5381 fs/nfs/nfs4xdr.c 		len = be32_to_cpup(p);
p                5382 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, len);
p                5383 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                5399 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5406 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 8);
p                5407 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5409 fs/nfs/nfs4xdr.c 	res->count = be32_to_cpup(p++);
p                5410 fs/nfs/nfs4xdr.c 	res->verf->committed = be32_to_cpup(p++);
p                5423 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5425 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                5426 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5428 fs/nfs/nfs4xdr.c 	oid_len = be32_to_cpup(p);
p                5432 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, oid_len);
p                5433 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5435 fs/nfs/nfs4xdr.c 	memcpy(flavor->flavor_info.oid.data, p, oid_len);
p                5438 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 8);
p                5439 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5441 fs/nfs/nfs4xdr.c 	flavor->flavor_info.qop = be32_to_cpup(p++);
p                5442 fs/nfs/nfs4xdr.c 	flavor->flavor_info.service = be32_to_cpup(p);
p                5452 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5454 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                5455 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5459 fs/nfs/nfs4xdr.c 	num_flavors = be32_to_cpup(p);
p                5466 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                5467 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                5469 fs/nfs/nfs4xdr.c 		sec_flavor->flavor = be32_to_cpup(p);
p                5503 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5507 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                5508 fs/nfs/nfs4xdr.c 	if (!p)
p                5510 fs/nfs/nfs4xdr.c 	bitmap_words = be32_to_cpup(p++);
p                5513 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4 * bitmap_words);
p                5515 fs/nfs/nfs4xdr.c 		op_map->u.words[i] = be32_to_cpup(p++);
p                5523 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5533 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 8);
p                5534 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5536 fs/nfs/nfs4xdr.c 	xdr_decode_hyper(p, &res->clientid);
p                5537 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 12);
p                5538 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5540 fs/nfs/nfs4xdr.c 	res->seqid = be32_to_cpup(p++);
p                5541 fs/nfs/nfs4xdr.c 	res->flags = be32_to_cpup(p++);
p                5543 fs/nfs/nfs4xdr.c 	res->state_protect.how = be32_to_cpup(p);
p                5561 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 8);
p                5562 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5564 fs/nfs/nfs4xdr.c 	p = xdr_decode_hyper(p, &res->server_owner->minor_id);
p                5581 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                5582 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5584 fs/nfs/nfs4xdr.c 	impl_id_count = be32_to_cpup(p++);
p                5600 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 12);
p                5601 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                5603 fs/nfs/nfs4xdr.c 		p = xdr_decode_hyper(p, &res->impl_id->date.seconds);
p                5604 fs/nfs/nfs4xdr.c 		res->impl_id->date.nseconds = be32_to_cpup(p);
p                5614 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5617 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 28);
p                5618 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5620 fs/nfs/nfs4xdr.c 	val = be32_to_cpup(p++);	/* headerpadsz */
p                5623 fs/nfs/nfs4xdr.c 	attrs->max_rqst_sz = be32_to_cpup(p++);
p                5624 fs/nfs/nfs4xdr.c 	attrs->max_resp_sz = be32_to_cpup(p++);
p                5625 fs/nfs/nfs4xdr.c 	attrs->max_resp_sz_cached = be32_to_cpup(p++);
p                5626 fs/nfs/nfs4xdr.c 	attrs->max_ops = be32_to_cpup(p++);
p                5627 fs/nfs/nfs4xdr.c 	attrs->max_reqs = be32_to_cpup(p++);
p                5628 fs/nfs/nfs4xdr.c 	nr_attrs = be32_to_cpup(p);
p                5635 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4); /* skip rdma_attrs */
p                5636 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                5650 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5660 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 8);
p                5661 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5664 fs/nfs/nfs4xdr.c 	res->dir = be32_to_cpup(p++);
p                5667 fs/nfs/nfs4xdr.c 	if (be32_to_cpup(p) == 0)
p                5678 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5688 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 8);
p                5689 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5691 fs/nfs/nfs4xdr.c 	res->seqid = be32_to_cpup(p++);
p                5692 fs/nfs/nfs4xdr.c 	res->flags = be32_to_cpup(p);
p                5726 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5752 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 20);
p                5753 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5757 fs/nfs/nfs4xdr.c 	dummy = be32_to_cpup(p++);
p                5763 fs/nfs/nfs4xdr.c 	dummy = be32_to_cpup(p++);
p                5769 fs/nfs/nfs4xdr.c 	res->sr_highest_slotid = be32_to_cpup(p++);
p                5771 fs/nfs/nfs4xdr.c 	res->sr_target_highest_slotid = be32_to_cpup(p++);
p                5773 fs/nfs/nfs4xdr.c 	res->sr_status_flags = be32_to_cpup(p);
p                5797 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5804 fs/nfs/nfs4xdr.c 			p = xdr_inline_decode(xdr, 4);
p                5805 fs/nfs/nfs4xdr.c 			if (unlikely(!p))
p                5807 fs/nfs/nfs4xdr.c 			pdev->mincount = be32_to_cpup(p);
p                5814 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 8);
p                5815 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5817 fs/nfs/nfs4xdr.c 	type = be32_to_cpup(p++);
p                5828 fs/nfs/nfs4xdr.c 	pdev->mincount = be32_to_cpup(p);
p                5833 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                5834 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5836 fs/nfs/nfs4xdr.c 	len = be32_to_cpup(p);
p                5840 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4 * len);
p                5841 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                5844 fs/nfs/nfs4xdr.c 		res->notification = be32_to_cpup(p++);
p                5846 fs/nfs/nfs4xdr.c 			if (be32_to_cpup(p++)) {
p                5859 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5867 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                5868 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5870 fs/nfs/nfs4xdr.c 	res->return_on_close = be32_to_cpup(p);
p                5872 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                5873 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5875 fs/nfs/nfs4xdr.c 	layout_count = be32_to_cpup(p);
p                5883 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 28);
p                5884 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5886 fs/nfs/nfs4xdr.c 	p = xdr_decode_hyper(p, &res->range.offset);
p                5887 fs/nfs/nfs4xdr.c 	p = xdr_decode_hyper(p, &res->range.length);
p                5888 fs/nfs/nfs4xdr.c 	res->range.iomode = be32_to_cpup(p++);
p                5889 fs/nfs/nfs4xdr.c 	res->type = be32_to_cpup(p++);
p                5890 fs/nfs/nfs4xdr.c 	res->layoutp->len = be32_to_cpup(p);
p                5930 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5936 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                5937 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5939 fs/nfs/nfs4xdr.c 	res->lrs_present = be32_to_cpup(p);
p                5951 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5960 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                5961 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5963 fs/nfs/nfs4xdr.c 	sizechanged = be32_to_cpup(p);
p                5967 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 8);
p                5968 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                5977 fs/nfs/nfs4xdr.c 	__be32 *p;
p                5985 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                5986 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5988 fs/nfs/nfs4xdr.c 	num_res = be32_to_cpup(p++);
p                5992 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 4);
p                5993 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                5995 fs/nfs/nfs4xdr.c 	res->status = be32_to_cpup(p++);
p                6385 fs/nfs/nfs4xdr.c 		void *p = page_address(res->acl_scratch);
p                6386 fs/nfs/nfs4xdr.c 		xdr_set_scratch_buffer(xdr, p, PAGE_SIZE);
p                7369 fs/nfs/nfs4xdr.c 	__be32 *p = xdr_inline_decode(xdr, 4);
p                7370 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                7372 fs/nfs/nfs4xdr.c 	if (*p == xdr_zero) {
p                7373 fs/nfs/nfs4xdr.c 		p = xdr_inline_decode(xdr, 4);
p                7374 fs/nfs/nfs4xdr.c 		if (unlikely(!p))
p                7376 fs/nfs/nfs4xdr.c 		if (*p == xdr_zero)
p                7382 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, 12);
p                7383 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                7385 fs/nfs/nfs4xdr.c 	p = xdr_decode_hyper(p, &new_cookie);
p                7386 fs/nfs/nfs4xdr.c 	entry->len = be32_to_cpup(p);
p                7388 fs/nfs/nfs4xdr.c 	p = xdr_inline_decode(xdr, entry->len);
p                7389 fs/nfs/nfs4xdr.c 	if (unlikely(!p))
p                7391 fs/nfs/nfs4xdr.c 	entry->name = (const char *) p;
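
The decode side, listed above from fs/nfs/nfs4xdr.c source line 3158 onward, mirrors the encoders: xdr_inline_decode() asks for a fixed number of bytes and returns NULL when the receive buffer is short, which the callers uniformly treat as an overflow error. A sketch of that pattern with a hypothetical result struct:

	#include <linux/types.h>
	#include <linux/errno.h>
	#include <linux/sunrpc/xdr.h>

	struct demo_res {			/* hypothetical operation result */
		u64 change;
		u32 count;
	};

	/* NULL from xdr_inline_decode() means the data is not in the buffer. */
	static int decode_demo_res(struct xdr_stream *xdr, struct demo_res *res)
	{
		__be32 *p = xdr_inline_decode(xdr, 12);	/* hyper + one word */

		if (unlikely(!p))
			return -EIO;
		p = xdr_decode_hyper(p, &res->change);
		res->count = be32_to_cpup(p);
		return 0;
	}
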
p                 189 fs/nfs/nfsroot.c 	char *p;
p                 194 fs/nfs/nfsroot.c 	p = strsep(&incoming, ",");
p                 195 fs/nfs/nfsroot.c 	if (*p != '\0' && strcmp(p, "default") != 0)
p                 196 fs/nfs/nfsroot.c 		if (root_nfs_copy(exppath, p, exppathlen))
p                  80 fs/nfs/pagelist.c 	struct nfs_page	*p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
p                  81 fs/nfs/pagelist.c 	if (p)
p                  82 fs/nfs/pagelist.c 		INIT_LIST_HEAD(&p->wb_list);
p                  83 fs/nfs/pagelist.c 	return p;
p                  87 fs/nfs/pagelist.c nfs_page_free(struct nfs_page *p)
p                  89 fs/nfs/pagelist.c 	kmem_cache_free(nfs_page_cachep, p);
p                  54 fs/nfs/pnfs_dev.c 	u32 *p = (u32 *)id;
p                  57 fs/nfs/pnfs_dev.c 		p[0], p[1], p[2], p[3]);
p                 471 fs/nfs/pnfs_nfs.c 	char *p;
p                 482 fs/nfs/pnfs_nfs.c 	p = remotestr;
p                 483 fs/nfs/pnfs_nfs.c 	*(p++) = '{';
p                 491 fs/nfs/pnfs_nfs.c 		memcpy(p, da->da_remotestr, ll);
p                 492 fs/nfs/pnfs_nfs.c 		p += ll;
p                 497 fs/nfs/pnfs_nfs.c 		(*p++) = ',';
p                 502 fs/nfs/pnfs_nfs.c 	*(p++) = '}';
p                 503 fs/nfs/pnfs_nfs.c 	*p = '\0';
p                 789 fs/nfs/pnfs_nfs.c 	__be32 *p;
p                 797 fs/nfs/pnfs_nfs.c 	p = xdr_inline_decode(xdr, 4);
p                 798 fs/nfs/pnfs_nfs.c 	if (unlikely(!p))
p                 800 fs/nfs/pnfs_nfs.c 	nlen = be32_to_cpup(p++);
p                 802 fs/nfs/pnfs_nfs.c 	p = xdr_inline_decode(xdr, nlen);
p                 803 fs/nfs/pnfs_nfs.c 	if (unlikely(!p))
p                 811 fs/nfs/pnfs_nfs.c 	memcpy(netid, p, nlen);
p                 814 fs/nfs/pnfs_nfs.c 	p = xdr_inline_decode(xdr, 4);
p                 815 fs/nfs/pnfs_nfs.c 	if (unlikely(!p))
p                 817 fs/nfs/pnfs_nfs.c 	rlen = be32_to_cpup(p);
p                 819 fs/nfs/pnfs_nfs.c 	p = xdr_inline_decode(xdr, rlen);
p                 820 fs/nfs/pnfs_nfs.c 	if (unlikely(!p))
p                 835 fs/nfs/pnfs_nfs.c 	memcpy(buf, p, rlen);
p                  40 fs/nfs/read.c  	struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
p                  42 fs/nfs/read.c  	if (p)
p                  43 fs/nfs/read.c  		p->rw_mode = FMODE_READ;
p                  44 fs/nfs/read.c  	return p;
p                1097 fs/nfs/super.c 	char *p;
p                1101 fs/nfs/super.c 	while ((p = strsep(&value, ":")) != NULL) {
p                1102 fs/nfs/super.c 		switch (match_token(p, nfs_secflavor_tokens, args)) {
p                1138 fs/nfs/super.c 				 "NFS: sec= option '%s' not recognized\n", p);
p                1230 fs/nfs/super.c 	char *p, *string;
p                1245 fs/nfs/super.c 	while ((p = strsep(&raw, ",")) != NULL) {
p                1250 fs/nfs/super.c 		if (!*p)
p                1253 fs/nfs/super.c 		dfprintk(MOUNT, "NFS:   parsing nfs mount option '%s'\n", p);
p                1255 fs/nfs/super.c 		token = match_token(p, nfs_mount_option_tokens, args);
p                1311 fs/nfs/super.c 			xprt_load_transport(p);
p                1641 fs/nfs/super.c 					"'%s'\n", p);
p                1647 fs/nfs/super.c 					"'%s'\n", p);
p                1689 fs/nfs/super.c 	printk(KERN_INFO "NFS: bad IP address specified: %s\n", p);
p                1692 fs/nfs/super.c 	printk(KERN_INFO "NFS: bad mount option value specified: %s\n", p);
p                2982 fs/nfs/super.c #define param_check_portnr(name, p) __param_check(name, p, unsigned int);
p                 101 fs/nfs/sysfs.c 	char *p;
p                 107 fs/nfs/sysfs.c 	p = kmemdup_nul(buf, len, GFP_KERNEL);
p                 108 fs/nfs/sysfs.c 	if (!p)
p                 110 fs/nfs/sysfs.c 	old = xchg(&c->identifier, p);
p                 152 fs/nfs/sysfs.c 	struct nfs_netns_client *p;
p                 154 fs/nfs/sysfs.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 155 fs/nfs/sysfs.c 	if (p) {
p                 156 fs/nfs/sysfs.c 		p->net = net;
p                 157 fs/nfs/sysfs.c 		p->kobject.kset = nfs_client_kset;
p                 158 fs/nfs/sysfs.c 		if (kobject_init_and_add(&p->kobject, &nfs_netns_client_type,
p                 160 fs/nfs/sysfs.c 			return p;
p                 161 fs/nfs/sysfs.c 		kobject_put(&p->kobject);
p                  75 fs/nfs/write.c 	struct nfs_commit_data *p;
p                  78 fs/nfs/write.c 		p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
p                  85 fs/nfs/write.c 		p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT);
p                  86 fs/nfs/write.c 		if (!p)
p                  87 fs/nfs/write.c 			p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO |
p                  89 fs/nfs/write.c 		if (!p)
p                  93 fs/nfs/write.c 	memset(p, 0, sizeof(*p));
p                  94 fs/nfs/write.c 	INIT_LIST_HEAD(&p->pages);
p                  95 fs/nfs/write.c 	return p;
p                  99 fs/nfs/write.c void nfs_commit_free(struct nfs_commit_data *p)
p                 101 fs/nfs/write.c 	mempool_free(p, nfs_commit_mempool);
p                 107 fs/nfs/write.c 	struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_KERNEL);
p                 109 fs/nfs/write.c 	memset(p, 0, sizeof(*p));
p                 110 fs/nfs/write.c 	p->rw_mode = FMODE_WRITE;
p                 111 fs/nfs/write.c 	return p;
p                  53 fs/nfs_common/nfsacl.c 	__be32 *p = elem;
p                  58 fs/nfs_common/nfsacl.c 	*p++ = htonl(entry->e_tag | nfsacl_desc->typeflag);
p                  61 fs/nfs_common/nfsacl.c 			*p++ = htonl(from_kuid(&init_user_ns, nfsacl_desc->uid));
p                  64 fs/nfs_common/nfsacl.c 			*p++ = htonl(from_kgid(&init_user_ns, nfsacl_desc->gid));
p                  67 fs/nfs_common/nfsacl.c 			*p++ = htonl(from_kuid(&init_user_ns, entry->e_uid));
p                  70 fs/nfs_common/nfsacl.c 			*p++ = htonl(from_kgid(&init_user_ns, entry->e_gid));
p                  73 fs/nfs_common/nfsacl.c 			*p++ = 0;
p                  76 fs/nfs_common/nfsacl.c 	*p++ = htonl(entry->e_perm & S_IRWXO);
p                 150 fs/nfs_common/nfsacl.c 	__be32 *p = elem;
p                 164 fs/nfs_common/nfsacl.c 	entry->e_tag = ntohl(*p++) & ~NFS_ACL_DEFAULT;
p                 165 fs/nfs_common/nfsacl.c 	id = ntohl(*p++);
p                 166 fs/nfs_common/nfsacl.c 	entry->e_perm = ntohl(*p++);
p                  22 fs/nfsd/blocklayoutxdr.c 	__be32 *p;
p                  24 fs/nfsd/blocklayoutxdr.c 	p = xdr_reserve_space(xdr, sizeof(__be32) + len);
p                  25 fs/nfsd/blocklayoutxdr.c 	if (!p)
p                  28 fs/nfsd/blocklayoutxdr.c 	*p++ = cpu_to_be32(len);
p                  29 fs/nfsd/blocklayoutxdr.c 	*p++ = cpu_to_be32(1);		/* we always return a single extent */
p                  31 fs/nfsd/blocklayoutxdr.c 	p = xdr_encode_opaque_fixed(p, &b->vol_id,
p                  33 fs/nfsd/blocklayoutxdr.c 	p = xdr_encode_hyper(p, b->foff);
p                  34 fs/nfsd/blocklayoutxdr.c 	p = xdr_encode_hyper(p, b->len);
p                  35 fs/nfsd/blocklayoutxdr.c 	p = xdr_encode_hyper(p, b->soff);
p                  36 fs/nfsd/blocklayoutxdr.c 	*p++ = cpu_to_be32(b->es);
p                  43 fs/nfsd/blocklayoutxdr.c 	__be32 *p;
p                  49 fs/nfsd/blocklayoutxdr.c 		p = xdr_reserve_space(xdr, len);
p                  50 fs/nfsd/blocklayoutxdr.c 		if (!p)
p                  53 fs/nfsd/blocklayoutxdr.c 		*p++ = cpu_to_be32(b->type);
p                  54 fs/nfsd/blocklayoutxdr.c 		*p++ = cpu_to_be32(1);	/* single signature */
p                  55 fs/nfsd/blocklayoutxdr.c 		p = xdr_encode_hyper(p, b->simple.offset);
p                  56 fs/nfsd/blocklayoutxdr.c 		p = xdr_encode_opaque(p, b->simple.sig, b->simple.sig_len);
p                  60 fs/nfsd/blocklayoutxdr.c 		p = xdr_reserve_space(xdr, len);
p                  61 fs/nfsd/blocklayoutxdr.c 		if (!p)
p                  64 fs/nfsd/blocklayoutxdr.c 		*p++ = cpu_to_be32(b->type);
p                  65 fs/nfsd/blocklayoutxdr.c 		*p++ = cpu_to_be32(b->scsi.code_set);
p                  66 fs/nfsd/blocklayoutxdr.c 		*p++ = cpu_to_be32(b->scsi.designator_type);
p                  67 fs/nfsd/blocklayoutxdr.c 		p = xdr_encode_opaque(p, b->scsi.designator, b->scsi.designator_len);
p                  68 fs/nfsd/blocklayoutxdr.c 		p = xdr_encode_hyper(p, b->scsi.pr_key);
p                  83 fs/nfsd/blocklayoutxdr.c 	__be32 *p;
p                  85 fs/nfsd/blocklayoutxdr.c 	p = xdr_reserve_space(xdr, len + sizeof(__be32));
p                  86 fs/nfsd/blocklayoutxdr.c 	if (!p)
p                 100 fs/nfsd/blocklayoutxdr.c 	*p++ = cpu_to_be32(len);
p                 101 fs/nfsd/blocklayoutxdr.c 	*p++ = cpu_to_be32(dev->nr_volumes);
p                 106 fs/nfsd/blocklayoutxdr.c nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
p                 122 fs/nfsd/blocklayoutxdr.c 	nr_iomaps = be32_to_cpup(p++);
p                 138 fs/nfsd/blocklayoutxdr.c 		memcpy(&bex.vol_id, p, sizeof(struct nfsd4_deviceid));
p                 139 fs/nfsd/blocklayoutxdr.c 		p += XDR_QUADLEN(sizeof(struct nfsd4_deviceid));
p                 141 fs/nfsd/blocklayoutxdr.c 		p = xdr_decode_hyper(p, &bex.foff);
p                 147 fs/nfsd/blocklayoutxdr.c 		p = xdr_decode_hyper(p, &bex.len);
p                 153 fs/nfsd/blocklayoutxdr.c 		p = xdr_decode_hyper(p, &bex.soff);
p                 159 fs/nfsd/blocklayoutxdr.c 		bex.es = be32_to_cpup(p++);
p                 178 fs/nfsd/blocklayoutxdr.c nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
p                 189 fs/nfsd/blocklayoutxdr.c 	nr_iomaps = be32_to_cpup(p++);
p                 206 fs/nfsd/blocklayoutxdr.c 		p = xdr_decode_hyper(p, &val);
p                 213 fs/nfsd/blocklayoutxdr.c 		p = xdr_decode_hyper(p, &val);
p                  57 fs/nfsd/blocklayoutxdr.h int nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
p                  59 fs/nfsd/blocklayoutxdr.h int nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
p                1214 fs/nfsd/export.c static int e_show(struct seq_file *m, void *p)
p                1216 fs/nfsd/export.c 	struct cache_head *cp = p;
p                1220 fs/nfsd/export.c 	if (p == SEQ_START_TOKEN) {
p                  24 fs/nfsd/flexfilelayoutxdr.c 	__be32 *p;
p                  47 fs/nfsd/flexfilelayoutxdr.c 	p = xdr_reserve_space(xdr, sizeof(__be32) + len);
p                  48 fs/nfsd/flexfilelayoutxdr.c 	if (!p)
p                  51 fs/nfsd/flexfilelayoutxdr.c 	*p++ = cpu_to_be32(len);
p                  52 fs/nfsd/flexfilelayoutxdr.c 	p = xdr_encode_hyper(p, 0);		/* stripe unit of 1 */
p                  54 fs/nfsd/flexfilelayoutxdr.c 	*p++ = cpu_to_be32(1);			/* single mirror */
p                  55 fs/nfsd/flexfilelayoutxdr.c 	*p++ = cpu_to_be32(1);			/* single data server */
p                  57 fs/nfsd/flexfilelayoutxdr.c 	p = xdr_encode_opaque_fixed(p, &fl->deviceid,
p                  60 fs/nfsd/flexfilelayoutxdr.c 	*p++ = cpu_to_be32(1);			/* efficiency */
p                  62 fs/nfsd/flexfilelayoutxdr.c 	*p++ = cpu_to_be32(fl->stateid.si_generation);
p                  63 fs/nfsd/flexfilelayoutxdr.c 	p = xdr_encode_opaque_fixed(p, &fl->stateid.si_opaque,
p                  66 fs/nfsd/flexfilelayoutxdr.c 	*p++ = cpu_to_be32(1);			/* single file handle */
p                  67 fs/nfsd/flexfilelayoutxdr.c 	p = xdr_encode_opaque(p, fl->fh.data, fl->fh.size);
p                  69 fs/nfsd/flexfilelayoutxdr.c 	p = xdr_encode_opaque(p, uid.buf, uid.len);
p                  70 fs/nfsd/flexfilelayoutxdr.c 	p = xdr_encode_opaque(p, gid.buf, gid.len);
p                  72 fs/nfsd/flexfilelayoutxdr.c 	*p++ = cpu_to_be32(fl->flags);
p                  73 fs/nfsd/flexfilelayoutxdr.c 	*p++ = cpu_to_be32(0);			/* No stats collect hint */
p                  86 fs/nfsd/flexfilelayoutxdr.c 	__be32 *p;
p                  94 fs/nfsd/flexfilelayoutxdr.c 	p = xdr_reserve_space(xdr, len + sizeof(__be32));
p                  95 fs/nfsd/flexfilelayoutxdr.c 	if (!p)
p                 102 fs/nfsd/flexfilelayoutxdr.c 	*p++ = cpu_to_be32(len);
p                 103 fs/nfsd/flexfilelayoutxdr.c 	*p++ = cpu_to_be32(1);			/* 1 netaddr */
p                 104 fs/nfsd/flexfilelayoutxdr.c 	p = xdr_encode_opaque(p, da->netaddr.netid, da->netaddr.netid_len);
p                 105 fs/nfsd/flexfilelayoutxdr.c 	p = xdr_encode_opaque(p, da->netaddr.addr, da->netaddr.addr_len);
p                 107 fs/nfsd/flexfilelayoutxdr.c 	*p++ = cpu_to_be32(1);			/* 1 versions */
p                 109 fs/nfsd/flexfilelayoutxdr.c 	*p++ = cpu_to_be32(da->version);
p                 110 fs/nfsd/flexfilelayoutxdr.c 	*p++ = cpu_to_be32(da->minor_version);
p                 111 fs/nfsd/flexfilelayoutxdr.c 	*p++ = cpu_to_be32(da->rsize);
p                 112 fs/nfsd/flexfilelayoutxdr.c 	*p++ = cpu_to_be32(da->wsize);
p                 113 fs/nfsd/flexfilelayoutxdr.c 	*p++ = cpu_to_be32(da->tightly_coupled);
p                 186 fs/nfsd/nfs2acl.c static int nfsaclsvc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p)
p                 190 fs/nfsd/nfs2acl.c 	p = nfs2svc_decode_fh(p, &argp->fh);
p                 191 fs/nfsd/nfs2acl.c 	if (!p)
p                 193 fs/nfsd/nfs2acl.c 	argp->mask = ntohl(*p); p++;
p                 195 fs/nfsd/nfs2acl.c 	return xdr_argsize_check(rqstp, p);
p                 199 fs/nfsd/nfs2acl.c static int nfsaclsvc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p)
p                 206 fs/nfsd/nfs2acl.c 	p = nfs2svc_decode_fh(p, &argp->fh);
p                 207 fs/nfsd/nfs2acl.c 	if (!p)
p                 209 fs/nfsd/nfs2acl.c 	argp->mask = ntohl(*p++);
p                 211 fs/nfsd/nfs2acl.c 	    !xdr_argsize_check(rqstp, p))
p                 214 fs/nfsd/nfs2acl.c 	base = (char *)p - (char *)head->iov_base;
p                 225 fs/nfsd/nfs2acl.c static int nfsaclsvc_decode_fhandleargs(struct svc_rqst *rqstp, __be32 *p)
p                 229 fs/nfsd/nfs2acl.c 	p = nfs2svc_decode_fh(p, &argp->fh);
p                 230 fs/nfsd/nfs2acl.c 	if (!p)
p                 232 fs/nfsd/nfs2acl.c 	return xdr_argsize_check(rqstp, p);
p                 235 fs/nfsd/nfs2acl.c static int nfsaclsvc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p)
p                 239 fs/nfsd/nfs2acl.c 	p = nfs2svc_decode_fh(p, &argp->fh);
p                 240 fs/nfsd/nfs2acl.c 	if (!p)
p                 242 fs/nfsd/nfs2acl.c 	argp->access = ntohl(*p++);
p                 244 fs/nfsd/nfs2acl.c 	return xdr_argsize_check(rqstp, p);
p                 255 fs/nfsd/nfs2acl.c static int nfsaclsvc_encode_voidres(struct svc_rqst *rqstp, __be32 *p)
p                 257 fs/nfsd/nfs2acl.c 	return xdr_ressize_check(rqstp, p);
p                 261 fs/nfsd/nfs2acl.c static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p)
p                 280 fs/nfsd/nfs2acl.c 	p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat);
p                 281 fs/nfsd/nfs2acl.c 	*p++ = htonl(resp->mask);
p                 282 fs/nfsd/nfs2acl.c 	if (!xdr_ressize_check(rqstp, p))
p                 284 fs/nfsd/nfs2acl.c 	base = (char *)p - (char *)head->iov_base;
p                 306 fs/nfsd/nfs2acl.c static int nfsaclsvc_encode_attrstatres(struct svc_rqst *rqstp, __be32 *p)
p                 310 fs/nfsd/nfs2acl.c 	p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat);
p                 311 fs/nfsd/nfs2acl.c 	return xdr_ressize_check(rqstp, p);
p                 315 fs/nfsd/nfs2acl.c static int nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, __be32 *p)
p                 319 fs/nfsd/nfs2acl.c 	p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat);
p                 320 fs/nfsd/nfs2acl.c 	*p++ = htonl(resp->access);
p                 321 fs/nfsd/nfs2acl.c 	return xdr_ressize_check(rqstp, p);
p                 128 fs/nfsd/nfs3acl.c static int nfs3svc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p)
p                 132 fs/nfsd/nfs3acl.c 	p = nfs3svc_decode_fh(p, &args->fh);
p                 133 fs/nfsd/nfs3acl.c 	if (!p)
p                 135 fs/nfsd/nfs3acl.c 	args->mask = ntohl(*p); p++;
p                 137 fs/nfsd/nfs3acl.c 	return xdr_argsize_check(rqstp, p);
p                 141 fs/nfsd/nfs3acl.c static int nfs3svc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p)
p                 148 fs/nfsd/nfs3acl.c 	p = nfs3svc_decode_fh(p, &args->fh);
p                 149 fs/nfsd/nfs3acl.c 	if (!p)
p                 151 fs/nfsd/nfs3acl.c 	args->mask = ntohl(*p++);
p                 153 fs/nfsd/nfs3acl.c 	    !xdr_argsize_check(rqstp, p))
p                 156 fs/nfsd/nfs3acl.c 	base = (char *)p - (char *)head->iov_base;
p                 172 fs/nfsd/nfs3acl.c static int nfs3svc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p)
p                 177 fs/nfsd/nfs3acl.c 	p = nfs3svc_encode_post_op_attr(rqstp, p, &resp->fh);
p                 185 fs/nfsd/nfs3acl.c 		*p++ = htonl(resp->mask);
p                 186 fs/nfsd/nfs3acl.c 		if (!xdr_ressize_check(rqstp, p))
p                 188 fs/nfsd/nfs3acl.c 		base = (char *)p - (char *)head->iov_base;
p                 210 fs/nfsd/nfs3acl.c 		if (!xdr_ressize_check(rqstp, p))
p                 217 fs/nfsd/nfs3acl.c static int nfs3svc_encode_setaclres(struct svc_rqst *rqstp, __be32 *p)
p                 221 fs/nfsd/nfs3acl.c 	p = nfs3svc_encode_post_op_attr(rqstp, p, &resp->fh);
p                 223 fs/nfsd/nfs3acl.c 	return xdr_ressize_check(rqstp, p);
p                 441 fs/nfsd/nfs3proc.c 	struct page	**p;
p                 463 fs/nfsd/nfs3proc.c 	for (p = rqstp->rq_respages + 1; p < rqstp->rq_next_page; p++) {
p                 464 fs/nfsd/nfs3proc.c 		page_addr = page_address(*p);
p                 503 fs/nfsd/nfs3proc.c 	struct page **p;
p                 535 fs/nfsd/nfs3proc.c 	for (p = rqstp->rq_respages + 1; p < rqstp->rq_next_page; p++) {
p                 536 fs/nfsd/nfs3proc.c 		page_addr = page_address(*p);
p                  35 fs/nfsd/nfs3xdr.c encode_time3(__be32 *p, struct timespec *time)
p                  37 fs/nfsd/nfs3xdr.c 	*p++ = htonl((u32) time->tv_sec); *p++ = htonl(time->tv_nsec);
p                  38 fs/nfsd/nfs3xdr.c 	return p;
p                  42 fs/nfsd/nfs3xdr.c decode_time3(__be32 *p, struct timespec *time)
p                  44 fs/nfsd/nfs3xdr.c 	time->tv_sec = ntohl(*p++);
p                  45 fs/nfsd/nfs3xdr.c 	time->tv_nsec = ntohl(*p++);
p                  46 fs/nfsd/nfs3xdr.c 	return p;
p                  50 fs/nfsd/nfs3xdr.c decode_fh(__be32 *p, struct svc_fh *fhp)
p                  54 fs/nfsd/nfs3xdr.c 	size = ntohl(*p++);
p                  58 fs/nfsd/nfs3xdr.c 	memcpy(&fhp->fh_handle.fh_base, p, size);
p                  60 fs/nfsd/nfs3xdr.c 	return p + XDR_QUADLEN(size);
p                  64 fs/nfsd/nfs3xdr.c __be32 *nfs3svc_decode_fh(__be32 *p, struct svc_fh *fhp)
p                  66 fs/nfsd/nfs3xdr.c 	return decode_fh(p, fhp);
p                  70 fs/nfsd/nfs3xdr.c encode_fh(__be32 *p, struct svc_fh *fhp)
p                  73 fs/nfsd/nfs3xdr.c 	*p++ = htonl(size);
p                  74 fs/nfsd/nfs3xdr.c 	if (size) p[XDR_QUADLEN(size)-1]=0;
p                  75 fs/nfsd/nfs3xdr.c 	memcpy(p, &fhp->fh_handle.fh_base, size);
p                  76 fs/nfsd/nfs3xdr.c 	return p + XDR_QUADLEN(size);
p                  84 fs/nfsd/nfs3xdr.c decode_filename(__be32 *p, char **namp, unsigned int *lenp)
p                  89 fs/nfsd/nfs3xdr.c 	if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS3_MAXNAMLEN)) != NULL) {
p                  96 fs/nfsd/nfs3xdr.c 	return p;
p                 100 fs/nfsd/nfs3xdr.c decode_sattr3(__be32 *p, struct iattr *iap, struct user_namespace *userns)
p                 106 fs/nfsd/nfs3xdr.c 	if (*p++) {
p                 108 fs/nfsd/nfs3xdr.c 		iap->ia_mode = ntohl(*p++);
p                 110 fs/nfsd/nfs3xdr.c 	if (*p++) {
p                 111 fs/nfsd/nfs3xdr.c 		iap->ia_uid = make_kuid(userns, ntohl(*p++));
p                 115 fs/nfsd/nfs3xdr.c 	if (*p++) {
p                 116 fs/nfsd/nfs3xdr.c 		iap->ia_gid = make_kgid(userns, ntohl(*p++));
p                 120 fs/nfsd/nfs3xdr.c 	if (*p++) {
p                 124 fs/nfsd/nfs3xdr.c 		p = xdr_decode_hyper(p, &newsize);
p                 127 fs/nfsd/nfs3xdr.c 	if ((tmp = ntohl(*p++)) == 1) {	/* set to server time */
p                 131 fs/nfsd/nfs3xdr.c 		iap->ia_atime.tv_sec = ntohl(*p++);
p                 132 fs/nfsd/nfs3xdr.c 		iap->ia_atime.tv_nsec = ntohl(*p++);
p                 134 fs/nfsd/nfs3xdr.c 	if ((tmp = ntohl(*p++)) == 1) {	/* set to server time */
p                 138 fs/nfsd/nfs3xdr.c 		iap->ia_mtime.tv_sec = ntohl(*p++);
p                 139 fs/nfsd/nfs3xdr.c 		iap->ia_mtime.tv_nsec = ntohl(*p++);
p                 141 fs/nfsd/nfs3xdr.c 	return p;
p                 144 fs/nfsd/nfs3xdr.c static __be32 *encode_fsid(__be32 *p, struct svc_fh *fhp)
p                 150 fs/nfsd/nfs3xdr.c 		p = xdr_encode_hyper(p, (u64)huge_encode_dev
p                 154 fs/nfsd/nfs3xdr.c 		p = xdr_encode_hyper(p, (u64) fhp->fh_export->ex_fsid);
p                 159 fs/nfsd/nfs3xdr.c 		p = xdr_encode_hyper(p, f);
p                 162 fs/nfsd/nfs3xdr.c 	return p;
p                 166 fs/nfsd/nfs3xdr.c encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
p                 171 fs/nfsd/nfs3xdr.c 	*p++ = htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]);
p                 172 fs/nfsd/nfs3xdr.c 	*p++ = htonl((u32) (stat->mode & S_IALLUGO));
p                 173 fs/nfsd/nfs3xdr.c 	*p++ = htonl((u32) stat->nlink);
p                 174 fs/nfsd/nfs3xdr.c 	*p++ = htonl((u32) from_kuid_munged(userns, stat->uid));
p                 175 fs/nfsd/nfs3xdr.c 	*p++ = htonl((u32) from_kgid_munged(userns, stat->gid));
p                 177 fs/nfsd/nfs3xdr.c 		p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN);
p                 179 fs/nfsd/nfs3xdr.c 		p = xdr_encode_hyper(p, (u64) stat->size);
p                 181 fs/nfsd/nfs3xdr.c 	p = xdr_encode_hyper(p, ((u64)stat->blocks) << 9);
p                 182 fs/nfsd/nfs3xdr.c 	*p++ = htonl((u32) MAJOR(stat->rdev));
p                 183 fs/nfsd/nfs3xdr.c 	*p++ = htonl((u32) MINOR(stat->rdev));
p                 184 fs/nfsd/nfs3xdr.c 	p = encode_fsid(p, fhp);
p                 185 fs/nfsd/nfs3xdr.c 	p = xdr_encode_hyper(p, stat->ino);
p                 187 fs/nfsd/nfs3xdr.c 	p = encode_time3(p, &ts);
p                 189 fs/nfsd/nfs3xdr.c 	p = encode_time3(p, &ts);
p                 191 fs/nfsd/nfs3xdr.c 	p = encode_time3(p, &ts);
p                 193 fs/nfsd/nfs3xdr.c 	return p;
p                 197 fs/nfsd/nfs3xdr.c encode_saved_post_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
p                 200 fs/nfsd/nfs3xdr.c 	*p++ = xdr_one;
p                 201 fs/nfsd/nfs3xdr.c 	return encode_fattr3(rqstp, p, fhp, &fhp->fh_post_attr);
p                 210 fs/nfsd/nfs3xdr.c encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
p                 219 fs/nfsd/nfs3xdr.c 			*p++ = xdr_one;		/* attributes follow */
p                 221 fs/nfsd/nfs3xdr.c 			return encode_fattr3(rqstp, p, fhp, &stat);
p                 224 fs/nfsd/nfs3xdr.c 	*p++ = xdr_zero;
p                 225 fs/nfsd/nfs3xdr.c 	return p;
p                 230 fs/nfsd/nfs3xdr.c nfs3svc_encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
p                 232 fs/nfsd/nfs3xdr.c 	return encode_post_op_attr(rqstp, p, fhp);
p                 239 fs/nfsd/nfs3xdr.c encode_wcc_data(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
p                 245 fs/nfsd/nfs3xdr.c 			*p++ = xdr_one;
p                 246 fs/nfsd/nfs3xdr.c 			p = xdr_encode_hyper(p, (u64) fhp->fh_pre_size);
p                 247 fs/nfsd/nfs3xdr.c 			p = encode_time3(p, &fhp->fh_pre_mtime);
p                 248 fs/nfsd/nfs3xdr.c 			p = encode_time3(p, &fhp->fh_pre_ctime);
p                 250 fs/nfsd/nfs3xdr.c 			*p++ = xdr_zero;
p                 252 fs/nfsd/nfs3xdr.c 		return encode_saved_post_attr(rqstp, p, fhp);
p                 255 fs/nfsd/nfs3xdr.c 	*p++ = xdr_zero;
p                 256 fs/nfsd/nfs3xdr.c 	return encode_post_op_attr(rqstp, p, fhp);
p                 312 fs/nfsd/nfs3xdr.c nfs3svc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p)
p                 316 fs/nfsd/nfs3xdr.c 	p = decode_fh(p, &args->fh);
p                 317 fs/nfsd/nfs3xdr.c 	if (!p)
p                 319 fs/nfsd/nfs3xdr.c 	return xdr_argsize_check(rqstp, p);
p                 323 fs/nfsd/nfs3xdr.c nfs3svc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p)
p                 327 fs/nfsd/nfs3xdr.c 	p = decode_fh(p, &args->fh);
p                 328 fs/nfsd/nfs3xdr.c 	if (!p)
p                 330 fs/nfsd/nfs3xdr.c 	p = decode_sattr3(p, &args->attrs, nfsd_user_namespace(rqstp));
p                 332 fs/nfsd/nfs3xdr.c 	if ((args->check_guard = ntohl(*p++)) != 0) { 
p                 334 fs/nfsd/nfs3xdr.c 		p = decode_time3(p, &time);
p                 338 fs/nfsd/nfs3xdr.c 	return xdr_argsize_check(rqstp, p);
p                 342 fs/nfsd/nfs3xdr.c nfs3svc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p)
p                 346 fs/nfsd/nfs3xdr.c 	if (!(p = decode_fh(p, &args->fh))
p                 347 fs/nfsd/nfs3xdr.c 	 || !(p = decode_filename(p, &args->name, &args->len)))
p                 350 fs/nfsd/nfs3xdr.c 	return xdr_argsize_check(rqstp, p);
p                 354 fs/nfsd/nfs3xdr.c nfs3svc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p)
p                 358 fs/nfsd/nfs3xdr.c 	p = decode_fh(p, &args->fh);
p                 359 fs/nfsd/nfs3xdr.c 	if (!p)
p                 361 fs/nfsd/nfs3xdr.c 	args->access = ntohl(*p++);
p                 363 fs/nfsd/nfs3xdr.c 	return xdr_argsize_check(rqstp, p);
p                 367 fs/nfsd/nfs3xdr.c nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p)
p                 374 fs/nfsd/nfs3xdr.c 	p = decode_fh(p, &args->fh);
p                 375 fs/nfsd/nfs3xdr.c 	if (!p)
p                 377 fs/nfsd/nfs3xdr.c 	p = xdr_decode_hyper(p, &args->offset);
p                 379 fs/nfsd/nfs3xdr.c 	args->count = ntohl(*p++);
p                 385 fs/nfsd/nfs3xdr.c 		struct page *p = *(rqstp->rq_next_page++);
p                 387 fs/nfsd/nfs3xdr.c 		rqstp->rq_vec[v].iov_base = page_address(p);
p                 393 fs/nfsd/nfs3xdr.c 	return xdr_argsize_check(rqstp, p);
p                 397 fs/nfsd/nfs3xdr.c nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p)
p                 405 fs/nfsd/nfs3xdr.c 	p = decode_fh(p, &args->fh);
p                 406 fs/nfsd/nfs3xdr.c 	if (!p)
p                 408 fs/nfsd/nfs3xdr.c 	p = xdr_decode_hyper(p, &args->offset);
p                 410 fs/nfsd/nfs3xdr.c 	args->count = ntohl(*p++);
p                 411 fs/nfsd/nfs3xdr.c 	args->stable = ntohl(*p++);
p                 412 fs/nfsd/nfs3xdr.c 	len = args->len = ntohl(*p++);
p                 413 fs/nfsd/nfs3xdr.c 	if ((void *)p > head->iov_base + head->iov_len)
p                 425 fs/nfsd/nfs3xdr.c 	hdr = (void*)p - head->iov_base;
p                 443 fs/nfsd/nfs3xdr.c 	args->first.iov_base = (void *)p;
p                 449 fs/nfsd/nfs3xdr.c nfs3svc_decode_createargs(struct svc_rqst *rqstp, __be32 *p)
p                 453 fs/nfsd/nfs3xdr.c 	if (!(p = decode_fh(p, &args->fh))
p                 454 fs/nfsd/nfs3xdr.c 	 || !(p = decode_filename(p, &args->name, &args->len)))
p                 457 fs/nfsd/nfs3xdr.c 	switch (args->createmode = ntohl(*p++)) {
p                 460 fs/nfsd/nfs3xdr.c 		p = decode_sattr3(p, &args->attrs, nfsd_user_namespace(rqstp));
p                 463 fs/nfsd/nfs3xdr.c 		args->verf = p;
p                 464 fs/nfsd/nfs3xdr.c 		p += 2;
p                 470 fs/nfsd/nfs3xdr.c 	return xdr_argsize_check(rqstp, p);
p                 474 fs/nfsd/nfs3xdr.c nfs3svc_decode_mkdirargs(struct svc_rqst *rqstp, __be32 *p)
p                 478 fs/nfsd/nfs3xdr.c 	if (!(p = decode_fh(p, &args->fh)) ||
p                 479 fs/nfsd/nfs3xdr.c 	    !(p = decode_filename(p, &args->name, &args->len)))
p                 481 fs/nfsd/nfs3xdr.c 	p = decode_sattr3(p, &args->attrs, nfsd_user_namespace(rqstp));
p                 483 fs/nfsd/nfs3xdr.c 	return xdr_argsize_check(rqstp, p);
p                 487 fs/nfsd/nfs3xdr.c nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p)
p                 490 fs/nfsd/nfs3xdr.c 	char *base = (char *)p;
p                 493 fs/nfsd/nfs3xdr.c 	if (!(p = decode_fh(p, &args->ffh)) ||
p                 494 fs/nfsd/nfs3xdr.c 	    !(p = decode_filename(p, &args->fname, &args->flen)))
p                 496 fs/nfsd/nfs3xdr.c 	p = decode_sattr3(p, &args->attrs, nfsd_user_namespace(rqstp));
p                 498 fs/nfsd/nfs3xdr.c 	args->tlen = ntohl(*p++);
p                 500 fs/nfsd/nfs3xdr.c 	args->first.iov_base = p;
p                 502 fs/nfsd/nfs3xdr.c 	args->first.iov_len -= (char *)p - base;
p                 512 fs/nfsd/nfs3xdr.c nfs3svc_decode_mknodargs(struct svc_rqst *rqstp, __be32 *p)
p                 516 fs/nfsd/nfs3xdr.c 	if (!(p = decode_fh(p, &args->fh))
p                 517 fs/nfsd/nfs3xdr.c 	 || !(p = decode_filename(p, &args->name, &args->len)))
p                 520 fs/nfsd/nfs3xdr.c 	args->ftype = ntohl(*p++);
p                 524 fs/nfsd/nfs3xdr.c 		p = decode_sattr3(p, &args->attrs, nfsd_user_namespace(rqstp));
p                 527 fs/nfsd/nfs3xdr.c 		args->major = ntohl(*p++);
p                 528 fs/nfsd/nfs3xdr.c 		args->minor = ntohl(*p++);
p                 531 fs/nfsd/nfs3xdr.c 	return xdr_argsize_check(rqstp, p);
p                 535 fs/nfsd/nfs3xdr.c nfs3svc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p)
p                 539 fs/nfsd/nfs3xdr.c 	if (!(p = decode_fh(p, &args->ffh))
p                 540 fs/nfsd/nfs3xdr.c 	 || !(p = decode_filename(p, &args->fname, &args->flen))
p                 541 fs/nfsd/nfs3xdr.c 	 || !(p = decode_fh(p, &args->tfh))
p                 542 fs/nfsd/nfs3xdr.c 	 || !(p = decode_filename(p, &args->tname, &args->tlen)))
p                 545 fs/nfsd/nfs3xdr.c 	return xdr_argsize_check(rqstp, p);
p                 549 fs/nfsd/nfs3xdr.c nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p)
p                 553 fs/nfsd/nfs3xdr.c 	p = decode_fh(p, &args->fh);
p                 554 fs/nfsd/nfs3xdr.c 	if (!p)
p                 558 fs/nfsd/nfs3xdr.c 	return xdr_argsize_check(rqstp, p);
p                 562 fs/nfsd/nfs3xdr.c nfs3svc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p)
p                 566 fs/nfsd/nfs3xdr.c 	if (!(p = decode_fh(p, &args->ffh))
p                 567 fs/nfsd/nfs3xdr.c 	 || !(p = decode_fh(p, &args->tfh))
p                 568 fs/nfsd/nfs3xdr.c 	 || !(p = decode_filename(p, &args->tname, &args->tlen)))
p                 571 fs/nfsd/nfs3xdr.c 	return xdr_argsize_check(rqstp, p);
p                 575 fs/nfsd/nfs3xdr.c nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p)
p                 581 fs/nfsd/nfs3xdr.c 	p = decode_fh(p, &args->fh);
p                 582 fs/nfsd/nfs3xdr.c 	if (!p)
p                 584 fs/nfsd/nfs3xdr.c 	p = xdr_decode_hyper(p, &args->cookie);
p                 585 fs/nfsd/nfs3xdr.c 	args->verf   = p; p += 2;
p                 587 fs/nfsd/nfs3xdr.c 	args->count  = ntohl(*p++);
p                 591 fs/nfsd/nfs3xdr.c 		struct page *p = *(rqstp->rq_next_page++);
p                 593 fs/nfsd/nfs3xdr.c 			args->buffer = page_address(p);
p                 597 fs/nfsd/nfs3xdr.c 	return xdr_argsize_check(rqstp, p);
p                 601 fs/nfsd/nfs3xdr.c nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p)
p                 607 fs/nfsd/nfs3xdr.c 	p = decode_fh(p, &args->fh);
p                 608 fs/nfsd/nfs3xdr.c 	if (!p)
p                 610 fs/nfsd/nfs3xdr.c 	p = xdr_decode_hyper(p, &args->cookie);
p                 611 fs/nfsd/nfs3xdr.c 	args->verf     = p; p += 2;
p                 612 fs/nfsd/nfs3xdr.c 	args->dircount = ntohl(*p++);
p                 613 fs/nfsd/nfs3xdr.c 	args->count    = ntohl(*p++);
p                 617 fs/nfsd/nfs3xdr.c 		struct page *p = *(rqstp->rq_next_page++);
p                 619 fs/nfsd/nfs3xdr.c 			args->buffer = page_address(p);
p                 623 fs/nfsd/nfs3xdr.c 	return xdr_argsize_check(rqstp, p);
p                 627 fs/nfsd/nfs3xdr.c nfs3svc_decode_commitargs(struct svc_rqst *rqstp, __be32 *p)
p                 630 fs/nfsd/nfs3xdr.c 	p = decode_fh(p, &args->fh);
p                 631 fs/nfsd/nfs3xdr.c 	if (!p)
p                 633 fs/nfsd/nfs3xdr.c 	p = xdr_decode_hyper(p, &args->offset);
p                 634 fs/nfsd/nfs3xdr.c 	args->count = ntohl(*p++);
p                 636 fs/nfsd/nfs3xdr.c 	return xdr_argsize_check(rqstp, p);
p                 647 fs/nfsd/nfs3xdr.c nfs3svc_encode_voidres(struct svc_rqst *rqstp, __be32 *p)
p                 649 fs/nfsd/nfs3xdr.c 	return xdr_ressize_check(rqstp, p);
p                 654 fs/nfsd/nfs3xdr.c nfs3svc_encode_attrstat(struct svc_rqst *rqstp, __be32 *p)
p                 661 fs/nfsd/nfs3xdr.c 		p = encode_fattr3(rqstp, p, &resp->fh, &resp->stat);
p                 663 fs/nfsd/nfs3xdr.c 	return xdr_ressize_check(rqstp, p);
p                 668 fs/nfsd/nfs3xdr.c nfs3svc_encode_wccstat(struct svc_rqst *rqstp, __be32 *p)
p                 672 fs/nfsd/nfs3xdr.c 	p = encode_wcc_data(rqstp, p, &resp->fh);
p                 673 fs/nfsd/nfs3xdr.c 	return xdr_ressize_check(rqstp, p);
p                 678 fs/nfsd/nfs3xdr.c nfs3svc_encode_diropres(struct svc_rqst *rqstp, __be32 *p)
p                 683 fs/nfsd/nfs3xdr.c 		p = encode_fh(p, &resp->fh);
p                 684 fs/nfsd/nfs3xdr.c 		p = encode_post_op_attr(rqstp, p, &resp->fh);
p                 686 fs/nfsd/nfs3xdr.c 	p = encode_post_op_attr(rqstp, p, &resp->dirfh);
p                 687 fs/nfsd/nfs3xdr.c 	return xdr_ressize_check(rqstp, p);
p                 692 fs/nfsd/nfs3xdr.c nfs3svc_encode_accessres(struct svc_rqst *rqstp, __be32 *p)
p                 696 fs/nfsd/nfs3xdr.c 	p = encode_post_op_attr(rqstp, p, &resp->fh);
p                 698 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->access);
p                 699 fs/nfsd/nfs3xdr.c 	return xdr_ressize_check(rqstp, p);
p                 704 fs/nfsd/nfs3xdr.c nfs3svc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p)
p                 708 fs/nfsd/nfs3xdr.c 	p = encode_post_op_attr(rqstp, p, &resp->fh);
p                 710 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->len);
p                 711 fs/nfsd/nfs3xdr.c 		xdr_ressize_check(rqstp, p);
p                 715 fs/nfsd/nfs3xdr.c 			rqstp->rq_res.tail[0].iov_base = p;
p                 716 fs/nfsd/nfs3xdr.c 			*p = 0;
p                 721 fs/nfsd/nfs3xdr.c 		return xdr_ressize_check(rqstp, p);
p                 726 fs/nfsd/nfs3xdr.c nfs3svc_encode_readres(struct svc_rqst *rqstp, __be32 *p)
p                 730 fs/nfsd/nfs3xdr.c 	p = encode_post_op_attr(rqstp, p, &resp->fh);
p                 732 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->count);
p                 733 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->eof);
p                 734 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->count);	/* xdr opaque count */
p                 735 fs/nfsd/nfs3xdr.c 		xdr_ressize_check(rqstp, p);
p                 740 fs/nfsd/nfs3xdr.c 			rqstp->rq_res.tail[0].iov_base = p;
p                 741 fs/nfsd/nfs3xdr.c 			*p = 0;
p                 746 fs/nfsd/nfs3xdr.c 		return xdr_ressize_check(rqstp, p);
p                 751 fs/nfsd/nfs3xdr.c nfs3svc_encode_writeres(struct svc_rqst *rqstp, __be32 *p)
p                 757 fs/nfsd/nfs3xdr.c 	p = encode_wcc_data(rqstp, p, &resp->fh);
p                 759 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->count);
p                 760 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->committed);
p                 763 fs/nfsd/nfs3xdr.c 		*p++ = verf[0];
p                 764 fs/nfsd/nfs3xdr.c 		*p++ = verf[1];
p                 766 fs/nfsd/nfs3xdr.c 	return xdr_ressize_check(rqstp, p);
p                 771 fs/nfsd/nfs3xdr.c nfs3svc_encode_createres(struct svc_rqst *rqstp, __be32 *p)
p                 776 fs/nfsd/nfs3xdr.c 		*p++ = xdr_one;
p                 777 fs/nfsd/nfs3xdr.c 		p = encode_fh(p, &resp->fh);
p                 778 fs/nfsd/nfs3xdr.c 		p = encode_post_op_attr(rqstp, p, &resp->fh);
p                 780 fs/nfsd/nfs3xdr.c 	p = encode_wcc_data(rqstp, p, &resp->dirfh);
p                 781 fs/nfsd/nfs3xdr.c 	return xdr_ressize_check(rqstp, p);
p                 786 fs/nfsd/nfs3xdr.c nfs3svc_encode_renameres(struct svc_rqst *rqstp, __be32 *p)
p                 790 fs/nfsd/nfs3xdr.c 	p = encode_wcc_data(rqstp, p, &resp->ffh);
p                 791 fs/nfsd/nfs3xdr.c 	p = encode_wcc_data(rqstp, p, &resp->tfh);
p                 792 fs/nfsd/nfs3xdr.c 	return xdr_ressize_check(rqstp, p);
p                 797 fs/nfsd/nfs3xdr.c nfs3svc_encode_linkres(struct svc_rqst *rqstp, __be32 *p)
p                 801 fs/nfsd/nfs3xdr.c 	p = encode_post_op_attr(rqstp, p, &resp->fh);
p                 802 fs/nfsd/nfs3xdr.c 	p = encode_wcc_data(rqstp, p, &resp->tfh);
p                 803 fs/nfsd/nfs3xdr.c 	return xdr_ressize_check(rqstp, p);
p                 808 fs/nfsd/nfs3xdr.c nfs3svc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p)
p                 812 fs/nfsd/nfs3xdr.c 	p = encode_post_op_attr(rqstp, p, &resp->fh);
p                 816 fs/nfsd/nfs3xdr.c 		memcpy(p, resp->verf, 8); p += 2;
p                 817 fs/nfsd/nfs3xdr.c 		xdr_ressize_check(rqstp, p);
p                 823 fs/nfsd/nfs3xdr.c 		rqstp->rq_res.tail[0].iov_base = p;
p                 824 fs/nfsd/nfs3xdr.c 		*p++ = 0;		/* no more entries */
p                 825 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->common.err == nfserr_eof);
p                 829 fs/nfsd/nfs3xdr.c 		return xdr_ressize_check(rqstp, p);
p                 833 fs/nfsd/nfs3xdr.c encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name,
p                 836 fs/nfsd/nfs3xdr.c 	*p++ = xdr_one;				 /* mark entry present */
p                 837 fs/nfsd/nfs3xdr.c 	p    = xdr_encode_hyper(p, ino);	 /* file id */
p                 838 fs/nfsd/nfs3xdr.c 	p    = xdr_encode_array(p, name, namlen);/* name length & name */
p                 840 fs/nfsd/nfs3xdr.c 	cd->offset = p;				/* remember pointer */
p                 841 fs/nfsd/nfs3xdr.c 	p = xdr_encode_hyper(p, NFS_OFFSET_MAX);/* offset of next entry */
p                 843 fs/nfsd/nfs3xdr.c 	return p;
p                 881 fs/nfsd/nfs3xdr.c static __be32 *encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen, u64 ino)
p                 889 fs/nfsd/nfs3xdr.c 		*p++ = 0;
p                 890 fs/nfsd/nfs3xdr.c 		*p++ = 0;
p                 893 fs/nfsd/nfs3xdr.c 	p = encode_post_op_attr(cd->rqstp, p, fh);
p                 894 fs/nfsd/nfs3xdr.c 	*p++ = xdr_one;			/* yes, a file handle follows */
p                 895 fs/nfsd/nfs3xdr.c 	p = encode_fh(p, fh);
p                 898 fs/nfsd/nfs3xdr.c 	return p;
p                 919 fs/nfsd/nfs3xdr.c 	__be32		*p = cd->buffer;
p                 970 fs/nfsd/nfs3xdr.c 		p = encode_entry_baggage(cd, p, name, namlen, ino);
p                 973 fs/nfsd/nfs3xdr.c 			p = encode_entryplus_baggage(cd, p, name, namlen, ino);
p                 974 fs/nfsd/nfs3xdr.c 		num_entry_words = p - cd->buffer;
p                 996 fs/nfsd/nfs3xdr.c 			memmove(p, tmp, num_entry_words << 2);
p                 997 fs/nfsd/nfs3xdr.c 			p += num_entry_words;
p                1012 fs/nfsd/nfs3xdr.c 				cd->offset = p + (cd->offset - tmp);
p                1018 fs/nfsd/nfs3xdr.c 				cd->offset = p + (cd->offset - tmp);
p                1025 fs/nfsd/nfs3xdr.c 			memmove(p, tmp, len1);
p                1028 fs/nfsd/nfs3xdr.c 			p = tmp + (len2 >> 2);
p                1037 fs/nfsd/nfs3xdr.c 	cd->buffer = p;
p                1060 fs/nfsd/nfs3xdr.c nfs3svc_encode_fsstatres(struct svc_rqst *rqstp, __be32 *p)
p                1066 fs/nfsd/nfs3xdr.c 	*p++ = xdr_zero;	/* no post_op_attr */
p                1069 fs/nfsd/nfs3xdr.c 		p = xdr_encode_hyper(p, bs * s->f_blocks);	/* total bytes */
p                1070 fs/nfsd/nfs3xdr.c 		p = xdr_encode_hyper(p, bs * s->f_bfree);	/* free bytes */
p                1071 fs/nfsd/nfs3xdr.c 		p = xdr_encode_hyper(p, bs * s->f_bavail);	/* user available bytes */
p                1072 fs/nfsd/nfs3xdr.c 		p = xdr_encode_hyper(p, s->f_files);	/* total inodes */
p                1073 fs/nfsd/nfs3xdr.c 		p = xdr_encode_hyper(p, s->f_ffree);	/* free inodes */
p                1074 fs/nfsd/nfs3xdr.c 		p = xdr_encode_hyper(p, s->f_ffree);	/* user available inodes */
p                1075 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->invarsec);	/* mean unchanged time */
p                1077 fs/nfsd/nfs3xdr.c 	return xdr_ressize_check(rqstp, p);
p                1082 fs/nfsd/nfs3xdr.c nfs3svc_encode_fsinfores(struct svc_rqst *rqstp, __be32 *p)
p                1086 fs/nfsd/nfs3xdr.c 	*p++ = xdr_zero;	/* no post_op_attr */
p                1089 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->f_rtmax);
p                1090 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->f_rtpref);
p                1091 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->f_rtmult);
p                1092 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->f_wtmax);
p                1093 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->f_wtpref);
p                1094 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->f_wtmult);
p                1095 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->f_dtpref);
p                1096 fs/nfsd/nfs3xdr.c 		p = xdr_encode_hyper(p, resp->f_maxfilesize);
p                1097 fs/nfsd/nfs3xdr.c 		*p++ = xdr_one;
p                1098 fs/nfsd/nfs3xdr.c 		*p++ = xdr_zero;
p                1099 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->f_properties);
p                1102 fs/nfsd/nfs3xdr.c 	return xdr_ressize_check(rqstp, p);
p                1107 fs/nfsd/nfs3xdr.c nfs3svc_encode_pathconfres(struct svc_rqst *rqstp, __be32 *p)
p                1111 fs/nfsd/nfs3xdr.c 	*p++ = xdr_zero;	/* no post_op_attr */
p                1114 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->p_link_max);
p                1115 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->p_name_max);
p                1116 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->p_no_trunc);
p                1117 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->p_chown_restricted);
p                1118 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->p_case_insensitive);
p                1119 fs/nfsd/nfs3xdr.c 		*p++ = htonl(resp->p_case_preserving);
p                1122 fs/nfsd/nfs3xdr.c 	return xdr_ressize_check(rqstp, p);
p                1127 fs/nfsd/nfs3xdr.c nfs3svc_encode_commitres(struct svc_rqst *rqstp, __be32 *p)
p                1133 fs/nfsd/nfs3xdr.c 	p = encode_wcc_data(rqstp, p, &resp->fh);
p                1138 fs/nfsd/nfs3xdr.c 		*p++ = verf[0];
p                1139 fs/nfsd/nfs3xdr.c 		*p++ = verf[1];
p                1141 fs/nfsd/nfs3xdr.c 	return xdr_ressize_check(rqstp, p);
p                 855 fs/nfsd/nfs4acl.c nfs4_acl_get_whotype(char *p, u32 len)
p                 861 fs/nfsd/nfs4acl.c 				0 == memcmp(s2t_map[i].string, p, len))
p                 869 fs/nfsd/nfs4acl.c 	__be32 *p;
p                 875 fs/nfsd/nfs4acl.c 		p = xdr_reserve_space(xdr, s2t_map[i].stringlen + 4);
p                 876 fs/nfsd/nfs4acl.c 		if (!p)
p                 878 fs/nfsd/nfs4acl.c 		p = xdr_encode_opaque(p, s2t_map[i].string,
p                  63 fs/nfsd/nfs4callback.c static __be32 *xdr_encode_empty_array(__be32 *p)
p                  65 fs/nfsd/nfs4callback.c 	*p++ = xdr_zero;
p                  66 fs/nfsd/nfs4callback.c 	return p;
p                 105 fs/nfsd/nfs4callback.c 	__be32 *p;
p                 107 fs/nfsd/nfs4callback.c 	p = xdr_reserve_space(xdr, 4);
p                 108 fs/nfsd/nfs4callback.c 	*p = cpu_to_be32(op);
p                 119 fs/nfsd/nfs4callback.c 	__be32 *p;
p                 122 fs/nfsd/nfs4callback.c 	p = xdr_reserve_space(xdr, 4 + length);
p                 123 fs/nfsd/nfs4callback.c 	xdr_encode_opaque(p, &fh->fh_base, length);
p                 136 fs/nfsd/nfs4callback.c 	__be32 *p;
p                 138 fs/nfsd/nfs4callback.c 	p = xdr_reserve_space(xdr, NFS4_STATEID_SIZE);
p                 139 fs/nfsd/nfs4callback.c 	*p++ = cpu_to_be32(sid->si_generation);
p                 140 fs/nfsd/nfs4callback.c 	xdr_encode_opaque_fixed(p, &sid->si_opaque, NFS4_STATEID_OTHER_SIZE);
p                 151 fs/nfsd/nfs4callback.c 	__be32 *p;
p                 153 fs/nfsd/nfs4callback.c 	p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN);
p                 154 fs/nfsd/nfs4callback.c 	xdr_encode_opaque_fixed(p, session->se_sessionid.data,
p                 221 fs/nfsd/nfs4callback.c 	__be32 *p;
p                 224 fs/nfsd/nfs4callback.c 	p = xdr_inline_decode(xdr, 4 + 4);
p                 225 fs/nfsd/nfs4callback.c 	if (unlikely(p == NULL))
p                 227 fs/nfsd/nfs4callback.c 	op = be32_to_cpup(p++);
p                 230 fs/nfsd/nfs4callback.c 	*status = nfs_cb_stat_to_errno(be32_to_cpup(p));
p                 253 fs/nfsd/nfs4callback.c 	__be32 * p;
p                 255 fs/nfsd/nfs4callback.c 	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
p                 256 fs/nfsd/nfs4callback.c 	p = xdr_encode_empty_array(p);		/* empty tag */
p                 257 fs/nfsd/nfs4callback.c 	*p++ = cpu_to_be32(hdr->minorversion);
p                 258 fs/nfsd/nfs4callback.c 	*p++ = cpu_to_be32(hdr->ident);
p                 260 fs/nfsd/nfs4callback.c 	hdr->nops_p = p;
p                 261 fs/nfsd/nfs4callback.c 	*p = cpu_to_be32(hdr->nops);		/* argarray element count */
p                 286 fs/nfsd/nfs4callback.c 	__be32 *p;
p                 288 fs/nfsd/nfs4callback.c 	p = xdr_inline_decode(xdr, 4 + 4);
p                 289 fs/nfsd/nfs4callback.c 	if (unlikely(p == NULL))
p                 291 fs/nfsd/nfs4callback.c 	hdr->status = be32_to_cpup(p++);
p                 293 fs/nfsd/nfs4callback.c 	length = be32_to_cpup(p++);
p                 294 fs/nfsd/nfs4callback.c 	p = xdr_inline_decode(xdr, length + 4);
p                 295 fs/nfsd/nfs4callback.c 	if (unlikely(p == NULL))
p                 297 fs/nfsd/nfs4callback.c 	p += XDR_QUADLEN(length);
p                 298 fs/nfsd/nfs4callback.c 	hdr->nops = be32_to_cpup(p);
p                 317 fs/nfsd/nfs4callback.c 	__be32 *p;
p                 322 fs/nfsd/nfs4callback.c 	p = xdr_reserve_space(xdr, 4);
p                 323 fs/nfsd/nfs4callback.c 	*p++ = xdr_zero;			/* truncate */
p                 347 fs/nfsd/nfs4callback.c 	__be32 *p;
p                 355 fs/nfsd/nfs4callback.c 	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4 + 4);
p                 356 fs/nfsd/nfs4callback.c 	*p++ = cpu_to_be32(session->se_cb_seq_nr);	/* csa_sequenceid */
p                 357 fs/nfsd/nfs4callback.c 	*p++ = xdr_zero;			/* csa_slotid */
p                 358 fs/nfsd/nfs4callback.c 	*p++ = xdr_zero;			/* csa_highest_slotid */
p                 359 fs/nfsd/nfs4callback.c 	*p++ = xdr_zero;			/* csa_cachethis */
p                 360 fs/nfsd/nfs4callback.c 	xdr_encode_empty_array(p);		/* csa_referring_call_lists */
p                 391 fs/nfsd/nfs4callback.c 	__be32 *p;
p                 398 fs/nfsd/nfs4callback.c 	p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4);
p                 399 fs/nfsd/nfs4callback.c 	if (unlikely(p == NULL))
p                 402 fs/nfsd/nfs4callback.c 	if (memcmp(p, session->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
p                 406 fs/nfsd/nfs4callback.c 	p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
p                 408 fs/nfsd/nfs4callback.c 	dummy = be32_to_cpup(p++);
p                 414 fs/nfsd/nfs4callback.c 	dummy = be32_to_cpup(p++);
p                 553 fs/nfsd/nfs4callback.c 	__be32 *p;
p                 557 fs/nfsd/nfs4callback.c 	p = xdr_reserve_space(xdr, 5 * 4);
p                 558 fs/nfsd/nfs4callback.c 	*p++ = cpu_to_be32(OP_CB_LAYOUTRECALL);
p                 559 fs/nfsd/nfs4callback.c 	*p++ = cpu_to_be32(ls->ls_layout_type);
p                 560 fs/nfsd/nfs4callback.c 	*p++ = cpu_to_be32(IOMODE_ANY);
p                 561 fs/nfsd/nfs4callback.c 	*p++ = cpu_to_be32(1);
p                 562 fs/nfsd/nfs4callback.c 	*p = cpu_to_be32(RETURN_FILE);
p                 566 fs/nfsd/nfs4callback.c 	p = xdr_reserve_space(xdr, 2 * 8);
p                 567 fs/nfsd/nfs4callback.c 	p = xdr_encode_hyper(p, 0);
p                 568 fs/nfsd/nfs4callback.c 	xdr_encode_hyper(p, NFS4_MAX_UINT64);
p                 615 fs/nfsd/nfs4callback.c 	__be32	*p;
p                 617 fs/nfsd/nfs4callback.c 	p = xdr_reserve_space(xdr, 8 + 4 + so->so_owner.len);
p                 618 fs/nfsd/nfs4callback.c 	p = xdr_encode_opaque_fixed(p, &so->so_client->cl_clientid, 8);
p                 619 fs/nfsd/nfs4callback.c 	xdr_encode_opaque(p, so->so_owner.data, so->so_owner.len);
p                 635 fs/nfsd/nfs4callback.c 	__be32 *p;
p                 642 fs/nfsd/nfs4callback.c 	p = xdr_reserve_space(xdr, 4);
p                 643 fs/nfsd/nfs4callback.c 	*p = cpu_to_be32(OP_CB_NOTIFY_LOCK);
p                 693 fs/nfsd/nfs4callback.c 	__be32 *p;
p                 695 fs/nfsd/nfs4callback.c 	p = xdr_reserve_space(xdr, 4);
p                 696 fs/nfsd/nfs4callback.c 	*p++ = nfserr;
p                 698 fs/nfsd/nfs4callback.c 		p = xdr_reserve_space(xdr, 4 + 8 + 4 + NFS4_VERIFIER_SIZE);
p                 699 fs/nfsd/nfs4callback.c 		p = xdr_encode_empty_array(p);
p                 700 fs/nfsd/nfs4callback.c 		p = xdr_encode_hyper(p, cp->cp_res.wr_bytes_written);
p                 701 fs/nfsd/nfs4callback.c 		*p++ = cpu_to_be32(cp->cp_res.wr_stable_how);
p                 702 fs/nfsd/nfs4callback.c 		p = xdr_encode_opaque_fixed(p, cp->cp_res.wr_verifier.data,
p                 705 fs/nfsd/nfs4callback.c 		p = xdr_reserve_space(xdr, 8);
p                 707 fs/nfsd/nfs4callback.c 		p = xdr_encode_hyper(p, 0);
p                 717 fs/nfsd/nfs4callback.c 	__be32 *p;
p                 719 fs/nfsd/nfs4callback.c 	p = xdr_reserve_space(xdr, 4);
p                 720 fs/nfsd/nfs4callback.c 	*p++ = cpu_to_be32(OP_CB_OFFLOAD);
p                 552 fs/nfsd/nfs4idmap.c 	__be32 *p;
p                 555 fs/nfsd/nfs4idmap.c 	p = xdr_reserve_space(xdr, len + 4);
p                 556 fs/nfsd/nfs4idmap.c 	if (!p)
p                 558 fs/nfsd/nfs4idmap.c 	p = xdr_encode_opaque(p, buf, len);
p                 569 fs/nfsd/nfs4idmap.c 	__be32 *p;
p                 581 fs/nfsd/nfs4idmap.c 	p = xdr_reserve_space(xdr, ret + 4);
p                 582 fs/nfsd/nfs4idmap.c 	if (!p)
p                 584 fs/nfsd/nfs4idmap.c 	p = xdr_encode_opaque(p, item->name, ret);
p                1452 fs/nfsd/nfs4proc.c 	__be32 *buf, *p;
p                1478 fs/nfsd/nfs4proc.c 	p = buf;
p                1479 fs/nfsd/nfs4proc.c 	status = nfsd4_encode_fattr_to_buf(&p, count, &cstate->current_fh,
p                1494 fs/nfsd/nfs4proc.c 	p = buf + 1 + ntohl(buf[0]);
p                1496 fs/nfsd/nfs4proc.c 	if (ntohl(*p++) != verify->ve_attrlen)
p                1498 fs/nfsd/nfs4proc.c 	if (!memcmp(p, verify->ve_attrval, verify->ve_attrlen))
p                1905 fs/nfsd/nfs4proc.c 	xdr->p   = head->iov_base + head->iov_len;
p                1931 fs/nfsd/nfs4proc.c 	resp->tagp = resp->xdr.p;
p                2878 fs/nfsd/nfs4state.c 	__be32 *p;
p                2887 fs/nfsd/nfs4state.c 	p = xdr_reserve_space(xdr, slot->sl_datalen);
p                2888 fs/nfsd/nfs4state.c 	if (!p) {
p                2892 fs/nfsd/nfs4state.c 	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
p                  99 fs/nfsd/nfs4xdr.c 	__be32 *p;				\
p                 112 fs/nfsd/nfs4xdr.c 	x = (char *)p;				\
p                 113 fs/nfsd/nfs4xdr.c 	p += XDR_QUADLEN(nbytes);		\
p                 116 fs/nfsd/nfs4xdr.c 	if (!(x = (p==argp->tmp || p == argp->tmpp) ? \
p                 117 fs/nfsd/nfs4xdr.c  		savemem(argp, p, nbytes) :	\
p                 118 fs/nfsd/nfs4xdr.c  		(char *)p)) {			\
p                 123 fs/nfsd/nfs4xdr.c 	p += XDR_QUADLEN(nbytes);		\
p                 126 fs/nfsd/nfs4xdr.c 	memcpy((x), p, nbytes);			\
p                 127 fs/nfsd/nfs4xdr.c 	p += XDR_QUADLEN(nbytes);		\
p                 132 fs/nfsd/nfs4xdr.c 	if (nbytes <= (u32)((char *)argp->end - (char *)argp->p)) {	\
p                 133 fs/nfsd/nfs4xdr.c 		p = argp->p;			\
p                 134 fs/nfsd/nfs4xdr.c 		argp->p += XDR_QUADLEN(nbytes);	\
p                 135 fs/nfsd/nfs4xdr.c 	} else if (!(p = read_buf(argp, nbytes))) { \
p                 144 fs/nfsd/nfs4xdr.c 	argp->p = page_address(argp->pagelist[0]);
p                 147 fs/nfsd/nfs4xdr.c 		argp->end = argp->p + XDR_QUADLEN(argp->pagelen);
p                 150 fs/nfsd/nfs4xdr.c 		argp->end = argp->p + (PAGE_SIZE>>2);
p                 160 fs/nfsd/nfs4xdr.c 	unsigned int avail = (char *)argp->end - (char *)argp->p;
p                 161 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                 169 fs/nfsd/nfs4xdr.c 			argp->p = vec->iov_base;
p                 176 fs/nfsd/nfs4xdr.c 		p = argp->p;
p                 177 fs/nfsd/nfs4xdr.c 		argp->p += XDR_QUADLEN(nbytes);
p                 178 fs/nfsd/nfs4xdr.c 		return p;
p                 187 fs/nfsd/nfs4xdr.c 		p = argp->tmp;
p                 190 fs/nfsd/nfs4xdr.c 		p = argp->tmpp = kmalloc(nbytes, GFP_KERNEL);
p                 191 fs/nfsd/nfs4xdr.c 		if (!p)
p                 200 fs/nfsd/nfs4xdr.c 	memcpy(p, argp->p, avail);
p                 202 fs/nfsd/nfs4xdr.c 	memcpy(((char*)p)+avail, argp->p, (nbytes - avail));
p                 203 fs/nfsd/nfs4xdr.c 	argp->p += XDR_QUADLEN(nbytes - avail);
p                 204 fs/nfsd/nfs4xdr.c 	return p;
p                 209 fs/nfsd/nfs4xdr.c 	unsigned int this = (char *)argp->end - (char *)argp->p;
p                 250 fs/nfsd/nfs4xdr.c 	char *p = svcxdr_tmpalloc(argp, len + 1);
p                 252 fs/nfsd/nfs4xdr.c 	if (!p)
p                 254 fs/nfsd/nfs4xdr.c 	memcpy(p, buf, len);
p                 255 fs/nfsd/nfs4xdr.c 	p[len] = '\0';
p                 256 fs/nfsd/nfs4xdr.c 	return p;
p                 269 fs/nfsd/nfs4xdr.c static char *savemem(struct nfsd4_compoundargs *argp, __be32 *p, int nbytes)
p                 276 fs/nfsd/nfs4xdr.c 	memcpy(ret, p, nbytes);
p                 286 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &tv->tv_sec);
p                 287 fs/nfsd/nfs4xdr.c 	tv->tv_nsec = be32_to_cpup(p++);
p                 305 fs/nfsd/nfs4xdr.c 	bmlen = be32_to_cpup(p++);
p                 311 fs/nfsd/nfs4xdr.c 		bmval[0] = be32_to_cpup(p++);
p                 313 fs/nfsd/nfs4xdr.c 		bmval[1] = be32_to_cpup(p++);
p                 315 fs/nfsd/nfs4xdr.c 		bmval[2] = be32_to_cpup(p++);
p                 343 fs/nfsd/nfs4xdr.c 	expected_len = be32_to_cpup(p++);
p                 348 fs/nfsd/nfs4xdr.c 		p = xdr_decode_hyper(p, &iattr->ia_size);
p                 356 fs/nfsd/nfs4xdr.c 		nace = be32_to_cpup(p++);
p                 373 fs/nfsd/nfs4xdr.c 			ace->type = be32_to_cpup(p++);
p                 374 fs/nfsd/nfs4xdr.c 			ace->flag = be32_to_cpup(p++);
p                 375 fs/nfsd/nfs4xdr.c 			ace->access_mask = be32_to_cpup(p++);
p                 376 fs/nfsd/nfs4xdr.c 			dummy32 = be32_to_cpup(p++);
p                 398 fs/nfsd/nfs4xdr.c 		iattr->ia_mode = be32_to_cpup(p++);
p                 405 fs/nfsd/nfs4xdr.c 		dummy32 = be32_to_cpup(p++);
p                 416 fs/nfsd/nfs4xdr.c 		dummy32 = be32_to_cpup(p++);
p                 427 fs/nfsd/nfs4xdr.c 		dummy32 = be32_to_cpup(p++);
p                 446 fs/nfsd/nfs4xdr.c 		dummy32 = be32_to_cpup(p++);
p                 468 fs/nfsd/nfs4xdr.c 		dummy32 = be32_to_cpup(p++); /* lfs: we don't use it */
p                 471 fs/nfsd/nfs4xdr.c 		dummy32 = be32_to_cpup(p++); /* pi: we don't use it either */
p                 474 fs/nfsd/nfs4xdr.c 		dummy32 = be32_to_cpup(p++);
p                 490 fs/nfsd/nfs4xdr.c 		dummy32 = be32_to_cpup(p++);
p                 492 fs/nfsd/nfs4xdr.c 		dummy32 = be32_to_cpup(p++);
p                 508 fs/nfsd/nfs4xdr.c 	sid->si_generation = be32_to_cpup(p++);
p                 520 fs/nfsd/nfs4xdr.c 	access->ac_req_access = be32_to_cpup(p++);
p                 536 fs/nfsd/nfs4xdr.c 	nr_secflavs = be32_to_cpup(p++);
p                 544 fs/nfsd/nfs4xdr.c 		dummy = be32_to_cpup(p++);
p                 554 fs/nfsd/nfs4xdr.c 			dummy = be32_to_cpup(p++);
p                 557 fs/nfsd/nfs4xdr.c 			dummy = be32_to_cpup(p++);
p                 563 fs/nfsd/nfs4xdr.c 			uid = be32_to_cpup(p++);
p                 564 fs/nfsd/nfs4xdr.c 			gid = be32_to_cpup(p++);
p                 568 fs/nfsd/nfs4xdr.c 			dummy = be32_to_cpup(p++);
p                 588 fs/nfsd/nfs4xdr.c 			dummy = be32_to_cpup(p++);
p                 590 fs/nfsd/nfs4xdr.c 			dummy = be32_to_cpup(p++);
p                 592 fs/nfsd/nfs4xdr.c 			p += XDR_QUADLEN(dummy);
p                 595 fs/nfsd/nfs4xdr.c 			dummy = be32_to_cpup(p++);
p                 611 fs/nfsd/nfs4xdr.c 	bc->bc_cb_program = be32_to_cpup(p++);
p                 623 fs/nfsd/nfs4xdr.c 	bcts->dir = be32_to_cpup(p++);
p                 635 fs/nfsd/nfs4xdr.c 	close->cl_seqid = be32_to_cpup(p++);
p                 648 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &commit->co_offset);
p                 649 fs/nfsd/nfs4xdr.c 	commit->co_count = be32_to_cpup(p++);
p                 660 fs/nfsd/nfs4xdr.c 	create->cr_type = be32_to_cpup(p++);
p                 664 fs/nfsd/nfs4xdr.c 		create->cr_datalen = be32_to_cpup(p++);
p                 666 fs/nfsd/nfs4xdr.c 		create->cr_data = svcxdr_dupstr(argp, p, create->cr_datalen);
p                 673 fs/nfsd/nfs4xdr.c 		create->cr_specdata1 = be32_to_cpup(p++);
p                 674 fs/nfsd/nfs4xdr.c 		create->cr_specdata2 = be32_to_cpup(p++);
p                 684 fs/nfsd/nfs4xdr.c 	create->cr_namelen = be32_to_cpup(p++);
p                 717 fs/nfsd/nfs4xdr.c 	link->li_namelen = be32_to_cpup(p++);
p                 735 fs/nfsd/nfs4xdr.c 	lock->lk_type = be32_to_cpup(p++);
p                 738 fs/nfsd/nfs4xdr.c 	lock->lk_reclaim = be32_to_cpup(p++);
p                 739 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &lock->lk_offset);
p                 740 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &lock->lk_length);
p                 741 fs/nfsd/nfs4xdr.c 	lock->lk_is_new = be32_to_cpup(p++);
p                 745 fs/nfsd/nfs4xdr.c 		lock->lk_new_open_seqid = be32_to_cpup(p++);
p                 750 fs/nfsd/nfs4xdr.c 		lock->lk_new_lock_seqid = be32_to_cpup(p++);
p                 752 fs/nfsd/nfs4xdr.c 		lock->lk_new_owner.len = be32_to_cpup(p++);
p                 760 fs/nfsd/nfs4xdr.c 		lock->lk_old_lock_seqid = be32_to_cpup(p++);
p                 772 fs/nfsd/nfs4xdr.c 	lockt->lt_type = be32_to_cpup(p++);
p                 775 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &lockt->lt_offset);
p                 776 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &lockt->lt_length);
p                 778 fs/nfsd/nfs4xdr.c 	lockt->lt_owner.len = be32_to_cpup(p++);
p                 791 fs/nfsd/nfs4xdr.c 	locku->lu_type = be32_to_cpup(p++);
p                 794 fs/nfsd/nfs4xdr.c 	locku->lu_seqid = be32_to_cpup(p++);
p                 799 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &locku->lu_offset);
p                 800 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &locku->lu_length);
p                 811 fs/nfsd/nfs4xdr.c 	lookup->lo_len = be32_to_cpup(p++);
p                 822 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                 826 fs/nfsd/nfs4xdr.c 	w = be32_to_cpup(p++);
p                 875 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                 878 fs/nfsd/nfs4xdr.c 	*x = be32_to_cpup(p++);
p                 889 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                 892 fs/nfsd/nfs4xdr.c 	o->len = be32_to_cpup(p++);
p                 917 fs/nfsd/nfs4xdr.c 	open->op_seqid = be32_to_cpup(p++);
p                 932 fs/nfsd/nfs4xdr.c 	open->op_create = be32_to_cpup(p++);
p                 938 fs/nfsd/nfs4xdr.c 		open->op_createmode = be32_to_cpup(p++);
p                 973 fs/nfsd/nfs4xdr.c 	open->op_claim_type = be32_to_cpup(p++);
p                 978 fs/nfsd/nfs4xdr.c 		open->op_fname.len = be32_to_cpup(p++);
p                 986 fs/nfsd/nfs4xdr.c 		open->op_delegate_type = be32_to_cpup(p++);
p                 993 fs/nfsd/nfs4xdr.c 		open->op_fname.len = be32_to_cpup(p++);
p                1031 fs/nfsd/nfs4xdr.c 	open_conf->oc_seqid = be32_to_cpup(p++);
p                1045 fs/nfsd/nfs4xdr.c 	open_down->od_seqid = be32_to_cpup(p++);
p                1062 fs/nfsd/nfs4xdr.c 	putfh->pf_fhlen = be32_to_cpup(p++);
p                1072 fs/nfsd/nfs4xdr.c nfsd4_decode_putpubfh(struct nfsd4_compoundargs *argp, void *p)
p                1088 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &read->rd_offset);
p                1089 fs/nfsd/nfs4xdr.c 	read->rd_length = be32_to_cpup(p++);
p                1100 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &readdir->rd_cookie);
p                1102 fs/nfsd/nfs4xdr.c 	readdir->rd_dircount = be32_to_cpup(p++);
p                1103 fs/nfsd/nfs4xdr.c 	readdir->rd_maxcount = be32_to_cpup(p++);
p                1116 fs/nfsd/nfs4xdr.c 	remove->rm_namelen = be32_to_cpup(p++);
p                1131 fs/nfsd/nfs4xdr.c 	rename->rn_snamelen = be32_to_cpup(p++);
p                1135 fs/nfsd/nfs4xdr.c 	rename->rn_tnamelen = be32_to_cpup(p++);
p                1167 fs/nfsd/nfs4xdr.c 	secinfo->si_namelen = be32_to_cpup(p++);
p                1183 fs/nfsd/nfs4xdr.c 	sin->sin_style = be32_to_cpup(p++);
p                1214 fs/nfsd/nfs4xdr.c 	setclientid->se_callback_prog = be32_to_cpup(p++);
p                1215 fs/nfsd/nfs4xdr.c 	setclientid->se_callback_netid_len = be32_to_cpup(p++);
p                1219 fs/nfsd/nfs4xdr.c 	setclientid->se_callback_addr_len = be32_to_cpup(p++);
p                1224 fs/nfsd/nfs4xdr.c 	setclientid->se_callback_ident = be32_to_cpup(p++);
p                1257 fs/nfsd/nfs4xdr.c 	verify->ve_attrlen = be32_to_cpup(p++);
p                1275 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &write->wr_offset);
p                1276 fs/nfsd/nfs4xdr.c 	write->wr_stable_how = be32_to_cpup(p++);
p                1279 fs/nfsd/nfs4xdr.c 	write->wr_buflen = be32_to_cpup(p++);
p                1285 fs/nfsd/nfs4xdr.c 	avail = (char*)argp->end - (char*)argp->p;
p                1291 fs/nfsd/nfs4xdr.c 	write->wr_head.iov_base = p;
p                1308 fs/nfsd/nfs4xdr.c 	argp->p += XDR_QUADLEN(len);
p                1323 fs/nfsd/nfs4xdr.c 	rlockowner->rl_owner.len = be32_to_cpup(p++);
p                1347 fs/nfsd/nfs4xdr.c 	exid->flags = be32_to_cpup(p++);
p                1351 fs/nfsd/nfs4xdr.c 	exid->spa_how = be32_to_cpup(p++);
p                1369 fs/nfsd/nfs4xdr.c 		dummy = be32_to_cpup(p++);
p                1371 fs/nfsd/nfs4xdr.c 		p += dummy;
p                1374 fs/nfsd/nfs4xdr.c 		dummy = be32_to_cpup(p++);
p                1376 fs/nfsd/nfs4xdr.c 		p += dummy;
p                1380 fs/nfsd/nfs4xdr.c 		tmp = be32_to_cpup(p++);
p                1383 fs/nfsd/nfs4xdr.c 			dummy = be32_to_cpup(p++);
p                1385 fs/nfsd/nfs4xdr.c 			p += XDR_QUADLEN(dummy);
p                1390 fs/nfsd/nfs4xdr.c 		tmp = be32_to_cpup(p++);
p                1393 fs/nfsd/nfs4xdr.c 			dummy = be32_to_cpup(p++);
p                1395 fs/nfsd/nfs4xdr.c 			p += XDR_QUADLEN(dummy);
p                1406 fs/nfsd/nfs4xdr.c 	dummy = be32_to_cpup(p++);
p                1437 fs/nfsd/nfs4xdr.c 	sess->seqid = be32_to_cpup(p++);
p                1438 fs/nfsd/nfs4xdr.c 	sess->flags = be32_to_cpup(p++);
p                1442 fs/nfsd/nfs4xdr.c 	p++; /* headerpadsz is always 0 */
p                1443 fs/nfsd/nfs4xdr.c 	sess->fore_channel.maxreq_sz = be32_to_cpup(p++);
p                1444 fs/nfsd/nfs4xdr.c 	sess->fore_channel.maxresp_sz = be32_to_cpup(p++);
p                1445 fs/nfsd/nfs4xdr.c 	sess->fore_channel.maxresp_cached = be32_to_cpup(p++);
p                1446 fs/nfsd/nfs4xdr.c 	sess->fore_channel.maxops = be32_to_cpup(p++);
p                1447 fs/nfsd/nfs4xdr.c 	sess->fore_channel.maxreqs = be32_to_cpup(p++);
p                1448 fs/nfsd/nfs4xdr.c 	sess->fore_channel.nr_rdma_attrs = be32_to_cpup(p++);
p                1451 fs/nfsd/nfs4xdr.c 		sess->fore_channel.rdma_attrs = be32_to_cpup(p++);
p                1459 fs/nfsd/nfs4xdr.c 	p++; /* headerpadsz is always 0 */
p                1460 fs/nfsd/nfs4xdr.c 	sess->back_channel.maxreq_sz = be32_to_cpup(p++);
p                1461 fs/nfsd/nfs4xdr.c 	sess->back_channel.maxresp_sz = be32_to_cpup(p++);
p                1462 fs/nfsd/nfs4xdr.c 	sess->back_channel.maxresp_cached = be32_to_cpup(p++);
p                1463 fs/nfsd/nfs4xdr.c 	sess->back_channel.maxops = be32_to_cpup(p++);
p                1464 fs/nfsd/nfs4xdr.c 	sess->back_channel.maxreqs = be32_to_cpup(p++);
p                1465 fs/nfsd/nfs4xdr.c 	sess->back_channel.nr_rdma_attrs = be32_to_cpup(p++);
p                1468 fs/nfsd/nfs4xdr.c 		sess->back_channel.rdma_attrs = be32_to_cpup(p++);
p                1475 fs/nfsd/nfs4xdr.c 	sess->callback_prog = be32_to_cpup(p++);
p                1498 fs/nfsd/nfs4xdr.c 	free_stateid->fr_stateid.si_generation = be32_to_cpup(p++);
p                1512 fs/nfsd/nfs4xdr.c 	seq->seqid = be32_to_cpup(p++);
p                1513 fs/nfsd/nfs4xdr.c 	seq->slotid = be32_to_cpup(p++);
p                1514 fs/nfsd/nfs4xdr.c 	seq->maxslots = be32_to_cpup(p++);
p                1515 fs/nfsd/nfs4xdr.c 	seq->cachethis = be32_to_cpup(p++);
p                1524 fs/nfsd/nfs4xdr.c 	__be32 *p, status;
p                1528 fs/nfsd/nfs4xdr.c 	test_stateid->ts_num_ids = ntohl(*p++);
p                1571 fs/nfsd/nfs4xdr.c 	rc->rca_one_fs = be32_to_cpup(p++);
p                1586 fs/nfsd/nfs4xdr.c 	gdev->gd_layout_type = be32_to_cpup(p++);
p                1587 fs/nfsd/nfs4xdr.c 	gdev->gd_maxcount = be32_to_cpup(p++);
p                1588 fs/nfsd/nfs4xdr.c 	num = be32_to_cpup(p++);
p                1593 fs/nfsd/nfs4xdr.c 		gdev->gd_notify_types = be32_to_cpup(p++);
p                1595 fs/nfsd/nfs4xdr.c 			if (be32_to_cpup(p++)) {
p                1611 fs/nfsd/nfs4xdr.c 	lgp->lg_signal = be32_to_cpup(p++);
p                1612 fs/nfsd/nfs4xdr.c 	lgp->lg_layout_type = be32_to_cpup(p++);
p                1613 fs/nfsd/nfs4xdr.c 	lgp->lg_seg.iomode = be32_to_cpup(p++);
p                1614 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &lgp->lg_seg.offset);
p                1615 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &lgp->lg_seg.length);
p                1616 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &lgp->lg_minlength);
p                1623 fs/nfsd/nfs4xdr.c 	lgp->lg_maxcount = be32_to_cpup(p++);
p                1636 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &lcp->lc_seg.offset);
p                1637 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &lcp->lc_seg.length);
p                1638 fs/nfsd/nfs4xdr.c 	lcp->lc_reclaim = be32_to_cpup(p++);
p                1645 fs/nfsd/nfs4xdr.c 	lcp->lc_newoffset = be32_to_cpup(p++);
p                1648 fs/nfsd/nfs4xdr.c 		p = xdr_decode_hyper(p, &lcp->lc_last_wr);
p                1652 fs/nfsd/nfs4xdr.c 	timechange = be32_to_cpup(p++);
p                1661 fs/nfsd/nfs4xdr.c 	lcp->lc_layout_type = be32_to_cpup(p++);
p                1667 fs/nfsd/nfs4xdr.c 	lcp->lc_up_len = be32_to_cpup(p++);
p                1683 fs/nfsd/nfs4xdr.c 	lrp->lr_reclaim = be32_to_cpup(p++);
p                1684 fs/nfsd/nfs4xdr.c 	lrp->lr_layout_type = be32_to_cpup(p++);
p                1685 fs/nfsd/nfs4xdr.c 	lrp->lr_seg.iomode = be32_to_cpup(p++);
p                1686 fs/nfsd/nfs4xdr.c 	lrp->lr_return_type = be32_to_cpup(p++);
p                1689 fs/nfsd/nfs4xdr.c 		p = xdr_decode_hyper(p, &lrp->lr_seg.offset);
p                1690 fs/nfsd/nfs4xdr.c 		p = xdr_decode_hyper(p, &lrp->lr_seg.length);
p                1697 fs/nfsd/nfs4xdr.c 		lrp->lrf_body_len = be32_to_cpup(p++);
p                1722 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &fallocate->falloc_offset);
p                1723 fs/nfsd/nfs4xdr.c 	xdr_decode_hyper(p, &fallocate->falloc_length);
p                1741 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &clone->cl_src_pos);
p                1742 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &clone->cl_dst_pos);
p                1743 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &clone->cl_count);
p                1760 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &copy->cp_src_pos);
p                1761 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &copy->cp_dst_pos);
p                1762 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &copy->cp_count);
p                1763 fs/nfsd/nfs4xdr.c 	p++; /* ca_consecutive: we always do consecutive copies */
p                1764 fs/nfsd/nfs4xdr.c 	copy->cp_synchronous = be32_to_cpup(p++);
p                1787 fs/nfsd/nfs4xdr.c 	p = xdr_decode_hyper(p, &seek->seek_offset);
p                1788 fs/nfsd/nfs4xdr.c 	seek->seek_whence = be32_to_cpup(p);
p                1794 fs/nfsd/nfs4xdr.c nfsd4_decode_noop(struct nfsd4_compoundargs *argp, void *p)
p                1800 fs/nfsd/nfs4xdr.c nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
p                1918 fs/nfsd/nfs4xdr.c 	argp->taglen = be32_to_cpup(p++);
p                1922 fs/nfsd/nfs4xdr.c 	argp->minorversion = be32_to_cpup(p++);
p                1923 fs/nfsd/nfs4xdr.c 	argp->opcnt = be32_to_cpup(p++);
p                1953 fs/nfsd/nfs4xdr.c 		op->opnum = be32_to_cpup(p++);
p                1999 fs/nfsd/nfs4xdr.c static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode,
p                2003 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(convert_to_wallclock(exp->cd->flush_time));
p                2004 fs/nfsd/nfs4xdr.c 		*p++ = 0;
p                2006 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, nfsd4_change_attribute(stat, inode));
p                2008 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(stat->ctime.tv_sec);
p                2009 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(stat->ctime.tv_nsec);
p                2011 fs/nfsd/nfs4xdr.c 	return p;
p                2025 fs/nfsd/nfs4xdr.c static __be32 *encode_time_delta(__be32 *p, struct inode *inode)
p                2033 fs/nfsd/nfs4xdr.c 	p = xdr_encode_hyper(p, ts.tv_sec);
p                2034 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(ts.tv_nsec);
p                2036 fs/nfsd/nfs4xdr.c 	return p;
p                2039 fs/nfsd/nfs4xdr.c static __be32 *encode_cinfo(__be32 *p, struct nfsd4_change_info *c)
p                2041 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(c->atomic);
p                2043 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, c->before_change);
p                2044 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, c->after_change);
p                2046 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(c->before_ctime_sec);
p                2047 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(c->before_ctime_nsec);
p                2048 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(c->after_ctime_sec);
p                2049 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(c->after_ctime_nsec);
p                2051 fs/nfsd/nfs4xdr.c 	return p;
p                2061 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                2070 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 4);
p                2071 fs/nfsd/nfs4xdr.c 	if (!p)
p                2073 fs/nfsd/nfs4xdr.c 	p++; /* We will fill this in with @count later */
p                2096 fs/nfsd/nfs4xdr.c 			p = xdr_reserve_space(xdr, strlen + 4);
p                2097 fs/nfsd/nfs4xdr.c 			if (!p)
p                2099 fs/nfsd/nfs4xdr.c 			p = xdr_encode_opaque(p, str, strlen);
p                2149 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                2181 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 4);
p                2182 fs/nfsd/nfs4xdr.c 	if (!p)
p                2184 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(ncomponents);
p                2192 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, len + 4);
p                2193 fs/nfsd/nfs4xdr.c 		if (!p) {
p                2197 fs/nfsd/nfs4xdr.c 		p = xdr_encode_opaque(p, dentry->d_name.name, len);
p                2236 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                2242 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 4);
p                2243 fs/nfsd/nfs4xdr.c 	if (!p)
p                2245 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(fslocs->locations_count);
p                2283 fs/nfsd/nfs4xdr.c 	__be32		*p;
p                2286 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 4 + 4 * i);
p                2287 fs/nfsd/nfs4xdr.c 	if (!p)
p                2290 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(i);
p                2294 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(i);
p                2309 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                2311 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, len + 4 + 4 + 4);
p                2312 fs/nfsd/nfs4xdr.c 	if (!p)
p                2319 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(0); /* lfs */
p                2320 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(0); /* pi */
p                2321 fs/nfsd/nfs4xdr.c 	p = xdr_encode_opaque(p, context, len);
p                2367 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                2370 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 16);
p                2371 fs/nfsd/nfs4xdr.c 		if (!p)
p                2373 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(3);
p                2374 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(bmval0);
p                2375 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(bmval1);
p                2376 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(bmval2);
p                2378 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 12);
p                2379 fs/nfsd/nfs4xdr.c 		if (!p)
p                2381 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(2);
p                2382 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(bmval0);
p                2383 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(bmval1);
p                2385 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 8);
p                2386 fs/nfsd/nfs4xdr.c 		if (!p)
p                2388 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(1);
p                2389 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(bmval0);
p                2413 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                2501 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 4);
p                2502 fs/nfsd/nfs4xdr.c 	if (!p)
p                2504 fs/nfsd/nfs4xdr.c 	p++;                /* to be backfilled later */
p                2516 fs/nfsd/nfs4xdr.c 			p = xdr_reserve_space(xdr, 12);
p                2517 fs/nfsd/nfs4xdr.c 			if (!p)
p                2519 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(2);
p                2520 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(supp[0]);
p                2521 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(supp[1]);
p                2523 fs/nfsd/nfs4xdr.c 			p = xdr_reserve_space(xdr, 16);
p                2524 fs/nfsd/nfs4xdr.c 			if (!p)
p                2526 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(3);
p                2527 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(supp[0]);
p                2528 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(supp[1]);
p                2529 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(supp[2]);
p                2533 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2534 fs/nfsd/nfs4xdr.c 		if (!p)
p                2541 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(dummy);
p                2544 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2545 fs/nfsd/nfs4xdr.c 		if (!p)
p                2548 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(NFS4_FH_PERSISTENT);
p                2550 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(NFS4_FH_PERSISTENT|
p                2554 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 8);
p                2555 fs/nfsd/nfs4xdr.c 		if (!p)
p                2557 fs/nfsd/nfs4xdr.c 		p = encode_change(p, &stat, d_inode(dentry), exp);
p                2560 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 8);
p                2561 fs/nfsd/nfs4xdr.c 		if (!p)
p                2563 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, stat.size);
p                2566 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2567 fs/nfsd/nfs4xdr.c 		if (!p)
p                2569 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(1);
p                2572 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2573 fs/nfsd/nfs4xdr.c 		if (!p)
p                2575 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(1);
p                2578 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2579 fs/nfsd/nfs4xdr.c 		if (!p)
p                2581 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(0);
p                2584 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 16);
p                2585 fs/nfsd/nfs4xdr.c 		if (!p)
p                2588 fs/nfsd/nfs4xdr.c 			p = xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MAJOR);
p                2589 fs/nfsd/nfs4xdr.c 			p = xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MINOR);
p                2592 fs/nfsd/nfs4xdr.c 			p = xdr_encode_hyper(p, (u64)exp->ex_fsid);
p                2593 fs/nfsd/nfs4xdr.c 			p = xdr_encode_hyper(p, (u64)0);
p                2596 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(0);
p                2597 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(MAJOR(stat.dev));
p                2598 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(0);
p                2599 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(MINOR(stat.dev));
p                2602 fs/nfsd/nfs4xdr.c 			p = xdr_encode_opaque_fixed(p, exp->ex_uuid,
p                2608 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2609 fs/nfsd/nfs4xdr.c 		if (!p)
p                2611 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(0);
p                2614 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2615 fs/nfsd/nfs4xdr.c 		if (!p)
p                2617 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(nn->nfsd4_lease);
p                2620 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2621 fs/nfsd/nfs4xdr.c 		if (!p)
p                2623 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(rdattr_err);
p                2629 fs/nfsd/nfs4xdr.c 			p = xdr_reserve_space(xdr, 4);
p                2630 fs/nfsd/nfs4xdr.c 			if (!p)
p                2633 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(0);
p                2636 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2637 fs/nfsd/nfs4xdr.c 		if (!p)
p                2639 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(acl->naces);
p                2642 fs/nfsd/nfs4xdr.c 			p = xdr_reserve_space(xdr, 4*3);
p                2643 fs/nfsd/nfs4xdr.c 			if (!p)
p                2645 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(ace->type);
p                2646 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(ace->flag);
p                2647 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(ace->access_mask &
p                2656 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2657 fs/nfsd/nfs4xdr.c 		if (!p)
p                2659 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(IS_POSIXACL(dentry->d_inode) ?
p                2663 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2664 fs/nfsd/nfs4xdr.c 		if (!p)
p                2666 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(1);
p                2669 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2670 fs/nfsd/nfs4xdr.c 		if (!p)
p                2672 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(0);
p                2675 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2676 fs/nfsd/nfs4xdr.c 		if (!p)
p                2678 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(1);
p                2681 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2682 fs/nfsd/nfs4xdr.c 		if (!p)
p                2684 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(1);
p                2687 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, fhp->fh_handle.fh_size + 4);
p                2688 fs/nfsd/nfs4xdr.c 		if (!p)
p                2690 fs/nfsd/nfs4xdr.c 		p = xdr_encode_opaque(p, &fhp->fh_handle.fh_base,
p                2694 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 8);
p                2695 fs/nfsd/nfs4xdr.c 		if (!p)
p                2697 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, stat.ino);
p                2700 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 8);
p                2701 fs/nfsd/nfs4xdr.c 		if (!p)
p                2703 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, (u64) statfs.f_ffree);
p                2706 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 8);
p                2707 fs/nfsd/nfs4xdr.c 		if (!p)
p                2709 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, (u64) statfs.f_ffree);
p                2712 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 8);
p                2713 fs/nfsd/nfs4xdr.c 		if (!p)
p                2715 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, (u64) statfs.f_files);
p                2723 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2724 fs/nfsd/nfs4xdr.c 		if (!p)
p                2726 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(1);
p                2729 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 8);
p                2730 fs/nfsd/nfs4xdr.c 		if (!p)
p                2732 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, exp->ex_path.mnt->mnt_sb->s_maxbytes);
p                2735 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2736 fs/nfsd/nfs4xdr.c 		if (!p)
p                2738 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(255);
p                2741 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2742 fs/nfsd/nfs4xdr.c 		if (!p)
p                2744 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(statfs.f_namelen);
p                2747 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 8);
p                2748 fs/nfsd/nfs4xdr.c 		if (!p)
p                2750 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, (u64) svc_max_payload(rqstp));
p                2753 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 8);
p                2754 fs/nfsd/nfs4xdr.c 		if (!p)
p                2756 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, (u64) svc_max_payload(rqstp));
p                2759 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2760 fs/nfsd/nfs4xdr.c 		if (!p)
p                2762 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(stat.mode & S_IALLUGO);
p                2765 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2766 fs/nfsd/nfs4xdr.c 		if (!p)
p                2768 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(1);
p                2771 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2772 fs/nfsd/nfs4xdr.c 		if (!p)
p                2774 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(stat.nlink);
p                2787 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 8);
p                2788 fs/nfsd/nfs4xdr.c 		if (!p)
p                2790 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32((u32) MAJOR(stat.rdev));
p                2791 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32((u32) MINOR(stat.rdev));
p                2794 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 8);
p                2795 fs/nfsd/nfs4xdr.c 		if (!p)
p                2798 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, dummy64);
p                2801 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 8);
p                2802 fs/nfsd/nfs4xdr.c 		if (!p)
p                2805 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, dummy64);
p                2808 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 8);
p                2809 fs/nfsd/nfs4xdr.c 		if (!p)
p                2812 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, dummy64);
p                2815 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 8);
p                2816 fs/nfsd/nfs4xdr.c 		if (!p)
p                2819 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, dummy64);
p                2822 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 12);
p                2823 fs/nfsd/nfs4xdr.c 		if (!p)
p                2825 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, (s64)stat.atime.tv_sec);
p                2826 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(stat.atime.tv_nsec);
p                2829 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 12);
p                2830 fs/nfsd/nfs4xdr.c 		if (!p)
p                2832 fs/nfsd/nfs4xdr.c 		p = encode_time_delta(p, d_inode(dentry));
p                2835 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 12);
p                2836 fs/nfsd/nfs4xdr.c 		if (!p)
p                2838 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, (s64)stat.ctime.tv_sec);
p                2839 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(stat.ctime.tv_nsec);
p                2842 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 12);
p                2843 fs/nfsd/nfs4xdr.c 		if (!p)
p                2845 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, (s64)stat.mtime.tv_sec);
p                2846 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(stat.mtime.tv_nsec);
p                2852 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 8);
p                2853 fs/nfsd/nfs4xdr.c 		if (!p)
p                2866 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, ino);
p                2882 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2883 fs/nfsd/nfs4xdr.c 		if (!p)
p                2885 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(stat.blksize);
p                2902 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                2903 fs/nfsd/nfs4xdr.c 		if (!p)
p                2906 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(NFS4_CHANGE_TYPE_IS_MONOTONIC_INCR);
p                2908 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(NFS4_CHANGE_TYPE_IS_TIME_METADATA);
p                2946 fs/nfsd/nfs4xdr.c 				struct xdr_buf *buf, __be32 *p, int bytes)
p                2950 fs/nfsd/nfs4xdr.c 	buf->head[0].iov_base = p;
p                2955 fs/nfsd/nfs4xdr.c 	xdr->p = p;
p                2956 fs/nfsd/nfs4xdr.c 	xdr->end = (void *)p + bytes;
p                2960 fs/nfsd/nfs4xdr.c __be32 nfsd4_encode_fattr_to_buf(__be32 **p, int words,
p                2969 fs/nfsd/nfs4xdr.c 	svcxdr_init_encode_from_buffer(&xdr, &dummy, *p, words << 2);
p                2972 fs/nfsd/nfs4xdr.c 	*p = xdr.p;
p                3050 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3052 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 20);
p                3053 fs/nfsd/nfs4xdr.c 	if (!p)
p                3055 fs/nfsd/nfs4xdr.c 	*p++ = htonl(2);
p                3056 fs/nfsd/nfs4xdr.c 	*p++ = htonl(FATTR4_WORD0_RDATTR_ERROR); /* bmval0 */
p                3057 fs/nfsd/nfs4xdr.c 	*p++ = htonl(0);			 /* bmval1 */
p                3059 fs/nfsd/nfs4xdr.c 	*p++ = htonl(4);     /* attribute length */
p                3060 fs/nfsd/nfs4xdr.c 	*p++ = nfserr;       /* no htonl */
p                3061 fs/nfsd/nfs4xdr.c 	return p;
p                3077 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3091 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 4);
p                3092 fs/nfsd/nfs4xdr.c 	if (!p)
p                3094 fs/nfsd/nfs4xdr.c 	*p++ = xdr_one;                             /* mark entry present */
p                3096 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 3*4 + namlen);
p                3097 fs/nfsd/nfs4xdr.c 	if (!p)
p                3099 fs/nfsd/nfs4xdr.c 	p = xdr_encode_hyper(p, NFS_OFFSET_MAX);    /* offset of next entry */
p                3100 fs/nfsd/nfs4xdr.c 	p = xdr_encode_array(p, name, namlen);      /* name length & name */
p                3122 fs/nfsd/nfs4xdr.c 		p = nfsd4_encode_rdattr_error(xdr, nfserr);
p                3123 fs/nfsd/nfs4xdr.c 		if (p == NULL) {
p                3157 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3159 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, sizeof(stateid_t));
p                3160 fs/nfsd/nfs4xdr.c 	if (!p)
p                3162 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(sid->si_generation);
p                3163 fs/nfsd/nfs4xdr.c 	p = xdr_encode_opaque_fixed(p, &sid->si_opaque,
p                3172 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3174 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 8);
p                3175 fs/nfsd/nfs4xdr.c 	if (!p)
p                3177 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(access->ac_supported);
p                3178 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(access->ac_resp_access);
p                3185 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3187 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 8);
p                3188 fs/nfsd/nfs4xdr.c 	if (!p)
p                3190 fs/nfsd/nfs4xdr.c 	p = xdr_encode_opaque_fixed(p, bcts->sessionid.data,
p                3192 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(bcts->dir);
p                3194 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(0);
p                3211 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3213 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE);
p                3214 fs/nfsd/nfs4xdr.c 	if (!p)
p                3216 fs/nfsd/nfs4xdr.c 	p = xdr_encode_opaque_fixed(p, commit->co_verf.data,
p                3225 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3227 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 20);
p                3228 fs/nfsd/nfs4xdr.c 	if (!p)
p                3230 fs/nfsd/nfs4xdr.c 	encode_cinfo(p, &create->cr_cinfo);
p                3251 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3254 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, len + 4);
p                3255 fs/nfsd/nfs4xdr.c 	if (!p)
p                3257 fs/nfsd/nfs4xdr.c 	p = xdr_encode_opaque(p, &fhp->fh_handle.fh_base, len);
p                3269 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3272 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 32 + XDR_LEN(conf->len));
p                3273 fs/nfsd/nfs4xdr.c 	if (!p) {
p                3286 fs/nfsd/nfs4xdr.c 	p = xdr_encode_hyper(p, ld->ld_start);
p                3287 fs/nfsd/nfs4xdr.c 	p = xdr_encode_hyper(p, ld->ld_length);
p                3288 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(ld->ld_type);
p                3290 fs/nfsd/nfs4xdr.c 		p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8);
p                3291 fs/nfsd/nfs4xdr.c 		p = xdr_encode_opaque(p, conf->data, conf->len);
p                3294 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, (u64)0); /* clientid */
p                3295 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(0); /* length of owner name */
p                3336 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3338 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 20);
p                3339 fs/nfsd/nfs4xdr.c 	if (!p)
p                3341 fs/nfsd/nfs4xdr.c 	p = encode_cinfo(p, &link->li_cinfo);
p                3350 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3355 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 24);
p                3356 fs/nfsd/nfs4xdr.c 	if (!p)
p                3358 fs/nfsd/nfs4xdr.c 	p = encode_cinfo(p, &open->op_cinfo);
p                3359 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(open->op_rflags);
p                3366 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 4);
p                3367 fs/nfsd/nfs4xdr.c 	if (!p)
p                3370 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(open->op_delegate_type);
p                3378 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 20);
p                3379 fs/nfsd/nfs4xdr.c 		if (!p)
p                3381 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(open->op_recall);
p                3386 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
p                3387 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(0);
p                3388 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(0);
p                3389 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(0);   /* XXX: is NULL principal ok? */
p                3395 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 32);
p                3396 fs/nfsd/nfs4xdr.c 		if (!p)
p                3398 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(0);
p                3403 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(NFS4_LIMIT_SIZE);
p                3404 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(~(u32)0);
p                3405 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(~(u32)0);
p                3410 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
p                3411 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(0);
p                3412 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(0);
p                3413 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(0);   /* XXX: is NULL principal ok? */
p                3419 fs/nfsd/nfs4xdr.c 			p = xdr_reserve_space(xdr, 8);
p                3420 fs/nfsd/nfs4xdr.c 			if (!p)
p                3422 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(open->op_why_no_deleg);
p                3424 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(0);
p                3427 fs/nfsd/nfs4xdr.c 			p = xdr_reserve_space(xdr, 4);
p                3428 fs/nfsd/nfs4xdr.c 			if (!p)
p                3430 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(open->op_why_no_deleg);
p                3467 fs/nfsd/nfs4xdr.c 	__be32 *p = xdr->p - 2;
p                3470 fs/nfsd/nfs4xdr.c 	if (xdr->end - xdr->p < 1)
p                3487 fs/nfsd/nfs4xdr.c 	*(p++) = htonl(eof);
p                3488 fs/nfsd/nfs4xdr.c 	*(p++) = htonl(maxcount);
p                3496 fs/nfsd/nfs4xdr.c 	buf->tail[0].iov_base = xdr->p;
p                3502 fs/nfsd/nfs4xdr.c 		*(xdr->p++) = 0;
p                3509 fs/nfsd/nfs4xdr.c 	space_left = min_t(int, (void *)xdr->end - (void *)xdr->p,
p                3529 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3536 fs/nfsd/nfs4xdr.c 	thislen = min_t(long, len, ((void *)xdr->end - (void *)xdr->p));
p                3537 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, (thislen+3)&~3);
p                3538 fs/nfsd/nfs4xdr.c 	WARN_ON_ONCE(!p);
p                3539 fs/nfsd/nfs4xdr.c 	resp->rqstp->rq_vec[v].iov_base = p;
p                3546 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, (thislen+3)&~3);
p                3547 fs/nfsd/nfs4xdr.c 		WARN_ON_ONCE(!p);
p                3548 fs/nfsd/nfs4xdr.c 		resp->rqstp->rq_vec[v].iov_base = p;
p                3584 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3590 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 8); /* eof flag and byte count */
p                3591 fs/nfsd/nfs4xdr.c 	if (!p) {
p                3627 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3629 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 4);
p                3630 fs/nfsd/nfs4xdr.c 	if (!p)
p                3634 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, maxcount);
p                3635 fs/nfsd/nfs4xdr.c 	if (!p)
p                3644 fs/nfsd/nfs4xdr.c 						(char *)p, &maxcount);
p                3670 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3672 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE);
p                3673 fs/nfsd/nfs4xdr.c 	if (!p)
p                3677 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(0);
p                3678 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(0);
p                3679 fs/nfsd/nfs4xdr.c 	resp->xdr.buf->head[0].iov_len = ((char *)resp->xdr.p)
p                3738 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 8);
p                3739 fs/nfsd/nfs4xdr.c 	if (!p) {
p                3743 fs/nfsd/nfs4xdr.c 	*p++ = 0;	/* no more entries */
p                3744 fs/nfsd/nfs4xdr.c 	*p++ = htonl(readdir->common.err == nfserr_eof);
p                3756 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3758 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 20);
p                3759 fs/nfsd/nfs4xdr.c 	if (!p)
p                3761 fs/nfsd/nfs4xdr.c 	p = encode_cinfo(p, &remove->rm_cinfo);
p                3769 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3771 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 40);
p                3772 fs/nfsd/nfs4xdr.c 	if (!p)
p                3774 fs/nfsd/nfs4xdr.c 	p = encode_cinfo(p, &rename->rn_sinfo);
p                3775 fs/nfsd/nfs4xdr.c 	p = encode_cinfo(p, &rename->rn_tinfo);
p                3785 fs/nfsd/nfs4xdr.c 	__be32 *p, *flavorsp;
p                3809 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 4);
p                3810 fs/nfsd/nfs4xdr.c 	if (!p)
p                3812 fs/nfsd/nfs4xdr.c 	flavorsp = p++;		/* to be backfilled later */
p                3820 fs/nfsd/nfs4xdr.c 			p = xdr_reserve_space(xdr, 4 + 4 +
p                3822 fs/nfsd/nfs4xdr.c 			if (!p)
p                3824 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(RPC_AUTH_GSS);
p                3825 fs/nfsd/nfs4xdr.c 			p = xdr_encode_opaque(p,  info.oid.data, info.oid.len);
p                3826 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(info.qop);
p                3827 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(info.service);
p                3830 fs/nfsd/nfs4xdr.c 			p = xdr_reserve_space(xdr, 4);
p                3831 fs/nfsd/nfs4xdr.c 			if (!p)
p                3833 fs/nfsd/nfs4xdr.c 			*p++ = cpu_to_be32(pf);
p                3873 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3875 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 16);
p                3876 fs/nfsd/nfs4xdr.c 	if (!p)
p                3879 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(3);
p                3880 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(0);
p                3881 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(0);
p                3882 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(0);
p                3885 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(3);
p                3886 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(setattr->sa_bmval[0]);
p                3887 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(setattr->sa_bmval[1]);
p                3888 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(setattr->sa_bmval[2]);
p                3897 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3900 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 8 + NFS4_VERIFIER_SIZE);
p                3901 fs/nfsd/nfs4xdr.c 		if (!p)
p                3903 fs/nfsd/nfs4xdr.c 		p = xdr_encode_opaque_fixed(p, &scd->se_clientid, 8);
p                3904 fs/nfsd/nfs4xdr.c 		p = xdr_encode_opaque_fixed(p, &scd->se_confirm,
p                3908 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 8);
p                3909 fs/nfsd/nfs4xdr.c 		if (!p)
p                3911 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(0);
p                3912 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(0);
p                3921 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3923 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 16);
p                3924 fs/nfsd/nfs4xdr.c 	if (!p)
p                3926 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(write->wr_bytes_written);
p                3927 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(write->wr_how_written);
p                3928 fs/nfsd/nfs4xdr.c 	p = xdr_encode_opaque_fixed(p, write->wr_verifier.data,
p                3938 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                3950 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr,
p                3955 fs/nfsd/nfs4xdr.c 	if (!p)
p                3958 fs/nfsd/nfs4xdr.c 	p = xdr_encode_opaque_fixed(p, &exid->clientid, 8);
p                3959 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(exid->seqid);
p                3960 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(exid->flags);
p                3962 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(exid->spa_how);
p                3987 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr,
p                3994 fs/nfsd/nfs4xdr.c 	if (!p)
p                3998 fs/nfsd/nfs4xdr.c 	p = xdr_encode_hyper(p, minor_id);      /* Minor id */
p                4000 fs/nfsd/nfs4xdr.c 	p = xdr_encode_opaque(p, major_id, major_id_sz);
p                4003 fs/nfsd/nfs4xdr.c 	p = xdr_encode_opaque(p, server_scope, server_scope_sz);
p                4006 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(0);	/* zero length nfs_impl_id4 array */
p                4015 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                4017 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 24);
p                4018 fs/nfsd/nfs4xdr.c 	if (!p)
p                4020 fs/nfsd/nfs4xdr.c 	p = xdr_encode_opaque_fixed(p, sess->sessionid.data,
p                4022 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(sess->seqid);
p                4023 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(sess->flags);
p                4025 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 28);
p                4026 fs/nfsd/nfs4xdr.c 	if (!p)
p                4028 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(0); /* headerpadsz */
p                4029 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(sess->fore_channel.maxreq_sz);
p                4030 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(sess->fore_channel.maxresp_sz);
p                4031 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(sess->fore_channel.maxresp_cached);
p                4032 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(sess->fore_channel.maxops);
p                4033 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(sess->fore_channel.maxreqs);
p                4034 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(sess->fore_channel.nr_rdma_attrs);
p                4037 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                4038 fs/nfsd/nfs4xdr.c 		if (!p)
p                4040 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(sess->fore_channel.rdma_attrs);
p                4043 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 28);
p                4044 fs/nfsd/nfs4xdr.c 	if (!p)
p                4046 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(0); /* headerpadsz */
p                4047 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(sess->back_channel.maxreq_sz);
p                4048 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(sess->back_channel.maxresp_sz);
p                4049 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(sess->back_channel.maxresp_cached);
p                4050 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(sess->back_channel.maxops);
p                4051 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(sess->back_channel.maxreqs);
p                4052 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(sess->back_channel.nr_rdma_attrs);
p                4055 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                4056 fs/nfsd/nfs4xdr.c 		if (!p)
p                4058 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(sess->back_channel.rdma_attrs);
p                4068 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                4070 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 20);
p                4071 fs/nfsd/nfs4xdr.c 	if (!p)
p                4073 fs/nfsd/nfs4xdr.c 	p = xdr_encode_opaque_fixed(p, seq->sessionid.data,
p                4075 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(seq->seqid);
p                4076 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(seq->slotid);
p                4078 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(seq->maxslots - 1); /* sr_highest_slotid */
p                4079 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(seq->maxslots - 1); /* sr_target_highest_slotid */
p                4080 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(seq->status_flags);
p                4092 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                4094 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 4 + (4 * test_stateid->ts_num_ids));
p                4095 fs/nfsd/nfs4xdr.c 	if (!p)
p                4097 fs/nfsd/nfs4xdr.c 	*p++ = htonl(test_stateid->ts_num_ids);
p                4100 fs/nfsd/nfs4xdr.c 		*p++ = stateid->ts_id_status;
p                4114 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                4116 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 4);
p                4117 fs/nfsd/nfs4xdr.c 	if (!p)
p                4120 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(gdev->gd_layout_type);
p                4139 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4 + 4);
p                4140 fs/nfsd/nfs4xdr.c 		if (!p)
p                4142 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(1);			/* bitmap length */
p                4143 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(gdev->gd_notify_types);
p                4145 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 4);
p                4146 fs/nfsd/nfs4xdr.c 		if (!p)
p                4148 fs/nfsd/nfs4xdr.c 		*p++ = 0;
p                4156 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 4);
p                4157 fs/nfsd/nfs4xdr.c 	if (!p)
p                4159 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(needed_len);
p                4169 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                4171 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 36 + sizeof(stateid_opaque_t));
p                4172 fs/nfsd/nfs4xdr.c 	if (!p)
p                4175 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(1);	/* we always set return-on-close */
p                4176 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(lgp->lg_sid.si_generation);
p                4177 fs/nfsd/nfs4xdr.c 	p = xdr_encode_opaque_fixed(p, &lgp->lg_sid.si_opaque,
p                4180 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(1);	/* we always return a single layout */
p                4181 fs/nfsd/nfs4xdr.c 	p = xdr_encode_hyper(p, lgp->lg_seg.offset);
p                4182 fs/nfsd/nfs4xdr.c 	p = xdr_encode_hyper(p, lgp->lg_seg.length);
p                4183 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(lgp->lg_seg.iomode);
p                4184 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(lgp->lg_layout_type);
p                4195 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                4197 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 4);
p                4198 fs/nfsd/nfs4xdr.c 	if (!p)
p                4200 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(lcp->lc_size_chg);
p                4202 fs/nfsd/nfs4xdr.c 		p = xdr_reserve_space(xdr, 8);
p                4203 fs/nfsd/nfs4xdr.c 		if (!p)
p                4205 fs/nfsd/nfs4xdr.c 		p = xdr_encode_hyper(p, lcp->lc_newsize);
p                4216 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                4218 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 4);
p                4219 fs/nfsd/nfs4xdr.c 	if (!p)
p                4221 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(lrp->lrs_present);
p                4232 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                4233 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(&resp->xdr, 4);
p                4234 fs/nfsd/nfs4xdr.c 	if (!p)
p                4238 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(0);
p                4241 fs/nfsd/nfs4xdr.c 		*p++ = cpu_to_be32(1);
p                4246 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(&resp->xdr, 8 + 4 + NFS4_VERIFIER_SIZE);
p                4247 fs/nfsd/nfs4xdr.c 	if (!p)
p                4250 fs/nfsd/nfs4xdr.c 	p = xdr_encode_hyper(p, write->wr_bytes_written);
p                4251 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(write->wr_stable_how);
p                4252 fs/nfsd/nfs4xdr.c 	p = xdr_encode_opaque_fixed(p, write->wr_verifier.data,
p                4261 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                4268 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(&resp->xdr, 4 + 4);
p                4269 fs/nfsd/nfs4xdr.c 	*p++ = xdr_one; /* cr_consecutive */
p                4270 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(copy->cp_synchronous);
p                4279 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                4281 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 8 + 4);
p                4282 fs/nfsd/nfs4xdr.c 	if (!p)
p                4284 fs/nfsd/nfs4xdr.c 	p = xdr_encode_hyper(p, os->count);
p                4285 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(0);
p                4294 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                4296 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(&resp->xdr, 4 + 8);
p                4297 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(seek->seek_eof);
p                4298 fs/nfsd/nfs4xdr.c 	p = xdr_encode_hyper(p, seek->seek_pos);
p                4304 fs/nfsd/nfs4xdr.c nfsd4_encode_noop(struct nfsd4_compoundres *resp, __be32 nfserr, void *p)
p                4438 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                4440 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 8);
p                4441 fs/nfsd/nfs4xdr.c 	if (!p) {
p                4445 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(op->opnum);
p                4511 fs/nfsd/nfs4xdr.c 	__be32 *p;
p                4516 fs/nfsd/nfs4xdr.c 	p = xdr_reserve_space(xdr, 8 + rp->rp_buflen);
p                4517 fs/nfsd/nfs4xdr.c 	if (!p) {
p                4521 fs/nfsd/nfs4xdr.c 	*p++ = cpu_to_be32(op->opnum);
p                4522 fs/nfsd/nfs4xdr.c 	*p++ = rp->rp_status;  /* already xdr'ed */
p                4524 fs/nfsd/nfs4xdr.c 	p = xdr_encode_opaque_fixed(p, rp->rp_buf, rp->rp_buflen);
p                4528 fs/nfsd/nfs4xdr.c nfs4svc_encode_voidres(struct svc_rqst *rqstp, __be32 *p)
p                4530 fs/nfsd/nfs4xdr.c         return xdr_ressize_check(rqstp, p);
p                4551 fs/nfsd/nfs4xdr.c nfs4svc_decode_compoundargs(struct svc_rqst *rqstp, __be32 *p)
p                4561 fs/nfsd/nfs4xdr.c 	args->p = p;
p                4575 fs/nfsd/nfs4xdr.c nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p)
p                4588 fs/nfsd/nfs4xdr.c 	p = resp->tagp;
p                4589 fs/nfsd/nfs4xdr.c 	*p++ = htonl(resp->taglen);
p                4590 fs/nfsd/nfs4xdr.c 	memcpy(p, resp->tag, resp->taglen);
p                4591 fs/nfsd/nfs4xdr.c 	p += XDR_QUADLEN(resp->taglen);
p                4592 fs/nfsd/nfs4xdr.c 	*p++ = htonl(resp->opcnt);
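The fs/nfsd/nfs4xdr.c entries above all reference the same pointer-bump XDR idiom: 32-bit fields are pulled with be32_to_cpup(p++), 64-bit fields with xdr_decode_hyper(), which returns the advanced pointer. The following is a minimal userspace sketch of that idiom, not kernel code; the helper functions and demo_decode_commit()/struct demo_commit are hypothetical stand-ins modeled on the nfsd4_decode_commit() entries listed above.

```c
/*
 * Illustrative userspace sketch (assumption: not the kernel's
 * implementation) of the pointer-bump XDR decode pattern used by the
 * fs/nfsd/nfs4xdr.c lines indexed above.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* ntohl()/htonl() */

typedef uint32_t be32;

/* Read one big-endian 32-bit word; the caller bumps the pointer (p++). */
static uint32_t be32_to_cpup(const be32 *p)
{
	return ntohl(*p);
}

/* Decode a 64-bit value from two big-endian words; return the advanced pointer. */
static const be32 *xdr_decode_hyper(const be32 *p, uint64_t *val)
{
	*val = ((uint64_t)ntohl(p[0]) << 32) | ntohl(p[1]);
	return p + 2;
}

struct demo_commit {            /* hypothetical analogue of a COMMIT argument */
	uint64_t co_offset;
	uint32_t co_count;
};

/* Mirrors the shape of the decode_commit entries: a hyper offset, then a count. */
static const be32 *demo_decode_commit(const be32 *p, struct demo_commit *c)
{
	p = xdr_decode_hyper(p, &c->co_offset);
	c->co_count = be32_to_cpup(p++);
	return p;
}

int main(void)
{
	/* 12 bytes of wire data: offset = 0x1122334455667788, count = 42 */
	be32 wire[3] = { htonl(0x11223344), htonl(0x55667788), htonl(42) };
	struct demo_commit c;

	demo_decode_commit(wire, &c);
	printf("offset=0x%llx count=%u\n",
	       (unsigned long long)c.co_offset, c.co_count);
	return 0;
}
```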
p                 298 fs/nfsd/nfscache.c 	const unsigned char *p = buf->head[0].iov_base;
p                 304 fs/nfsd/nfscache.c 	csum = csum_partial(p, len, 0);
p                 311 fs/nfsd/nfscache.c 		p = page_address(buf->pages[idx]) + base;
p                 313 fs/nfsd/nfscache.c 		csum = csum_partial(p, len, csum);
p                 342 fs/nfsd/nfscache.c 	struct rb_node		**p = &b->rb_head.rb_node,
p                 347 fs/nfsd/nfscache.c 	while (*p != NULL) {
p                 349 fs/nfsd/nfscache.c 		parent = *p;
p                 354 fs/nfsd/nfscache.c 			p = &parent->rb_left;
p                 356 fs/nfsd/nfscache.c 			p = &parent->rb_right;
p                 362 fs/nfsd/nfscache.c 	rb_link_node(&key->c_node, parent, p);
p                  29 fs/nfsd/nfsxdr.c decode_fh(__be32 *p, struct svc_fh *fhp)
p                  32 fs/nfsd/nfsxdr.c 	memcpy(&fhp->fh_handle.fh_base, p, NFS_FHSIZE);
p                  37 fs/nfsd/nfsxdr.c 	return p + (NFS_FHSIZE >> 2);
p                  41 fs/nfsd/nfsxdr.c __be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp)
p                  43 fs/nfsd/nfsxdr.c 	return decode_fh(p, fhp);
p                  47 fs/nfsd/nfsxdr.c encode_fh(__be32 *p, struct svc_fh *fhp)
p                  49 fs/nfsd/nfsxdr.c 	memcpy(p, &fhp->fh_handle.fh_base, NFS_FHSIZE);
p                  50 fs/nfsd/nfsxdr.c 	return p + (NFS_FHSIZE>> 2);
p                  58 fs/nfsd/nfsxdr.c decode_filename(__be32 *p, char **namp, unsigned int *lenp)
p                  63 fs/nfsd/nfsxdr.c 	if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS_MAXNAMLEN)) != NULL) {
p                  70 fs/nfsd/nfsxdr.c 	return p;
p                  74 fs/nfsd/nfsxdr.c decode_sattr(__be32 *p, struct iattr *iap, struct user_namespace *userns)
p                  84 fs/nfsd/nfsxdr.c 	if ((tmp = ntohl(*p++)) != (u32)-1 && tmp != 0xffff) {
p                  88 fs/nfsd/nfsxdr.c 	if ((tmp = ntohl(*p++)) != (u32)-1) {
p                  93 fs/nfsd/nfsxdr.c 	if ((tmp = ntohl(*p++)) != (u32)-1) {
p                  98 fs/nfsd/nfsxdr.c 	if ((tmp = ntohl(*p++)) != (u32)-1) {
p                 102 fs/nfsd/nfsxdr.c 	tmp  = ntohl(*p++); tmp1 = ntohl(*p++);
p                 108 fs/nfsd/nfsxdr.c 	tmp  = ntohl(*p++); tmp1 = ntohl(*p++);
p                 125 fs/nfsd/nfsxdr.c 	return p;
p                 129 fs/nfsd/nfsxdr.c encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
p                 140 fs/nfsd/nfsxdr.c 	*p++ = htonl(nfs_ftypes[type >> 12]);
p                 141 fs/nfsd/nfsxdr.c 	*p++ = htonl((u32) stat->mode);
p                 142 fs/nfsd/nfsxdr.c 	*p++ = htonl((u32) stat->nlink);
p                 143 fs/nfsd/nfsxdr.c 	*p++ = htonl((u32) from_kuid_munged(userns, stat->uid));
p                 144 fs/nfsd/nfsxdr.c 	*p++ = htonl((u32) from_kgid_munged(userns, stat->gid));
p                 147 fs/nfsd/nfsxdr.c 		*p++ = htonl(NFS_MAXPATHLEN);
p                 149 fs/nfsd/nfsxdr.c 		*p++ = htonl((u32) stat->size);
p                 151 fs/nfsd/nfsxdr.c 	*p++ = htonl((u32) stat->blksize);
p                 153 fs/nfsd/nfsxdr.c 		*p++ = htonl(new_encode_dev(stat->rdev));
p                 155 fs/nfsd/nfsxdr.c 		*p++ = htonl(0xffffffff);
p                 156 fs/nfsd/nfsxdr.c 	*p++ = htonl((u32) stat->blocks);
p                 160 fs/nfsd/nfsxdr.c 		*p++ = htonl(new_encode_dev(stat->dev));
p                 163 fs/nfsd/nfsxdr.c 		*p++ = htonl((u32) fhp->fh_export->ex_fsid);
p                 170 fs/nfsd/nfsxdr.c 		*p++ = htonl(f);
p                 173 fs/nfsd/nfsxdr.c 	*p++ = htonl((u32) stat->ino);
p                 174 fs/nfsd/nfsxdr.c 	*p++ = htonl((u32) stat->atime.tv_sec);
p                 175 fs/nfsd/nfsxdr.c 	*p++ = htonl(stat->atime.tv_nsec ? stat->atime.tv_nsec / 1000 : 0);
p                 178 fs/nfsd/nfsxdr.c 	*p++ = htonl((u32) time.tv_sec);
p                 179 fs/nfsd/nfsxdr.c 	*p++ = htonl(time.tv_nsec ? time.tv_nsec / 1000 : 0); 
p                 180 fs/nfsd/nfsxdr.c 	*p++ = htonl((u32) stat->ctime.tv_sec);
p                 181 fs/nfsd/nfsxdr.c 	*p++ = htonl(stat->ctime.tv_nsec ? stat->ctime.tv_nsec / 1000 : 0);
p                 183 fs/nfsd/nfsxdr.c 	return p;
p                 187 fs/nfsd/nfsxdr.c __be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat)
p                 189 fs/nfsd/nfsxdr.c 	return encode_fattr(rqstp, p, fhp, stat);
p                 196 fs/nfsd/nfsxdr.c nfssvc_decode_void(struct svc_rqst *rqstp, __be32 *p)
p                 198 fs/nfsd/nfsxdr.c 	return xdr_argsize_check(rqstp, p);
p                 202 fs/nfsd/nfsxdr.c nfssvc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p)
p                 206 fs/nfsd/nfsxdr.c 	p = decode_fh(p, &args->fh);
p                 207 fs/nfsd/nfsxdr.c 	if (!p)
p                 209 fs/nfsd/nfsxdr.c 	return xdr_argsize_check(rqstp, p);
p                 213 fs/nfsd/nfsxdr.c nfssvc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p)
p                 217 fs/nfsd/nfsxdr.c 	p = decode_fh(p, &args->fh);
p                 218 fs/nfsd/nfsxdr.c 	if (!p)
p                 220 fs/nfsd/nfsxdr.c 	p = decode_sattr(p, &args->attrs, nfsd_user_namespace(rqstp));
p                 222 fs/nfsd/nfsxdr.c 	return xdr_argsize_check(rqstp, p);
p                 226 fs/nfsd/nfsxdr.c nfssvc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p)
p                 230 fs/nfsd/nfsxdr.c 	if (!(p = decode_fh(p, &args->fh))
p                 231 fs/nfsd/nfsxdr.c 	 || !(p = decode_filename(p, &args->name, &args->len)))
p                 234 fs/nfsd/nfsxdr.c 	return xdr_argsize_check(rqstp, p);
p                 238 fs/nfsd/nfsxdr.c nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p)
p                 243 fs/nfsd/nfsxdr.c 	p = decode_fh(p, &args->fh);
p                 244 fs/nfsd/nfsxdr.c 	if (!p)
p                 247 fs/nfsd/nfsxdr.c 	args->offset    = ntohl(*p++);
p                 248 fs/nfsd/nfsxdr.c 	len = args->count     = ntohl(*p++);
p                 249 fs/nfsd/nfsxdr.c 	p++; /* totalcount - unused */
p                 258 fs/nfsd/nfsxdr.c 		struct page *p = *(rqstp->rq_next_page++);
p                 260 fs/nfsd/nfsxdr.c 		rqstp->rq_vec[v].iov_base = page_address(p);
p                 266 fs/nfsd/nfsxdr.c 	return xdr_argsize_check(rqstp, p);
p                 270 fs/nfsd/nfsxdr.c nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p)
p                 276 fs/nfsd/nfsxdr.c 	p = decode_fh(p, &args->fh);
p                 277 fs/nfsd/nfsxdr.c 	if (!p)
p                 280 fs/nfsd/nfsxdr.c 	p++;				/* beginoffset */
p                 281 fs/nfsd/nfsxdr.c 	args->offset = ntohl(*p++);	/* offset */
p                 282 fs/nfsd/nfsxdr.c 	p++;				/* totalcount */
p                 283 fs/nfsd/nfsxdr.c 	len = args->len = ntohl(*p++);
p                 294 fs/nfsd/nfsxdr.c 	hdr = (void*)p - head->iov_base;
p                 310 fs/nfsd/nfsxdr.c 	args->first.iov_base = (void *)p;
p                 316 fs/nfsd/nfsxdr.c nfssvc_decode_createargs(struct svc_rqst *rqstp, __be32 *p)
p                 320 fs/nfsd/nfsxdr.c 	if (   !(p = decode_fh(p, &args->fh))
p                 321 fs/nfsd/nfsxdr.c 	    || !(p = decode_filename(p, &args->name, &args->len)))
p                 323 fs/nfsd/nfsxdr.c 	p = decode_sattr(p, &args->attrs, nfsd_user_namespace(rqstp));
p                 325 fs/nfsd/nfsxdr.c 	return xdr_argsize_check(rqstp, p);
p                 329 fs/nfsd/nfsxdr.c nfssvc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p)
p                 333 fs/nfsd/nfsxdr.c 	if (!(p = decode_fh(p, &args->ffh))
p                 334 fs/nfsd/nfsxdr.c 	 || !(p = decode_filename(p, &args->fname, &args->flen))
p                 335 fs/nfsd/nfsxdr.c 	 || !(p = decode_fh(p, &args->tfh))
p                 336 fs/nfsd/nfsxdr.c 	 || !(p = decode_filename(p, &args->tname, &args->tlen)))
p                 339 fs/nfsd/nfsxdr.c 	return xdr_argsize_check(rqstp, p);
p                 343 fs/nfsd/nfsxdr.c nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p)
p                 347 fs/nfsd/nfsxdr.c 	p = decode_fh(p, &args->fh);
p                 348 fs/nfsd/nfsxdr.c 	if (!p)
p                 352 fs/nfsd/nfsxdr.c 	return xdr_argsize_check(rqstp, p);
p                 356 fs/nfsd/nfsxdr.c nfssvc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p)
p                 360 fs/nfsd/nfsxdr.c 	if (!(p = decode_fh(p, &args->ffh))
p                 361 fs/nfsd/nfsxdr.c 	 || !(p = decode_fh(p, &args->tfh))
p                 362 fs/nfsd/nfsxdr.c 	 || !(p = decode_filename(p, &args->tname, &args->tlen)))
p                 365 fs/nfsd/nfsxdr.c 	return xdr_argsize_check(rqstp, p);
p                 369 fs/nfsd/nfsxdr.c nfssvc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p)
p                 372 fs/nfsd/nfsxdr.c 	char *base = (char *)p;
p                 375 fs/nfsd/nfsxdr.c 	if (   !(p = decode_fh(p, &args->ffh))
p                 376 fs/nfsd/nfsxdr.c 	    || !(p = decode_filename(p, &args->fname, &args->flen)))
p                 379 fs/nfsd/nfsxdr.c 	args->tlen = ntohl(*p++);
p                 383 fs/nfsd/nfsxdr.c 	args->first.iov_base = p;
p                 385 fs/nfsd/nfsxdr.c 	args->first.iov_len -= (char *)p - base;
p                 395 fs/nfsd/nfsxdr.c 		p = rqstp->rq_arg.tail[0].iov_base;
p                 400 fs/nfsd/nfsxdr.c 		p += xdrlen;
p                 402 fs/nfsd/nfsxdr.c 	decode_sattr(p, &args->attrs, nfsd_user_namespace(rqstp));
p                 408 fs/nfsd/nfsxdr.c nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p)
p                 412 fs/nfsd/nfsxdr.c 	p = decode_fh(p, &args->fh);
p                 413 fs/nfsd/nfsxdr.c 	if (!p)
p                 415 fs/nfsd/nfsxdr.c 	args->cookie = ntohl(*p++);
p                 416 fs/nfsd/nfsxdr.c 	args->count  = ntohl(*p++);
p                 420 fs/nfsd/nfsxdr.c 	return xdr_argsize_check(rqstp, p);
p                 427 fs/nfsd/nfsxdr.c nfssvc_encode_void(struct svc_rqst *rqstp, __be32 *p)
p                 429 fs/nfsd/nfsxdr.c 	return xdr_ressize_check(rqstp, p);
p                 433 fs/nfsd/nfsxdr.c nfssvc_encode_attrstat(struct svc_rqst *rqstp, __be32 *p)
p                 437 fs/nfsd/nfsxdr.c 	p = encode_fattr(rqstp, p, &resp->fh, &resp->stat);
p                 438 fs/nfsd/nfsxdr.c 	return xdr_ressize_check(rqstp, p);
p                 442 fs/nfsd/nfsxdr.c nfssvc_encode_diropres(struct svc_rqst *rqstp, __be32 *p)
p                 446 fs/nfsd/nfsxdr.c 	p = encode_fh(p, &resp->fh);
p                 447 fs/nfsd/nfsxdr.c 	p = encode_fattr(rqstp, p, &resp->fh, &resp->stat);
p                 448 fs/nfsd/nfsxdr.c 	return xdr_ressize_check(rqstp, p);
p                 452 fs/nfsd/nfsxdr.c nfssvc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p)
p                 456 fs/nfsd/nfsxdr.c 	*p++ = htonl(resp->len);
p                 457 fs/nfsd/nfsxdr.c 	xdr_ressize_check(rqstp, p);
p                 461 fs/nfsd/nfsxdr.c 		rqstp->rq_res.tail[0].iov_base = p;
p                 462 fs/nfsd/nfsxdr.c 		*p = 0;
p                 469 fs/nfsd/nfsxdr.c nfssvc_encode_readres(struct svc_rqst *rqstp, __be32 *p)
p                 473 fs/nfsd/nfsxdr.c 	p = encode_fattr(rqstp, p, &resp->fh, &resp->stat);
p                 474 fs/nfsd/nfsxdr.c 	*p++ = htonl(resp->count);
p                 475 fs/nfsd/nfsxdr.c 	xdr_ressize_check(rqstp, p);
p                 481 fs/nfsd/nfsxdr.c 		rqstp->rq_res.tail[0].iov_base = p;
p                 482 fs/nfsd/nfsxdr.c 		*p = 0;
p                 489 fs/nfsd/nfsxdr.c nfssvc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p)
p                 493 fs/nfsd/nfsxdr.c 	xdr_ressize_check(rqstp, p);
p                 494 fs/nfsd/nfsxdr.c 	p = resp->buffer;
p                 495 fs/nfsd/nfsxdr.c 	*p++ = 0;			/* no more entries */
p                 496 fs/nfsd/nfsxdr.c 	*p++ = htonl((resp->common.err == nfserr_eof));
p                 497 fs/nfsd/nfsxdr.c 	rqstp->rq_res.page_len = (((unsigned long)p-1) & ~PAGE_MASK)+1;
p                 503 fs/nfsd/nfsxdr.c nfssvc_encode_statfsres(struct svc_rqst *rqstp, __be32 *p)
p                 508 fs/nfsd/nfsxdr.c 	*p++ = htonl(NFSSVC_MAXBLKSIZE_V2);	/* max transfer size */
p                 509 fs/nfsd/nfsxdr.c 	*p++ = htonl(stat->f_bsize);
p                 510 fs/nfsd/nfsxdr.c 	*p++ = htonl(stat->f_blocks);
p                 511 fs/nfsd/nfsxdr.c 	*p++ = htonl(stat->f_bfree);
p                 512 fs/nfsd/nfsxdr.c 	*p++ = htonl(stat->f_bavail);
p                 513 fs/nfsd/nfsxdr.c 	return xdr_ressize_check(rqstp, p);
p                 522 fs/nfsd/nfsxdr.c 	__be32	*p = cd->buffer;
p                 549 fs/nfsd/nfsxdr.c 	*p++ = xdr_one;				/* mark entry present */
p                 550 fs/nfsd/nfsxdr.c 	*p++ = htonl((u32) ino);		/* file id */
p                 551 fs/nfsd/nfsxdr.c 	p    = xdr_encode_array(p, name, namlen);/* name length & name */
p                 552 fs/nfsd/nfsxdr.c 	cd->offset = p;			/* remember pointer */
p                 553 fs/nfsd/nfsxdr.c 	*p++ = htonl(~0U);		/* offset of next entry */
p                 556 fs/nfsd/nfsxdr.c 	cd->buffer = p;
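
The fs/nfsd/nfsxdr.c entries above all follow one decoding idiom: every helper takes the current XDR word pointer, consumes its field, and returns the advanced pointer (or NULL on malformed input), so whole argument structs are decoded by chaining helpers inside a single if() before the final xdr_argsize_check(). Below is a minimal userspace sketch of that cursor style; the field sizes and helper names are made up for illustration and are not the real nfsd helpers.

#include <arpa/inet.h>	/* ntohl(), htonl() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_fh { uint32_t raw[8]; };

/* Fixed-size field: copy eight XDR words and advance the cursor. */
static uint32_t *decode_fh(uint32_t *p, struct demo_fh *fh)
{
	memcpy(fh->raw, p, sizeof(fh->raw));
	return p + 8;
}

/* Variable-size field: a length word followed by word-padded octets. */
static uint32_t *decode_filename(uint32_t *p, char **name, unsigned int *len)
{
	*len = ntohl(*p++);
	if (*len == 0 || *len > 255)
		return NULL;			/* reject bogus lengths */
	*name = (char *)p;
	return p + ((*len + 3) >> 2);		/* skip name, word aligned */
}

int main(void)
{
	uint32_t buf[16] = { 0 };
	struct demo_fh fh;
	char *name;
	unsigned int len;
	uint32_t *p = buf;

	buf[8] = htonl(4);			/* filename length */
	memcpy(&buf[9], "demo", 4);

	/* Same chaining style as nfssvc_decode_diropargs() above. */
	if (!(p = decode_fh(p, &fh)) ||
	    !(p = decode_filename(p, &name, &len)))
		return 1;

	printf("decoded %u-byte name: %.*s\n", len, (int)len, name);
	return 0;
}

The chained form short-circuits on the first helper that rejects its field, which is exactly how nfssvc_decode_diropargs() and nfssvc_decode_renameargs() above bail out.
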
p                 143 fs/nfsd/vfs.h  	struct path p = {.mnt = fh->fh_export->ex_path.mnt,
p                 145 fs/nfsd/vfs.h  	return nfserrno(vfs_getattr(&p, stat, STATX_BASIC_STATS,
p                 162 fs/nfsd/xdr.h  __be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat);
p                 163 fs/nfsd/xdr.h  __be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp);
p                 316 fs/nfsd/xdr3.h __be32 *nfs3svc_encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p,
p                 318 fs/nfsd/xdr3.h __be32 *nfs3svc_decode_fh(__be32 *p, struct svc_fh *fhp);
p                 362 fs/nfsd/xdr4.h 	__be32 *p;
p                 647 fs/nfsd/xdr4.h 	__be32 *			p;
p                 738 fs/nfsd/xdr4.h __be32 nfsd4_encode_fattr_to_buf(__be32 **p, int words,
p                 541 fs/nilfs2/btree.c 	struct nilfs_btree_readahead_info p, *ra;
p                 561 fs/nilfs2/btree.c 			p.node = nilfs_btree_get_node(btree, path, level + 1,
p                 562 fs/nilfs2/btree.c 						      &p.ncmax);
p                 563 fs/nilfs2/btree.c 			p.index = index;
p                 564 fs/nilfs2/btree.c 			p.max_ra_blocks = 7;
p                 565 fs/nilfs2/btree.c 			ra = &p;
p                 702 fs/nilfs2/btree.c 	struct nilfs_btree_readahead_info p;
p                 747 fs/nilfs2/btree.c 		p.node = nilfs_btree_get_node(btree, path, level + 1, &p.ncmax);
p                 748 fs/nilfs2/btree.c 		p.index = path[level + 1].bp_index + 1;
p                 749 fs/nilfs2/btree.c 		p.max_ra_blocks = 7;
p                 750 fs/nilfs2/btree.c 		if (p.index >= nilfs_btree_node_get_nchildren(p.node) ||
p                 751 fs/nilfs2/btree.c 		    nilfs_btree_node_get_key(p.node, p.index) != key + cnt)
p                 753 fs/nilfs2/btree.c 		ptr2 = nilfs_btree_node_get_ptr(p.node, p.index, p.ncmax);
p                 754 fs/nilfs2/btree.c 		path[level + 1].bp_index = p.index;
p                 760 fs/nilfs2/btree.c 					      &p);
p                 124 fs/nilfs2/dir.c 	struct nilfs_dir_entry *p;
p                 135 fs/nilfs2/dir.c 		p = (struct nilfs_dir_entry *)(kaddr + offs);
p                 136 fs/nilfs2/dir.c 		rec_len = nilfs_rec_len_from_disk(p->rec_len);
p                 142 fs/nilfs2/dir.c 		if (rec_len < NILFS_DIR_REC_LEN(p->name_len))
p                 175 fs/nilfs2/dir.c 		    (unsigned long)le64_to_cpu(p->inode),
p                 176 fs/nilfs2/dir.c 		    rec_len, p->name_len);
p                 179 fs/nilfs2/dir.c 	p = (struct nilfs_dir_entry *)(kaddr + offs);
p                 183 fs/nilfs2/dir.c 		    (unsigned long)le64_to_cpu(p->inode));
p                 226 fs/nilfs2/dir.c static struct nilfs_dir_entry *nilfs_next_entry(struct nilfs_dir_entry *p)
p                 228 fs/nilfs2/dir.c 	return (struct nilfs_dir_entry *)((char *)p +
p                 229 fs/nilfs2/dir.c 					  nilfs_rec_len_from_disk(p->rec_len));
p                 388 fs/nilfs2/dir.c struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p)
p                 396 fs/nilfs2/dir.c 		*p = page;
p                 322 fs/nilfs2/page.c 			struct page *p;
p                 326 fs/nilfs2/page.c 			p = __xa_erase(&smap->i_pages, offset);
p                 327 fs/nilfs2/page.c 			WARN_ON(page != p);
p                 332 fs/nilfs2/page.c 			p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);
p                 333 fs/nilfs2/page.c 			if (unlikely(p)) {
p                 399 fs/nilfs2/segment.c 	void *p;
p                 407 fs/nilfs2/segment.c 	p = ssp->bh->b_data + ssp->offset;
p                 409 fs/nilfs2/segment.c 	return p;
p                 717 fs/nilfs2/super.c 	char *p;
p                 723 fs/nilfs2/super.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 726 fs/nilfs2/super.c 		if (!*p)
p                 729 fs/nilfs2/super.c 		token = match_token(p, tokens, args);
p                 760 fs/nilfs2/super.c 					  p);
p                 775 fs/nilfs2/super.c 				  "unrecognized mount option \"%s\"", p);
p                1230 fs/nilfs2/super.c 	char *p, *options = data;
p                1236 fs/nilfs2/super.c 		p = strsep(&options, ",");
p                1237 fs/nilfs2/super.c 		if (p != NULL && *p) {
p                1238 fs/nilfs2/super.c 			token = match_token(p, tokens, args);
p                1240 fs/nilfs2/super.c 				ret = nilfs_parse_snapshot_option(p, &args[0],
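
nilfs_parse_options() above (and the omfs, orangefs, ocfs2 and ntfs variants later in this listing) share the standard strsep() mount-option loop: split the option string on commas, skip empty items, and dispatch on the matched token, complaining about anything unrecognized. A userspace sketch of that loop follows, using strcmp() in place of the kernel's match_token() and an invented two-entry option table.

#define _DEFAULT_SOURCE			/* for strsep() */
#include <stdio.h>
#include <string.h>

enum { Opt_ro, Opt_discard, Opt_err };

static int match_opt(const char *p)
{
	if (!strcmp(p, "ro"))
		return Opt_ro;
	if (!strcmp(p, "discard"))
		return Opt_discard;
	return Opt_err;
}

static int parse_options(char *options)
{
	char *p;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;		/* tolerate ",," and a trailing ',' */
		switch (match_opt(p)) {
		case Opt_ro:
			puts("read-only requested");
			break;
		case Opt_discard:
			puts("discard enabled");
			break;
		default:
			fprintf(stderr, "unrecognized mount option \"%s\"\n", p);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	char opts[] = "ro,,discard";		/* strsep() modifies its argument */

	return parse_options(opts) ? 1 : 0;
}
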
p                 744 fs/nilfs2/the_nilfs.c 	struct rb_node **p, *parent;
p                 758 fs/nilfs2/the_nilfs.c 	p = &nilfs->ns_cptree.rb_node;
p                 761 fs/nilfs2/the_nilfs.c 	while (*p) {
p                 762 fs/nilfs2/the_nilfs.c 		parent = *p;
p                 766 fs/nilfs2/the_nilfs.c 			p = &(*p)->rb_left;
p                 768 fs/nilfs2/the_nilfs.c 			p = &(*p)->rb_right;
p                 784 fs/nilfs2/the_nilfs.c 	rb_link_node(&new->rb_node, parent, p);
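
The rb-tree block above from fs/nilfs2/the_nilfs.c (and the ocfs2 nodemanager/tcp/refcounttree/reservations/uptodate and proc_sysctl blocks below) all use the same insertion idiom: keep a pointer-to-link **p plus the last visited parent, descend by key, then splice the new node in at *p with rb_link_node() and rebalance with rb_insert_color(). Here is a plain-C sketch of that walk on an unbalanced binary tree; the rebalancing step is intentionally omitted and the type and key are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct node {
	long key;
	struct node *left, *right;
};

static struct node *tree_insert(struct node **root, long key)
{
	struct node **p = root, *parent = NULL, *new_node;

	while (*p) {
		parent = *p;			/* remember the insertion parent */
		if (key < parent->key)
			p = &parent->left;
		else if (key > parent->key)
			p = &parent->right;
		else
			return parent;		/* key already present */
	}

	new_node = calloc(1, sizeof(*new_node));
	if (!new_node)
		return NULL;
	new_node->key = key;
	*p = new_node;				/* the rb_link_node() step */
	(void)parent;				/* rb_insert_color() would rebalance from here */
	return new_node;
}

int main(void)
{
	struct node *root = NULL;
	long keys[] = { 42, 7, 99, 7 };

	for (unsigned int i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
		tree_insert(&root, keys[i]);
	printf("root key: %ld\n", root->key);
	return 0;
}
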
p                 497 fs/notify/fanotify/fanotify_user.c 	void __user *p;
p                 503 fs/notify/fanotify/fanotify_user.c 	p = (void __user *) arg;
p                 511 fs/notify/fanotify/fanotify_user.c 		ret = put_user(send_len, (int __user *) p);
p                 153 fs/notify/inotify/inotify_fsnotify.c static int idr_callback(int id, void *p, void *data)
p                 163 fs/notify/inotify/inotify_fsnotify.c 	fsn_mark = p;
p                 167 fs/notify/inotify/inotify_fsnotify.c 		"idr.  Probably leaking memory\n", id, p, data);
p                 281 fs/notify/inotify/inotify_user.c 	void __user *p;
p                 286 fs/notify/inotify/inotify_user.c 	p = (void __user *) arg;
p                 299 fs/notify/inotify/inotify_user.c 		ret = put_user(send_len, (int __user *) p);
p                 446 fs/ntfs/inode.c 		u8 *p, *p2;
p                 453 fs/ntfs/inode.c 		p = (u8*)attr + le32_to_cpu(attr->length);
p                 454 fs/ntfs/inode.c 		if (p < (u8*)ctx->mrec || (u8*)p > (u8*)ctx->mrec +
p                 480 fs/ntfs/inode.c 		if (p2 < (u8*)attr || p2 > p)
p                 126 fs/ntfs/layout.h static inline bool __ntfs_is_magicp(le32 *p, NTFS_RECORD_TYPE r)
p                 128 fs/ntfs/layout.h 	return (*p == r);
p                 130 fs/ntfs/layout.h #define ntfs_is_magicp(p, m)	__ntfs_is_magicp(p, magic_##m)
p                 136 fs/ntfs/layout.h #define ntfs_is_file_recordp(p)		( ntfs_is_magicp(p, FILE) )
p                 138 fs/ntfs/layout.h #define ntfs_is_mft_recordp(p)		( ntfs_is_file_recordp(p) )
p                 140 fs/ntfs/layout.h #define ntfs_is_indx_recordp(p)		( ntfs_is_magicp(p, INDX) )
p                 142 fs/ntfs/layout.h #define ntfs_is_hole_recordp(p)		( ntfs_is_magicp(p, HOLE) )
p                 145 fs/ntfs/layout.h #define ntfs_is_rstr_recordp(p)		( ntfs_is_magicp(p, RSTR) )
p                 147 fs/ntfs/layout.h #define ntfs_is_rcrd_recordp(p)		( ntfs_is_magicp(p, RCRD) )
p                 150 fs/ntfs/layout.h #define ntfs_is_chkd_recordp(p)		( ntfs_is_magicp(p, CHKD) )
p                 153 fs/ntfs/layout.h #define ntfs_is_baad_recordp(p)		( ntfs_is_magicp(p, BAAD) )
p                 156 fs/ntfs/layout.h #define ntfs_is_empty_recordp(p)	( ntfs_is_magicp(p, empty) )
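
The fs/ntfs/layout.h entries are thin record-classification wrappers: __ntfs_is_magicp() compares the first 32-bit word of an on-disk record against a magic constant, and the ntfs_is_*_recordp() macros name the individual magics. Below is a standalone sketch of that check, building the expected magic from its four ASCII bytes so the comparison stays byte-order neutral; the record contents are fabricated.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read a 4-character tag into a raw 32-bit word, as it sits on disk. */
static uint32_t magic(const char *tag)
{
	uint32_t m;

	memcpy(&m, tag, sizeof(m));
	return m;
}

/* Mirrors __ntfs_is_magicp(): a single word compare, no byte swapping. */
static bool is_magic(const uint32_t *p, uint32_t m)
{
	return *p == m;
}

int main(void)
{
	unsigned char record[8];
	uint32_t head;

	memcpy(record, "FILE0000", 8);		/* fabricated record header */
	memcpy(&head, record, sizeof(head));

	printf("file record:  %s\n", is_magic(&head, magic("FILE")) ? "yes" : "no");
	printf("index record: %s\n", is_magic(&head, magic("INDX")) ? "yes" : "no");
	return 0;
}
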
p                  89 fs/ntfs/super.c 	char *p, *v, *ov;
p                 101 fs/ntfs/super.c 	if (!strcmp(p, option)) {					\
p                 111 fs/ntfs/super.c 	if (!strcmp(p, option)) {					\
p                 119 fs/ntfs/super.c 	if (!strcmp(p, option)) {					\
p                 131 fs/ntfs/super.c 	if (!strcmp(p, option)) {					\
p                 143 fs/ntfs/super.c 	if (!strcmp(p, option)) {					\
p                 151 fs/ntfs/super.c 	if (!strcmp(p, option)) {					\
p                 158 fs/ntfs/super.c 	if (!strcmp(p, option)) {					\
p                 176 fs/ntfs/super.c 	while ((p = strsep(&opt, ","))) {
p                 177 fs/ntfs/super.c 		if ((v = strchr(p, '=')))
p                 191 fs/ntfs/super.c 		else if (!strcmp(p, "posix") || !strcmp(p, "show_inodes"))
p                 193 fs/ntfs/super.c 					p);
p                 194 fs/ntfs/super.c 		else if (!strcmp(p, "nls") || !strcmp(p, "iocharset")) {
p                 195 fs/ntfs/super.c 			if (!strcmp(p, "iocharset"))
p                 218 fs/ntfs/super.c 		} else if (!strcmp(p, "utf8")) {
p                 234 fs/ntfs/super.c 			ntfs_error(vol->sb, "Unrecognized mount option %s.", p);
p                 334 fs/ntfs/super.c 	ntfs_error(vol->sb, "The %s option requires an argument.", p);
p                 337 fs/ntfs/super.c 	ntfs_error(vol->sb, "The %s option requires a boolean argument.", p);
p                 340 fs/ntfs/super.c 	ntfs_error(vol->sb, "Invalid %s option argument: %s", p, ov);
p                  58 fs/ocfs2/blockcheck.c 	unsigned int b, p = 0;
p                  68 fs/ocfs2/blockcheck.c 		p = *p_cache;
p                  69 fs/ocfs2/blockcheck.c         b += p;
p                  79 fs/ocfs2/blockcheck.c 	for (; (1 << p) < (b + 1); p++)
p                  83 fs/ocfs2/blockcheck.c 		*p_cache = p;
p                 101 fs/ocfs2/blockcheck.c 	unsigned int i, b, p = 0;
p                 120 fs/ocfs2/blockcheck.c 		b = calc_code_bit(nr + i, &p);
p                1543 fs/ocfs2/cluster/heartbeat.c 	char *p = (char *)page;
p                1545 fs/ocfs2/cluster/heartbeat.c 	bytes = simple_strtoul(p, &p, 0);
p                1546 fs/ocfs2/cluster/heartbeat.c 	if (!p || (*p && (*p != '\n')))
p                1604 fs/ocfs2/cluster/heartbeat.c 	char *p = (char *)page;
p                1609 fs/ocfs2/cluster/heartbeat.c 	tmp = simple_strtoull(p, &p, 0);
p                1610 fs/ocfs2/cluster/heartbeat.c 	if (!p || (*p && (*p != '\n')))
p                1629 fs/ocfs2/cluster/heartbeat.c 	char *p = (char *)page;
p                1634 fs/ocfs2/cluster/heartbeat.c 	tmp = simple_strtoul(p, &p, 0);
p                1635 fs/ocfs2/cluster/heartbeat.c 	if (!p || (*p && (*p != '\n')))
p                1769 fs/ocfs2/cluster/heartbeat.c 	char *p = (char *)page;
p                1783 fs/ocfs2/cluster/heartbeat.c 	fd = simple_strtol(p, &p, 0);
p                1784 fs/ocfs2/cluster/heartbeat.c 	if (!p || (*p && (*p != '\n')))
p                2148 fs/ocfs2/cluster/heartbeat.c 	char *p = (char *)page;
p                2150 fs/ocfs2/cluster/heartbeat.c 	tmp = simple_strtoul(p, &p, 10);
p                2151 fs/ocfs2/cluster/heartbeat.c 	if (!p || (*p && (*p != '\n')))
p                2529 fs/ocfs2/cluster/heartbeat.c 	char *p;
p                2533 fs/ocfs2/cluster/heartbeat.c 	p = region_uuids;
p                2540 fs/ocfs2/cluster/heartbeat.c 			memcpy(p, config_item_name(&reg->hr_item),
p                2542 fs/ocfs2/cluster/heartbeat.c 			p += O2HB_MAX_REGION_NAME_LEN;
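
Every configfs store handler indexed above from fs/ocfs2/cluster/heartbeat.c (and nodemanager.c below) validates its numeric input the same way: parse with simple_strtoul(p, &p, base) and reject the write unless parsing consumed the whole string up to an optional trailing newline. An equivalent userspace sketch using strtoul() follows; the limits and error value are illustrative.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_threshold(const char *page, unsigned long *out)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(page, &end, 0);
	if (errno || end == page)
		return -EINVAL;			/* no digits, or overflow */
	if (*end && *end != '\n')
		return -EINVAL;			/* trailing junk after the number */
	*out = val;
	return 0;
}

int main(void)
{
	unsigned long v = 0;

	printf("\"31\\n\"  -> %d\n", parse_threshold("31\n", &v));
	printf("\"31xyz\" -> %d (v still %lu)\n", parse_threshold("31xyz", &v), v);
	return 0;
}
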
p                  71 fs/ocfs2/cluster/nodemanager.c 	struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
p                  75 fs/ocfs2/cluster/nodemanager.c 	while (*p) {
p                  78 fs/ocfs2/cluster/nodemanager.c 		parent = *p;
p                  84 fs/ocfs2/cluster/nodemanager.c 			p = &(*p)->rb_left;
p                  86 fs/ocfs2/cluster/nodemanager.c 			p = &(*p)->rb_right;
p                  94 fs/ocfs2/cluster/nodemanager.c 		*ret_p = p;
p                 191 fs/ocfs2/cluster/nodemanager.c 	char *p = (char *)page;
p                 194 fs/ocfs2/cluster/nodemanager.c 	tmp = simple_strtoul(p, &p, 0);
p                 195 fs/ocfs2/cluster/nodemanager.c 	if (!p || (*p && (*p != '\n')))
p                 245 fs/ocfs2/cluster/nodemanager.c 	char *p = (char *)page;
p                 247 fs/ocfs2/cluster/nodemanager.c 	tmp = simple_strtoul(p, &p, 0);
p                 248 fs/ocfs2/cluster/nodemanager.c 	if (!p || (*p && (*p != '\n')))
p                 275 fs/ocfs2/cluster/nodemanager.c 	struct rb_node **p, *parent;
p                 299 fs/ocfs2/cluster/nodemanager.c 	if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
p                 305 fs/ocfs2/cluster/nodemanager.c 		rb_link_node(&node->nd_ip_node, parent, p);
p                 330 fs/ocfs2/cluster/nodemanager.c 	char *p = (char *)page;
p                 333 fs/ocfs2/cluster/nodemanager.c 	tmp = simple_strtoul(p, &p, 0);
p                 334 fs/ocfs2/cluster/nodemanager.c 	if (!p || (*p && (*p != '\n')))
p                 430 fs/ocfs2/cluster/nodemanager.c 	char *p = (char *)page;
p                 432 fs/ocfs2/cluster/nodemanager.c 	tmp = simple_strtoul(p, &p, 0);
p                 433 fs/ocfs2/cluster/nodemanager.c 	if (!p || (*p && (*p != '\n')))
p                 755 fs/ocfs2/cluster/tcp.c 	struct rb_node **p = &o2net_handler_tree.rb_node;
p                 760 fs/ocfs2/cluster/tcp.c 	while (*p) {
p                 761 fs/ocfs2/cluster/tcp.c 		parent = *p;
p                 766 fs/ocfs2/cluster/tcp.c 			p = &(*p)->rb_left;
p                 768 fs/ocfs2/cluster/tcp.c 			p = &(*p)->rb_right;
p                 776 fs/ocfs2/cluster/tcp.c 		*ret_p = p;
p                 804 fs/ocfs2/cluster/tcp.c 	struct rb_node **p, *parent;
p                 845 fs/ocfs2/cluster/tcp.c 	if (o2net_handler_tree_lookup(msg_type, key, &p, &parent))
p                 848 fs/ocfs2/cluster/tcp.c 	        rb_link_node(&nmh->nh_node, parent, p);
p                 855 fs/ocfs2/cluster/tcp.c 		mlog_bug_on_msg(o2net_handler_tree_lookup(msg_type, key, &p,
p                 109 fs/ocfs2/dir.c 	char *p = data;
p                 111 fs/ocfs2/dir.c 	p += blocksize - sizeof(struct ocfs2_dir_block_trailer);
p                 112 fs/ocfs2/dir.c 	return (struct ocfs2_dir_block_trailer *)p;
p                 257 fs/ocfs2/dir.c 	const char	*p;
p                 282 fs/ocfs2/dir.c 	p = name;
p                 284 fs/ocfs2/dir.c 		str2hashbuf(p, len, in, 4);
p                 287 fs/ocfs2/dir.c 		p += 16;
p                2041 fs/ocfs2/dir.c 	struct ocfs2_empty_dir_priv *p =
p                2056 fs/ocfs2/dir.c 		p->seen_dot = 1;
p                2062 fs/ocfs2/dir.c 		p->seen_dot_dot = 1;
p                2064 fs/ocfs2/dir.c 		if (p->dx_dir && p->seen_dot)
p                2070 fs/ocfs2/dir.c 	p->seen_other = 1;
p                1048 fs/ocfs2/dlm/dlmdomain.c 	char *p;
p                1068 fs/ocfs2/dlm/dlmdomain.c 	p = qr->qr_regions;
p                1069 fs/ocfs2/dlm/dlmdomain.c 	for (i = 0; i < qr->qr_numregions; ++i, p += O2HB_MAX_REGION_NAME_LEN)
p                1070 fs/ocfs2/dlm/dlmdomain.c 		mlog(0, "Region %.*s\n", O2HB_MAX_REGION_NAME_LEN, p);
p                 151 fs/ocfs2/filecheck.c 	struct ocfs2_filecheck_entry *p;
p                 155 fs/ocfs2/filecheck.c 		p = list_first_entry(&entry->fs_fcheck->fc_head,
p                 157 fs/ocfs2/filecheck.c 		list_del(&p->fe_list);
p                 158 fs/ocfs2/filecheck.c 		BUG_ON(!p->fe_done); /* To free a undone file check entry */
p                 159 fs/ocfs2/filecheck.c 		kfree(p);
p                 306 fs/ocfs2/filecheck.c 	struct ocfs2_filecheck_entry *p;
p                 324 fs/ocfs2/filecheck.c 	list_for_each_entry(p, &ent->fs_fcheck->fc_head, fe_list) {
p                 325 fs/ocfs2/filecheck.c 		if (p->fe_type != type)
p                 329 fs/ocfs2/filecheck.c 			       p->fe_ino, p->fe_done,
p                 330 fs/ocfs2/filecheck.c 			       ocfs2_filecheck_error(p->fe_status));
p                 353 fs/ocfs2/filecheck.c 	struct ocfs2_filecheck_entry *p;
p                 355 fs/ocfs2/filecheck.c 	list_for_each_entry(p, &ent->fs_fcheck->fc_head, fe_list) {
p                 356 fs/ocfs2/filecheck.c 		if (!p->fe_done) {
p                 357 fs/ocfs2/filecheck.c 			if (p->fe_ino == ino)
p                 368 fs/ocfs2/filecheck.c 	struct ocfs2_filecheck_entry *p;
p                 370 fs/ocfs2/filecheck.c 	list_for_each_entry(p, &ent->fs_fcheck->fc_head, fe_list) {
p                 371 fs/ocfs2/filecheck.c 		if (p->fe_done) {
p                 372 fs/ocfs2/filecheck.c 			list_del(&p->fe_list);
p                 373 fs/ocfs2/filecheck.c 			kfree(p);
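
The fs/ocfs2/filecheck.c walks above use list_for_each_entry_safe() precisely because completed entries are list_del()'d and kfree()'d while the list is being traversed. Below is a minimal userspace analogue of remove-while-iterating, holding the address of the link that points at the current entry so unlinking never invalidates the iterator; the structure and fields are made up.

#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	int done;
};

/* Free every entry marked done; keep the rest, preserving order. */
static void prune_done(struct entry **head)
{
	struct entry **link = head, *e;

	while ((e = *link) != NULL) {
		if (e->done) {
			*link = e->next;	/* unlink before freeing */
			free(e);
		} else {
			link = &e->next;	/* step past a kept entry */
		}
	}
}

int main(void)
{
	struct entry *head = NULL;

	for (int i = 0; i < 5; i++) {
		struct entry *e = calloc(1, sizeof(*e));

		if (!e)
			return 1;
		e->done = (i % 2 == 0);
		e->next = head;
		head = e;
	}
	prune_done(&head);
	for (struct entry *e = head; e; e = e->next)
		printf("kept entry, done=%d\n", e->done);
	return 0;
}
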
p                2038 fs/ocfs2/journal.c 	struct ocfs2_orphan_filldir_priv *p =
p                2048 fs/ocfs2/journal.c 	if ((p->orphan_reco_type == ORPHAN_NO_NEED_TRUNCATE) &&
p                2054 fs/ocfs2/journal.c 	iter = ocfs2_iget(p->osb, ino,
p                2073 fs/ocfs2/journal.c 	OCFS2_I(iter)->ip_next_orphan = p->head;
p                2074 fs/ocfs2/journal.c 	p->head = iter;
p                 219 fs/ocfs2/refcounttree.c 	struct rb_node **p = &osb->osb_rf_lock_tree.rb_node;
p                 222 fs/ocfs2/refcounttree.c 	while (*p) {
p                 223 fs/ocfs2/refcounttree.c 		parent = *p;
p                 229 fs/ocfs2/refcounttree.c 			p = &(*p)->rb_left;
p                 231 fs/ocfs2/refcounttree.c 			p = &(*p)->rb_right;
p                 240 fs/ocfs2/refcounttree.c 	rb_link_node(&new->rf_node, parent, p);
p                 311 fs/ocfs2/reservations.c 	struct rb_node **p = &root->rb_node;
p                 318 fs/ocfs2/reservations.c 	while (*p) {
p                 319 fs/ocfs2/reservations.c 		parent = *p;
p                 324 fs/ocfs2/reservations.c 			p = &(*p)->rb_left;
p                 332 fs/ocfs2/reservations.c 			p = &(*p)->rb_right;
p                 340 fs/ocfs2/reservations.c 	rb_link_node(&new->r_node, parent, p);
p                 176 fs/ocfs2/stack_user.c 	struct ocfs2_control_private *p = file->private_data;
p                 177 fs/ocfs2/stack_user.c 	p->op_state = state;
p                 182 fs/ocfs2/stack_user.c 	struct ocfs2_control_private *p = file->private_data;
p                 183 fs/ocfs2/stack_user.c 	return p->op_state;
p                 304 fs/ocfs2/stack_user.c 	struct ocfs2_control_private *p = file->private_data;
p                 306 fs/ocfs2/stack_user.c 	BUG_ON(p->op_state != OCFS2_CONTROL_HANDSHAKE_PROTOCOL);
p                 310 fs/ocfs2/stack_user.c 	if (p->op_this_node < 0) {
p                 313 fs/ocfs2/stack_user.c 		   (ocfs2_control_this_node != p->op_this_node)) {
p                 318 fs/ocfs2/stack_user.c 	if (!p->op_proto.pv_major) {
p                 321 fs/ocfs2/stack_user.c 		   ((running_proto.pv_major != p->op_proto.pv_major) ||
p                 322 fs/ocfs2/stack_user.c 		    (running_proto.pv_minor != p->op_proto.pv_minor))) {
p                 328 fs/ocfs2/stack_user.c 		ocfs2_control_this_node = p->op_this_node;
p                 329 fs/ocfs2/stack_user.c 		running_proto.pv_major = p->op_proto.pv_major;
p                 330 fs/ocfs2/stack_user.c 		running_proto.pv_minor = p->op_proto.pv_minor;
p                 365 fs/ocfs2/stack_user.c 	struct ocfs2_control_private *p = file->private_data;
p                 386 fs/ocfs2/stack_user.c 	p->op_this_node = nodenum;
p                 396 fs/ocfs2/stack_user.c 	struct ocfs2_control_private *p = file->private_data;
p                 435 fs/ocfs2/stack_user.c 	p->op_proto.pv_major = major;
p                 436 fs/ocfs2/stack_user.c 	p->op_proto.pv_minor = minor;
p                 445 fs/ocfs2/stack_user.c 	char *p = NULL;
p                 460 fs/ocfs2/stack_user.c 	nodenum = simple_strtol(msg->nodestr, &p, 16);
p                 461 fs/ocfs2/stack_user.c 	if (!p || *p)
p                 563 fs/ocfs2/stack_user.c 	struct ocfs2_control_private *p = file->private_data;
p                 590 fs/ocfs2/stack_user.c 	list_del_init(&p->op_list);
p                 595 fs/ocfs2/stack_user.c 	kfree(p);
p                 602 fs/ocfs2/stack_user.c 	struct ocfs2_control_private *p;
p                 604 fs/ocfs2/stack_user.c 	p = kzalloc(sizeof(struct ocfs2_control_private), GFP_KERNEL);
p                 605 fs/ocfs2/stack_user.c 	if (!p)
p                 607 fs/ocfs2/stack_user.c 	p->op_this_node = -1;
p                 610 fs/ocfs2/stack_user.c 	file->private_data = p;
p                 611 fs/ocfs2/stack_user.c 	list_add(&p->op_list, &ocfs2_control_private_list);
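
fs/ocfs2/stack_user.c above shows the usual character-device lifetime pattern: open() kzalloc()s a private struct, initialises it (op_this_node = -1), stores it in file->private_data and links it on a global list, and release() unlinks and frees it. Here is a userspace sketch of that ownership scheme with an invented "session" handle standing in for ocfs2_control_private.

#include <stdio.h>
#include <stdlib.h>

struct session {
	struct session *next;			/* global registration list */
	int this_node;				/* -1 until a handshake sets it */
};

static struct session *session_list;

static struct session *session_open(void)
{
	struct session *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	s->this_node = -1;			/* same default as op_this_node */
	s->next = session_list;			/* register while "open" */
	session_list = s;
	return s;
}

static void session_release(struct session *s)
{
	struct session **link;

	for (link = &session_list; *link; link = &(*link)->next) {
		if (*link == s) {
			*link = s->next;	/* unregister */
			break;
		}
	}
	free(s);
}

int main(void)
{
	struct session *s = session_open();

	if (!s)
		return 1;
	printf("sessions registered: %s\n", session_list ? "yes" : "no");
	session_release(s);
	printf("sessions registered: %s\n", session_list ? "yes" : "no");
	return 0;
}
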
p                  45 fs/ocfs2/stackglue.c 	struct ocfs2_stack_plugin *p;
p                  49 fs/ocfs2/stackglue.c 	list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
p                  50 fs/ocfs2/stackglue.c 		if (!strcmp(p->sp_name, name))
p                  51 fs/ocfs2/stackglue.c 			return p;
p                  61 fs/ocfs2/stackglue.c 	struct ocfs2_stack_plugin *p;
p                  86 fs/ocfs2/stackglue.c 	p = ocfs2_stack_lookup(plugin_name);
p                  87 fs/ocfs2/stackglue.c 	if (!p || !try_module_get(p->sp_owner)) {
p                  92 fs/ocfs2/stackglue.c 	active_stack = p;
p                 189 fs/ocfs2/stackglue.c 	struct ocfs2_stack_plugin *p;
p                 192 fs/ocfs2/stackglue.c 	p = ocfs2_stack_lookup(plugin->sp_name);
p                 193 fs/ocfs2/stackglue.c 	if (p) {
p                 194 fs/ocfs2/stackglue.c 		BUG_ON(p != plugin);
p                 210 fs/ocfs2/stackglue.c 	struct ocfs2_stack_plugin *p;
p                 218 fs/ocfs2/stackglue.c 		list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
p                 219 fs/ocfs2/stackglue.c 			p->sp_max_proto = locking_max_version;
p                 499 fs/ocfs2/stackglue.c 	struct ocfs2_stack_plugin *p;
p                 502 fs/ocfs2/stackglue.c 	list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
p                 504 fs/ocfs2/stackglue.c 			       p->sp_name);
p                1244 fs/ocfs2/super.c 	char *p;
p                1265 fs/ocfs2/super.c 	while ((p = strsep(&options, ",")) != NULL) {
p                1266 fs/ocfs2/super.c 		if (!*p)
p                1269 fs/ocfs2/super.c 		token = match_token(p, tokens, args);
p                1451 fs/ocfs2/super.c 			     "or missing value\n", p);
p                 310 fs/ocfs2/uptodate.c 	struct rb_node **p = &ci->ci_cache.ci_tree.rb_node;
p                 317 fs/ocfs2/uptodate.c 	while(*p) {
p                 318 fs/ocfs2/uptodate.c 		parent = *p;
p                 323 fs/ocfs2/uptodate.c 			p = &(*p)->rb_left;
p                 325 fs/ocfs2/uptodate.c 			p = &(*p)->rb_right;
p                 334 fs/ocfs2/uptodate.c 	rb_link_node(&new->c_node, parent, p);
p                 886 fs/ocfs2/xattr.c 	char *p = buffer + *result;
p                 923 fs/ocfs2/xattr.c 	memcpy(p, prefix, prefix_len);
p                 924 fs/ocfs2/xattr.c 	memcpy(p + prefix_len, name, name_len);
p                 925 fs/ocfs2/xattr.c 	p[prefix_len + name_len] = '\0';
p                5779 fs/ocfs2/xattr.c 	struct ocfs2_post_refcount *p = NULL;
p                5817 fs/ocfs2/xattr.c 				p = &refcount;
p                5876 fs/ocfs2/xattr.c 				       le32_to_cpu(vb.vb_xv->xr_clusters), p);
p                6055 fs/ocfs2/xattr.c 	struct ocfs2_post_refcount *p = NULL;
p                6059 fs/ocfs2/xattr.c 		p = &refcount;
p                6083 fs/ocfs2/xattr.c 							ref->dealloc, p);
p                 411 fs/omfs/dir.c  	__be64 *p;
p                 434 fs/omfs/dir.c  	p = (__be64 *)(bh->b_data + OMFS_DIR_START) + hchain;
p                 437 fs/omfs/dir.c  		__u64 fsblock = be64_to_cpu(*p++);
p                 407 fs/omfs/inode.c 	char *p;
p                 414 fs/omfs/inode.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 416 fs/omfs/inode.c 		if (!*p)
p                 419 fs/omfs/inode.c 		token = match_token(p, tokens, args);
p                  50 fs/openpromfs/inode.c static int is_string(unsigned char *p, int len)
p                  55 fs/openpromfs/inode.c 		unsigned char val = p[i];
p                 282 fs/orangefs/orangefs-debugfs.c static void help_stop(struct seq_file *m, void *p)
p                  51 fs/orangefs/protocol.h 				   void *p, int size)
p                  54 fs/orangefs/protocol.h 	memcpy(p, kh->u, 16);
p                  55 fs/orangefs/protocol.h 	memset(p + 16, 0, size - 16);
p                  60 fs/orangefs/protocol.h 				     void *p, int size)
p                  63 fs/orangefs/protocol.h 	memcpy(kh->u, p, 16);
p                  58 fs/orangefs/super.c 	char *p;
p                  68 fs/orangefs/super.c 	while ((p = strsep(&options, ",")) != NULL) {
p                  71 fs/orangefs/super.c 		if (!*p)
p                  74 fs/orangefs/super.c 		token = match_token(p, tokens, args);
p                  93 fs/orangefs/super.c 		gossip_err("Error: mount option [%s] is not supported.\n", p);
p                 310 fs/overlayfs/inode.c 	const char *p;
p                 316 fs/overlayfs/inode.c 	p = vfs_get_link(ovl_dentry_real(dentry), done);
p                 318 fs/overlayfs/inode.c 	return p;
p                  99 fs/overlayfs/readdir.c 		struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);
p                 101 fs/overlayfs/readdir.c 		cmp = strncmp(name, p->name, len);
p                 103 fs/overlayfs/readdir.c 			node = p->node.rb_right;
p                 104 fs/overlayfs/readdir.c 		else if (cmp < 0 || len < p->len)
p                 105 fs/overlayfs/readdir.c 			node = p->node.rb_left;
p                 107 fs/overlayfs/readdir.c 			return p;
p                 114 fs/overlayfs/readdir.c 			   struct ovl_cache_entry *p)
p                 125 fs/overlayfs/readdir.c 	if (strcmp(p->name, "..") == 0)
p                 136 fs/overlayfs/readdir.c 	if ((p->name[0] == '.' && p->len == 1) ||
p                 147 fs/overlayfs/readdir.c 	struct ovl_cache_entry *p;
p                 150 fs/overlayfs/readdir.c 	p = kmalloc(size, GFP_KERNEL);
p                 151 fs/overlayfs/readdir.c 	if (!p)
p                 154 fs/overlayfs/readdir.c 	memcpy(p->name, name, len);
p                 155 fs/overlayfs/readdir.c 	p->name[len] = '\0';
p                 156 fs/overlayfs/readdir.c 	p->len = len;
p                 157 fs/overlayfs/readdir.c 	p->type = d_type;
p                 158 fs/overlayfs/readdir.c 	p->real_ino = ino;
p                 159 fs/overlayfs/readdir.c 	p->ino = ino;
p                 161 fs/overlayfs/readdir.c 	if (ovl_calc_d_ino(rdd, p))
p                 162 fs/overlayfs/readdir.c 		p->ino = 0;
p                 163 fs/overlayfs/readdir.c 	p->is_upper = rdd->is_upper;
p                 164 fs/overlayfs/readdir.c 	p->is_whiteout = false;
p                 167 fs/overlayfs/readdir.c 		p->next_maybe_whiteout = rdd->first_maybe_whiteout;
p                 168 fs/overlayfs/readdir.c 		rdd->first_maybe_whiteout = p;
p                 170 fs/overlayfs/readdir.c 	return p;
p                 179 fs/overlayfs/readdir.c 	struct ovl_cache_entry *p;
p                 184 fs/overlayfs/readdir.c 	p = ovl_cache_entry_new(rdd, name, len, ino, d_type);
p                 185 fs/overlayfs/readdir.c 	if (p == NULL) {
p                 190 fs/overlayfs/readdir.c 	list_add_tail(&p->l_node, rdd->list);
p                 191 fs/overlayfs/readdir.c 	rb_link_node(&p->node, parent, newp);
p                 192 fs/overlayfs/readdir.c 	rb_insert_color(&p->node, rdd->root);
p                 201 fs/overlayfs/readdir.c 	struct ovl_cache_entry *p;
p                 203 fs/overlayfs/readdir.c 	p = ovl_cache_entry_find(rdd->root, name, namelen);
p                 204 fs/overlayfs/readdir.c 	if (p) {
p                 205 fs/overlayfs/readdir.c 		list_move_tail(&p->l_node, &rdd->middle);
p                 207 fs/overlayfs/readdir.c 		p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
p                 208 fs/overlayfs/readdir.c 		if (p == NULL)
p                 211 fs/overlayfs/readdir.c 			list_add_tail(&p->l_node, &rdd->middle);
p                 219 fs/overlayfs/readdir.c 	struct ovl_cache_entry *p;
p                 222 fs/overlayfs/readdir.c 	list_for_each_entry_safe(p, n, list, l_node)
p                 223 fs/overlayfs/readdir.c 		kfree(p);
p                 270 fs/overlayfs/readdir.c 	struct ovl_cache_entry *p;
p                 279 fs/overlayfs/readdir.c 			p = rdd->first_maybe_whiteout;
p                 280 fs/overlayfs/readdir.c 			rdd->first_maybe_whiteout = p->next_maybe_whiteout;
p                 281 fs/overlayfs/readdir.c 			dentry = lookup_one_len(p->name, dir, p->len);
p                 283 fs/overlayfs/readdir.c 				p->is_whiteout = ovl_is_whiteout(dentry);
p                 393 fs/overlayfs/readdir.c 	struct list_head *p;
p                 396 fs/overlayfs/readdir.c 	list_for_each(p, &od->cache->entries) {
p                 402 fs/overlayfs/readdir.c 	od->cursor = p;
p                 462 fs/overlayfs/readdir.c static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
p                 468 fs/overlayfs/readdir.c 	u64 ino = p->real_ino;
p                 475 fs/overlayfs/readdir.c 	if (p->name[0] == '.') {
p                 476 fs/overlayfs/readdir.c 		if (p->len == 1) {
p                 480 fs/overlayfs/readdir.c 		if (p->len == 2 && p->name[1] == '.') {
p                 486 fs/overlayfs/readdir.c 	this = lookup_one_len(p->name, dir, p->len);
p                 518 fs/overlayfs/readdir.c 					  p->name, p->len);
p                 522 fs/overlayfs/readdir.c 	p->ino = ino;
p                 528 fs/overlayfs/readdir.c 			    p->name, err);
p                 536 fs/overlayfs/readdir.c 	struct ovl_cache_entry *p;
p                 541 fs/overlayfs/readdir.c 	p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
p                 542 fs/overlayfs/readdir.c 	if (p == NULL) {
p                 546 fs/overlayfs/readdir.c 	list_add_tail(&p->l_node, rdd->list);
p                 556 fs/overlayfs/readdir.c 	struct ovl_cache_entry *p, *n;
p                 571 fs/overlayfs/readdir.c 	list_for_each_entry_safe(p, n, list, l_node) {
p                 572 fs/overlayfs/readdir.c 		if (strcmp(p->name, ".") != 0 &&
p                 573 fs/overlayfs/readdir.c 		    strcmp(p->name, "..") != 0) {
p                 574 fs/overlayfs/readdir.c 			err = ovl_cache_update_ino(path, p);
p                 578 fs/overlayfs/readdir.c 		if (p->ino == p->real_ino) {
p                 579 fs/overlayfs/readdir.c 			list_del(&p->l_node);
p                 580 fs/overlayfs/readdir.c 			kfree(p);
p                 585 fs/overlayfs/readdir.c 			if (WARN_ON(ovl_cache_entry_find_link(p->name, p->len,
p                 589 fs/overlayfs/readdir.c 			rb_link_node(&p->node, parent, newp);
p                 590 fs/overlayfs/readdir.c 			rb_insert_color(&p->node, root);
p                 661 fs/overlayfs/readdir.c 		struct ovl_cache_entry *p;
p                 663 fs/overlayfs/readdir.c 		p = ovl_cache_entry_find(&rdt->cache->root, name, namelen);
p                 664 fs/overlayfs/readdir.c 		if (p)
p                 665 fs/overlayfs/readdir.c 			ino = p->ino;
p                 734 fs/overlayfs/readdir.c 	struct ovl_cache_entry *p;
p                 767 fs/overlayfs/readdir.c 		p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
p                 768 fs/overlayfs/readdir.c 		if (!p->is_whiteout) {
p                 769 fs/overlayfs/readdir.c 			if (!p->ino) {
p                 770 fs/overlayfs/readdir.c 				err = ovl_cache_update_ino(&file->f_path, p);
p                 774 fs/overlayfs/readdir.c 			if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
p                 777 fs/overlayfs/readdir.c 		od->cursor = p->l_node.next;
p                 921 fs/overlayfs/readdir.c 	struct ovl_cache_entry *p, *n;
p                 933 fs/overlayfs/readdir.c 	list_for_each_entry_safe(p, n, list, l_node) {
p                 938 fs/overlayfs/readdir.c 		if (p->is_whiteout) {
p                 939 fs/overlayfs/readdir.c 			if (p->is_upper)
p                 944 fs/overlayfs/readdir.c 		if (p->name[0] == '.') {
p                 945 fs/overlayfs/readdir.c 			if (p->len == 1)
p                 947 fs/overlayfs/readdir.c 			if (p->len == 2 && p->name[1] == '.')
p                 954 fs/overlayfs/readdir.c 		list_del(&p->l_node);
p                 955 fs/overlayfs/readdir.c 		kfree(p);
p                 963 fs/overlayfs/readdir.c 	struct ovl_cache_entry *p;
p                 966 fs/overlayfs/readdir.c 	list_for_each_entry(p, list, l_node) {
p                 969 fs/overlayfs/readdir.c 		if (WARN_ON(!p->is_whiteout || !p->is_upper))
p                 972 fs/overlayfs/readdir.c 		dentry = lookup_one_len(p->name, upper, p->len);
p                 975 fs/overlayfs/readdir.c 			       upper->d_name.name, p->len, p->name,
p                1028 fs/overlayfs/readdir.c 	struct ovl_cache_entry *p;
p                1042 fs/overlayfs/readdir.c 	list_for_each_entry(p, &list, l_node) {
p                1045 fs/overlayfs/readdir.c 		if (p->name[0] == '.') {
p                1046 fs/overlayfs/readdir.c 			if (p->len == 1)
p                1048 fs/overlayfs/readdir.c 			if (p->len == 2 && p->name[1] == '.')
p                1051 fs/overlayfs/readdir.c 		dentry = lookup_one_len(p->name, path->dentry, p->len);
p                1093 fs/overlayfs/readdir.c 	struct ovl_cache_entry *p;
p                1107 fs/overlayfs/readdir.c 	list_for_each_entry(p, &list, l_node) {
p                1108 fs/overlayfs/readdir.c 		if (p->name[0] == '.') {
p                1109 fs/overlayfs/readdir.c 			if (p->len == 1)
p                1111 fs/overlayfs/readdir.c 			if (p->len == 2 && p->name[1] == '.')
p                1114 fs/overlayfs/readdir.c 		index = lookup_one_len(p->name, indexdir, p->len);
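
ovl_cache_entry_new() above allocates the readdir cache entry and its name in a single kmalloc(): the size covers the struct plus the name, the name is copied and NUL-terminated, and the bookkeeping fields are filled before the entry is linked into the list and rb-tree. Below is a userspace sketch using a C99 flexible array member, with the field set trimmed down for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cache_entry {
	size_t len;
	unsigned char type;
	char name[];				/* allocated together with the struct */
};

static struct cache_entry *cache_entry_new(const char *name, size_t len,
					   unsigned char type)
{
	struct cache_entry *p = malloc(sizeof(*p) + len + 1);

	if (!p)
		return NULL;
	memcpy(p->name, name, len);
	p->name[len] = '\0';
	p->len = len;
	p->type = type;
	return p;
}

int main(void)
{
	struct cache_entry *p = cache_entry_new("subdir", 6, 4 /* DT_DIR */);

	if (!p)
		return 1;
	printf("cached \"%s\" (len %zu, type %u)\n", p->name, p->len,
	       (unsigned int)p->type);
	free(p);
	return 0;
}
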
p                 430 fs/overlayfs/super.c 	char *p;
p                 435 fs/overlayfs/super.c 	for (p = sbegin; *p; p++) {
p                 436 fs/overlayfs/super.c 		if (*p == '\\') {
p                 437 fs/overlayfs/super.c 			p++;
p                 438 fs/overlayfs/super.c 			if (!*p)
p                 440 fs/overlayfs/super.c 		} else if (*p == ',') {
p                 441 fs/overlayfs/super.c 			*p = '\0';
p                 442 fs/overlayfs/super.c 			*s = p + 1;
p                 475 fs/overlayfs/super.c 	char *p;
p                 483 fs/overlayfs/super.c 	while ((p = ovl_next_opt(&opt)) != NULL) {
p                 487 fs/overlayfs/super.c 		if (!*p)
p                 490 fs/overlayfs/super.c 		token = match_token(p, ovl_tokens, args);
p                 563 fs/overlayfs/super.c 			pr_err("overlayfs: unrecognized mount option \"%s\" or missing value\n", p);
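
ovl_next_opt() above cannot use plain strsep() because overlayfs option values may contain commas escaped with a backslash; it scans for the next unescaped ',', terminates the current option there, and leaves *s pointing at the remainder. Here is a standalone sketch of that splitter with the same control flow.

#include <stdio.h>

static char *next_opt(char **s)
{
	char *sbegin = *s, *p;

	if (!sbegin)
		return NULL;

	for (p = sbegin; *p; p++) {
		if (*p == '\\') {
			p++;			/* skip the escaped character */
			if (!*p)
				break;
		} else if (*p == ',') {
			*p = '\0';		/* terminate the current option */
			*s = p + 1;		/* remainder for the next call */
			return sbegin;
		}
	}
	*s = NULL;				/* last option consumed */
	return sbegin;
}

int main(void)
{
	char opts[] = "lowerdir=/a\\,b,upperdir=/up,workdir=/work";
	char *rest = opts, *p;

	while ((p = next_opt(&rest)) != NULL)
		printf("option: %s\n", p);
	return 0;
}
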
p                  17 fs/pnode.c     static inline struct mount *next_peer(struct mount *p)
p                  19 fs/pnode.c     	return list_entry(p->mnt_share.next, struct mount, mnt_share);
p                  22 fs/pnode.c     static inline struct mount *first_slave(struct mount *p)
p                  24 fs/pnode.c     	return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
p                  27 fs/pnode.c     static inline struct mount *last_slave(struct mount *p)
p                  29 fs/pnode.c     	return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
p                  32 fs/pnode.c     static inline struct mount *next_slave(struct mount *p)
p                  34 fs/pnode.c     	return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
p                  84 fs/pnode.c     			struct list_head *p = &mnt->mnt_slave_list;
p                  85 fs/pnode.c     			while (!list_empty(p)) {
p                  86 fs/pnode.c     				slave_mnt = list_first_entry(p,
p                 238 fs/pnode.c     		struct mount *n, *p;
p                 240 fs/pnode.c     		for (n = m; ; n = p) {
p                 241 fs/pnode.c     			p = n->mnt_master;
p                 242 fs/pnode.c     			if (p == dest_master || IS_MNT_MARKED(p))
p                 249 fs/pnode.c     			done = parent->mnt_master == p;
p                  40 fs/posix_acl.c 	struct posix_acl **p = acl_by_type(inode, type);
p                  45 fs/posix_acl.c 		acl = rcu_dereference(*p);
p                  65 fs/posix_acl.c 	struct posix_acl **p = acl_by_type(inode, type);
p                  68 fs/posix_acl.c 	old = xchg(p, posix_acl_dup(acl));
p                  74 fs/posix_acl.c static void __forget_cached_acl(struct posix_acl **p)
p                  78 fs/posix_acl.c 	old = xchg(p, ACL_NOT_CACHED);
p                  99 fs/posix_acl.c 	struct posix_acl **p;
p                 116 fs/posix_acl.c 	p = acl_by_type(inode, type);
p                 126 fs/posix_acl.c 	if (cmpxchg(p, ACL_NOT_CACHED, sentinel) != ACL_NOT_CACHED)
p                 148 fs/posix_acl.c 		cmpxchg(p, sentinel, ACL_NOT_CACHED);
p                 156 fs/posix_acl.c 	if (unlikely(cmpxchg(p, sentinel, acl) != sentinel))
p                 585 fs/posix_acl.c 	struct posix_acl *p;
p                 595 fs/posix_acl.c 	p = get_acl(dir, ACL_TYPE_DEFAULT);
p                 596 fs/posix_acl.c 	if (!p || p == ERR_PTR(-EOPNOTSUPP)) {
p                 600 fs/posix_acl.c 	if (IS_ERR(p))
p                 601 fs/posix_acl.c 		return PTR_ERR(p);
p                 604 fs/posix_acl.c 	clone = posix_acl_clone(p, GFP_NOFS);
p                 618 fs/posix_acl.c 		posix_acl_release(p);
p                 620 fs/posix_acl.c 		*default_acl = p;
p                 627 fs/posix_acl.c 	posix_acl_release(p);
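
The fs/posix_acl.c lines above implement a lock-free per-inode ACL cache: the cached pointer is either ACL_NOT_CACHED, a temporary sentinel meaning a lookup is in flight, or a real ACL, and transitions are made with cmpxchg()/xchg(). What follows is a much-reduced C11 userspace sketch of that state machine, with a made-up lookup standing in for ->get_acl() and no reference counting.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static int sentinel_storage;			/* unique non-NULL marker */
#define SENTINEL ((void *)&sentinel_storage)

static _Atomic(void *) cache;			/* NULL == "not cached" */

static void *expensive_lookup(void)
{
	int *v = malloc(sizeof(*v));

	if (v)
		*v = 42;
	return v;
}

static void *get_cached_or_compute(void)
{
	void *cur = atomic_load(&cache);
	void *expected, *val;

	if (cur && cur != SENTINEL)
		return cur;			/* fast path: already cached */

	/* Claim the slot so only one caller performs the lookup. */
	expected = NULL;
	if (!atomic_compare_exchange_strong(&cache, &expected, SENTINEL)) {
		cur = atomic_load(&cache);	/* someone else is filling it */
		return cur == SENTINEL ? NULL : cur;
	}

	val = expensive_lookup();
	expected = SENTINEL;
	if (!atomic_compare_exchange_strong(&cache, &expected, val)) {
		free(val);			/* cache was reset meanwhile */
		return NULL;
	}
	return val;
}

int main(void)
{
	int *v = get_cached_or_compute();

	printf("cached value: %d\n", v ? *v : -1);
	return 0;
}
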
p                  99 fs/proc/array.c void proc_task_name(struct seq_file *m, struct task_struct *p, bool escape)
p                 106 fs/proc/array.c 	if (p->flags & PF_WQ_WORKER)
p                 107 fs/proc/array.c 		wq_worker_comm(tcomm, sizeof(tcomm), p);
p                 109 fs/proc/array.c 		__get_task_comm(tcomm, sizeof(tcomm), p);
p                 153 fs/proc/array.c 				struct pid *pid, struct task_struct *p)
p                 164 fs/proc/array.c 	ppid = pid_alive(p) ?
p                 165 fs/proc/array.c 		task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0;
p                 167 fs/proc/array.c 	tracer = ptrace_parent(p);
p                 171 fs/proc/array.c 	tgid = task_tgid_nr_ns(p, ns);
p                 172 fs/proc/array.c 	ngid = task_numa_group_id(p);
p                 173 fs/proc/array.c 	cred = get_task_cred(p);
p                 175 fs/proc/array.c 	task_lock(p);
p                 176 fs/proc/array.c 	if (p->fs)
p                 177 fs/proc/array.c 		umask = p->fs->umask;
p                 178 fs/proc/array.c 	if (p->files)
p                 179 fs/proc/array.c 		max_fds = files_fdtable(p->files)->max_fds;
p                 180 fs/proc/array.c 	task_unlock(p);
p                 186 fs/proc/array.c 	seq_puts(m, get_task_state(p));
p                 215 fs/proc/array.c 		seq_put_decimal_ull(m, "\t", task_tgid_nr_ns(p, pid->numbers[g].ns));
p                 218 fs/proc/array.c 		seq_put_decimal_ull(m, "\t", task_pid_nr_ns(p, pid->numbers[g].ns));
p                 221 fs/proc/array.c 		seq_put_decimal_ull(m, "\t", task_pgrp_nr_ns(p, pid->numbers[g].ns));
p                 224 fs/proc/array.c 		seq_put_decimal_ull(m, "\t", task_session_nr_ns(p, pid->numbers[g].ns));
p                 251 fs/proc/array.c static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
p                 257 fs/proc/array.c 	k = p->sighand->action;
p                 266 fs/proc/array.c static inline void task_sig(struct seq_file *m, struct task_struct *p)
p                 280 fs/proc/array.c 	if (lock_task_sighand(p, &flags)) {
p                 281 fs/proc/array.c 		pending = p->pending.signal;
p                 282 fs/proc/array.c 		shpending = p->signal->shared_pending.signal;
p                 283 fs/proc/array.c 		blocked = p->blocked;
p                 284 fs/proc/array.c 		collect_sigign_sigcatch(p, &ignored, &caught);
p                 285 fs/proc/array.c 		num_threads = get_nr_threads(p);
p                 287 fs/proc/array.c 		qsize = atomic_read(&__task_cred(p)->user->sigpending);
p                 289 fs/proc/array.c 		qlim = task_rlimit(p, RLIMIT_SIGPENDING);
p                 290 fs/proc/array.c 		unlock_task_sighand(p, &flags);
p                 318 fs/proc/array.c static inline void task_cap(struct seq_file *m, struct task_struct *p)
p                 325 fs/proc/array.c 	cred = __task_cred(p);
p                 340 fs/proc/array.c static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
p                 342 fs/proc/array.c 	seq_put_decimal_ull(m, "NoNewPrivs:\t", task_no_new_privs(p));
p                 344 fs/proc/array.c 	seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode);
p                 347 fs/proc/array.c 	switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
p                 374 fs/proc/array.c 						struct task_struct *p)
p                 376 fs/proc/array.c 	seq_put_decimal_ull(m, "voluntary_ctxt_switches:\t", p->nvcsw);
p                 377 fs/proc/array.c 	seq_put_decimal_ull(m, "\nnonvoluntary_ctxt_switches:\t", p->nivcsw);
p                1076 fs/proc/base.c 		struct task_struct *p = find_lock_task_mm(task);
p                1078 fs/proc/base.c 		if (p) {
p                1079 fs/proc/base.c 			if (atomic_read(&p->mm->mm_users) > 1) {
p                1080 fs/proc/base.c 				mm = p->mm;
p                1083 fs/proc/base.c 			task_unlock(p);
p                1093 fs/proc/base.c 		struct task_struct *p;
p                1096 fs/proc/base.c 		for_each_process(p) {
p                1097 fs/proc/base.c 			if (same_thread_group(task, p))
p                1101 fs/proc/base.c 			if (p->flags & PF_KTHREAD || is_global_init(p))
p                1104 fs/proc/base.c 			task_lock(p);
p                1105 fs/proc/base.c 			if (!p->vfork_done && process_shares_mm(p, mm)) {
p                1106 fs/proc/base.c 				p->signal->oom_score_adj = oom_adj;
p                1108 fs/proc/base.c 					p->signal->oom_score_adj_min = (short)oom_adj;
p                1110 fs/proc/base.c 			task_unlock(p);
p                1417 fs/proc/base.c 	struct task_struct *p;
p                1419 fs/proc/base.c 	p = get_proc_task(inode);
p                1420 fs/proc/base.c 	if (!p)
p                1422 fs/proc/base.c 	proc_sched_show_task(p, ns, m);
p                1424 fs/proc/base.c 	put_task_struct(p);
p                1434 fs/proc/base.c 	struct task_struct *p;
p                1436 fs/proc/base.c 	p = get_proc_task(inode);
p                1437 fs/proc/base.c 	if (!p)
p                1439 fs/proc/base.c 	proc_sched_set_task(p);
p                1441 fs/proc/base.c 	put_task_struct(p);
p                1468 fs/proc/base.c 	struct task_struct *p;
p                1470 fs/proc/base.c 	p = get_proc_task(inode);
p                1471 fs/proc/base.c 	if (!p)
p                1473 fs/proc/base.c 	proc_sched_autogroup_show_task(p, m);
p                1475 fs/proc/base.c 	put_task_struct(p);
p                1485 fs/proc/base.c 	struct task_struct *p;
p                1500 fs/proc/base.c 	p = get_proc_task(inode);
p                1501 fs/proc/base.c 	if (!p)
p                1504 fs/proc/base.c 	err = proc_sched_autogroup_set_nice(p, nice);
p                1508 fs/proc/base.c 	put_task_struct(p);
p                1540 fs/proc/base.c 	struct task_struct *p;
p                1548 fs/proc/base.c 	p = get_proc_task(inode);
p                1549 fs/proc/base.c 	if (!p)
p                1552 fs/proc/base.c 	if (same_thread_group(current, p))
p                1553 fs/proc/base.c 		set_task_comm(p, buffer);
p                1557 fs/proc/base.c 	put_task_struct(p);
p                1565 fs/proc/base.c 	struct task_struct *p;
p                1567 fs/proc/base.c 	p = get_proc_task(inode);
p                1568 fs/proc/base.c 	if (!p)
p                1571 fs/proc/base.c 	proc_task_name(m, p, false);
p                1574 fs/proc/base.c 	put_task_struct(p);
p                2170 fs/proc/base.c 	struct map_files_info *p;
p                2216 fs/proc/base.c 		p = genradix_ptr_alloc(&fa, nr_files++, GFP_KERNEL);
p                2217 fs/proc/base.c 		if (!p) {
p                2224 fs/proc/base.c 		p->start = vma->vm_start;
p                2225 fs/proc/base.c 		p->end = vma->vm_end;
p                2226 fs/proc/base.c 		p->mode = vma->vm_file->f_mode;
p                2235 fs/proc/base.c 		p = genradix_ptr(&fa, i);
p                2236 fs/proc/base.c 		len = snprintf(buf, sizeof(buf), "%lx-%lx", p->start, p->end);
p                2241 fs/proc/base.c 				      (void *)(unsigned long)p->mode))
p                2364 fs/proc/base.c 	struct task_struct *p;
p                2372 fs/proc/base.c 	p = get_proc_task(inode);
p                2373 fs/proc/base.c 	if (!p)
p                2376 fs/proc/base.c 	if (p != current) {
p                2378 fs/proc/base.c 		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
p                2385 fs/proc/base.c 		err = security_task_setscheduler(p);
p                2392 fs/proc/base.c 	task_lock(p);
p                2394 fs/proc/base.c 		p->timer_slack_ns = p->default_timer_slack_ns;
p                2396 fs/proc/base.c 		p->timer_slack_ns = slack_ns;
p                2397 fs/proc/base.c 	task_unlock(p);
p                2400 fs/proc/base.c 	put_task_struct(p);
p                2408 fs/proc/base.c 	struct task_struct *p;
p                2411 fs/proc/base.c 	p = get_proc_task(inode);
p                2412 fs/proc/base.c 	if (!p)
p                2415 fs/proc/base.c 	if (p != current) {
p                2417 fs/proc/base.c 		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
p                2424 fs/proc/base.c 		err = security_task_getscheduler(p);
p                2429 fs/proc/base.c 	task_lock(p);
p                2430 fs/proc/base.c 	seq_printf(m, "%llu\n", p->timer_slack_ns);
p                2431 fs/proc/base.c 	task_unlock(p);
p                2434 fs/proc/base.c 	put_task_struct(p);
p                2455 fs/proc/base.c 	const struct pid_entry *p = ptr;
p                2459 fs/proc/base.c 	inode = proc_pid_make_inode(dentry->d_sb, task, p->mode);
p                2466 fs/proc/base.c 	if (p->iop)
p                2467 fs/proc/base.c 		inode->i_op = p->iop;
p                2468 fs/proc/base.c 	if (p->fop)
p                2469 fs/proc/base.c 		inode->i_fop = p->fop;
p                2470 fs/proc/base.c 	ei->op = p->op;
p                2478 fs/proc/base.c 					 const struct pid_entry *p,
p                2491 fs/proc/base.c 	for (; p < end; p++) {
p                2492 fs/proc/base.c 		if (p->len != dentry->d_name.len)
p                2494 fs/proc/base.c 		if (!memcmp(dentry->d_name.name, p->name, p->len)) {
p                2495 fs/proc/base.c 			res = proc_pident_instantiate(dentry, task, p);
p                2508 fs/proc/base.c 	const struct pid_entry *p;
p                2519 fs/proc/base.c 	for (p = ents + (ctx->pos - 2); p < ents + nents; p++) {
p                2520 fs/proc/base.c 		if (!proc_fill_cache(file, ctx, p->name, p->len,
p                2521 fs/proc/base.c 				proc_pident_instantiate, task, p))
p                2535 fs/proc/base.c 	char *p = NULL;
p                2544 fs/proc/base.c 				      &p);
p                2547 fs/proc/base.c 		length = simple_read_from_buffer(buf, count, ppos, p, length);
p                2548 fs/proc/base.c 	kfree(p);
p                3692 fs/proc/base.c 	struct task_struct *p = get_proc_task(inode);
p                3695 fs/proc/base.c 	if (p) {
p                3696 fs/proc/base.c 		stat->nlink += get_nr_threads(p);
p                3697 fs/proc/base.c 		put_task_struct(p);
p                 231 fs/proc/fd.c   	struct task_struct *p = get_proc_task(file_inode(file));
p                 235 fs/proc/fd.c   	if (!p)
p                 240 fs/proc/fd.c   	files = get_files_struct(p);
p                 262 fs/proc/fd.c   				     name, len, instantiate, p,
p                 272 fs/proc/fd.c   	put_task_struct(p);
p                 299 fs/proc/fd.c   	struct task_struct *p;
p                 307 fs/proc/fd.c   	p = pid_task(proc_pid(inode), PIDTYPE_PID);
p                 308 fs/proc/fd.c   	if (p && same_thread_group(p, current))
p                 520 fs/proc/generic.c 	struct proc_dir_entry *p;
p                 529 fs/proc/generic.c 	p = __proc_create(parent, name, mode, 1);
p                 530 fs/proc/generic.c 	if (p) {
p                 531 fs/proc/generic.c 		p->proc_iops = &proc_file_inode_operations;
p                 532 fs/proc/generic.c 		p->data = data;
p                 534 fs/proc/generic.c 	return p;
p                 541 fs/proc/generic.c 	struct proc_dir_entry *p;
p                 545 fs/proc/generic.c 	p = proc_create_reg(name, mode, &parent, data);
p                 546 fs/proc/generic.c 	if (!p)
p                 548 fs/proc/generic.c 	p->proc_fops = proc_fops;
p                 549 fs/proc/generic.c 	return proc_register(parent, p);
p                 590 fs/proc/generic.c 	struct proc_dir_entry *p;
p                 592 fs/proc/generic.c 	p = proc_create_reg(name, mode, &parent, data);
p                 593 fs/proc/generic.c 	if (!p)
p                 595 fs/proc/generic.c 	p->proc_fops = &proc_seq_fops;
p                 596 fs/proc/generic.c 	p->seq_ops = ops;
p                 597 fs/proc/generic.c 	p->state_size = state_size;
p                 598 fs/proc/generic.c 	return proc_register(parent, p);
p                 620 fs/proc/generic.c 	struct proc_dir_entry *p;
p                 622 fs/proc/generic.c 	p = proc_create_reg(name, mode, &parent, data);
p                 623 fs/proc/generic.c 	if (!p)
p                 625 fs/proc/generic.c 	p->proc_fops = &proc_single_fops;
p                 626 fs/proc/generic.c 	p->single_show = show;
p                 627 fs/proc/generic.c 	return proc_register(parent, p);
p                 429 fs/proc/inode.c static void proc_put_link(void *p)
p                 431 fs/proc/inode.c 	unuse_pde(p);
p                 141 fs/proc/internal.h extern void proc_task_name(struct seq_file *m, struct task_struct *p,
p                 190 fs/proc/kcore.c 	struct page *p;
p                 195 fs/proc/kcore.c 	p = pfn_to_page(pfn);
p                 196 fs/proc/kcore.c 	if (!memmap_valid_within(pfn, p, page_zone(p)))
p                 202 fs/proc/kcore.c 	ent->addr = (unsigned long)page_to_virt(p);
p                  76 fs/proc/nommu.c 	struct rb_node *p = _p;
p                  78 fs/proc/nommu.c 	return nommu_region_show(m, rb_entry(p, struct vm_region, vm_rb));
p                  83 fs/proc/nommu.c 	struct rb_node *p;
p                  88 fs/proc/nommu.c 	for (p = rb_first(&nommu_region_tree); p; p = rb_next(p))
p                  90 fs/proc/nommu.c 			return p;
p                  61 fs/proc/proc_net.c 	struct seq_net_private *p;
p                  64 fs/proc/proc_net.c 	WARN_ON_ONCE(state_size < sizeof(*p));
p                  73 fs/proc/proc_net.c 	p = __seq_open_private(file, PDE(inode)->seq_ops, state_size);
p                  74 fs/proc/proc_net.c 	if (!p) {
p                  79 fs/proc/proc_net.c 	p->net = net;
p                 105 fs/proc/proc_net.c 	struct proc_dir_entry *p;
p                 107 fs/proc/proc_net.c 	p = proc_create_reg(name, mode, &parent, data);
p                 108 fs/proc/proc_net.c 	if (!p)
p                 110 fs/proc/proc_net.c 	pde_force_lookup(p);
p                 111 fs/proc/proc_net.c 	p->proc_fops = &proc_net_seq_fops;
p                 112 fs/proc/proc_net.c 	p->seq_ops = ops;
p                 113 fs/proc/proc_net.c 	p->state_size = state_size;
p                 114 fs/proc/proc_net.c 	return proc_register(parent, p);
p                 149 fs/proc/proc_net.c 	struct proc_dir_entry *p;
p                 151 fs/proc/proc_net.c 	p = proc_create_reg(name, mode, &parent, data);
p                 152 fs/proc/proc_net.c 	if (!p)
p                 154 fs/proc/proc_net.c 	pde_force_lookup(p);
p                 155 fs/proc/proc_net.c 	p->proc_fops = &proc_net_seq_fops;
p                 156 fs/proc/proc_net.c 	p->seq_ops = ops;
p                 157 fs/proc/proc_net.c 	p->state_size = state_size;
p                 158 fs/proc/proc_net.c 	p->write = write;
p                 159 fs/proc/proc_net.c 	return proc_register(parent, p);
p                 198 fs/proc/proc_net.c 	struct proc_dir_entry *p;
p                 200 fs/proc/proc_net.c 	p = proc_create_reg(name, mode, &parent, data);
p                 201 fs/proc/proc_net.c 	if (!p)
p                 203 fs/proc/proc_net.c 	pde_force_lookup(p);
p                 204 fs/proc/proc_net.c 	p->proc_fops = &proc_net_single_fops;
p                 205 fs/proc/proc_net.c 	p->single_show = show;
p                 206 fs/proc/proc_net.c 	return proc_register(parent, p);
p                 241 fs/proc/proc_net.c 	struct proc_dir_entry *p;
p                 243 fs/proc/proc_net.c 	p = proc_create_reg(name, mode, &parent, data);
p                 244 fs/proc/proc_net.c 	if (!p)
p                 246 fs/proc/proc_net.c 	pde_force_lookup(p);
p                 247 fs/proc/proc_net.c 	p->proc_fops = &proc_net_single_fops;
p                 248 fs/proc/proc_net.c 	p->single_show = show;
p                 249 fs/proc/proc_net.c 	p->write = write;
p                 250 fs/proc/proc_net.c 	return proc_register(parent, p);
p                 143 fs/proc/proc_sysctl.c 	struct rb_node **p = &head->parent->root.rb_node;
p                 148 fs/proc/proc_sysctl.c 	while (*p) {
p                 155 fs/proc/proc_sysctl.c 		parent = *p;
p                 163 fs/proc/proc_sysctl.c 			p = &(*p)->rb_left;
p                 165 fs/proc/proc_sysctl.c 			p = &(*p)->rb_right;
p                 174 fs/proc/proc_sysctl.c 	rb_link_node(node, parent, p);
p                 254 fs/proc/proc_sysctl.c static int use_table(struct ctl_table_header *p)
p                 256 fs/proc/proc_sysctl.c 	if (unlikely(p->unregistering))
p                 258 fs/proc/proc_sysctl.c 	p->used++;
p                 263 fs/proc/proc_sysctl.c static void unuse_table(struct ctl_table_header *p)
p                 265 fs/proc/proc_sysctl.c 	if (!--p->used)
p                 266 fs/proc/proc_sysctl.c 		if (unlikely(p->unregistering))
p                 267 fs/proc/proc_sysctl.c 			complete(p->unregistering);
p                 309 fs/proc/proc_sysctl.c static void start_unregistering(struct ctl_table_header *p)
p                 315 fs/proc/proc_sysctl.c 	if (unlikely(p->used)) {
p                 318 fs/proc/proc_sysctl.c 		p->unregistering = &wait;
p                 323 fs/proc/proc_sysctl.c 		p->unregistering = ERR_PTR(-EINVAL);
p                 330 fs/proc/proc_sysctl.c 	proc_sys_prune_dcache(p);
p                 336 fs/proc/proc_sysctl.c 	erase_header(p);
p                 537 fs/proc/proc_sysctl.c 	struct ctl_table *p;
p                 548 fs/proc/proc_sysctl.c 	p = lookup_entry(&h, ctl_dir, name->name, name->len);
p                 549 fs/proc/proc_sysctl.c 	if (!p)
p                 552 fs/proc/proc_sysctl.c 	if (S_ISLNK(p->mode)) {
p                 553 fs/proc/proc_sysctl.c 		ret = sysctl_follow_link(&h, &p);
p                 559 fs/proc/proc_sysctl.c 	inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
p                 909 fs/proc/proc_sysctl.c static int sysctl_is_seen(struct ctl_table_header *p)
p                 911 fs/proc/proc_sysctl.c 	struct ctl_table_set *set = p->set;
p                 914 fs/proc/proc_sysctl.c 	if (p->unregistering)
p                  28 fs/proc/proc_tty.c static void show_tty_range(struct seq_file *m, struct tty_driver *p,
p                  31 fs/proc/proc_tty.c 	seq_printf(m, "%-20s ", p->driver_name ? p->driver_name : "unknown");
p                  32 fs/proc/proc_tty.c 	seq_printf(m, "/dev/%-8s ", p->name);
p                  33 fs/proc/proc_tty.c 	if (p->num > 1) {
p                  39 fs/proc/proc_tty.c 	switch (p->type) {
p                  42 fs/proc/proc_tty.c 		if (p->subtype == SYSTEM_TYPE_TTY)
p                  44 fs/proc/proc_tty.c 		else if (p->subtype == SYSTEM_TYPE_SYSCONS)
p                  46 fs/proc/proc_tty.c 		else if (p->subtype == SYSTEM_TYPE_CONSOLE)
p                  56 fs/proc/proc_tty.c 		if (p->subtype == PTY_TYPE_MASTER)
p                  58 fs/proc/proc_tty.c 		else if (p->subtype == PTY_TYPE_SLAVE)
p                  64 fs/proc/proc_tty.c 		seq_printf(m, "type:%d.%d", p->type, p->subtype);
p                  71 fs/proc/proc_tty.c 	struct tty_driver *p = list_entry(v, struct tty_driver, tty_drivers);
p                  72 fs/proc/proc_tty.c 	dev_t from = MKDEV(p->major, p->minor_start);
p                  73 fs/proc/proc_tty.c 	dev_t to = from + p->num;
p                  75 fs/proc/proc_tty.c 	if (&p->tty_drivers == tty_drivers.next) {
p                  97 fs/proc/proc_tty.c 		show_tty_range(m, p, from, next - from);
p                 101 fs/proc/proc_tty.c 		show_tty_range(m, p, from, to - from);
p                  10 fs/proc/softirqs.c static int show_softirqs(struct seq_file *p, void *v)
p                  14 fs/proc/softirqs.c 	seq_puts(p, "                    ");
p                  16 fs/proc/softirqs.c 		seq_printf(p, "CPU%-8d", i);
p                  17 fs/proc/softirqs.c 	seq_putc(p, '\n');
p                  20 fs/proc/softirqs.c 		seq_printf(p, "%12s:", softirq_to_name[i]);
p                  22 fs/proc/softirqs.c 			seq_printf(p, " %10u", kstat_softirqs_cpu(i, j));
p                  23 fs/proc/softirqs.c 		seq_putc(p, '\n');
p                  82 fs/proc/stat.c static void show_irq_gap(struct seq_file *p, unsigned int gap)
p                  90 fs/proc/stat.c 		seq_write(p, zeros, 2 * inc);
p                  95 fs/proc/stat.c static void show_all_irqs(struct seq_file *p)
p                 100 fs/proc/stat.c 		show_irq_gap(p, i - next);
p                 101 fs/proc/stat.c 		seq_put_decimal_ull(p, " ", kstat_irqs_usr(i));
p                 104 fs/proc/stat.c 	show_irq_gap(p, nr_irqs - next);
p                 107 fs/proc/stat.c static int show_stat(struct seq_file *p, void *v)
p                 147 fs/proc/stat.c 	seq_put_decimal_ull(p, "cpu  ", nsec_to_clock_t(user));
p                 148 fs/proc/stat.c 	seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
p                 149 fs/proc/stat.c 	seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
p                 150 fs/proc/stat.c 	seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
p                 151 fs/proc/stat.c 	seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
p                 152 fs/proc/stat.c 	seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
p                 153 fs/proc/stat.c 	seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
p                 154 fs/proc/stat.c 	seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
p                 155 fs/proc/stat.c 	seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
p                 156 fs/proc/stat.c 	seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
p                 157 fs/proc/stat.c 	seq_putc(p, '\n');
p                 173 fs/proc/stat.c 		seq_printf(p, "cpu%d", i);
p                 174 fs/proc/stat.c 		seq_put_decimal_ull(p, " ", nsec_to_clock_t(user));
p                 175 fs/proc/stat.c 		seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
p                 176 fs/proc/stat.c 		seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
p                 177 fs/proc/stat.c 		seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
p                 178 fs/proc/stat.c 		seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
p                 179 fs/proc/stat.c 		seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
p                 180 fs/proc/stat.c 		seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
p                 181 fs/proc/stat.c 		seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
p                 182 fs/proc/stat.c 		seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
p                 183 fs/proc/stat.c 		seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
p                 184 fs/proc/stat.c 		seq_putc(p, '\n');
p                 186 fs/proc/stat.c 	seq_put_decimal_ull(p, "intr ", (unsigned long long)sum);
p                 188 fs/proc/stat.c 	show_all_irqs(p);
p                 190 fs/proc/stat.c 	seq_printf(p,
p                 202 fs/proc/stat.c 	seq_put_decimal_ull(p, "softirq ", (unsigned long long)sum_softirq);
p                 205 fs/proc/stat.c 		seq_put_decimal_ull(p, " ", per_softirq_sums[i]);
p                 206 fs/proc/stat.c 	seq_putc(p, '\n');
p                  25 fs/proc/task_nommu.c 	struct rb_node *p;
p                  29 fs/proc/task_nommu.c 	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
p                  30 fs/proc/task_nommu.c 		vma = rb_entry(p, struct vm_area_struct, vm_rb);
p                  86 fs/proc/task_nommu.c 	struct rb_node *p;
p                  90 fs/proc/task_nommu.c 	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
p                  91 fs/proc/task_nommu.c 		vma = rb_entry(p, struct vm_area_struct, vm_rb);
p                 104 fs/proc/task_nommu.c 	struct rb_node *p;
p                 108 fs/proc/task_nommu.c 	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
p                 109 fs/proc/task_nommu.c 		vma = rb_entry(p, struct vm_area_struct, vm_rb);
p                 193 fs/proc/task_nommu.c 	struct rb_node *p = _p;
p                 195 fs/proc/task_nommu.c 	return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb));
p                 202 fs/proc/task_nommu.c 	struct rb_node *p;
p                 220 fs/proc/task_nommu.c 	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
p                 222 fs/proc/task_nommu.c 			return p;
p                 245 fs/proc/task_nommu.c 	struct rb_node *p = _p;
p                 248 fs/proc/task_nommu.c 	return p ? rb_next(p) : NULL;
p                  24 fs/proc_namespace.c 	struct proc_mounts *p = m->private;
p                  25 fs/proc_namespace.c 	struct mnt_namespace *ns = p->ns;
p                  29 fs/proc_namespace.c 	poll_wait(file, &p->ns->poll, wait);
p                  99 fs/proc_namespace.c 	struct proc_mounts *p = m->private;
p                 114 fs/proc_namespace.c 	err = seq_path_root(m, &mnt_path, &p->root, " \t\n\\");
p                 133 fs/proc_namespace.c 	struct proc_mounts *p = m->private;
p                 151 fs/proc_namespace.c 	err = seq_path_root(m, &mnt_path, &p->root, " \t\n\\");
p                 163 fs/proc_namespace.c 		int dom = get_dominating_id(r, &p->root);
p                 195 fs/proc_namespace.c 	struct proc_mounts *p = m->private;
p                 218 fs/proc_namespace.c 	err = seq_path_root(m, &mnt_path, &p->root, " \t\n\\");
p                 245 fs/proc_namespace.c 	struct proc_mounts *p;
p                 278 fs/proc_namespace.c 	p = m->private;
p                 279 fs/proc_namespace.c 	p->ns = ns;
p                 280 fs/proc_namespace.c 	p->root = root;
p                 281 fs/proc_namespace.c 	p->show = show;
p                 282 fs/proc_namespace.c 	p->cached_event = ~0ULL;
p                 297 fs/proc_namespace.c 	struct proc_mounts *p = m->private;
p                 298 fs/proc_namespace.c 	path_put(&p->root);
p                 299 fs/proc_namespace.c 	put_mnt_ns(p->ns);
p                 179 fs/pstore/inode.c 	struct pstore_private *p = d_inode(dentry)->i_private;
p                 180 fs/pstore/inode.c 	struct pstore_record *record = p->record;
p                 194 fs/pstore/inode.c 	struct pstore_private	*p = inode->i_private;
p                 198 fs/pstore/inode.c 	if (p) {
p                 200 fs/pstore/inode.c 		list_del(&p->list);
p                 202 fs/pstore/inode.c 		free_pstore_private(p);
p                 232 fs/pstore/inode.c 	char		*p;
p                 239 fs/pstore/inode.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 242 fs/pstore/inode.c 		if (!*p)
p                 245 fs/pstore/inode.c 		token = match_token(p, tokens, args);
p                  47 fs/qnx6/dir.c  					 struct page **p)
p                  58 fs/qnx6/dir.c  	kmap(*p = page);
p                 234 fs/qnx6/inode.c 	char *p;
p                 241 fs/qnx6/inode.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 243 fs/qnx6/inode.c 		if (!*p)
p                 246 fs/qnx6/inode.c 		token = match_token(p, tokens, args);
p                 296 fs/qnx6/inode.c 					struct qnx6_root_node *p);
p                 507 fs/qnx6/inode.c 					struct qnx6_root_node *p)
p                 513 fs/qnx6/inode.c 		inode->i_size = fs64_to_cpu(sbi, p->size);
p                 514 fs/qnx6/inode.c 		memcpy(ei->di_block_ptr, p->ptr, sizeof(p->ptr));
p                 515 fs/qnx6/inode.c 		ei->di_filelevels = p->levels;
p                 490 fs/read_write.c static ssize_t __vfs_write(struct file *file, const char __user *p,
p                 494 fs/read_write.c 		return file->f_op->write(file, p, count, pos);
p                 496 fs/read_write.c 		return new_sync_write(file, p, count, pos);
p                 504 fs/read_write.c 	const char __user *p;
p                 512 fs/read_write.c 	p = (__force const char __user *)buf;
p                 515 fs/read_write.c 	ret = __vfs_write(file, p, count, pos);
p                 293 fs/reiserfs/inode.c 	char *p = NULL;
p                 308 fs/reiserfs/inode.c 		if (p)
p                 355 fs/reiserfs/inode.c 		if (p)
p                 366 fs/reiserfs/inode.c 		if (p)
p                 399 fs/reiserfs/inode.c 	if (!p)
p                 400 fs/reiserfs/inode.c 		p = (char *)kmap(bh_result->b_page);
p                 402 fs/reiserfs/inode.c 	p += offset;
p                 403 fs/reiserfs/inode.c 	memset(p, 0, inode->i_sb->s_blocksize);
p                 424 fs/reiserfs/inode.c 		memcpy(p, ih_item_body(bh, ih) + path.pos_in_item, chars);
p                 429 fs/reiserfs/inode.c 		p += chars;
p                1530 fs/reiserfs/inode.c int reiserfs_init_locked_inode(struct inode *inode, void *p)
p                1532 fs/reiserfs/inode.c 	struct reiserfs_iget_args *args = (struct reiserfs_iget_args *)p;
p                2430 fs/reiserfs/inode.c 		char *p;
p                2431 fs/reiserfs/inode.c 		p = page_address(bh_result->b_page);
p                2432 fs/reiserfs/inode.c 		p += (byte_offset - 1) & (PAGE_SIZE - 1);
p                2461 fs/reiserfs/inode.c 		memcpy(ih_item_body(bh, ih) + pos_in_item, p + bytes_copied,
p                 119 fs/reiserfs/prints.c 		char *p = buf;
p                 122 fs/reiserfs/prints.c 		p += scnprintf(p, end - p, "%s",
p                 126 fs/reiserfs/prints.c 		p += scnprintf_le_key(p, end - p, &ih->ih_key);
p                 128 fs/reiserfs/prints.c 		p += scnprintf(p, end - p,
p                 132 fs/reiserfs/prints.c 		return p - buf;
p                 207 fs/reiserfs/prints.c 	char *p = error_buf;
p                 221 fs/reiserfs/prints.c 		p += vscnprintf(p, end - p, fmt1, args);
p                 225 fs/reiserfs/prints.c 			p += scnprintf_le_key(p, end - p,
p                 229 fs/reiserfs/prints.c 			p += scnprintf_cpu_key(p, end - p,
p                 233 fs/reiserfs/prints.c 			p += scnprintf_item_head(p, end - p,
p                 237 fs/reiserfs/prints.c 			p += scnprintf_direntry(p, end - p,
p                 241 fs/reiserfs/prints.c 			p += scnprintf_disk_child(p, end - p,
p                 245 fs/reiserfs/prints.c 			p += scnprintf_block_head(p, end - p,
p                 249 fs/reiserfs/prints.c 			p += scnprintf_buffer_head(p, end - p,
p                 253 fs/reiserfs/prints.c 			p += scnprintf_de_head(p, end - p,
p                 260 fs/reiserfs/prints.c 	p += vscnprintf(p, end - p, fmt1, args);
p                1422 fs/reiserfs/reiserfs.h #define get_block_num(p, i) get_unaligned_le32((p) + (i))
p                1423 fs/reiserfs/reiserfs.h #define put_block_num(p, i, v) put_unaligned_le32((v), (p) + (i))
p                3013 fs/reiserfs/reiserfs.h int reiserfs_check_path(struct treepath *p);
p                3067 fs/reiserfs/reiserfs.h int reiserfs_find_actor(struct inode *inode, void *p);
p                3068 fs/reiserfs/reiserfs.h int reiserfs_init_locked_inode(struct inode *inode, void *p);
p                 347 fs/reiserfs/stree.c int reiserfs_check_path(struct treepath *p)
p                 349 fs/reiserfs/stree.c 	RFALSE(p->path_length != ILLEGAL_PATH_ELEMENT_OFFSET,
p                 982 fs/reiserfs/super.c 	char *p;
p                 993 fs/reiserfs/super.c 	p = *cur;
p                 996 fs/reiserfs/super.c 	*cur = strchr(p, ',');
p                1002 fs/reiserfs/super.c 	if (!strncmp(p, "alloc=", 6)) {
p                1009 fs/reiserfs/super.c 		if (reiserfs_parse_alloc_options(s, p + 6)) {
p                1018 fs/reiserfs/super.c 		if (!strncmp(p, opt->option_name, strlen(opt->option_name))) {
p                1024 fs/reiserfs/super.c 							 p);
p                1031 fs/reiserfs/super.c 							 p);
p                1040 fs/reiserfs/super.c 				 "unknown mount option \"%s\"", p);
p                1044 fs/reiserfs/super.c 	p += strlen(opt->option_name);
p                1045 fs/reiserfs/super.c 	switch (*p) {
p                1075 fs/reiserfs/super.c 	p++;
p                1079 fs/reiserfs/super.c 	    && !strlen(p)) {
p                1089 fs/reiserfs/super.c 		*opt_arg = p;
p                1095 fs/reiserfs/super.c 		if (!strcmp(p, arg->value)) {
p                1105 fs/reiserfs/super.c 			 "bad value \"%s\" for option \"%s\"\n", p,
p                1195 fs/reiserfs/super.c 			char *p;
p                1197 fs/reiserfs/super.c 			p = NULL;
p                1206 fs/reiserfs/super.c 				*blocks = simple_strtoul(arg, &p, 0);
p                1207 fs/reiserfs/super.c 				if (*p != '\0') {
p                1218 fs/reiserfs/super.c 			char *p = NULL;
p                1219 fs/reiserfs/super.c 			unsigned long val = simple_strtoul(arg, &p, 0);
p                1221 fs/reiserfs/super.c 			if (*p != '\0' || val >= (unsigned int)-1) {
p                  41 fs/romfs/storage.c 	u_char buf[16], *p;
p                  51 fs/romfs/storage.c 		p = memchr(buf, 0, len);
p                  52 fs/romfs/storage.c 		if (p)
p                  53 fs/romfs/storage.c 			return n + (p - buf);
p                 137 fs/romfs/storage.c 	u_char *buf, *p;
p                 147 fs/romfs/storage.c 		p = memchr(buf, 0, segment);
p                 149 fs/romfs/storage.c 		if (p)
p                 150 fs/romfs/storage.c 			return n + (p - buf);
p                 119 fs/select.c    		       poll_table *p);
p                 140 fs/select.c    	struct poll_table_page * p = pwq->table;
p                 144 fs/select.c    	while (p) {
p                 148 fs/select.c    		entry = p->entry;
p                 152 fs/select.c    		} while (entry > p->entries);
p                 153 fs/select.c    		old = p;
p                 154 fs/select.c    		p = p->next;
p                 160 fs/select.c    static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
p                 162 fs/select.c    	struct poll_table_page *table = p->table;
p                 164 fs/select.c    	if (p->inline_index < N_INLINE_POLL_ENTRIES)
p                 165 fs/select.c    		return p->inline_entries + p->inline_index++;
p                 172 fs/select.c    			p->error = -ENOMEM;
p                 177 fs/select.c    		p->table = new_table;
p                 222 fs/select.c    				poll_table *p)
p                 224 fs/select.c    	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
p                 230 fs/select.c    	entry->key = p->_key;
p                 298 fs/select.c    			      void __user *p,
p                 305 fs/select.c    	if (!p)
p                 330 fs/select.c    			if (!copy_to_user(p, &rtv, sizeof(rtv)))
p                 340 fs/select.c    			if (!copy_to_user(p, &rtv, sizeof(rtv)))
p                 345 fs/select.c    		if (!put_timespec64(&rts, p))
p                 349 fs/select.c    		if (!put_old_timespec32(&rts, p))
p                  53 fs/seq_file.c  	struct seq_file *p;
p                  57 fs/seq_file.c  	p = kmem_cache_zalloc(seq_file_cache, GFP_KERNEL);
p                  58 fs/seq_file.c  	if (!p)
p                  61 fs/seq_file.c  	file->private_data = p;
p                  63 fs/seq_file.c  	mutex_init(&p->lock);
p                  64 fs/seq_file.c  	p->op = op;
p                  68 fs/seq_file.c  	p->file = file;
p                  95 fs/seq_file.c  	void *p;
p                 108 fs/seq_file.c  	p = m->op->start(m, &m->index);
p                 109 fs/seq_file.c  	while (p) {
p                 110 fs/seq_file.c  		error = PTR_ERR(p);
p                 111 fs/seq_file.c  		if (IS_ERR(p))
p                 113 fs/seq_file.c  		error = m->op->show(m, p);
p                 122 fs/seq_file.c  		p = m->op->next(m, p, &m->index);
p                 133 fs/seq_file.c  	m->op->stop(m, p);
p                 137 fs/seq_file.c  	m->op->stop(m, p);
p                 158 fs/seq_file.c  	void *p;
p                 224 fs/seq_file.c  	p = m->op->start(m, &m->index);
p                 226 fs/seq_file.c  		err = PTR_ERR(p);
p                 227 fs/seq_file.c  		if (!p || IS_ERR(p))
p                 229 fs/seq_file.c  		err = m->op->show(m, p);
p                 235 fs/seq_file.c  			p = m->op->next(m, p, &m->index);
p                 240 fs/seq_file.c  		m->op->stop(m, p);
p                 247 fs/seq_file.c  		p = m->op->start(m, &m->index);
p                 249 fs/seq_file.c  	m->op->stop(m, p);
p                 258 fs/seq_file.c  		p = m->op->next(m, p, &m->index);
p                 262 fs/seq_file.c  		if (!p || IS_ERR(p)) {
p                 263 fs/seq_file.c  			err = PTR_ERR(p);
p                 268 fs/seq_file.c  		err = m->op->show(m, p);
p                 275 fs/seq_file.c  	m->op->stop(m, p);
p                 434 fs/seq_file.c  char *mangle_path(char *s, const char *p, const char *esc)
p                 436 fs/seq_file.c  	while (s <= p) {
p                 437 fs/seq_file.c  		char c = *p++;
p                 442 fs/seq_file.c  		} else if (s + 4 > p) {
p                 471 fs/seq_file.c  		char *p = d_path(path, buf, size);
p                 472 fs/seq_file.c  		if (!IS_ERR(p)) {
p                 473 fs/seq_file.c  			char *end = mangle_path(buf, p, esc);
p                 509 fs/seq_file.c  		char *p;
p                 511 fs/seq_file.c  		p = __d_path(path, root, buf, size);
p                 512 fs/seq_file.c  		if (!p)
p                 514 fs/seq_file.c  		res = PTR_ERR(p);
p                 515 fs/seq_file.c  		if (!IS_ERR(p)) {
p                 516 fs/seq_file.c  			char *end = mangle_path(buf, p, esc);
p                 538 fs/seq_file.c  		char *p = dentry_path(dentry, buf, size);
p                 539 fs/seq_file.c  		if (!IS_ERR(p)) {
p                 540 fs/seq_file.c  			char *end = mangle_path(buf, p, esc);
p                 551 fs/seq_file.c  static void *single_start(struct seq_file *p, loff_t *pos)
p                 556 fs/seq_file.c  static void *single_next(struct seq_file *p, void *v, loff_t *pos)
p                 562 fs/seq_file.c  static void single_stop(struct seq_file *p, void *v)
p                 124 fs/statfs.c    static int do_statfs_native(struct kstatfs *st, struct statfs __user *p)
p                 161 fs/statfs.c    	if (copy_to_user(p, &buf, sizeof(buf)))
p                 166 fs/statfs.c    static int do_statfs64(struct kstatfs *st, struct statfs64 __user *p)
p                 185 fs/statfs.c    	if (copy_to_user(p, &buf, sizeof(buf)))
p                 653 fs/super.c     	struct super_block *sb, *p = NULL;
p                 665 fs/super.c     		if (p)
p                 666 fs/super.c     			__put_super(p);
p                 667 fs/super.c     		p = sb;
p                 669 fs/super.c     	if (p)
p                 670 fs/super.c     		__put_super(p);
p                 683 fs/super.c     	struct super_block *sb, *p = NULL;
p                 698 fs/super.c     		if (p)
p                 699 fs/super.c     			__put_super(p);
p                 700 fs/super.c     		p = sb;
p                 702 fs/super.c     	if (p)
p                 703 fs/super.c     		__put_super(p);
p                 719 fs/super.c     	struct super_block *sb, *p = NULL;
p                 732 fs/super.c     		if (p)
p                 733 fs/super.c     			__put_super(p);
p                 734 fs/super.c     		p = sb;
p                 736 fs/super.c     	if (p)
p                 737 fs/super.c     		__put_super(p);
p                1073 fs/super.c     int get_anon_bdev(dev_t *p)
p                1088 fs/super.c     	*p = MKDEV(0, dev);
p                1498 fs/super.c     static int compare_single(struct super_block *s, void *p)
p                 345 fs/sysv/dir.c  struct sysv_dir_entry * sysv_dotdot (struct inode *dir, struct page **p)
p                 352 fs/sysv/dir.c  		*p = page;
p                 321 fs/sysv/inode.c static void init_once(void *p)
p                 323 fs/sysv/inode.c 	struct sysv_inode_info *si = (struct sysv_inode_info *)p;
p                  60 fs/sysv/itree.c 	sysv_zone_t     *p;
p                  67 fs/sysv/itree.c static inline void add_chain(Indirect *p, struct buffer_head *bh, sysv_zone_t *v)
p                  69 fs/sysv/itree.c 	p->key = *(p->p = v);
p                  70 fs/sysv/itree.c 	p->bh = bh;
p                  75 fs/sysv/itree.c 	while (from <= to && from->key == *from->p)
p                  95 fs/sysv/itree.c 	Indirect *p = chain;
p                 100 fs/sysv/itree.c 	if (!p->key)
p                 103 fs/sysv/itree.c 		int block = block_to_cpu(SYSV_SB(sb), p->key);
p                 107 fs/sysv/itree.c 		if (!verify_chain(chain, p))
p                 109 fs/sysv/itree.c 		add_chain(++p, bh, (sysv_zone_t*)bh->b_data + *++offsets);
p                 110 fs/sysv/itree.c 		if (!p->key)
p                 122 fs/sysv/itree.c 	return p;
p                 151 fs/sysv/itree.c 		branch[n].p = (sysv_zone_t*) bh->b_data + offsets[n];
p                 152 fs/sysv/itree.c 		*branch[n].p = branch[n].key;
p                 177 fs/sysv/itree.c 	if (!verify_chain(chain, where-1) || *where->p)
p                 179 fs/sysv/itree.c 	*where->p = where->key;
p                 269 fs/sysv/itree.c static inline int all_zeroes(sysv_zone_t *p, sysv_zone_t *q)
p                 271 fs/sysv/itree.c 	while (p < q)
p                 272 fs/sysv/itree.c 		if (*p++)
p                 283 fs/sysv/itree.c 	Indirect *partial, *p;
p                 298 fs/sysv/itree.c 	if (!partial->key && *partial->p) {
p                 302 fs/sysv/itree.c 	for (p=partial; p>chain && all_zeroes((sysv_zone_t*)p->bh->b_data,p->p); p--)
p                 310 fs/sysv/itree.c 	if (p == chain + k - 1 && p > chain) {
p                 311 fs/sysv/itree.c 		p->p--;
p                 313 fs/sysv/itree.c 		*top = *p->p;
p                 314 fs/sysv/itree.c 		*p->p = 0;
p                 318 fs/sysv/itree.c 	while (partial > p) {
p                 326 fs/sysv/itree.c static inline void free_data(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q)
p                 328 fs/sysv/itree.c 	for ( ; p < q ; p++) {
p                 329 fs/sysv/itree.c 		sysv_zone_t nr = *p;
p                 331 fs/sysv/itree.c 			*p = 0;
p                 338 fs/sysv/itree.c static void free_branches(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q, int depth)
p                 344 fs/sysv/itree.c 		for ( ; p < q ; p++) {
p                 346 fs/sysv/itree.c 			sysv_zone_t nr = *p;
p                 349 fs/sysv/itree.c 			*p = 0;
p                 361 fs/sysv/itree.c 		free_data(inode, p, q);
p                 405 fs/sysv/itree.c 		free_branches(inode, partial->p + 1, block_end(partial->bh),
p                 374 fs/timerfd.c   static int timerfd_fget(int fd, struct fd *p)
p                 383 fs/timerfd.c   	*p = f;
p                 169 fs/tracefs/inode.c 	char *p;
p                 173 fs/tracefs/inode.c 	while ((p = strsep(&data, ",")) != NULL) {
p                 174 fs/tracefs/inode.c 		if (!*p)
p                 177 fs/tracefs/inode.c 		token = match_token(p, tokens, args);
p                  31 fs/ubifs/crypto.c 	void *p = &dn->data;
p                  40 fs/ubifs/crypto.c 		memset(p + in_len, 0, pad_len - in_len);
p                  42 fs/ubifs/crypto.c 	err = fscrypt_encrypt_block_inplace(inode, virt_to_page(p), pad_len,
p                  43 fs/ubifs/crypto.c 					    offset_in_page(p), block, GFP_NOFS);
p                  93 fs/ubifs/debug.c 	char *p = buffer;
p                  99 fs/ubifs/debug.c 			len -= snprintf(p, len, "(%lu, %s)",
p                 105 fs/ubifs/debug.c 			len -= snprintf(p, len, "(%lu, %s, %#08x)",
p                 110 fs/ubifs/debug.c 			len -= snprintf(p, len, "(%lu, %s, %u)",
p                 115 fs/ubifs/debug.c 			len -= snprintf(p, len, "(%lu, %s)",
p                 120 fs/ubifs/debug.c 			len -= snprintf(p, len, "(bad key type: %#08x, %#08x)",
p                 124 fs/ubifs/debug.c 		len -= snprintf(p, len, "bad key format %d", c->key_fmt);
p                 126 fs/ubifs/debug.c 	return p;
p                1795 fs/ubifs/debug.c 	struct rb_node **p, *parent = NULL;
p                1801 fs/ubifs/debug.c 	p = &fsckd->inodes.rb_node;
p                1802 fs/ubifs/debug.c 	while (*p) {
p                1803 fs/ubifs/debug.c 		parent = *p;
p                1806 fs/ubifs/debug.c 			p = &(*p)->rb_left;
p                1808 fs/ubifs/debug.c 			p = &(*p)->rb_right;
p                1860 fs/ubifs/debug.c 	rb_link_node(&fscki->rb, parent, p);
p                1877 fs/ubifs/debug.c 	struct rb_node *p;
p                1880 fs/ubifs/debug.c 	p = fsckd->inodes.rb_node;
p                1881 fs/ubifs/debug.c 	while (p) {
p                1882 fs/ubifs/debug.c 		fscki = rb_entry(p, struct fsck_inode, rb);
p                1884 fs/ubifs/debug.c 			p = p->rb_left;
p                1886 fs/ubifs/debug.c 			p = p->rb_right;
p                2551 fs/ubifs/debug.c 	unsigned char *p = (void *)buf;
p                2561 fs/ubifs/debug.c 		memset(p + from, 0xFF, to - from);
p                2563 fs/ubifs/debug.c 		prandom_bytes(p + from, to - from);
p                1058 fs/ubifs/journal.c 	void *p;
p                1120 fs/ubifs/journal.c 	p = (void *)dent2 + aligned_dlen2;
p                1122 fs/ubifs/journal.c 		pack_inode(c, p, fst_dir, 1);
p                1123 fs/ubifs/journal.c 		err = ubifs_node_calc_hash(c, p, hash_p1);
p                1127 fs/ubifs/journal.c 		pack_inode(c, p, fst_dir, 0);
p                1128 fs/ubifs/journal.c 		err = ubifs_node_calc_hash(c, p, hash_p1);
p                1131 fs/ubifs/journal.c 		p += ALIGN(plen, 8);
p                1132 fs/ubifs/journal.c 		pack_inode(c, p, snd_dir, 1);
p                1133 fs/ubifs/journal.c 		err = ubifs_node_calc_hash(c, p, hash_p2);
p                1217 fs/ubifs/journal.c 	void *p;
p                1301 fs/ubifs/journal.c 	p = (void *)dent2 + aligned_dlen2;
p                1303 fs/ubifs/journal.c 		pack_inode(c, p, new_inode, 0);
p                1304 fs/ubifs/journal.c 		err = ubifs_node_calc_hash(c, p, hash_new_inode);
p                1308 fs/ubifs/journal.c 		p += ALIGN(ilen, 8);
p                1312 fs/ubifs/journal.c 		pack_inode(c, p, old_dir, 1);
p                1313 fs/ubifs/journal.c 		err = ubifs_node_calc_hash(c, p, hash_old_dir);
p                1317 fs/ubifs/journal.c 		pack_inode(c, p, old_dir, 0);
p                1318 fs/ubifs/journal.c 		err = ubifs_node_calc_hash(c, p, hash_old_dir);
p                1322 fs/ubifs/journal.c 		p += ALIGN(plen, 8);
p                1323 fs/ubifs/journal.c 		pack_inode(c, p, new_dir, 1);
p                1324 fs/ubifs/journal.c 		err = ubifs_node_calc_hash(c, p, hash_new_dir);
p                  32 fs/ubifs/log.c 	struct rb_node *p;
p                  36 fs/ubifs/log.c 	p = c->buds.rb_node;
p                  37 fs/ubifs/log.c 	while (p) {
p                  38 fs/ubifs/log.c 		bud = rb_entry(p, struct ubifs_bud, rb);
p                  40 fs/ubifs/log.c 			p = p->rb_left;
p                  42 fs/ubifs/log.c 			p = p->rb_right;
p                  61 fs/ubifs/log.c 	struct rb_node *p;
p                  69 fs/ubifs/log.c 	p = c->buds.rb_node;
p                  70 fs/ubifs/log.c 	while (p) {
p                  71 fs/ubifs/log.c 		bud = rb_entry(p, struct ubifs_bud, rb);
p                  73 fs/ubifs/log.c 			p = p->rb_left;
p                  75 fs/ubifs/log.c 			p = p->rb_right;
p                 114 fs/ubifs/log.c 	struct rb_node **p, *parent = NULL;
p                 119 fs/ubifs/log.c 	p = &c->buds.rb_node;
p                 120 fs/ubifs/log.c 	while (*p) {
p                 121 fs/ubifs/log.c 		parent = *p;
p                 125 fs/ubifs/log.c 			p = &(*p)->rb_left;
p                 127 fs/ubifs/log.c 			p = &(*p)->rb_right;
p                 130 fs/ubifs/log.c 	rb_link_node(&bud->rb, parent, p);
p                 299 fs/ubifs/log.c 	struct rb_node *p;
p                 304 fs/ubifs/log.c 	p = rb_first(&c->buds);
p                 305 fs/ubifs/log.c 	while (p) {
p                 306 fs/ubifs/log.c 		struct rb_node *p1 = p;
p                 310 fs/ubifs/log.c 		p = rb_next(p);
p                 563 fs/ubifs/log.c 	struct rb_node **p = &done_tree->rb_node, *parent = NULL;
p                 566 fs/ubifs/log.c 	while (*p) {
p                 567 fs/ubifs/log.c 		parent = *p;
p                 570 fs/ubifs/log.c 			p = &(*p)->rb_left;
p                 572 fs/ubifs/log.c 			p = &(*p)->rb_right;
p                 583 fs/ubifs/log.c 	rb_link_node(&dr->rb, parent, p);
p                 224 fs/ubifs/lpt.c 	uint8_t *p = *addr;
p                 233 fs/ubifs/lpt.c 		*p |= ((uint8_t)val) << b;
p                 236 fs/ubifs/lpt.c 			*++p = (uint8_t)(val >>= (8 - b));
p                 238 fs/ubifs/lpt.c 				*++p = (uint8_t)(val >>= 8);
p                 240 fs/ubifs/lpt.c 					*++p = (uint8_t)(val >>= 8);
p                 242 fs/ubifs/lpt.c 						*++p = (uint8_t)(val >>= 8);
p                 247 fs/ubifs/lpt.c 		*p = (uint8_t)val;
p                 249 fs/ubifs/lpt.c 			*++p = (uint8_t)(val >>= 8);
p                 251 fs/ubifs/lpt.c 				*++p = (uint8_t)(val >>= 8);
p                 253 fs/ubifs/lpt.c 					*++p = (uint8_t)(val >>= 8);
p                 259 fs/ubifs/lpt.c 		p++;
p                 260 fs/ubifs/lpt.c 	*addr = p;
p                 276 fs/ubifs/lpt.c 	uint8_t *p = *addr;
p                 288 fs/ubifs/lpt.c 			val = p[1];
p                 291 fs/ubifs/lpt.c 			val = p[1] | ((uint32_t)p[2] << 8);
p                 294 fs/ubifs/lpt.c 			val = p[1] | ((uint32_t)p[2] << 8) |
p                 295 fs/ubifs/lpt.c 				     ((uint32_t)p[3] << 16);
p                 298 fs/ubifs/lpt.c 			val = p[1] | ((uint32_t)p[2] << 8) |
p                 299 fs/ubifs/lpt.c 				     ((uint32_t)p[3] << 16) |
p                 300 fs/ubifs/lpt.c 				     ((uint32_t)p[4] << 24);
p                 303 fs/ubifs/lpt.c 		val |= *p >> b;
p                 308 fs/ubifs/lpt.c 			val = p[0];
p                 311 fs/ubifs/lpt.c 			val = p[0] | ((uint32_t)p[1] << 8);
p                 314 fs/ubifs/lpt.c 			val = p[0] | ((uint32_t)p[1] << 8) |
p                 315 fs/ubifs/lpt.c 				     ((uint32_t)p[2] << 16);
p                 318 fs/ubifs/lpt.c 			val = p[0] | ((uint32_t)p[1] << 8) |
p                 319 fs/ubifs/lpt.c 				     ((uint32_t)p[2] << 16) |
p                 320 fs/ubifs/lpt.c 				     ((uint32_t)p[3] << 24);
p                 327 fs/ubifs/lpt.c 	p += nrbits >> 3;
p                 328 fs/ubifs/lpt.c 	*addr = p;
p                 606 fs/ubifs/lpt.c 	void *buf = NULL, *p;
p                 650 fs/ubifs/lpt.c 	p = buf;
p                 673 fs/ubifs/lpt.c 	ubifs_pack_pnode(c, p, pnode);
p                 674 fs/ubifs/lpt.c 	err = ubifs_shash_update(c, desc, p, c->pnode_sz);
p                 678 fs/ubifs/lpt.c 	p += c->pnode_sz;
p                 704 fs/ubifs/lpt.c 			memset(p, 0xff, alen - len);
p                 708 fs/ubifs/lpt.c 			p = buf;
p                 711 fs/ubifs/lpt.c 		ubifs_pack_pnode(c, p, pnode);
p                 712 fs/ubifs/lpt.c 		err = ubifs_shash_update(c, desc, p, c->pnode_sz);
p                 716 fs/ubifs/lpt.c 		p += c->pnode_sz;
p                 738 fs/ubifs/lpt.c 				memset(p, 0xff, alen - len);
p                 742 fs/ubifs/lpt.c 				p = buf;
p                 767 fs/ubifs/lpt.c 			ubifs_pack_nnode(c, p, nnode);
p                 768 fs/ubifs/lpt.c 			p += c->nnode_sz;
p                 785 fs/ubifs/lpt.c 			memset(p, 0xff, alen - len);
p                 789 fs/ubifs/lpt.c 			p = buf;
p                 801 fs/ubifs/lpt.c 		ubifs_pack_lsave(c, p, lsave);
p                 802 fs/ubifs/lpt.c 		p += c->lsave_sz;
p                 810 fs/ubifs/lpt.c 		memset(p, 0xff, alen - len);
p                 814 fs/ubifs/lpt.c 		p = buf;
p                 826 fs/ubifs/lpt.c 	ubifs_pack_ltab(c, p, ltab);
p                 827 fs/ubifs/lpt.c 	p += c->ltab_sz;
p                 830 fs/ubifs/lpt.c 	memset(p, 0xff, alen - len);
p                1565 fs/ubifs/lpt.c 	struct ubifs_pnode *p;
p                1577 fs/ubifs/lpt.c 	p = kmemdup(pnode, sizeof(struct ubifs_pnode), GFP_NOFS);
p                1578 fs/ubifs/lpt.c 	if (unlikely(!p))
p                1581 fs/ubifs/lpt.c 	p->cnext = NULL;
p                1582 fs/ubifs/lpt.c 	__set_bit(DIRTY_CNODE, &p->flags);
p                1583 fs/ubifs/lpt.c 	__clear_bit(COW_CNODE, &p->flags);
p                1584 fs/ubifs/lpt.c 	replace_cats(c, pnode, p);
p                1591 fs/ubifs/lpt.c 	pnode->parent->nbranch[p->iip].pnode = p;
p                1592 fs/ubifs/lpt.c 	return p;
p                1594 fs/ubifs/lpt_commit.c 	void *buf, *p;
p                1599 fs/ubifs/lpt_commit.c 	buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
p                1612 fs/ubifs/lpt_commit.c 		if (!is_a_node(c, p, len)) {
p                1615 fs/ubifs/lpt_commit.c 			pad_len = get_pad_len(c, p, len);
p                1617 fs/ubifs/lpt_commit.c 				p += pad_len;
p                1622 fs/ubifs/lpt_commit.c 			if (!dbg_is_all_ff(p, len)) {
p                1640 fs/ubifs/lpt_commit.c 		node_type = get_lpt_node_type(c, p, &node_num);
p                1645 fs/ubifs/lpt_commit.c 		p += node_len;
p                1845 fs/ubifs/lpt_commit.c 	void *buf, *p;
p                1848 fs/ubifs/lpt_commit.c 	buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
p                1860 fs/ubifs/lpt_commit.c 		if (!is_a_node(c, p, len)) {
p                1863 fs/ubifs/lpt_commit.c 			pad_len = get_pad_len(c, p, len);
p                1867 fs/ubifs/lpt_commit.c 				p += pad_len;
p                1877 fs/ubifs/lpt_commit.c 		node_type = get_lpt_node_type(c, p, &node_num);
p                1901 fs/ubifs/lpt_commit.c 			err = ubifs_unpack_nnode(c, p, &nnode);
p                1929 fs/ubifs/lpt_commit.c 		p += node_len;
p                  49 fs/ubifs/orphan.c 	struct rb_node **p, *parent = NULL;
p                  64 fs/ubifs/orphan.c 	p = &c->orph_tree.rb_node;
p                  65 fs/ubifs/orphan.c 	while (*p) {
p                  66 fs/ubifs/orphan.c 		parent = *p;
p                  69 fs/ubifs/orphan.c 			p = &(*p)->rb_left;
p                  71 fs/ubifs/orphan.c 			p = &(*p)->rb_right;
p                  81 fs/ubifs/orphan.c 	rb_link_node(&orphan->rb, parent, p);
p                  99 fs/ubifs/orphan.c 	struct rb_node *p;
p                 101 fs/ubifs/orphan.c 	p = c->orph_tree.rb_node;
p                 102 fs/ubifs/orphan.c 	while (p) {
p                 103 fs/ubifs/orphan.c 		o = rb_entry(p, struct ubifs_orphan, rb);
p                 105 fs/ubifs/orphan.c 			p = p->rb_left;
p                 107 fs/ubifs/orphan.c 			p = p->rb_right;
p                 578 fs/ubifs/orphan.c 	struct rb_node **p, *parent = NULL;
p                 585 fs/ubifs/orphan.c 	p = &c->orph_tree.rb_node;
p                 586 fs/ubifs/orphan.c 	while (*p) {
p                 587 fs/ubifs/orphan.c 		parent = *p;
p                 590 fs/ubifs/orphan.c 			p = &(*p)->rb_left;
p                 592 fs/ubifs/orphan.c 			p = &(*p)->rb_right;
p                 600 fs/ubifs/orphan.c 	rb_link_node(&orphan->rb, parent, p);
p                 859 fs/ubifs/orphan.c 	struct rb_node **p, *parent = NULL;
p                 866 fs/ubifs/orphan.c 	p = &root->rb_node;
p                 867 fs/ubifs/orphan.c 	while (*p) {
p                 868 fs/ubifs/orphan.c 		parent = *p;
p                 871 fs/ubifs/orphan.c 			p = &(*p)->rb_left;
p                 873 fs/ubifs/orphan.c 			p = &(*p)->rb_right;
p                 879 fs/ubifs/orphan.c 	rb_link_node(&orphan->rb, parent, p);
p                 887 fs/ubifs/orphan.c 	struct rb_node *p;
p                 889 fs/ubifs/orphan.c 	p = root->rb_node;
p                 890 fs/ubifs/orphan.c 	while (p) {
p                 891 fs/ubifs/orphan.c 		o = rb_entry(p, struct check_orphan, rb);
p                 893 fs/ubifs/orphan.c 			p = p->rb_left;
p                 895 fs/ubifs/orphan.c 			p = p->rb_right;
p                  52 fs/ubifs/recovery.c 	uint8_t *p = buf;
p                  56 fs/ubifs/recovery.c 		if (*p++ != 0xff)
p                  71 fs/ubifs/recovery.c 	uint8_t *p = buf;
p                  75 fs/ubifs/recovery.c 		if (*p++ != 0xff)
p                 405 fs/ubifs/recovery.c 	uint8_t *p;
p                 413 fs/ubifs/recovery.c 	p = buf + empty_offs - offs;
p                 414 fs/ubifs/recovery.c 	return is_empty(p, check_len);
p                1249 fs/ubifs/recovery.c 	struct rb_node **p = &c->size_tree.rb_node, *parent = NULL;
p                1252 fs/ubifs/recovery.c 	while (*p) {
p                1253 fs/ubifs/recovery.c 		parent = *p;
p                1256 fs/ubifs/recovery.c 			p = &(*p)->rb_left;
p                1258 fs/ubifs/recovery.c 			p = &(*p)->rb_right;
p                1270 fs/ubifs/recovery.c 	rb_link_node(&e->rb, parent, p);
p                1283 fs/ubifs/recovery.c 	struct rb_node *p = c->size_tree.rb_node;
p                1286 fs/ubifs/recovery.c 	while (p) {
p                1287 fs/ubifs/recovery.c 		e = rb_entry(p, struct size_entry, rb);
p                1289 fs/ubifs/recovery.c 			p = p->rb_left;
p                1291 fs/ubifs/recovery.c 			p = p->rb_right;
p                1405 fs/ubifs/recovery.c 	unsigned char *p;
p                1434 fs/ubifs/recovery.c 	p = c->sbuf;
p                1436 fs/ubifs/recovery.c 	while (p[len] == 0xff)
p                  31 fs/ubifs/scan.c 	uint8_t *p = buf;
p                  35 fs/ubifs/scan.c 	while (pad_len < max_pad_len && *p++ == UBIFS_PADDING_BYTE)
p                 147 fs/ubifs/shrinker.c 	struct list_head *p;
p                 156 fs/ubifs/shrinker.c 	p = ubifs_infos.next;
p                 157 fs/ubifs/shrinker.c 	while (p != &ubifs_infos) {
p                 158 fs/ubifs/shrinker.c 		c = list_entry(p, struct ubifs_info, infos_list);
p                 168 fs/ubifs/shrinker.c 			p = p->next;
p                 178 fs/ubifs/shrinker.c 			p = p->next;
p                 191 fs/ubifs/shrinker.c 		p = p->next;
p                1009 fs/ubifs/super.c 	char *p;
p                1015 fs/ubifs/super.c 	while ((p = strsep(&options, ","))) {
p                1018 fs/ubifs/super.c 		if (!*p)
p                1021 fs/ubifs/super.c 		token = match_token(p, tokens, args);
p                1111 fs/ubifs/super.c 			flag = parse_standard_option(p);
p                1114 fs/ubifs/super.c 					  p);
p                  73 fs/ubifs/tnc.c 	struct rb_node **p, *parent = NULL;
p                  81 fs/ubifs/tnc.c 	p = &c->old_idx.rb_node;
p                  82 fs/ubifs/tnc.c 	while (*p) {
p                  83 fs/ubifs/tnc.c 		parent = *p;
p                  86 fs/ubifs/tnc.c 			p = &(*p)->rb_left;
p                  88 fs/ubifs/tnc.c 			p = &(*p)->rb_right;
p                  90 fs/ubifs/tnc.c 			p = &(*p)->rb_left;
p                  92 fs/ubifs/tnc.c 			p = &(*p)->rb_right;
p                  99 fs/ubifs/tnc.c 	rb_link_node(&old_idx->rb, parent, p);
p                1094 fs/ubifs/tnc.c 	int *path = c->bottom_up_buf, p = 0;
p                1116 fs/ubifs/tnc.c 			ubifs_assert(c, p < c->zroot.znode->level);
p                1117 fs/ubifs/tnc.c 			path[p++] = n;
p                1130 fs/ubifs/tnc.c 			ubifs_assert(c, path[p - 1] >= 0);
p                1131 fs/ubifs/tnc.c 			ubifs_assert(c, path[p - 1] < zp->child_cnt);
p                1132 fs/ubifs/tnc.c 			zbr = &zp->zbranch[path[--p]];
p                1138 fs/ubifs/tnc.c 		if (IS_ERR(znode) || !p)
p                1140 fs/ubifs/tnc.c 		ubifs_assert(c, path[p - 1] >= 0);
p                1141 fs/ubifs/tnc.c 		ubifs_assert(c, path[p - 1] < znode->child_cnt);
p                1142 fs/ubifs/tnc.c 		znode = znode->zbranch[path[p - 1]].znode;
p                 166 fs/ubifs/tnc_commit.c 	struct rb_node *p;
p                 168 fs/ubifs/tnc_commit.c 	p = c->old_idx.rb_node;
p                 169 fs/ubifs/tnc_commit.c 	while (p) {
p                 170 fs/ubifs/tnc_commit.c 		o = rb_entry(p, struct ubifs_old_idx, rb);
p                 172 fs/ubifs/tnc_commit.c 			p = p->rb_left;
p                 174 fs/ubifs/tnc_commit.c 			p = p->rb_right;
p                 176 fs/ubifs/tnc_commit.c 			p = p->rb_left;
p                 178 fs/ubifs/tnc_commit.c 			p = p->rb_right;
p                 224 fs/ubifs/tnc_commit.c static int layout_leb_in_gaps(struct ubifs_info *c, int p)
p                 239 fs/ubifs/tnc_commit.c 	c->gap_lebs[p] = lnum;
p                 358 fs/ubifs/tnc_commit.c 	int err, leb_needed_cnt, written, p = 0, old_idx_lebs, *gap_lebs;
p                 369 fs/ubifs/tnc_commit.c 		ubifs_assert(c, p < c->lst.idx_lebs);
p                 370 fs/ubifs/tnc_commit.c 		written = layout_leb_in_gaps(c, p);
p                 390 fs/ubifs/tnc_commit.c 		p++;
p                 403 fs/ubifs/tnc_commit.c 		if (leb_needed_cnt > c->ileb_cnt && p >= old_idx_lebs &&
p                 417 fs/ubifs/tnc_commit.c 	c->gap_lebs[p] = -1;
p                1059 fs/ubifs/tnc_commit.c 	int *p, err;
p                1065 fs/ubifs/tnc_commit.c 	for (p = c->gap_lebs; *p != -1; p++) {
p                1066 fs/ubifs/tnc_commit.c 		err = ubifs_change_one_lp(c, *p, LPROPS_NC, LPROPS_NC, 0,
p                 460 fs/udf/super.c 	char *p;
p                 471 fs/udf/super.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 475 fs/udf/super.c 		if (!*p)
p                 478 fs/udf/super.c 		token = match_token(p, tokens, args);
p                 596 fs/udf/super.c 			pr_err("bad mount option \"%s\" or missing value\n", p);
p                1029 fs/udf/super.c 				struct partitionDesc *p,
p                1035 fs/udf/super.c 	switch (le32_to_cpu(p->accessType)) {
p                1043 fs/udf/super.c 	if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) &&
p                1044 fs/udf/super.c 	    strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
p                1047 fs/udf/super.c 	phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
p                1077 fs/udf/super.c 		struct partitionDesc *p, int p_index)
p                1086 fs/udf/super.c 	map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
p                1087 fs/udf/super.c 	map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
p                1089 fs/udf/super.c 	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
p                1091 fs/udf/super.c 	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
p                1093 fs/udf/super.c 	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
p                1095 fs/udf/super.c 	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
p                1102 fs/udf/super.c 	err = check_partition_desc(sb, p, map);
p                1114 fs/udf/super.c 	phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
p                1235 fs/udf/super.c 	struct partitionDesc *p;
p                1251 fs/udf/super.c 	p = (struct partitionDesc *)bh->b_data;
p                1252 fs/udf/super.c 	partitionNumber = le16_to_cpu(p->partitionNumber);
p                1272 fs/udf/super.c 	ret = udf_fill_partdesc_info(sb, p, i);
p                1297 fs/udf/super.c 	ret = udf_fill_partdesc_info(sb, p, i);
p                  38 fs/udf/symlink.c 	unsigned char *p = to;
p                  59 fs/udf/symlink.c 			p = to;
p                  60 fs/udf/symlink.c 			*p++ = '/';
p                  66 fs/udf/symlink.c 			memcpy(p, "../", 3);
p                  67 fs/udf/symlink.c 			p += 3;
p                  73 fs/udf/symlink.c 			memcpy(p, "./", 2);
p                  74 fs/udf/symlink.c 			p += 2;
p                  84 fs/udf/symlink.c 						    p, tolen);
p                  88 fs/udf/symlink.c 			p += comp_len;
p                  92 fs/udf/symlink.c 			*p++ = '/';
p                  97 fs/udf/symlink.c 	if (p > to + 1)
p                  98 fs/udf/symlink.c 		p[-1] = '\0';
p                 100 fs/udf/symlink.c 		p[0] = '\0';
p                 110 fs/udf/symlink.c 	unsigned char *p = page_address(page);
p                 137 fs/udf/symlink.c 	err = udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p, PAGE_SIZE);
p                 344 fs/ufs/balloc.c u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
p                 364 fs/ufs/balloc.c 	tmp = ufs_data_ptr_to_cpu(sb, p);
p                 429 fs/ufs/balloc.c 			ufs_cpu_to_data_ptr(sb, p, result);
p                 478 fs/ufs/balloc.c 		ufs_cpu_to_data_ptr(sb, p, result);
p                 118 fs/ufs/dir.c   	struct ufs_dir_entry *p;
p                 129 fs/ufs/dir.c   		p = (struct ufs_dir_entry *)(kaddr + offs);
p                 130 fs/ufs/dir.c   		rec_len = fs16_to_cpu(sb, p->d_reclen);
p                 136 fs/ufs/dir.c   		if (rec_len < UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, p)))
p                 140 fs/ufs/dir.c   		if (fs32_to_cpu(sb, p->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
p                 176 fs/ufs/dir.c   		   rec_len, ufs_get_de_namlen(sb, p));
p                 179 fs/ufs/dir.c   	p = (struct ufs_dir_entry *)(kaddr + offs);
p                 223 fs/ufs/dir.c   ufs_next_entry(struct super_block *sb, struct ufs_dir_entry *p)
p                 225 fs/ufs/dir.c   	return (struct ufs_dir_entry *)((char *)p +
p                 226 fs/ufs/dir.c   					fs16_to_cpu(sb, p->d_reclen));
p                 229 fs/ufs/dir.c   struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
p                 237 fs/ufs/dir.c   		*p = page;
p                 412 fs/ufs/dir.c   	struct ufs_dir_entry *p = (struct ufs_dir_entry*)(base + (offset&mask));
p                 413 fs/ufs/dir.c   	while ((char*)p < (char*)de)
p                 414 fs/ufs/dir.c   		p = ufs_next_entry(sb, p);
p                 415 fs/ufs/dir.c   	return (char *)p - base;
p                  79 fs/ufs/inode.c 	void	*p;
p                  91 fs/ufs/inode.c 	Indirect *p;
p                  96 fs/ufs/inode.c 		to->key32 = *(__fs32 *)(to->p = v);
p                  97 fs/ufs/inode.c 		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
p                 100 fs/ufs/inode.c 	return (p > to);
p                 107 fs/ufs/inode.c 	Indirect *p;
p                 112 fs/ufs/inode.c 		to->key64 = *(__fs64 *)(to->p = v);
p                 113 fs/ufs/inode.c 		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
p                 116 fs/ufs/inode.c 	return (p > to);
p                 132 fs/ufs/inode.c 	unsigned *p;
p                 144 fs/ufs/inode.c 	p = offsets;
p                 149 fs/ufs/inode.c 	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
p                 156 fs/ufs/inode.c 		unsigned n = *p++;
p                 172 fs/ufs/inode.c 	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
p                 180 fs/ufs/inode.c 		unsigned n = *p++;
p                 230 fs/ufs/inode.c 	void *p;
p                 238 fs/ufs/inode.c 	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
p                 239 fs/ufs/inode.c 	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
p                 264 fs/ufs/inode.c 	void *p;
p                 271 fs/ufs/inode.c 	p = ufs_get_direct_data_ptr(uspi, ufsi, index);
p                 272 fs/ufs/inode.c 	tmp = ufs_data_ptr_to_cpu(sb, p);
p                 289 fs/ufs/inode.c 	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
p                 343 fs/ufs/inode.c 	void *p;
p                 356 fs/ufs/inode.c 		p = (__fs64 *)bh->b_data + index;
p                 358 fs/ufs/inode.c 		p = (__fs32 *)bh->b_data + index;
p                 360 fs/ufs/inode.c 	tmp = ufs_data_ptr_to_cpu(sb, p);
p                 370 fs/ufs/inode.c 	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
p                 893 fs/ufs/inode.c 	void *p;
p                 928 fs/ufs/inode.c 	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
p                 929 fs/ufs/inode.c 	tmp = ufs_data_ptr_to_cpu(sb, p);
p                 942 fs/ufs/inode.c 		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
p                 943 fs/ufs/inode.c 		tmp = ufs_data_ptr_to_cpu(sb, p);
p                 947 fs/ufs/inode.c 		ufs_data_ptr_clear(uspi, p);
p                 961 fs/ufs/inode.c 	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
p                 962 fs/ufs/inode.c 	tmp = ufs_data_ptr_to_cpu(sb, p);
p                 967 fs/ufs/inode.c 	ufs_data_ptr_clear(uspi, p);
p                 988 fs/ufs/inode.c 			void *p = ubh_get_data_ptr(uspi, ubh, i);
p                 989 fs/ufs/inode.c 			u64 block = ufs_data_ptr_to_cpu(sb, p);
p                 997 fs/ufs/inode.c 			void *p = ubh_get_data_ptr(uspi, ubh, i);
p                 998 fs/ufs/inode.c 			u64 block = ufs_data_ptr_to_cpu(sb, p);
p                1017 fs/ufs/inode.c 			void *p = ubh_get_data_ptr(uspi, ubh, i);
p                1018 fs/ufs/inode.c 			u64 block = ufs_data_ptr_to_cpu(sb, p);
p                1021 fs/ufs/inode.c 				ufs_data_ptr_clear(uspi, p);
p                1031 fs/ufs/inode.c 			void *p = ubh_get_data_ptr(uspi, ubh, i);
p                1032 fs/ufs/inode.c 			u64 block = ufs_data_ptr_to_cpu(sb, p);
p                1035 fs/ufs/inode.c 				ufs_data_ptr_clear(uspi, p);
p                1127 fs/ufs/inode.c 	void *p;
p                1149 fs/ufs/inode.c 		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
p                1151 fs/ufs/inode.c 			block = ufs_data_ptr_to_cpu(sb, p);
p                1157 fs/ufs/inode.c 				ufs_data_ptr_clear(uspi, p);
p                1161 fs/ufs/inode.c 			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
p                1167 fs/ufs/inode.c 		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
p                1168 fs/ufs/inode.c 		block = ufs_data_ptr_to_cpu(sb, p);
p                1171 fs/ufs/inode.c 			ufs_data_ptr_clear(uspi, p);
p                 385 fs/ufs/super.c 	char * p;
p                 392 fs/ufs/super.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 395 fs/ufs/super.c 		if (!*p)
p                 398 fs/ufs/super.c 		token = match_token(p, tokens, args);
p                 458 fs/ufs/super.c 			pr_err("Invalid option: \"%s\" or missing value\n", p);
p                 561 fs/ufs/util.h  static inline u64 ufs_data_ptr_to_cpu(struct super_block *sb, void *p)
p                 564 fs/ufs/util.h  		fs64_to_cpu(sb, *(__fs64 *)p) :
p                 565 fs/ufs/util.h  		fs32_to_cpu(sb, *(__fs32 *)p);
p                 568 fs/ufs/util.h  static inline void ufs_cpu_to_data_ptr(struct super_block *sb, void *p, u64 val)
p                 571 fs/ufs/util.h  		*(__fs64 *)p = cpu_to_fs64(sb, val);
p                 573 fs/ufs/util.h  		*(__fs32 *)p = cpu_to_fs32(sb, val);
p                 577 fs/ufs/util.h  				      void *p)
p                 580 fs/ufs/util.h  		*(__fs64 *)p = 0;
p                 582 fs/ufs/util.h  		*(__fs32 *)p = 0;
p                 586 fs/ufs/util.h  				       void *p)
p                 589 fs/ufs/util.h  		return *(__fs64 *)p == 0;
p                 591 fs/ufs/util.h  		return *(__fs32 *)p == 0;
p                2969 fs/unicode/mkutf8data.c 	const char	*p;
p                2999 fs/unicode/mkutf8data.c 	u8c->p = NULL;
p                3064 fs/unicode/mkutf8data.c 		if (u8c->p && *u8c->s == '\0') {
p                3065 fs/unicode/mkutf8data.c 			u8c->s = u8c->p;
p                3066 fs/unicode/mkutf8data.c 			u8c->p = NULL;
p                3070 fs/unicode/mkutf8data.c 		if (!u8c->p && (u8c->len == 0 || *u8c->s == '\0')) {
p                3079 fs/unicode/mkutf8data.c 			if (!u8c->p)
p                3085 fs/unicode/mkutf8data.c 		if (u8c->p) {
p                3101 fs/unicode/mkutf8data.c 			u8c->p = u8c->s + utf8clen(u8c->s);
p                3127 fs/unicode/mkutf8data.c 			if (!u8c->p)
p                3143 fs/unicode/mkutf8data.c 			u8c->sp = u8c->p;
p                3146 fs/unicode/mkutf8data.c 			if (!u8c->p)
p                3151 fs/unicode/mkutf8data.c 			if (!u8c->p)
p                3159 fs/unicode/mkutf8data.c 			u8c->p = u8c->sp;
p                 592 fs/unicode/utf8-norm.c 	u8c->p = NULL;
p                 659 fs/unicode/utf8-norm.c 		if (u8c->p && *u8c->s == '\0') {
p                 660 fs/unicode/utf8-norm.c 			u8c->s = u8c->p;
p                 661 fs/unicode/utf8-norm.c 			u8c->p = NULL;
p                 665 fs/unicode/utf8-norm.c 		if (!u8c->p && (u8c->len == 0 || *u8c->s == '\0')) {
p                 674 fs/unicode/utf8-norm.c 			if (!u8c->p)
p                 680 fs/unicode/utf8-norm.c 		if (u8c->p) {
p                 697 fs/unicode/utf8-norm.c 			u8c->p = u8c->s + utf8clen(u8c->s);
p                 725 fs/unicode/utf8-norm.c 			if (!u8c->p)
p                 740 fs/unicode/utf8-norm.c 			u8c->sp = u8c->p;
p                 743 fs/unicode/utf8-norm.c 			if (!u8c->p)
p                 748 fs/unicode/utf8-norm.c 			if (!u8c->p)
p                 756 fs/unicode/utf8-norm.c 			u8c->p = u8c->sp;
p                  80 fs/unicode/utf8n.h 	const char	*p;
p                  38 fs/xfs/libxfs/xfs_bit.c 	uint * p = ((unsigned int *) map) + (start_bit >> BIT_TO_WORD_SHIFT);
p                  48 fs/xfs/libxfs/xfs_bit.c 		tmp = *p++;
p                  57 fs/xfs/libxfs/xfs_bit.c 		if ((tmp = *p++) != ~0U)
p                  77 fs/xfs/libxfs/xfs_bit.c 	uint * p = ((unsigned int *) map) + (start_bit >> BIT_TO_WORD_SHIFT);
p                  88 fs/xfs/libxfs/xfs_bit.c 		tmp = *p++;
p                  97 fs/xfs/libxfs/xfs_bit.c 		if ((tmp = *p++) != 0U)
p                  51 fs/xfs/libxfs/xfs_dir2_data.c 	char			*p;		/* current data position */
p                  74 fs/xfs/libxfs/xfs_dir2_data.c 	p = (char *)ops->data_entry_p(hdr);
p                  90 fs/xfs/libxfs/xfs_dir2_data.c 		    ((char *)btp - p) / sizeof(struct xfs_dir2_leaf_entry))
p                 131 fs/xfs/libxfs/xfs_dir2_data.c 	while (p < endp) {
p                 132 fs/xfs/libxfs/xfs_dir2_data.c 		dup = (xfs_dir2_data_unused_t *)p;
p                 143 fs/xfs/libxfs/xfs_dir2_data.c 			if (endp < p + be16_to_cpu(dup->length))
p                 161 fs/xfs/libxfs/xfs_dir2_data.c 			p += be16_to_cpu(dup->length);
p                 171 fs/xfs/libxfs/xfs_dir2_data.c 		dep = (xfs_dir2_data_entry_t *)p;
p                 176 fs/xfs/libxfs/xfs_dir2_data.c 		if (endp < p + ops->data_entsize(dep->namelen))
p                 201 fs/xfs/libxfs/xfs_dir2_data.c 		p += ops->data_entsize(dep->namelen);
p                 574 fs/xfs/libxfs/xfs_dir2_data.c 	char			*p;		/* current entry pointer */
p                 590 fs/xfs/libxfs/xfs_dir2_data.c 	p = (char *)ops->data_entry_p(hdr);
p                 595 fs/xfs/libxfs/xfs_dir2_data.c 	while (p < endp) {
p                 596 fs/xfs/libxfs/xfs_dir2_data.c 		dup = (xfs_dir2_data_unused_t *)p;
p                 604 fs/xfs/libxfs/xfs_dir2_data.c 			p += be16_to_cpu(dup->length);
p                 610 fs/xfs/libxfs/xfs_dir2_data.c 			dep = (xfs_dir2_data_entry_t *)p;
p                 613 fs/xfs/libxfs/xfs_dir2_data.c 			p += ops->data_entsize(dep->namelen);
p                 190 fs/xfs/scrub/dir.c 	char				*p, *endp;
p                 236 fs/xfs/scrub/dir.c 	p = (char *)mp->m_dir_inode_ops->data_entry_p(bp->b_addr);
p                 242 fs/xfs/scrub/dir.c 	while (p < endp) {
p                 246 fs/xfs/scrub/dir.c 		dup = (struct xfs_dir2_data_unused *)p;
p                 248 fs/xfs/scrub/dir.c 			p += be16_to_cpu(dup->length);
p                 251 fs/xfs/scrub/dir.c 		dep = (struct xfs_dir2_data_entry *)p;
p                 254 fs/xfs/scrub/dir.c 		p += mp->m_dir_inode_ops->data_entsize(dep->namelen);
p                 256 fs/xfs/scrub/dir.c 	if (p >= endp) {
p                 403 fs/xfs/xfs_bmap_util.c 	struct kgetbmap		*p = out + bmv->bmv_entries;
p                 424 fs/xfs/xfs_bmap_util.c 		p->bmv_oflags |= BMV_OF_DELALLOC;
p                 425 fs/xfs/xfs_bmap_util.c 		p->bmv_block = -2;
p                 427 fs/xfs/xfs_bmap_util.c 		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
p                 432 fs/xfs/xfs_bmap_util.c 		p->bmv_oflags |= BMV_OF_PREALLOC;
p                 435 fs/xfs/xfs_bmap_util.c 		p->bmv_oflags |= BMV_OF_SHARED;
p                 437 fs/xfs/xfs_bmap_util.c 	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
p                 438 fs/xfs/xfs_bmap_util.c 	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);
p                 440 fs/xfs/xfs_bmap_util.c 	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
p                 455 fs/xfs/xfs_bmap_util.c 	struct kgetbmap		*p = out + bmv->bmv_entries;
p                 460 fs/xfs/xfs_bmap_util.c 	p->bmv_block = -1;
p                 461 fs/xfs/xfs_bmap_util.c 	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
p                 462 fs/xfs/xfs_bmap_util.c 	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);
p                 464 fs/xfs/xfs_bmap_util.c 	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
p                 152 fs/xfs/xfs_inode_item.c 			struct xfs_bmbt_rec *p;
p                 156 fs/xfs/xfs_inode_item.c 			p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IEXT);
p                 157 fs/xfs/xfs_inode_item.c 			data_bytes = xfs_iextents_copy(ip, p, XFS_DATA_FORK);
p                 237 fs/xfs/xfs_inode_item.c 			struct xfs_bmbt_rec *p;
p                 242 fs/xfs/xfs_inode_item.c 			p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_EXT);
p                 243 fs/xfs/xfs_inode_item.c 			data_bytes = xfs_iextents_copy(ip, p, XFS_ATTR_FORK);
p                 376 fs/xfs/xfs_ioctl.c 	struct xfs_fsop_attrlist_handlereq __user	*p = arg;
p                 409 fs/xfs/xfs_ioctl.c 	if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
p                1753 fs/xfs/xfs_ioctl.c 	struct kgetbmap		*p,
p                1757 fs/xfs/xfs_ioctl.c 	if (put_user(p->bmv_offset, &u->bmv_offset) ||
p                1758 fs/xfs/xfs_ioctl.c 	    put_user(p->bmv_block, &u->bmv_block) ||
p                1759 fs/xfs/xfs_ioctl.c 	    put_user(p->bmv_length, &u->bmv_length) ||
p                1766 fs/xfs/xfs_ioctl.c 	    put_user(p->bmv_oflags, &u->bmv_oflags) ||
p                2108 fs/xfs/xfs_ioctl.c 	unsigned long		p)
p                2113 fs/xfs/xfs_ioctl.c 	void			__user *arg = (void __user *)p;
p                  66 fs/xfs/xfs_ioctl.h 	unsigned long		p);
p                 159 fs/xfs/xfs_ioctl32.c 	const xfs_bstime_t	*p)
p                 163 fs/xfs/xfs_ioctl32.c 	sec32 = p->tv_sec;
p                 165 fs/xfs/xfs_ioctl32.c 	    put_user(p->tv_nsec, &p32->tv_nsec))
p                 359 fs/xfs/xfs_ioctl32.c 	compat_xfs_fsop_attrlist_handlereq_t __user *p = arg;
p                 394 fs/xfs/xfs_ioctl32.c 	if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
p                 547 fs/xfs/xfs_ioctl32.c 	unsigned long		p)
p                 552 fs/xfs/xfs_ioctl32.c 	void			__user *arg = compat_ptr(p);
p                 607 fs/xfs/xfs_ioctl32.c 		return xfs_file_ioctl(filp, cmd, p);
p                3810 fs/xfs/xfs_log.c 	void			*base_ptr, *ptr, *p;
p                3831 fs/xfs/xfs_log.c 	p = &iclog->ic_header;
p                3847 fs/xfs/xfs_log.c 		p = &ophead->oh_clientid;
p                3848 fs/xfs/xfs_log.c 		field_offset = p - base_ptr;
p                3870 fs/xfs/xfs_log.c 		p = &ophead->oh_len;
p                3871 fs/xfs/xfs_log.c 		field_offset = p - base_ptr;
p                 108 fs/xfs/xfs_message.c xfs_hex_dump(void *p, int length)
p                 110 fs/xfs/xfs_message.c 	print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 16, 1, p, length, 1);
p                  63 fs/xfs/xfs_message.h extern void xfs_hex_dump(void *p, int length);
p                 138 fs/xfs/xfs_quota.h #define xfs_qm_vop_create_dqattach(tp, ip, u, g, p)
p                 141 fs/xfs/xfs_quota.h #define xfs_qm_vop_chown_reserve(tp, ip, u, g, p, fl)			(0)
p                 301 fs/xfs/xfs_rtalloc.c 		xfs_extlen_t	p;	/* amount to trim length by */
p                 307 fs/xfs/xfs_rtalloc.c 			div_u64_rem(bestlen, prod, &p);
p                 308 fs/xfs/xfs_rtalloc.c 			if (p)
p                 309 fs/xfs/xfs_rtalloc.c 				bestlen -= p;
p                 139 fs/xfs/xfs_rtalloc.h # define xfs_rtallocate_extent(t,b,min,max,l,f,p,rb)    (ENOSYS)
p                 143 fs/xfs/xfs_rtalloc.h # define xfs_rtalloc_query_range(t,l,h,f,p)             (ENOSYS)
p                 144 fs/xfs/xfs_rtalloc.h # define xfs_rtalloc_query_all(t,f,p)                   (ENOSYS)
p                 145 fs/xfs/xfs_rtalloc.h # define xfs_rtbuf_get(m,t,b,i,p)                       (ENOSYS)
p                 162 fs/xfs/xfs_super.c 	char			*p;
p                 203 fs/xfs/xfs_super.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 206 fs/xfs/xfs_super.c 		if (!*p)
p                 209 fs/xfs/xfs_super.c 		token = match_token(p, tokens, args);
p                 342 fs/xfs/xfs_super.c 			xfs_warn(mp, "unknown mount option [%s].", p);
p                1237 fs/xfs/xfs_super.c 	char			*p;
p                1246 fs/xfs/xfs_super.c 	while ((p = strsep(&options, ",")) != NULL) {
p                1249 fs/xfs/xfs_super.c 		if (!*p)
p                1252 fs/xfs/xfs_super.c 		token = match_token(p, tokens, args);
p                1281 fs/xfs/xfs_super.c 		"mount option \"%s\" not supported for remount", p);
p                 636 include/acpi/acpi_bus.h static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m)
p                 638 include/acpi/acpi_bus.h 	if (p)
p                 639 include/acpi/acpi_bus.h 		*p = ACPI_STATE_D0;
p                 501 include/acpi/actypes.h #define ACPI_CAST_PTR(t, p)             ((t *) (acpi_uintptr_t) (p))
p                 502 include/acpi/actypes.h #define ACPI_CAST_INDIRECT_PTR(t, p)    ((t **) (acpi_uintptr_t) (p))
p                 510 include/acpi/actypes.h #define ACPI_TO_INTEGER(p)              ACPI_PTR_DIFF (p, (void *) 0)
p                 120 include/asm-generic/barrier.h #define __smp_store_release(p, v)					\
p                 122 include/asm-generic/barrier.h 	compiletime_assert_atomic_type(*p);				\
p                 124 include/asm-generic/barrier.h 	WRITE_ONCE(*p, v);						\
p                 129 include/asm-generic/barrier.h #define __smp_load_acquire(p)						\
p                 131 include/asm-generic/barrier.h 	typeof(*p) ___p1 = READ_ONCE(*p);				\
p                 132 include/asm-generic/barrier.h 	compiletime_assert_atomic_type(*p);				\
p                 153 include/asm-generic/barrier.h #define smp_store_release(p, v) __smp_store_release(p, v)
p                 157 include/asm-generic/barrier.h #define smp_load_acquire(p) __smp_load_acquire(p)
p                 175 include/asm-generic/barrier.h #define smp_store_release(p, v)						\
p                 177 include/asm-generic/barrier.h 	compiletime_assert_atomic_type(*p);				\
p                 179 include/asm-generic/barrier.h 	WRITE_ONCE(*p, v);						\
p                 184 include/asm-generic/barrier.h #define smp_load_acquire(p)						\
p                 186 include/asm-generic/barrier.h 	typeof(*p) ___p1 = READ_ONCE(*p);				\
p                 187 include/asm-generic/barrier.h 	compiletime_assert_atomic_type(*p);				\
p                 203 include/asm-generic/barrier.h #define virt_store_release(p, v) __smp_store_release(p, v)
p                 204 include/asm-generic/barrier.h #define virt_load_acquire(p) __smp_load_acquire(p)
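	A minimal sketch (not part of the index) of how the smp_store_release()/smp_load_acquire() pair listed above for include/asm-generic/barrier.h is typically paired: the producer publishes data before a release store to a flag, and the consumer only reads the data after an acquire load of that flag. The data/ready variables and both functions are illustrative assumptions.

		/* Hedged sketch; would need <asm/barrier.h> and <linux/compiler.h>. */
		static int data;
		static int ready;

		static void producer(void)
		{
			data = 42;			/* plain store, ordered before the release */
			smp_store_release(&ready, 1);	/* publish: prior stores become visible first */
		}

		static int consumer(void)
		{
			if (smp_load_acquire(&ready))	/* acquire: subsequent loads see the data */
				return data;
			return -1;			/* not published yet */
		}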
p                  14 include/asm-generic/bitops/atomic.h static inline void set_bit(unsigned int nr, volatile unsigned long *p)
p                  16 include/asm-generic/bitops/atomic.h 	p += BIT_WORD(nr);
p                  17 include/asm-generic/bitops/atomic.h 	atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
p                  20 include/asm-generic/bitops/atomic.h static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
p                  22 include/asm-generic/bitops/atomic.h 	p += BIT_WORD(nr);
p                  23 include/asm-generic/bitops/atomic.h 	atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
p                  26 include/asm-generic/bitops/atomic.h static inline void change_bit(unsigned int nr, volatile unsigned long *p)
p                  28 include/asm-generic/bitops/atomic.h 	p += BIT_WORD(nr);
p                  29 include/asm-generic/bitops/atomic.h 	atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
p                  32 include/asm-generic/bitops/atomic.h static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
p                  37 include/asm-generic/bitops/atomic.h 	p += BIT_WORD(nr);
p                  38 include/asm-generic/bitops/atomic.h 	if (READ_ONCE(*p) & mask)
p                  41 include/asm-generic/bitops/atomic.h 	old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
p                  45 include/asm-generic/bitops/atomic.h static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
p                  50 include/asm-generic/bitops/atomic.h 	p += BIT_WORD(nr);
p                  51 include/asm-generic/bitops/atomic.h 	if (!(READ_ONCE(*p) & mask))
p                  54 include/asm-generic/bitops/atomic.h 	old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
p                  58 include/asm-generic/bitops/atomic.h static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p)
p                  63 include/asm-generic/bitops/atomic.h 	p += BIT_WORD(nr);
p                  64 include/asm-generic/bitops/atomic.h 	old = atomic_long_fetch_xor(mask, (atomic_long_t *)p);
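	As a rough illustration (assumed usage, not taken from the index), the generic atomic bit operations above act on an unsigned long bitmap, and test_and_set_bit() is commonly used as a simple "claim a slot" primitive; the bitmap and helpers below are hypothetical.

		/* Hedged sketch; would need <linux/bitops.h>. */
		static unsigned long flags_map[1];	/* BITS_PER_LONG flag bits */

		static int claim_slot5(void)
		{
			if (test_and_set_bit(5, flags_map))
				return 0;		/* bit was already set: slot taken */
			return 1;			/* we set it atomically: slot claimed */
		}

		static void release_slot5(void)
		{
			clear_bit(5, flags_map);
		}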
p                  19 include/asm-generic/bitops/lock.h 					volatile unsigned long *p)
p                  24 include/asm-generic/bitops/lock.h 	p += BIT_WORD(nr);
p                  25 include/asm-generic/bitops/lock.h 	if (READ_ONCE(*p) & mask)
p                  28 include/asm-generic/bitops/lock.h 	old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
p                  40 include/asm-generic/bitops/lock.h static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
p                  42 include/asm-generic/bitops/lock.h 	p += BIT_WORD(nr);
p                  43 include/asm-generic/bitops/lock.h 	atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
p                  58 include/asm-generic/bitops/lock.h 				      volatile unsigned long *p)
p                  62 include/asm-generic/bitops/lock.h 	p += BIT_WORD(nr);
p                  63 include/asm-generic/bitops/lock.h 	old = READ_ONCE(*p);
p                  65 include/asm-generic/bitops/lock.h 	atomic_long_set_release((atomic_long_t *)p, old);
p                  79 include/asm-generic/bitops/lock.h 						     volatile unsigned long *p)
p                  84 include/asm-generic/bitops/lock.h 	p += BIT_WORD(nr);
p                  85 include/asm-generic/bitops/lock.h 	old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
p                  19 include/asm-generic/bitops/non-atomic.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  21 include/asm-generic/bitops/non-atomic.h 	*p  |= mask;
p                  27 include/asm-generic/bitops/non-atomic.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  29 include/asm-generic/bitops/non-atomic.h 	*p &= ~mask;
p                  44 include/asm-generic/bitops/non-atomic.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  46 include/asm-generic/bitops/non-atomic.h 	*p ^= mask;
p                  61 include/asm-generic/bitops/non-atomic.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  62 include/asm-generic/bitops/non-atomic.h 	unsigned long old = *p;
p                  64 include/asm-generic/bitops/non-atomic.h 	*p = old | mask;
p                  80 include/asm-generic/bitops/non-atomic.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  81 include/asm-generic/bitops/non-atomic.h 	unsigned long old = *p;
p                  83 include/asm-generic/bitops/non-atomic.h 	*p = old & ~mask;
p                  92 include/asm-generic/bitops/non-atomic.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  93 include/asm-generic/bitops/non-atomic.h 	unsigned long old = *p;
p                  95 include/asm-generic/bitops/non-atomic.h 	*p = old ^ mask;
p                 896 include/asm-generic/io.h static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
p                1020 include/asm-generic/io.h static inline void ioport_unmap(void __iomem *p)
p                1026 include/asm-generic/io.h extern void ioport_unmap(void __iomem *p);
p                  27 include/asm-generic/preempt.h #define init_task_preempt_count(p) do { \
p                  28 include/asm-generic/preempt.h 	task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
p                  31 include/asm-generic/preempt.h #define init_idle_preempt_count(p, cpu) do { \
p                  32 include/asm-generic/preempt.h 	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
p                  60 include/asm-generic/sections.h #define dereference_function_descriptor(p) (p)
p                  61 include/asm-generic/sections.h #define dereference_kernel_function_descriptor(p) (p)
p                  64 include/crypto/b128ops.h static inline void u128_xor(u128 *r, const u128 *p, const u128 *q)
p                  66 include/crypto/b128ops.h 	r->a = p->a ^ q->a;
p                  67 include/crypto/b128ops.h 	r->b = p->b ^ q->b;
p                  70 include/crypto/b128ops.h static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
p                  72 include/crypto/b128ops.h 	u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
p                  75 include/crypto/b128ops.h static inline void le128_xor(le128 *r, const le128 *p, const le128 *q)
p                  77 include/crypto/b128ops.h 	u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
p                  17 include/crypto/blowfish.h 	u32 p[18];
p                  36 include/crypto/dh.h 	void *p;
p                  67 include/crypto/ecdh.h int crypto_ecdh_encode_key(char *buf, unsigned int len, const struct ecdh *p);
p                  81 include/crypto/ecdh.h int crypto_ecdh_decode_key(const char *buf, unsigned int len, struct ecdh *p);
p                   5 include/crypto/internal/cryptouser.h struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
p                  35 include/crypto/internal/rsa.h 	const u8 *p;
p                  48 include/crypto/sha1_base.h 			int p = SHA1_BLOCK_SIZE - partial;
p                  50 include/crypto/sha1_base.h 			memcpy(sctx->buffer + partial, data, p);
p                  51 include/crypto/sha1_base.h 			data += p;
p                  52 include/crypto/sha1_base.h 			len -= p;
p                  49 include/crypto/sha256_base.h 			int p = SHA256_BLOCK_SIZE - partial;
p                  51 include/crypto/sha256_base.h 			memcpy(sctx->buf + partial, data, p);
p                  52 include/crypto/sha256_base.h 			data += p;
p                  53 include/crypto/sha256_base.h 			len -= p;
p                  71 include/crypto/sha512_base.h 			int p = SHA512_BLOCK_SIZE - partial;
p                  73 include/crypto/sha512_base.h 			memcpy(sctx->buf + partial, data, p);
p                  74 include/crypto/sha512_base.h 			data += p;
p                  75 include/crypto/sha512_base.h 			len -= p;
p                  51 include/crypto/sm3_base.h 			int p = SM3_BLOCK_SIZE - partial;
p                  53 include/crypto/sm3_base.h 			memcpy(sctx->buffer + partial, data, p);
p                  54 include/crypto/sm3_base.h 			data += p;
p                  55 include/crypto/sm3_base.h 			len -= p;
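	The sha1/sha256/sha512/sm3 *_base.h fragments above all follow the same partial-block buffering scheme: top up a partially filled block, process it, then consume whole blocks straight from the input. A generic, hedged sketch of that idea (names and block size are illustrative, not the real crypto API):

		/* Hedged sketch; needs <linux/string.h> for memcpy(). */
		#define BLK 64

		struct demo_ctx {
			unsigned char buffer[BLK];
			unsigned long count;		/* total bytes fed so far */
		};

		static void demo_update(struct demo_ctx *ctx, const unsigned char *data,
					unsigned int len,
					void (*block_fn)(struct demo_ctx *, const unsigned char *))
		{
			unsigned int partial = ctx->count % BLK;

			ctx->count += len;
			if (partial && len >= BLK - partial) {
				unsigned int p = BLK - partial;	/* bytes needed to fill the block */

				memcpy(ctx->buffer + partial, data, p);
				data += p;
				len -= p;
				block_fn(ctx, ctx->buffer);
				partial = 0;
			}
			while (len >= BLK) {		/* whole blocks directly from input */
				block_fn(ctx, data);
				data += BLK;
				len -= BLK;
			}
			if (len)			/* stash the tail for next time */
				memcpy(ctx->buffer + partial, data, len);
		}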
p                 674 include/drm/drm_atomic.h void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
p                 951 include/drm/drm_connector.h 	void (*atomic_print_state)(struct drm_printer *p,
p                 822 include/drm/drm_crtc.h 	void (*atomic_print_state)(struct drm_printer *p,
p                 545 include/drm/drm_drv.h 	void (*gem_print_info)(struct drm_printer *p, unsigned int indent,
p                 379 include/drm/drm_file.h 				  struct drm_pending_event *p,
p                 383 include/drm/drm_file.h 			   struct drm_pending_event *p,
p                 386 include/drm/drm_file.h 			   struct drm_pending_event *p);
p                 270 include/drm/drm_framebuffer.h static inline void drm_framebuffer_assign(struct drm_framebuffer **p,
p                 275 include/drm/drm_framebuffer.h 	if (*p)
p                 276 include/drm/drm_framebuffer.h 		drm_framebuffer_put(*p);
p                 277 include/drm/drm_framebuffer.h 	*p = fb;
p                  88 include/drm/drm_gem.h 	void (*print_info)(struct drm_printer *p, unsigned int indent,
p                  96 include/drm/drm_gem_cma_helper.h void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
p                 163 include/drm/drm_gem_shmem_helper.h void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
p                 547 include/drm/drm_mm.h void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p);
p                 508 include/drm/drm_modes.h void drm_mode_set_crtcinfo(struct drm_display_mode *p,
p                 476 include/drm/drm_plane.h 	void (*atomic_print_state)(struct drm_printer *p,
p                  74 include/drm/drm_print.h 	void (*printfn)(struct drm_printer *p, struct va_format *vaf);
p                  75 include/drm/drm_print.h 	void (*puts)(struct drm_printer *p, const char *str);
p                  80 include/drm/drm_print.h void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf);
p                  81 include/drm/drm_print.h void __drm_puts_coredump(struct drm_printer *p, const char *str);
p                  82 include/drm/drm_print.h void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf);
p                  83 include/drm/drm_print.h void __drm_puts_seq_file(struct drm_printer *p, const char *str);
p                  84 include/drm/drm_print.h void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf);
p                  85 include/drm/drm_print.h void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf);
p                  88 include/drm/drm_print.h void drm_printf(struct drm_printer *p, const char *f, ...);
p                  89 include/drm/drm_print.h void drm_puts(struct drm_printer *p, const char *str);
p                  90 include/drm/drm_print.h void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset);
p                 100 include/drm/drm_print.h drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va)
p                 104 include/drm/drm_print.h 	p->printfn(p, &vaf);
p                 169 include/drm/drm_print.h 	struct drm_printer p = {
p                 178 include/drm/drm_print.h 	return p;
p                 190 include/drm/drm_print.h 	struct drm_printer p = {
p                 195 include/drm/drm_print.h 	return p;
p                 207 include/drm/drm_print.h 	struct drm_printer p = {
p                 211 include/drm/drm_print.h 	return p;
p                 223 include/drm/drm_print.h 	struct drm_printer p = {
p                 227 include/drm/drm_print.h 	return p;
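	drm_print.h above routes output through a small indirection (struct drm_printer with printfn/puts callbacks) so one dump routine can target a seq_file, the kernel log, or a coredump buffer. A hedged consumer sketch; my_dump_state() and its argument are hypothetical, only drm_printf()/drm_puts() come from the declarations listed above.

		/* Hedged sketch; would need <drm/drm_print.h>. */
		static void my_dump_state(struct drm_printer *p, int value)
		{
			drm_printf(p, "state value: %d\n", value);
			drm_puts(p, "end of dump\n");
		}

	The same function can then be handed any printer built by the constructors above that return struct drm_printer by value.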
p                 119 include/drm/i915_drm.h #define port_name(p) ((p) + 'A')
p                  15 include/dt-bindings/pinctrl/r7s72100-pinctrl.h #define RZA1_PINMUX(b, p, f)	((b) * RZA1_PINS_PER_PORT + (p) | (f << 16))
p                  40 include/dt-bindings/pinctrl/r7s9210-pinctrl.h #define RZA2_PINMUX(b, p, f)	((b) * RZA2_PINS_PER_PORT + (p) | (f << 16))
p                  64 include/keys/trusted.h static inline void dump_payload(struct trusted_key_payload *p)
p                  66 include/keys/trusted.h 	pr_info("trusted_key: key_len %d\n", p->key_len);
p                  68 include/keys/trusted.h 		       16, 1, p->key, p->key_len, 0);
p                  69 include/keys/trusted.h 	pr_info("trusted_key: bloblen %d\n", p->blob_len);
p                  71 include/keys/trusted.h 		       16, 1, p->blob, p->blob_len, 0);
p                  72 include/keys/trusted.h 	pr_info("trusted_key: migratable %d\n", p->migratable);
p                 100 include/keys/trusted.h static inline void dump_payload(struct trusted_key_payload *p)
p                  15 include/linux/adfs_fs.h 	unsigned char *p = ptr + 511;
p                  19 include/linux/adfs_fs.h         	result = result + *--p;
p                  20 include/linux/adfs_fs.h 	} while (p != ptr);
p                 105 include/linux/amba/bus.h #define amba_set_drvdata(d,p)	dev_set_drvdata(&d->dev, p)
p                  94 include/linux/assoc_array_priv.h 		u8			*p;
p                 155 include/linux/assoc_array_priv.h struct assoc_array_ptr *__assoc_array_x_to_ptr(const void *p, unsigned long t)
p                 157 include/linux/assoc_array_priv.h 	return (struct assoc_array_ptr *)((unsigned long)p | t);
p                 160 include/linux/assoc_array_priv.h struct assoc_array_ptr *assoc_array_leaf_to_ptr(const void *p)
p                 162 include/linux/assoc_array_priv.h 	return __assoc_array_x_to_ptr(p, ASSOC_ARRAY_PTR_LEAF_TYPE);
p                 165 include/linux/assoc_array_priv.h struct assoc_array_ptr *assoc_array_node_to_ptr(const struct assoc_array_node *p)
p                 168 include/linux/assoc_array_priv.h 		p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_NODE_SUBTYPE);
p                 171 include/linux/assoc_array_priv.h struct assoc_array_ptr *assoc_array_shortcut_to_ptr(const struct assoc_array_shortcut *p)
p                 174 include/linux/assoc_array_priv.h 		p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE);
p                 294 include/linux/audit.h 	void *p = audit_context();
p                 295 include/linux/audit.h 	return !p || *(int *)p;
p                  26 include/linux/binfmts.h 	unsigned long p; /* current top of mem */
p                 125 include/linux/bitfield.h static __always_inline void type##p_replace_bits(__##type *p,		\
p                 128 include/linux/bitfield.h 	*p = (*p & ~to(field)) | type##_encode_bits(val, field);	\
p                1494 include/linux/blkdev.h static inline void put_dev_sector(Sector p)
p                1496 include/linux/blkdev.h 	put_page(p.v);
p                  27 include/linux/cdev.h void cdev_put(struct cdev *p);
p                  31 include/linux/cdev.h void cdev_set_parent(struct cdev *p, struct kobject *kobj);
p                 112 include/linux/ceph/auth.h int ceph_auth_entity_name_encode(const char *name, void **p, void *end);
p                  37 include/linux/ceph/buffer.h extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
p                  19 include/linux/ceph/decode.h static inline u64 ceph_decode_64(void **p)
p                  21 include/linux/ceph/decode.h 	u64 v = get_unaligned_le64(*p);
p                  22 include/linux/ceph/decode.h 	*p += sizeof(u64);
p                  25 include/linux/ceph/decode.h static inline u32 ceph_decode_32(void **p)
p                  27 include/linux/ceph/decode.h 	u32 v = get_unaligned_le32(*p);
p                  28 include/linux/ceph/decode.h 	*p += sizeof(u32);
p                  31 include/linux/ceph/decode.h static inline u16 ceph_decode_16(void **p)
p                  33 include/linux/ceph/decode.h 	u16 v = get_unaligned_le16(*p);
p                  34 include/linux/ceph/decode.h 	*p += sizeof(u16);
p                  37 include/linux/ceph/decode.h static inline u8 ceph_decode_8(void **p)
p                  39 include/linux/ceph/decode.h 	u8 v = *(u8 *)*p;
p                  40 include/linux/ceph/decode.h 	(*p)++;
p                  43 include/linux/ceph/decode.h static inline void ceph_decode_copy(void **p, void *pv, size_t n)
p                  45 include/linux/ceph/decode.h 	memcpy(pv, *p, n);
p                  46 include/linux/ceph/decode.h 	*p += n;
p                  52 include/linux/ceph/decode.h static inline bool ceph_has_room(void **p, void *end, size_t n)
p                  54 include/linux/ceph/decode.h 	return end >= *p && n <= end - *p;
p                  57 include/linux/ceph/decode.h #define ceph_decode_need(p, end, n, bad)			\
p                  59 include/linux/ceph/decode.h 		if (!likely(ceph_has_room(p, end, n)))		\
p                  63 include/linux/ceph/decode.h #define ceph_decode_64_safe(p, end, v, bad)			\
p                  65 include/linux/ceph/decode.h 		ceph_decode_need(p, end, sizeof(u64), bad);	\
p                  66 include/linux/ceph/decode.h 		v = ceph_decode_64(p);				\
p                  68 include/linux/ceph/decode.h #define ceph_decode_32_safe(p, end, v, bad)			\
p                  70 include/linux/ceph/decode.h 		ceph_decode_need(p, end, sizeof(u32), bad);	\
p                  71 include/linux/ceph/decode.h 		v = ceph_decode_32(p);				\
p                  73 include/linux/ceph/decode.h #define ceph_decode_16_safe(p, end, v, bad)			\
p                  75 include/linux/ceph/decode.h 		ceph_decode_need(p, end, sizeof(u16), bad);	\
p                  76 include/linux/ceph/decode.h 		v = ceph_decode_16(p);				\
p                  78 include/linux/ceph/decode.h #define ceph_decode_8_safe(p, end, v, bad)			\
p                  80 include/linux/ceph/decode.h 		ceph_decode_need(p, end, sizeof(u8), bad);	\
p                  81 include/linux/ceph/decode.h 		v = ceph_decode_8(p);				\
p                  84 include/linux/ceph/decode.h #define ceph_decode_copy_safe(p, end, pv, n, bad)		\
p                  86 include/linux/ceph/decode.h 		ceph_decode_need(p, end, n, bad);		\
p                  87 include/linux/ceph/decode.h 		ceph_decode_copy(p, pv, n);			\
p                 107 include/linux/ceph/decode.h static inline char *ceph_extract_encoded_string(void **p, void *end,
p                 111 include/linux/ceph/decode.h 	void *sp = *p;
p                 126 include/linux/ceph/decode.h 	*p = (char *) *p + sizeof (u32) + len;
p                 139 include/linux/ceph/decode.h #define ceph_decode_skip_n(p, end, n, bad)			\
p                 141 include/linux/ceph/decode.h 		ceph_decode_need(p, end, n, bad);		\
p                 142 include/linux/ceph/decode.h                 *p += n;					\
p                 145 include/linux/ceph/decode.h #define ceph_decode_skip_64(p, end, bad)			\
p                 146 include/linux/ceph/decode.h ceph_decode_skip_n(p, end, sizeof(u64), bad)
p                 148 include/linux/ceph/decode.h #define ceph_decode_skip_32(p, end, bad)			\
p                 149 include/linux/ceph/decode.h ceph_decode_skip_n(p, end, sizeof(u32), bad)
p                 151 include/linux/ceph/decode.h #define ceph_decode_skip_16(p, end, bad)			\
p                 152 include/linux/ceph/decode.h ceph_decode_skip_n(p, end, sizeof(u16), bad)
p                 154 include/linux/ceph/decode.h #define ceph_decode_skip_8(p, end, bad)				\
p                 155 include/linux/ceph/decode.h ceph_decode_skip_n(p, end, sizeof(u8), bad)
p                 157 include/linux/ceph/decode.h #define ceph_decode_skip_string(p, end, bad)			\
p                 161 include/linux/ceph/decode.h 		ceph_decode_32_safe(p, end, len, bad);		\
p                 162 include/linux/ceph/decode.h 		ceph_decode_skip_n(p, end, len, bad);		\
p                 165 include/linux/ceph/decode.h #define ceph_decode_skip_set(p, end, type, bad)			\
p                 169 include/linux/ceph/decode.h 		ceph_decode_32_safe(p, end, len, bad);		\
p                 171 include/linux/ceph/decode.h 			ceph_decode_skip_##type(p, end, bad);	\
p                 174 include/linux/ceph/decode.h #define ceph_decode_skip_map(p, end, ktype, vtype, bad)		\
p                 178 include/linux/ceph/decode.h 		ceph_decode_32_safe(p, end, len, bad);		\
p                 180 include/linux/ceph/decode.h 			ceph_decode_skip_##ktype(p, end, bad);	\
p                 181 include/linux/ceph/decode.h 			ceph_decode_skip_##vtype(p, end, bad);	\
p                 185 include/linux/ceph/decode.h #define ceph_decode_skip_map_of_map(p, end, ktype1, ktype2, vtype2, bad) \
p                 189 include/linux/ceph/decode.h 		ceph_decode_32_safe(p, end, len, bad);		\
p                 191 include/linux/ceph/decode.h 			ceph_decode_skip_##ktype1(p, end, bad);	\
p                 192 include/linux/ceph/decode.h 			ceph_decode_skip_map(p, end, ktype2, vtype2, bad); \
p                 240 include/linux/ceph/decode.h extern int ceph_decode_entity_addr(void **p, void *end,
p                 245 include/linux/ceph/decode.h static inline void ceph_encode_64(void **p, u64 v)
p                 247 include/linux/ceph/decode.h 	put_unaligned_le64(v, (__le64 *)*p);
p                 248 include/linux/ceph/decode.h 	*p += sizeof(u64);
p                 250 include/linux/ceph/decode.h static inline void ceph_encode_32(void **p, u32 v)
p                 252 include/linux/ceph/decode.h 	put_unaligned_le32(v, (__le32 *)*p);
p                 253 include/linux/ceph/decode.h 	*p += sizeof(u32);
p                 255 include/linux/ceph/decode.h static inline void ceph_encode_16(void **p, u16 v)
p                 257 include/linux/ceph/decode.h 	put_unaligned_le16(v, (__le16 *)*p);
p                 258 include/linux/ceph/decode.h 	*p += sizeof(u16);
p                 260 include/linux/ceph/decode.h static inline void ceph_encode_8(void **p, u8 v)
p                 262 include/linux/ceph/decode.h 	*(u8 *)*p = v;
p                 263 include/linux/ceph/decode.h 	(*p)++;
p                 265 include/linux/ceph/decode.h static inline void ceph_encode_copy(void **p, const void *s, int len)
p                 267 include/linux/ceph/decode.h 	memcpy(*p, s, len);
p                 268 include/linux/ceph/decode.h 	*p += len;
p                 274 include/linux/ceph/decode.h static inline void ceph_encode_filepath(void **p, void *end,
p                 278 include/linux/ceph/decode.h 	BUG_ON(*p + 1 + sizeof(ino) + sizeof(len) + len > end);
p                 279 include/linux/ceph/decode.h 	ceph_encode_8(p, 1);
p                 280 include/linux/ceph/decode.h 	ceph_encode_64(p, ino);
p                 281 include/linux/ceph/decode.h 	ceph_encode_32(p, len);
p                 283 include/linux/ceph/decode.h 		memcpy(*p, path, len);
p                 284 include/linux/ceph/decode.h 	*p += len;
p                 287 include/linux/ceph/decode.h static inline void ceph_encode_string(void **p, void *end,
p                 290 include/linux/ceph/decode.h 	BUG_ON(*p + sizeof(len) + len > end);
p                 291 include/linux/ceph/decode.h 	ceph_encode_32(p, len);
p                 293 include/linux/ceph/decode.h 		memcpy(*p, s, len);
p                 294 include/linux/ceph/decode.h 	*p += len;
p                 310 include/linux/ceph/decode.h static inline void ceph_start_encoding(void **p, u8 struct_v, u8 struct_compat,
p                 313 include/linux/ceph/decode.h 	ceph_encode_8(p, struct_v);
p                 314 include/linux/ceph/decode.h 	ceph_encode_8(p, struct_compat);
p                 315 include/linux/ceph/decode.h 	ceph_encode_32(p, struct_len);
p                 328 include/linux/ceph/decode.h static inline int ceph_start_decoding(void **p, void *end, u8 v,
p                 334 include/linux/ceph/decode.h 	ceph_decode_need(p, end, CEPH_ENCODING_START_BLK_LEN, bad);
p                 335 include/linux/ceph/decode.h 	*struct_v = ceph_decode_8(p);
p                 336 include/linux/ceph/decode.h 	struct_compat = ceph_decode_8(p);
p                 343 include/linux/ceph/decode.h 	*struct_len = ceph_decode_32(p);
p                 344 include/linux/ceph/decode.h 	ceph_decode_need(p, end, *struct_len, bad);
p                 351 include/linux/ceph/decode.h #define ceph_encode_need(p, end, n, bad)			\
p                 353 include/linux/ceph/decode.h 		if (!likely(ceph_has_room(p, end, n)))		\
p                 357 include/linux/ceph/decode.h #define ceph_encode_64_safe(p, end, v, bad)			\
p                 359 include/linux/ceph/decode.h 		ceph_encode_need(p, end, sizeof(u64), bad);	\
p                 360 include/linux/ceph/decode.h 		ceph_encode_64(p, v);				\
p                 362 include/linux/ceph/decode.h #define ceph_encode_32_safe(p, end, v, bad)			\
p                 364 include/linux/ceph/decode.h 		ceph_encode_need(p, end, sizeof(u32), bad);	\
p                 365 include/linux/ceph/decode.h 		ceph_encode_32(p, v);				\
p                 367 include/linux/ceph/decode.h #define ceph_encode_16_safe(p, end, v, bad)			\
p                 369 include/linux/ceph/decode.h 		ceph_encode_need(p, end, sizeof(u16), bad);	\
p                 370 include/linux/ceph/decode.h 		ceph_encode_16(p, v);				\
p                 372 include/linux/ceph/decode.h #define ceph_encode_8_safe(p, end, v, bad)			\
p                 374 include/linux/ceph/decode.h 		ceph_encode_need(p, end, sizeof(u8), bad);	\
p                 375 include/linux/ceph/decode.h 		ceph_encode_8(p, v);				\
p                 378 include/linux/ceph/decode.h #define ceph_encode_copy_safe(p, end, pv, n, bad)		\
p                 380 include/linux/ceph/decode.h 		ceph_encode_need(p, end, n, bad);		\
p                 381 include/linux/ceph/decode.h 		ceph_encode_copy(p, pv, n);			\
p                 383 include/linux/ceph/decode.h #define ceph_encode_string_safe(p, end, s, n, bad)		\
p                 385 include/linux/ceph/decode.h 		ceph_encode_need(p, end, n, bad);		\
p                 386 include/linux/ceph/decode.h 		ceph_encode_string(p, end, s, n);		\
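	The ceph decode/encode helpers above advance a cursor (void **p) through a buffer, and the *_safe variants jump to a caller-supplied label when the remaining room is too small. A hedged sketch of that convention; the function, its arguments, and the wire layout are illustrative only.

		/* Hedged sketch; would need <linux/ceph/decode.h> and <linux/errno.h>. */
		static int demo_decode_items(void **p, void *end, u64 *items, u32 max_items)
		{
			u32 i, n;

			ceph_decode_32_safe(p, end, n, bad);	/* item count */
			if (n > max_items)
				goto bad;
			for (i = 0; i < n; i++)
				ceph_decode_64_safe(p, end, items[i], bad);
			return 0;
		bad:
			return -ERANGE;				/* short or malformed buffer */
		}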
p                  66 include/linux/ceph/mdsmap.h extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end);
p                 221 include/linux/ceph/osdmap.h static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid)
p                 225 include/linux/ceph/osdmap.h 	if (!ceph_has_room(p, end, CEPH_PGID_ENCODING_LEN)) {
p                 229 include/linux/ceph/osdmap.h 	version = ceph_decode_8(p);
p                 236 include/linux/ceph/osdmap.h 	pgid->pool = ceph_decode_64(p);
p                 237 include/linux/ceph/osdmap.h 	pgid->seed = ceph_decode_32(p);
p                 238 include/linux/ceph/osdmap.h 	*p += 4;	/* skip deprecated preferred value */
p                 244 include/linux/ceph/osdmap.h extern struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end);
p                 245 include/linux/ceph/osdmap.h struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
p                 124 include/linux/cgroup.h void cgroup_fork(struct task_struct *p);
p                 125 include/linux/cgroup.h extern int cgroup_can_fork(struct task_struct *p);
p                 126 include/linux/cgroup.h extern void cgroup_cancel_fork(struct task_struct *p);
p                 127 include/linux/cgroup.h extern void cgroup_post_fork(struct task_struct *p);
p                 128 include/linux/cgroup.h void cgroup_exit(struct task_struct *p);
p                 129 include/linux/cgroup.h void cgroup_release(struct task_struct *p);
p                 130 include/linux/cgroup.h void cgroup_free(struct task_struct *p);
p                 711 include/linux/cgroup.h static inline void cgroup_fork(struct task_struct *p) {}
p                 712 include/linux/cgroup.h static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
p                 713 include/linux/cgroup.h static inline void cgroup_cancel_fork(struct task_struct *p) {}
p                 714 include/linux/cgroup.h static inline void cgroup_post_fork(struct task_struct *p) {}
p                 715 include/linux/cgroup.h static inline void cgroup_exit(struct task_struct *p) {}
p                 716 include/linux/cgroup.h static inline void cgroup_release(struct task_struct *p) {}
p                 717 include/linux/cgroup.h static inline void cgroup_free(struct task_struct *p) {}
p                 173 include/linux/clk.h bool clk_is_match(const struct clk *p, const struct clk *q);
p                 216 include/linux/clk.h static inline bool clk_is_match(const struct clk *p, const struct clk *q)
p                 218 include/linux/clk.h 	return p == q;
p                 185 include/linux/compiler.h 	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
p                 186 include/linux/compiler.h 	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
p                 187 include/linux/compiler.h 	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
p                 188 include/linux/compiler.h 	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
p                 191 include/linux/compiler.h 		__builtin_memcpy((void *)res, (const void *)p, size);	\
p                 197 include/linux/compiler.h void __read_once_size(const volatile void *p, void *res, int size)
p                 215 include/linux/compiler.h void __read_once_size_nocheck(const volatile void *p, void *res, int size)
p                 220 include/linux/compiler.h static __always_inline void __write_once_size(volatile void *p, void *res, int size)
p                 223 include/linux/compiler.h 	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
p                 224 include/linux/compiler.h 	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
p                 225 include/linux/compiler.h 	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
p                 226 include/linux/compiler.h 	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
p                 229 include/linux/compiler.h 		__builtin_memcpy((void *)p, (const void *)res, size);
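	The __read_once_size()/__write_once_size() helpers above are the size-dispatched back ends behind READ_ONCE()/WRITE_ONCE(). A hedged sketch of the usual pattern they enable, polling a flag updated by another context without compiler-merged or torn accesses; the flag and both functions are hypothetical.

		/* Hedged sketch; would need <linux/compiler.h> and <asm/processor.h>. */
		static int stop_requested;

		static void wait_for_stop(void)
		{
			while (!READ_ONCE(stop_requested))
				cpu_relax();		/* forces a fresh load each iteration */
		}

		static void request_stop(void)
		{
			WRITE_ONCE(stop_requested, 1);	/* single, untorn store */
		}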
p                  25 include/linux/compiler_types.h # define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
p                  49 include/linux/compiler_types.h # define ACCESS_PRIVATE(p, member) ((p)->member)
p                  79 include/linux/console.h 	void	(*con_invert_region)(struct vc_data *vc, u16 *p, int count);
p                 881 include/linux/cpumask.h 	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
p                 882 include/linux/cpumask.h 	p -= cpu / BITS_PER_LONG;
p                 883 include/linux/cpumask.h 	return to_cpumask(p);
p                  60 include/linux/cpuset.h extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
p                  61 include/linux/cpuset.h extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
p                  62 include/linux/cpuset.h extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
p                 184 include/linux/cpuset.h static inline void cpuset_cpus_allowed(struct task_struct *p,
p                 190 include/linux/cpuset.h static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
p                 194 include/linux/cpuset.h static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
p                  11 include/linux/crc32.h u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len);
p                  12 include/linux/crc32.h u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len);
p                  39 include/linux/crc32.h u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len);
p                  10 include/linux/crc64.h u64 __pure crc64_be(u64 crc, const void *p, size_t len);
p                  36 include/linux/decompress/mm.h 	void *p;
p                  45 include/linux/decompress/mm.h 	p = (void *)malloc_ptr;
p                  52 include/linux/decompress/mm.h 	return p;
p                  77 include/linux/delayacct.h static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
p                  79 include/linux/delayacct.h 	if (p->delays)
p                  80 include/linux/delayacct.h 		return (p->delays->flags & DELAYACCT_PF_BLKIO);
p                 122 include/linux/delayacct.h static inline void delayacct_blkio_end(struct task_struct *p)
p                 124 include/linux/delayacct.h 	if (p->delays)
p                 125 include/linux/delayacct.h 		__delayacct_blkio_end(p);
p                 181 include/linux/delayacct.h static inline void delayacct_blkio_end(struct task_struct *p)
p                 188 include/linux/delayacct.h static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
p                 143 include/linux/device.h 	struct subsys_private *p;
p                 392 include/linux/device.h 	struct driver_private *p;
p                 596 include/linux/device.h 	struct subsys_private *p;
p                 936 include/linux/device.h extern void devm_kfree(struct device *dev, const void *p);
p                1233 include/linux/device.h 	struct device_private	*p;
p                 619 include/linux/dma-mapping.h #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
p                  97 include/linux/dmar.h #define	dmar_rcu_dereference(p)	rcu_dereference_check((p), dmar_rcu_check())
p                1670 include/linux/efi.h #define efi_call_virt_pointer(p, f, args...)				\
p                1678 include/linux/efi.h 	__s = arch_efi_call_virt(p, f, args);				\
p                1686 include/linux/efi.h #define __efi_call_virt_pointer(p, f, args...)				\
p                1693 include/linux/efi.h 	arch_efi_call_virt(p, f, args);					\
p                  66 include/linux/err.h #define PTR_RET(p) PTR_ERR_OR_ZERO(p)
p                  43 include/linux/etherdevice.h int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
p                  44 include/linux/etherdevice.h void eth_commit_mac_addr_change(struct net_device *dev, void *p);
p                  45 include/linux/etherdevice.h int eth_mac_addr(struct net_device *dev, void *p);
p                  36 include/linux/ext2_fs.h 	__u8 *p = ext2_sb;
p                  37 include/linux/ext2_fs.h 	if (*(__le16 *)(p + EXT2_SB_MAGIC_OFFSET) != cpu_to_le16(EXT2_SUPER_MAGIC))
p                  39 include/linux/ext2_fs.h 	return (u64)le32_to_cpup((__le32 *)(p + EXT2_SB_BLOCKS_OFFSET)) <<
p                  40 include/linux/ext2_fs.h 		le32_to_cpup((__le32 *)(p + EXT2_SB_BSIZE_OFFSET));
p                 579 include/linux/fb.h #define FB_LEFT_POS(p, bpp)          (fb_be_math(p) ? (32 - (bpp)) : 0)
p                 580 include/linux/fb.h #define FB_SHIFT_HIGH(p, val, bits)  (fb_be_math(p) ? (val) >> (bits) : \
p                 582 include/linux/fb.h #define FB_SHIFT_LOW(p, val, bits)   (fb_be_math(p) ? (val) << (bits) : \
p                  70 include/linux/firewire.h 	const u32 *p;
p                  74 include/linux/firewire.h void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p);
p                  37 include/linux/flex_proportions.h int fprop_global_init(struct fprop_global *p, gfp_t gfp);
p                  38 include/linux/flex_proportions.h void fprop_global_destroy(struct fprop_global *p);
p                  39 include/linux/flex_proportions.h bool fprop_new_period(struct fprop_global *p, int periods);
p                  58 include/linux/flex_proportions.h void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl);
p                  59 include/linux/flex_proportions.h void fprop_fraction_single(struct fprop_global *p,
p                  64 include/linux/flex_proportions.h void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
p                  69 include/linux/flex_proportions.h 	__fprop_inc_single(p, pl);
p                  86 include/linux/flex_proportions.h void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
p                  87 include/linux/flex_proportions.h void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
p                  89 include/linux/flex_proportions.h void fprop_fraction_percpu(struct fprop_global *p,
p                  94 include/linux/flex_proportions.h void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
p                  99 include/linux/flex_proportions.h 	__fprop_inc_percpu(p, pl);
p                  25 include/linux/freezer.h static inline bool frozen(struct task_struct *p)
p                  27 include/linux/freezer.h 	return p->flags & PF_FROZEN;
p                  30 include/linux/freezer.h extern bool freezing_slow_path(struct task_struct *p);
p                  35 include/linux/freezer.h static inline bool freezing(struct task_struct *p)
p                  39 include/linux/freezer.h 	return freezing_slow_path(p);
p                  70 include/linux/freezer.h extern bool freeze_task(struct task_struct *p);
p                 149 include/linux/freezer.h static inline bool freezer_should_skip(struct task_struct *p)
p                 159 include/linux/freezer.h 	return p->flags & PF_FREEZER_SKIP;
p                 261 include/linux/freezer.h static inline bool frozen(struct task_struct *p) { return false; }
p                 262 include/linux/freezer.h static inline bool freezing(struct task_struct *p) { return false; }
p                 276 include/linux/freezer.h static inline int freezer_should_skip(struct task_struct *p) { return 0; }
p                  53 include/linux/frontswap.h static inline void frontswap_map_set(struct swap_info_struct *p,
p                  56 include/linux/frontswap.h 	p->frontswap_map = map;
p                  59 include/linux/frontswap.h static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
p                  61 include/linux/frontswap.h 	return p->frontswap_map;
p                  76 include/linux/frontswap.h static inline void frontswap_map_set(struct swap_info_struct *p,
p                  81 include/linux/frontswap.h static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
p                  14 include/linux/fs_pin.h static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *))
p                  16 include/linux/fs_pin.h 	init_waitqueue_head(&p->wait);
p                  17 include/linux/fs_pin.h 	INIT_HLIST_NODE(&p->s_list);
p                  18 include/linux/fs_pin.h 	INIT_HLIST_NODE(&p->m_list);
p                  19 include/linux/fs_pin.h 	p->kill = kill;
p                  42 include/linux/fscrypt.h #define fname_name(p)		((p)->disk_name.name)
p                  43 include/linux/fscrypt.h #define fname_len(p)		((p)->disk_name.len)
p                  26 include/linux/genetlink.h #define rcu_dereference_genl(p)					\
p                  27 include/linux/genetlink.h 	rcu_dereference_check(p, lockdep_genl_is_held())
p                  36 include/linux/genetlink.h #define genl_dereference(p)					\
p                  37 include/linux/genetlink.h 	rcu_dereference_protected(p, lockdep_genl_is_held())
p                  43 include/linux/greybus.h #define GREYBUS_DEVICE(v, p)					\
p                  46 include/linux/greybus.h 	.product	= (p),
p                  31 include/linux/hippidevice.h int hippi_mac_addr(struct net_device *dev, void *p);
p                  32 include/linux/hippidevice.h int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
p                 167 include/linux/hugetlb.h #define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
p                 194 include/linux/hugetlb.h #define putback_active_hugepage(p)	do {} while (0)
p                 121 include/linux/idr.h 		 int (*fn)(int id, void *p, void *data), void *data);
p                  24 include/linux/ihex.h static inline uint16_t ihex_binrec_size(const struct ihex_binrec *p)
p                  26 include/linux/ihex.h 	return be16_to_cpu(p->len) + sizeof(*p);
p                  33 include/linux/ihex.h 	const void *p = rec;
p                  35 include/linux/ihex.h 	return p + ALIGN(ihex_binrec_size(rec), 4);
p                 131 include/linux/iio/common/cros_ec_sensors_core.h irqreturn_t cros_ec_sensors_capture(int irq, void *p);
p                 259 include/linux/iio/common/st_sensors.h irqreturn_t st_sensors_trigger_handler(int irq, void *p);
p                  32 include/linux/iio/trigger_consumer.h 	irqreturn_t (*h)(int irq, void *p);
p                  33 include/linux/iio/trigger_consumer.h 	irqreturn_t (*thread)(int irq, void *p);
p                  42 include/linux/iio/trigger_consumer.h *iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
p                  43 include/linux/iio/trigger_consumer.h 		    irqreturn_t (*thread)(int irq, void *p),
p                  49 include/linux/iio/trigger_consumer.h irqreturn_t iio_pollfunc_store_time(int irq, void *p);
p                  11 include/linux/iio/triggered_buffer.h 	irqreturn_t (*h)(int irq, void *p),
p                  12 include/linux/iio/triggered_buffer.h 	irqreturn_t (*thread)(int irq, void *p),
p                  18 include/linux/iio/triggered_buffer.h 				    irqreturn_t (*h)(int irq, void *p),
p                  19 include/linux/iio/triggered_buffer.h 				    irqreturn_t (*thread)(int irq, void *p),
p                   8 include/linux/iio/triggered_event.h 	irqreturn_t (*h)(int irq, void *p),
p                   9 include/linux/iio/triggered_event.h 	irqreturn_t (*thread)(int irq, void *p));
p                 363 include/linux/intel-iommu.h #define QI_DEV_EIOTLB_PASID(p)	(((u64)p) << 32)
p                 371 include/linux/intel-iommu.h #define QI_PGRP_PASID_P(p)	(((u64)(p)) << 4)
p                 372 include/linux/intel-iommu.h #define QI_PGRP_PDP(p)		(((u64)(p)) << 5)
p                 739 include/linux/interrupt.h int show_interrupts(struct seq_file *p, void *v);
p                 740 include/linux/interrupt.h int arch_show_interrupts(struct seq_file *p, int prec);
p                  10 include/linux/io-64-nonatomic-hi-lo.h 	const volatile u32 __iomem *p = addr;
p                  13 include/linux/io-64-nonatomic-hi-lo.h 	high = readl(p + 1);
p                  14 include/linux/io-64-nonatomic-hi-lo.h 	low = readl(p);
p                  27 include/linux/io-64-nonatomic-hi-lo.h 	const volatile u32 __iomem *p = addr;
p                  30 include/linux/io-64-nonatomic-hi-lo.h 	high = readl_relaxed(p + 1);
p                  31 include/linux/io-64-nonatomic-hi-lo.h 	low = readl_relaxed(p);
p                  10 include/linux/io-64-nonatomic-lo-hi.h 	const volatile u32 __iomem *p = addr;
p                  13 include/linux/io-64-nonatomic-lo-hi.h 	low = readl(p);
p                  14 include/linux/io-64-nonatomic-lo-hi.h 	high = readl(p + 1);
p                  27 include/linux/io-64-nonatomic-lo-hi.h 	const volatile u32 __iomem *p = addr;
p                  30 include/linux/io-64-nonatomic-lo-hi.h 	low = readl_relaxed(p);
p                  31 include/linux/io-64-nonatomic-lo-hi.h 	high = readl_relaxed(p + 1);
p                   7 include/linux/ipv6.h #define ipv6_optlen(p)  (((p)->hdrlen+1) << 3)
p                   8 include/linux/ipv6.h #define ipv6_authlen(p) (((p)->hdrlen+2) << 2)
p                 498 include/linux/irq.h 	void		(*irq_print_chip)(struct irq_data *data, struct seq_file *p);
p                  34 include/linux/irqflags.h # define trace_hardirq_context(p)	((p)->hardirq_context)
p                  35 include/linux/irqflags.h # define trace_softirq_context(p)	((p)->softirq_context)
p                  36 include/linux/irqflags.h # define trace_hardirqs_enabled(p)	((p)->hardirqs_enabled)
p                  37 include/linux/irqflags.h # define trace_softirqs_enabled(p)	((p)->softirqs_enabled)
p                  57 include/linux/irqflags.h # define trace_hardirq_context(p)	0
p                  58 include/linux/irqflags.h # define trace_softirq_context(p)	0
p                  59 include/linux/irqflags.h # define trace_hardirqs_enabled(p)	0
p                  60 include/linux/irqflags.h # define trace_softirqs_enabled(p)	0
p                 137 include/linux/isdn/capiutil.h 	unsigned l, p;
p                 177 include/linux/isdn/capiutil.h 	u_char	*p;
p                  13 include/linux/kasan-checks.h bool __kasan_check_read(const volatile void *p, unsigned int size);
p                  14 include/linux/kasan-checks.h bool __kasan_check_write(const volatile void *p, unsigned int size);
p                  16 include/linux/kasan-checks.h static inline bool __kasan_check_read(const volatile void *p, unsigned int size)
p                  20 include/linux/kasan-checks.h static inline bool __kasan_check_write(const volatile void *p, unsigned int size)
p                  34 include/linux/kasan-checks.h static inline bool kasan_check_read(const volatile void *p, unsigned int size)
p                  38 include/linux/kasan-checks.h static inline bool kasan_check_write(const volatile void *p, unsigned int size)
p                 178 include/linux/kdb.h int kdb_process_cpu(const struct task_struct *p)
p                 180 include/linux/kdb.h 	unsigned int cpu = task_cpu(p);
p                  36 include/linux/kernel.h #define PTR_ALIGN(p, a)		((typeof(p))ALIGN((unsigned long)(p), (a)))
p                 398 include/linux/kexec.h static inline int kexec_should_crash(struct task_struct *p) { return 0; }
p                 121 include/linux/key-type.h 	void (*describe)(const struct key *key, struct seq_file *p);
p                 489 include/linux/key.h #define make_key_ref(k, p)		NULL
p                 145 include/linux/kgdb.h sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p);
p                 114 include/linux/kprobes.h static inline int kprobe_gone(struct kprobe *p)
p                 116 include/linux/kprobes.h 	return p->flags & KPROBE_FLAG_GONE;
p                 120 include/linux/kprobes.h static inline int kprobe_disabled(struct kprobe *p)
p                 122 include/linux/kprobes.h 	return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
p                 126 include/linux/kprobes.h static inline int kprobe_optimized(struct kprobe *p)
p                 128 include/linux/kprobes.h 	return p->flags & KPROBE_FLAG_OPTIMIZED;
p                 132 include/linux/kprobes.h static inline int kprobe_ftrace(struct kprobe *p)
p                 134 include/linux/kprobes.h 	return p->flags & KPROBE_FLAG_FTRACE;
p                 193 include/linux/kprobes.h extern int arch_trampoline_kprobe(struct kprobe *p);
p                 199 include/linux/kprobes.h static inline int arch_trampoline_kprobe(struct kprobe *p)
p                 226 include/linux/kprobes.h extern int arch_prepare_kprobe(struct kprobe *p);
p                 227 include/linux/kprobes.h extern void arch_arm_kprobe(struct kprobe *p);
p                 228 include/linux/kprobes.h extern void arch_disarm_kprobe(struct kprobe *p);
p                 231 include/linux/kprobes.h extern void kprobes_inc_nmissed_count(struct kprobe *p);
p                 308 include/linux/kprobes.h extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
p                 325 include/linux/kprobes.h extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
p                 328 include/linux/kprobes.h int arch_check_ftrace_location(struct kprobe *p);
p                 354 include/linux/kprobes.h int register_kprobe(struct kprobe *p);
p                 355 include/linux/kprobes.h void unregister_kprobe(struct kprobe *p);
p                 394 include/linux/kprobes.h static inline int register_kprobe(struct kprobe *p)
p                 402 include/linux/kprobes.h static inline void unregister_kprobe(struct kprobe *p)
p                  39 include/linux/latencytop.h void clear_tsk_latency_tracing(struct task_struct *p);
p                  51 include/linux/latencytop.h static inline void clear_tsk_latency_tracing(struct task_struct *p)
p                  86 include/linux/libata.h #define ata_msg_drv(p)    ((p)->msg_enable & ATA_MSG_DRV)
p                  87 include/linux/libata.h #define ata_msg_info(p)   ((p)->msg_enable & ATA_MSG_INFO)
p                  88 include/linux/libata.h #define ata_msg_probe(p)  ((p)->msg_enable & ATA_MSG_PROBE)
p                  89 include/linux/libata.h #define ata_msg_warn(p)   ((p)->msg_enable & ATA_MSG_WARN)
p                  90 include/linux/libata.h #define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
p                  91 include/linux/libata.h #define ata_msg_ctl(p)    ((p)->msg_enable & ATA_MSG_CTL)
p                  92 include/linux/libata.h #define ata_msg_intr(p)   ((p)->msg_enable & ATA_MSG_INTR)
p                  93 include/linux/libata.h #define ata_msg_err(p)    ((p)->msg_enable & ATA_MSG_ERR)
p                 497 include/linux/lightnvm.h 				    struct ppa_addr p)
p                 505 include/linux/lightnvm.h 		caddr = (u64)p.g.pg << ppaf->pg_offset;
p                 506 include/linux/lightnvm.h 		caddr |= (u64)p.g.pl << ppaf->pln_offset;
p                 507 include/linux/lightnvm.h 		caddr |= (u64)p.g.sec << ppaf->sec_offset;
p                 509 include/linux/lightnvm.h 		caddr = p.m.sec;
p                1636 include/linux/lsm_hooks.h 	int (*task_setpgid)(struct task_struct *p, pid_t pgid);
p                1637 include/linux/lsm_hooks.h 	int (*task_getpgid)(struct task_struct *p);
p                1638 include/linux/lsm_hooks.h 	int (*task_getsid)(struct task_struct *p);
p                1639 include/linux/lsm_hooks.h 	void (*task_getsecid)(struct task_struct *p, u32 *secid);
p                1640 include/linux/lsm_hooks.h 	int (*task_setnice)(struct task_struct *p, int nice);
p                1641 include/linux/lsm_hooks.h 	int (*task_setioprio)(struct task_struct *p, int ioprio);
p                1642 include/linux/lsm_hooks.h 	int (*task_getioprio)(struct task_struct *p);
p                1645 include/linux/lsm_hooks.h 	int (*task_setrlimit)(struct task_struct *p, unsigned int resource,
p                1647 include/linux/lsm_hooks.h 	int (*task_setscheduler)(struct task_struct *p);
p                1648 include/linux/lsm_hooks.h 	int (*task_getscheduler)(struct task_struct *p);
p                1649 include/linux/lsm_hooks.h 	int (*task_movememory)(struct task_struct *p);
p                1650 include/linux/lsm_hooks.h 	int (*task_kill)(struct task_struct *p, struct kernel_siginfo *info,
p                1654 include/linux/lsm_hooks.h 	void (*task_to_inode)(struct task_struct *p, struct inode *inode);
p                1690 include/linux/lsm_hooks.h 	int (*getprocattr)(struct task_struct *p, char *name, char **value);
p                  14 include/linux/mISDNdsp.h 	void	(*free)(void *p);
p                  15 include/linux/mISDNdsp.h 	void	(*process_tx)(void *p, unsigned char *data, int len);
p                  16 include/linux/mISDNdsp.h 	void	(*process_rx)(void *p, unsigned char *data, int len,
p                 223 include/linux/mISDNif.h #define IS_ISDN_P_TE(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_TE_E1) || \
p                 224 include/linux/mISDNif.h 				(p == ISDN_P_TE_UP0) || (p == ISDN_P_LAPD_TE))
p                 225 include/linux/mISDNif.h #define IS_ISDN_P_NT(p) ((p == ISDN_P_NT_S0) || (p == ISDN_P_NT_E1) || \
p                 226 include/linux/mISDNif.h 				(p == ISDN_P_NT_UP0) || (p == ISDN_P_LAPD_NT))
p                 227 include/linux/mISDNif.h #define IS_ISDN_P_S0(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_NT_S0))
p                 228 include/linux/mISDNif.h #define IS_ISDN_P_E1(p) ((p == ISDN_P_TE_E1) || (p == ISDN_P_NT_E1))
p                 229 include/linux/mISDNif.h #define IS_ISDN_P_UP0(p) ((p == ISDN_P_TE_UP0) || (p == ISDN_P_NT_UP0))
p                 104 include/linux/maple.h #define maple_set_drvdata(d,p)		dev_set_drvdata(&(d)->dev, (p))
p                 431 include/linux/memcontrol.h struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
p                 556 include/linux/memcontrol.h 				struct task_struct *p);
p                 572 include/linux/memcontrol.h static inline bool task_in_memcg_oom(struct task_struct *p)
p                 574 include/linux/memcontrol.h 	return p->memcg_in_oom;
p                 707 include/linux/memcontrol.h void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
p                 708 include/linux/memcontrol.h void mod_memcg_obj_state(void *p, int idx, int val);
p                1001 include/linux/memcontrol.h mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
p                1035 include/linux/memcontrol.h static inline bool task_in_memcg_oom(struct task_struct *p)
p                1126 include/linux/memcontrol.h static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
p                1129 include/linux/memcontrol.h 	struct page *page = virt_to_head_page(p);
p                1134 include/linux/memcontrol.h static inline void mod_memcg_obj_state(void *p, int idx, int val)
p                1225 include/linux/memcontrol.h static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
p                1227 include/linux/memcontrol.h 	__mod_lruvec_slab_state(p, idx, 1);
p                1230 include/linux/memcontrol.h static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
p                1232 include/linux/memcontrol.h 	__mod_lruvec_slab_state(p, idx, -1);
p                1442 include/linux/memcontrol.h struct mem_cgroup *mem_cgroup_from_obj(void *p);
p                1485 include/linux/memcontrol.h static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
p                 307 include/linux/memory_hotplug.h static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
p                 308 include/linux/memory_hotplug.h static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
p                 135 include/linux/mempolicy.h void mpol_free_shared_policy(struct shared_policy *p);
p                 139 include/linux/mempolicy.h struct mempolicy *get_task_policy(struct task_struct *p);
p                 217 include/linux/mempolicy.h static inline void mpol_put(struct mempolicy *p)
p                 236 include/linux/mempolicy.h static inline void mpol_free_shared_policy(struct shared_policy *p)
p                 190 include/linux/mfd/ti_am335x_tscadc.h static inline struct ti_tscadc_dev *ti_tscadc_dev_get(struct platform_device *p)
p                 192 include/linux/mfd/ti_am335x_tscadc.h 	struct ti_tscadc_dev **tscadc_dev = p->dev.platform_data;
p                  70 include/linux/mlx5/device.h #define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld)))
p                  73 include/linux/mlx5/device.h #define MLX5_SET(typ, p, fld, v) do { \
p                  76 include/linux/mlx5/device.h 	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
p                  77 include/linux/mlx5/device.h 	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
p                  82 include/linux/mlx5/device.h #define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
p                  84 include/linux/mlx5/device.h 	MLX5_SET(typ, p, fld[idx], v); \
p                  87 include/linux/mlx5/device.h #define MLX5_SET_TO_ONES(typ, p, fld) do { \
p                  89 include/linux/mlx5/device.h 	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
p                  90 include/linux/mlx5/device.h 	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
p                  95 include/linux/mlx5/device.h #define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
p                  99 include/linux/mlx5/device.h #define MLX5_GET_PR(typ, p, fld) ({ \
p                 100 include/linux/mlx5/device.h 	u32 ___t = MLX5_GET(typ, p, fld); \
p                 105 include/linux/mlx5/device.h #define __MLX5_SET64(typ, p, fld, v) do { \
p                 107 include/linux/mlx5/device.h 	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
p                 110 include/linux/mlx5/device.h #define MLX5_SET64(typ, p, fld, v) do { \
p                 112 include/linux/mlx5/device.h 	__MLX5_SET64(typ, p, fld, v); \
p                 115 include/linux/mlx5/device.h #define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
p                 117 include/linux/mlx5/device.h 	__MLX5_SET64(typ, p, fld[idx], v); \
p                 120 include/linux/mlx5/device.h #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
p                 122 include/linux/mlx5/device.h #define MLX5_GET64_PR(typ, p, fld) ({ \
p                 123 include/linux/mlx5/device.h 	u64 ___t = MLX5_GET64(typ, p, fld); \
p                 128 include/linux/mlx5/device.h #define MLX5_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) +\
p                 132 include/linux/mlx5/device.h #define MLX5_SET16(typ, p, fld, v) do { \
p                 135 include/linux/mlx5/device.h 	*((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
p                 136 include/linux/mlx5/device.h 	cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __mlx5_16_off(typ, fld))) & \
p                 142 include/linux/mlx5/device.h #define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
p                 145 include/linux/mlx5/device.h #define MLX5_GET_BE(type_t, typ, p, fld) ({				  \
p                 149 include/linux/mlx5/device.h 			tmp = (__force type_t)MLX5_GET(typ, p, fld);	  \
p                 152 include/linux/mlx5/device.h 			tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
p                 155 include/linux/mlx5/device.h 			tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
p                 158 include/linux/mlx5/device.h 			tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
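The MLX5_SET()/MLX5_GET() family above reads and writes named fields of big-endian firmware command layouts. A hedged sketch of the usual fill-the-inbox pattern; the command and field names are taken to come from mlx5_ifc.h and the buffer is assumed to be zeroed and correctly sized by the caller:

	#include <linux/mlx5/device.h>
	#include <linux/mlx5/mlx5_ifc.h>

	static void fill_query_hca_cap(void *in)
	{
		/* in is assumed to be MLX5_ST_SZ_BYTES(query_hca_cap_in) bytes, zeroed. */
		MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
		MLX5_SET(query_hca_cap_in, in, op_mod, 1);	/* illustrative op_mod value */
	}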
p                1112 include/linux/mm.h 	struct page *p = (struct page *)page;
p                1114 include/linux/mm.h 	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
p                1424 include/linux/mm.h #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
p                2746 include/linux/mm.h extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
p                2810 include/linux/mm.h extern void shake_page(struct page *p, int access);
p                  32 include/linux/mmiotrace.h extern int register_kmmio_probe(struct kmmio_probe *p);
p                  33 include/linux/mmiotrace.h extern void unregister_kmmio_probe(struct kmmio_probe *p);
p                 636 include/linux/module.h #define symbol_put_addr(p) do { } while (0)
p                 336 include/linux/moduleparam.h #define __param_check(name, p, type) \
p                 337 include/linux/moduleparam.h 	static inline type __always_unused *__check_##name(void) { return(p); }
p                 342 include/linux/moduleparam.h #define param_check_byte(name, p) __param_check(name, p, unsigned char)
p                 347 include/linux/moduleparam.h #define param_check_short(name, p) __param_check(name, p, short)
p                 352 include/linux/moduleparam.h #define param_check_ushort(name, p) __param_check(name, p, unsigned short)
p                 357 include/linux/moduleparam.h #define param_check_int(name, p) __param_check(name, p, int)
p                 362 include/linux/moduleparam.h #define param_check_uint(name, p) __param_check(name, p, unsigned int)
p                 367 include/linux/moduleparam.h #define param_check_long(name, p) __param_check(name, p, long)
p                 372 include/linux/moduleparam.h #define param_check_ulong(name, p) __param_check(name, p, unsigned long)
p                 377 include/linux/moduleparam.h #define param_check_ullong(name, p) __param_check(name, p, unsigned long long)
p                 383 include/linux/moduleparam.h #define param_check_charp(name, p) __param_check(name, p, char *)
p                 389 include/linux/moduleparam.h #define param_check_bool(name, p) __param_check(name, p, bool)
p                 400 include/linux/moduleparam.h #define param_check_invbool(name, p) __param_check(name, p, bool)
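The __param_check()/param_check_*() entries above are a compile-time type check: each defines an unused inline function whose return type must match the variable handed to module_param(), so a type mismatch fails the build. A minimal hedged sketch of the effect; debug_level is an illustrative parameter:

	#include <linux/moduleparam.h>

	static unsigned int debug_level;

	/* module_param() expands param_check_uint(debug_level, &debug_level); passing a
	 * variable of a different type would make the generated inline fail to compile. */
	module_param(debug_level, uint, 0644);
	MODULE_PARM_DESC(debug_level, "illustrative verbosity knob");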
p                 374 include/linux/mroute_base.h 	struct seq_net_private p;
p                 380 include/linux/mroute_base.h 	struct seq_net_private p;
p                2659 include/linux/netdevice.h int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
p                3877 include/linux/netdevice.h #define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
p                3878 include/linux/netdevice.h #define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
p                3879 include/linux/netdevice.h #define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
p                3880 include/linux/netdevice.h #define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
p                3881 include/linux/netdevice.h #define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
p                3882 include/linux/netdevice.h #define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
p                3883 include/linux/netdevice.h #define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
p                3884 include/linux/netdevice.h #define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
p                3885 include/linux/netdevice.h #define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
p                3886 include/linux/netdevice.h #define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
p                3887 include/linux/netdevice.h #define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
p                3888 include/linux/netdevice.h #define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
p                3889 include/linux/netdevice.h #define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
p                3890 include/linux/netdevice.h #define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
p                3891 include/linux/netdevice.h #define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
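The netif_msg_*() tests above gate per-category driver logging on bits in a driver-private msg_enable word. A minimal sketch of the usual guard pattern; struct my_priv and the message are illustrative assumptions:

	#include <linux/netdevice.h>

	struct my_priv {			/* hypothetical driver private data */
		u32 msg_enable;
		struct net_device *ndev;
	};

	static void report_link(struct my_priv *priv, bool up)
	{
		if (netif_msg_link(priv))	/* only log when the LINK category is enabled */
			netdev_info(priv->ndev, "link is %s\n", up ? "up" : "down");
	}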
p                 138 include/linux/netfilter.h static inline void nf_hook_state_init(struct nf_hook_state *p,
p                 147 include/linux/netfilter.h 	p->hook = hook;
p                 148 include/linux/netfilter.h 	p->pf = pf;
p                 149 include/linux/netfilter.h 	p->in = indev;
p                 150 include/linux/netfilter.h 	p->out = outdev;
p                 151 include/linux/netfilter.h 	p->sk = sk;
p                 152 include/linux/netfilter.h 	p->net = net;
p                 153 include/linux/netfilter.h 	p->okfn = okfn;
p                  85 include/linux/nvram.h 	char *p = buf;
p                  89 include/linux/nvram.h 	for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count)
p                  90 include/linux/nvram.h 		*p = nvram_read_byte(i);
p                  92 include/linux/nvram.h 	return p - buf;
p                  99 include/linux/nvram.h 	char *p = buf;
p                 103 include/linux/nvram.h 	for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count)
p                 104 include/linux/nvram.h 		nvram_write_byte(*p, i);
p                 106 include/linux/nvram.h 	return p - buf;
p                 210 include/linux/of.h static inline int of_property_check_flag(struct property *p, unsigned long flag)
p                 212 include/linux/of.h 	return test_bit(flag, &p->_flags);
p                 215 include/linux/of.h static inline void of_property_set_flag(struct property *p, unsigned long flag)
p                 217 include/linux/of.h 	set_bit(flag, &p->_flags);
p                 220 include/linux/of.h static inline void of_property_clear_flag(struct property *p, unsigned long flag)
p                 222 include/linux/of.h 	clear_bit(flag, &p->_flags);
p                 955 include/linux/of.h static inline int of_property_check_flag(struct property *p, unsigned long flag)
p                 960 include/linux/of.h static inline void of_property_set_flag(struct property *p, unsigned long flag)
p                 964 include/linux/of.h static inline void of_property_clear_flag(struct property *p, unsigned long flag)
p                1216 include/linux/of.h #define of_property_for_each_u32(np, propname, prop, p, u)	\
p                1218 include/linux/of.h 		p = of_prop_next_u32(prop, NULL, &u);		\
p                1219 include/linux/of.h 		p;						\
p                1220 include/linux/of.h 		p = of_prop_next_u32(prop, p, &u))
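The of_property_for_each_u32() entries above walk a u32 array property, yielding each value in CPU byte order. A hedged sketch over a hypothetical "clock-frequencies" property:

	#include <linux/kernel.h>
	#include <linux/of.h>

	static void dump_frequencies(struct device_node *np)
	{
		struct property *prop;
		const __be32 *cur;
		u32 freq;

		of_property_for_each_u32(np, "clock-frequencies", prop, cur, freq)
			pr_info("supported frequency: %u Hz\n", freq);
	}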
p                  77 include/linux/omap-gpmc.h extern int gpmc_cs_program_settings(int cs, struct gpmc_settings *p);
p                  82 include/linux/omap-gpmc.h 				  struct gpmc_settings *p);
p                  69 include/linux/oom.h static inline bool oom_task_origin(const struct task_struct *p)
p                  71 include/linux/oom.h 	return p->signal->oom_flag_origin;
p                 110 include/linux/oom.h extern unsigned long oom_badness(struct task_struct *p,
p                 123 include/linux/oom.h extern struct task_struct *find_lock_task_mm(struct task_struct *p);
p                 314 include/linux/overflow.h #define struct_size(p, member, n)					\
p                 316 include/linux/overflow.h 		    sizeof(*(p)->member) + __must_be_array((p)->member),\
p                 317 include/linux/overflow.h 		    sizeof(*(p)))
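The struct_size() entry above computes the allocation size of a structure ending in a flexible array member, with overflow checking folded in. A minimal sketch; struct entry is an illustrative type:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct entry {				/* hypothetical object with a flexible array */
		unsigned int count;
		u32 values[];
	};

	static struct entry *alloc_entry(unsigned int n)
	{
		/* struct_size(e, values, n) == sizeof(*e) + n * sizeof(e->values[0]), saturating on overflow. */
		struct entry *e = kzalloc(struct_size(e, values, n), GFP_KERNEL);

		if (e)
			e->count = n;
		return e;
	}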
p                 491 include/linux/parport.h #define parport_write_data(p,x)            parport_pc_write_data(p,x)
p                 492 include/linux/parport.h #define parport_read_data(p)               parport_pc_read_data(p)
p                 493 include/linux/parport.h #define parport_write_control(p,x)         parport_pc_write_control(p,x)
p                 494 include/linux/parport.h #define parport_read_control(p)            parport_pc_read_control(p)
p                 495 include/linux/parport.h #define parport_frob_control(p,m,v)        parport_pc_frob_control(p,m,v)
p                 496 include/linux/parport.h #define parport_read_status(p)             parport_pc_read_status(p)
p                 497 include/linux/parport.h #define parport_enable_irq(p)              parport_pc_enable_irq(p)
p                 498 include/linux/parport.h #define parport_disable_irq(p)             parport_pc_disable_irq(p)
p                 499 include/linux/parport.h #define parport_data_forward(p)            parport_pc_data_forward(p)
p                 500 include/linux/parport.h #define parport_data_reverse(p)            parport_pc_data_reverse(p)
p                 505 include/linux/parport.h #define parport_write_data(p,x)            (p)->ops->write_data(p,x)
p                 506 include/linux/parport.h #define parport_read_data(p)               (p)->ops->read_data(p)
p                 507 include/linux/parport.h #define parport_write_control(p,x)         (p)->ops->write_control(p,x)
p                 508 include/linux/parport.h #define parport_read_control(p)            (p)->ops->read_control(p)
p                 509 include/linux/parport.h #define parport_frob_control(p,m,v)        (p)->ops->frob_control(p,m,v)
p                 510 include/linux/parport.h #define parport_read_status(p)             (p)->ops->read_status(p)
p                 511 include/linux/parport.h #define parport_enable_irq(p)              (p)->ops->enable_irq(p)
p                 512 include/linux/parport.h #define parport_disable_irq(p)             (p)->ops->disable_irq(p)
p                 513 include/linux/parport.h #define parport_data_forward(p)            (p)->ops->data_forward(p)
p                 514 include/linux/parport.h #define parport_data_reverse(p)            (p)->ops->data_reverse(p)
p                   9 include/linux/parport_pc.h #define ECONTROL(p) ((p)->base_hi + 0x2)
p                  10 include/linux/parport_pc.h #define CONFIGB(p)  ((p)->base_hi + 0x1)
p                  11 include/linux/parport_pc.h #define CONFIGA(p)  ((p)->base_hi + 0x0)
p                  12 include/linux/parport_pc.h #define FIFO(p)     ((p)->base_hi + 0x0)
p                  13 include/linux/parport_pc.h #define EPPDATA(p)  ((p)->base    + 0x4)
p                  14 include/linux/parport_pc.h #define EPPADDR(p)  ((p)->base    + 0x3)
p                  15 include/linux/parport_pc.h #define CONTROL(p)  ((p)->base    + 0x2)
p                  16 include/linux/parport_pc.h #define STATUS(p)   ((p)->base    + 0x1)
p                  17 include/linux/parport_pc.h #define DATA(p)     ((p)->base    + 0x0)
p                  63 include/linux/parport_pc.h static __inline__ void parport_pc_write_data(struct parport *p, unsigned char d)
p                  66 include/linux/parport_pc.h 	printk (KERN_DEBUG "parport_pc_write_data(%p,0x%02x)\n", p, d);
p                  68 include/linux/parport_pc.h 	outb(d, DATA(p));
p                  71 include/linux/parport_pc.h static __inline__ unsigned char parport_pc_read_data(struct parport *p)
p                  73 include/linux/parport_pc.h 	unsigned char val = inb (DATA (p));
p                  76 include/linux/parport_pc.h 		p, val);
p                  82 include/linux/parport_pc.h static inline void dump_parport_state (char *str, struct parport *p)
p                  85 include/linux/parport_pc.h 	unsigned char ecr = inb (ECONTROL (p));
p                  86 include/linux/parport_pc.h 	unsigned char dcr = inb (CONTROL (p));
p                  87 include/linux/parport_pc.h 	unsigned char dsr = inb (STATUS (p));
p                  89 include/linux/parport_pc.h 	const struct parport_pc_private *priv = p->physport->private_data;
p                 100 include/linux/parport_pc.h 		dcr = i ? priv->ctr : inb (CONTROL (p));
p                 128 include/linux/parport_pc.h static __inline__ unsigned char __parport_pc_frob_control (struct parport *p,
p                 132 include/linux/parport_pc.h 	struct parport_pc_private *priv = p->physport->private_data;
p                 141 include/linux/parport_pc.h 	outb (ctr, CONTROL (p));
p                 146 include/linux/parport_pc.h static __inline__ void parport_pc_data_reverse (struct parport *p)
p                 148 include/linux/parport_pc.h 	__parport_pc_frob_control (p, 0x20, 0x20);
p                 151 include/linux/parport_pc.h static __inline__ void parport_pc_data_forward (struct parport *p)
p                 153 include/linux/parport_pc.h 	__parport_pc_frob_control (p, 0x20, 0x00);
p                 156 include/linux/parport_pc.h static __inline__ void parport_pc_write_control (struct parport *p,
p                 167 include/linux/parport_pc.h 			p->name, p->cad->name);
p                 168 include/linux/parport_pc.h 		parport_pc_data_reverse (p);
p                 171 include/linux/parport_pc.h 	__parport_pc_frob_control (p, wm, d & wm);
p                 174 include/linux/parport_pc.h static __inline__ unsigned char parport_pc_read_control(struct parport *p)
p                 180 include/linux/parport_pc.h 	const struct parport_pc_private *priv = p->physport->private_data;
p                 184 include/linux/parport_pc.h static __inline__ unsigned char parport_pc_frob_control (struct parport *p,
p                 196 include/linux/parport_pc.h 			p->name, p->cad->name,
p                 199 include/linux/parport_pc.h 			parport_pc_data_reverse (p);
p                 201 include/linux/parport_pc.h 			parport_pc_data_forward (p);
p                 208 include/linux/parport_pc.h 	return __parport_pc_frob_control (p, mask, val);
p                 211 include/linux/parport_pc.h static __inline__ unsigned char parport_pc_read_status(struct parport *p)
p                 213 include/linux/parport_pc.h 	return inb(STATUS(p));
p                 217 include/linux/parport_pc.h static __inline__ void parport_pc_disable_irq(struct parport *p)
p                 219 include/linux/parport_pc.h 	__parport_pc_frob_control (p, 0x10, 0x00);
p                 222 include/linux/parport_pc.h static __inline__ void parport_pc_enable_irq(struct parport *p)
p                 224 include/linux/parport_pc.h 	__parport_pc_frob_control (p, 0x10, 0x10);
p                 227 include/linux/parport_pc.h extern void parport_pc_release_resources(struct parport *p);
p                 229 include/linux/parport_pc.h extern int parport_pc_claim_resources(struct parport *p);
p                 237 include/linux/parport_pc.h extern void parport_pc_unregister_port(struct parport *p);
p                 116 include/linux/perf/arm_pmu.h #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
p                  35 include/linux/pinctrl/consumer.h extern void pinctrl_put(struct pinctrl *p);
p                  37 include/linux/pinctrl/consumer.h 							struct pinctrl *p,
p                  39 include/linux/pinctrl/consumer.h extern int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s);
p                  42 include/linux/pinctrl/consumer.h extern void devm_pinctrl_put(struct pinctrl *p);
p                  99 include/linux/pinctrl/consumer.h static inline void pinctrl_put(struct pinctrl *p)
p                 104 include/linux/pinctrl/consumer.h 							struct pinctrl *p,
p                 110 include/linux/pinctrl/consumer.h static inline int pinctrl_select_state(struct pinctrl *p,
p                 121 include/linux/pinctrl/consumer.h static inline void devm_pinctrl_put(struct pinctrl *p)
p                 145 include/linux/pinctrl/consumer.h 	struct pinctrl *p;
p                 149 include/linux/pinctrl/consumer.h 	p = pinctrl_get(dev);
p                 150 include/linux/pinctrl/consumer.h 	if (IS_ERR(p))
p                 151 include/linux/pinctrl/consumer.h 		return p;
p                 153 include/linux/pinctrl/consumer.h 	s = pinctrl_lookup_state(p, name);
p                 155 include/linux/pinctrl/consumer.h 		pinctrl_put(p);
p                 159 include/linux/pinctrl/consumer.h 	ret = pinctrl_select_state(p, s);
p                 161 include/linux/pinctrl/consumer.h 		pinctrl_put(p);
p                 165 include/linux/pinctrl/consumer.h 	return p;
p                 177 include/linux/pinctrl/consumer.h 	struct pinctrl *p;
p                 181 include/linux/pinctrl/consumer.h 	p = devm_pinctrl_get(dev);
p                 182 include/linux/pinctrl/consumer.h 	if (IS_ERR(p))
p                 183 include/linux/pinctrl/consumer.h 		return p;
p                 185 include/linux/pinctrl/consumer.h 	s = pinctrl_lookup_state(p, name);
p                 187 include/linux/pinctrl/consumer.h 		devm_pinctrl_put(p);
p                 191 include/linux/pinctrl/consumer.h 	ret = pinctrl_select_state(p, s);
p                 193 include/linux/pinctrl/consumer.h 		devm_pinctrl_put(p);
p                 197 include/linux/pinctrl/consumer.h 	return p;
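The pinctrl consumer entries above show the get / lookup_state / select_state sequence that the *_get_select helpers bundle. A hedged sketch of the open-coded form as a driver might call it from probe, using the managed (devm_) getter:

	#include <linux/err.h>
	#include <linux/pinctrl/consumer.h>

	static int apply_default_pins(struct device *dev)
	{
		struct pinctrl *p;
		struct pinctrl_state *s;

		p = devm_pinctrl_get(dev);
		if (IS_ERR(p))
			return PTR_ERR(p);

		s = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT);
		if (IS_ERR(s))
			return PTR_ERR(s);

		return pinctrl_select_state(p, s);
	}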
p                  31 include/linux/pinctrl/devinfo.h 	struct pinctrl *p;
p                 142 include/linux/pinctrl/pinconf-generic.h #define PIN_CONF_PACKED(p, a) ((a << 8) | ((unsigned long) p & 0xffUL))
p                 306 include/linux/pnfs_osd_xdr.h 	struct pnfs_osd_deviceaddr *deviceaddr, __be32 *p);
p                 315 include/linux/pnfs_osd_xdr.h extern void pnfs_osd_xdr_encode_ioerr(__be32 *p, struct pnfs_osd_ioerr *ioerr);
p                  48 include/linux/poll.h static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
p                  50 include/linux/poll.h 	if (p && p->_qproc && wait_address)
p                  51 include/linux/poll.h 		p->_qproc(filp, wait_address, p);
p                  59 include/linux/poll.h static inline bool poll_does_not_wait(const poll_table *p)
p                  61 include/linux/poll.h 	return p == NULL || p->_qproc == NULL;
p                  70 include/linux/poll.h static inline __poll_t poll_requested_events(const poll_table *p)
p                  72 include/linux/poll.h 	return p ? p->_key : ~(__poll_t)0;
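The poll_wait() and poll_requested_events() entries above are the building blocks of a driver .poll handler: poll_wait() registers the caller on a wait queue without sleeping, and the handler then reports readiness bits. A minimal sketch; struct mydev and its data_ready flag are illustrative:

	#include <linux/poll.h>

	struct mydev {				/* hypothetical per-device state */
		wait_queue_head_t readq;
		bool data_ready;
	};

	static __poll_t mydev_poll(struct file *file, poll_table *wait)
	{
		struct mydev *dev = file->private_data;
		__poll_t mask = 0;

		poll_wait(file, &dev->readq, wait);	/* never blocks here */
		if (dev->data_ready)
			mask |= EPOLLIN | EPOLLRDNORM;
		return mask;
	}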
p                  30 include/linux/psi.h void cgroup_move_task(struct task_struct *p, struct css_set *to);
p                  55 include/linux/psi.h static inline void cgroup_move_task(struct task_struct *p, struct css_set *to)
p                  57 include/linux/psi.h 	rcu_assign_pointer(p->cgroups, to);
p                 137 include/linux/ptp_clock_kernel.h 	int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts);
p                 189 include/linux/qed/qed_chain.h #define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
p                 190 include/linux/qed/qed_chain.h #define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
p                 300 include/linux/qed/qed_chain.h #define is_unusable_idx(p, idx)	\
p                 301 include/linux/qed/qed_chain.h 	(((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
p                 303 include/linux/qed/qed_chain.h #define is_unusable_idx_u32(p, idx) \
p                 304 include/linux/qed/qed_chain.h 	(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
p                 305 include/linux/qed/qed_chain.h #define is_unusable_next_idx(p, idx)				 \
p                 306 include/linux/qed/qed_chain.h 	((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
p                 307 include/linux/qed/qed_chain.h 	 (p)->usable_per_page)
p                 309 include/linux/qed/qed_chain.h #define is_unusable_next_idx_u32(p, idx)			 \
p                 310 include/linux/qed/qed_chain.h 	((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
p                 311 include/linux/qed/qed_chain.h 	 (p)->usable_per_page)
p                 313 include/linux/qed/qed_chain.h #define test_and_skip(p, idx)						   \
p                 315 include/linux/qed/qed_chain.h 		if (is_chain_u16(p)) {					   \
p                 316 include/linux/qed/qed_chain.h 			if (is_unusable_idx(p, idx))			   \
p                 317 include/linux/qed/qed_chain.h 				(p)->u.chain16.idx += (p)->elem_unusable;  \
p                 319 include/linux/qed/qed_chain.h 			if (is_unusable_idx_u32(p, idx))		   \
p                 320 include/linux/qed/qed_chain.h 				(p)->u.chain32.idx += (p)->elem_unusable;  \
p                 157 include/linux/rbtree_augmented.h static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
p                 159 include/linux/rbtree_augmented.h 	rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
p                 163 include/linux/rbtree_augmented.h 				       struct rb_node *p, int color)
p                 165 include/linux/rbtree_augmented.h 	rb->__rb_parent_color = (unsigned long)p | color;
p                 304 include/linux/rcupdate.h #define rcu_check_sparse(p, space) \
p                 305 include/linux/rcupdate.h 	((void)(((typeof(*p) space *)p) == p))
p                 307 include/linux/rcupdate.h #define rcu_check_sparse(p, space)
p                 310 include/linux/rcupdate.h #define __rcu_access_pointer(p, space) \
p                 312 include/linux/rcupdate.h 	typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
p                 313 include/linux/rcupdate.h 	rcu_check_sparse(p, space); \
p                 314 include/linux/rcupdate.h 	((typeof(*p) __force __kernel *)(_________p1)); \
p                 316 include/linux/rcupdate.h #define __rcu_dereference_check(p, c, space) \
p                 319 include/linux/rcupdate.h 	typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
p                 321 include/linux/rcupdate.h 	rcu_check_sparse(p, space); \
p                 322 include/linux/rcupdate.h 	((typeof(*p) __force __kernel *)(________p1)); \
p                 324 include/linux/rcupdate.h #define __rcu_dereference_protected(p, c, space) \
p                 327 include/linux/rcupdate.h 	rcu_check_sparse(p, space); \
p                 328 include/linux/rcupdate.h 	((typeof(*p) __force __kernel *)(p)); \
p                 330 include/linux/rcupdate.h #define rcu_dereference_raw(p) \
p                 333 include/linux/rcupdate.h 	typeof(p) ________p1 = READ_ONCE(p); \
p                 334 include/linux/rcupdate.h 	((typeof(*p) __force __kernel *)(________p1)); \
p                 374 include/linux/rcupdate.h #define rcu_assign_pointer(p, v)					      \
p                 377 include/linux/rcupdate.h 	rcu_check_sparse(p, __rcu);					      \
p                 380 include/linux/rcupdate.h 		WRITE_ONCE((p), (typeof(p))(_r_a_p__v));		      \
p                 382 include/linux/rcupdate.h 		smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
p                 420 include/linux/rcupdate.h #define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
p                 455 include/linux/rcupdate.h #define rcu_dereference_check(p, c) \
p                 456 include/linux/rcupdate.h 	__rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)
p                 465 include/linux/rcupdate.h #define rcu_dereference_bh_check(p, c) \
p                 466 include/linux/rcupdate.h 	__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
p                 475 include/linux/rcupdate.h #define rcu_dereference_sched_check(p, c) \
p                 476 include/linux/rcupdate.h 	__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
p                 486 include/linux/rcupdate.h #define rcu_dereference_raw_check(p) __rcu_dereference_check((p), 1, __rcu)
p                 504 include/linux/rcupdate.h #define rcu_dereference_protected(p, c) \
p                 505 include/linux/rcupdate.h 	__rcu_dereference_protected((p), (c), __rcu)
p                 514 include/linux/rcupdate.h #define rcu_dereference(p) rcu_dereference_check(p, 0)
p                 522 include/linux/rcupdate.h #define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)
p                 530 include/linux/rcupdate.h #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
p                 552 include/linux/rcupdate.h #define rcu_pointer_handoff(p) (p)
p                 778 include/linux/rcupdate.h #define RCU_INIT_POINTER(p, v) \
p                 780 include/linux/rcupdate.h 		rcu_check_sparse(p, __rcu); \
p                 781 include/linux/rcupdate.h 		WRITE_ONCE(p, RCU_INITIALIZER(v)); \
p                 791 include/linux/rcupdate.h #define RCU_POINTER_INITIALIZER(p, v) \
p                 792 include/linux/rcupdate.h 		.p = RCU_INITIALIZER(v)
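The rcupdate.h entries above define the publish/read pair: rcu_assign_pointer() publishes a fully initialised object, rcu_dereference() reads it inside an RCU read-side critical section, and rcu_dereference_protected() is for updaters holding the update-side lock. A minimal hedged sketch; struct conf and the single global pointer are illustrative, and the updater is assumed to be serialised by a lock not shown here:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct conf {
		int value;
		struct rcu_head rcu;
	};

	static struct conf __rcu *active_conf;

	static int publish_conf(int value)
	{
		struct conf *new = kzalloc(sizeof(*new), GFP_KERNEL);
		struct conf *old;

		if (!new)
			return -ENOMEM;
		new->value = value;

		old = rcu_dereference_protected(active_conf, 1);	/* update side is serialised */
		rcu_assign_pointer(active_conf, new);			/* publish after full init */
		if (old)
			kfree_rcu(old, rcu);				/* free after a grace period */
		return 0;
	}

	static int read_conf(void)
	{
		struct conf *c;
		int value = -1;

		rcu_read_lock();
		c = rcu_dereference(active_conf);
		if (c)
			value = c->value;
		rcu_read_unlock();
		return value;
	}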
p                  10 include/linux/resource.h void getrusage(struct task_struct *p, int who, struct rusage *ru);
p                  85 include/linux/rhashtable-types.h 	struct rhashtable_params	p;
p                 122 include/linux/rhashtable-types.h 	struct rhash_head *p;
p                 116 include/linux/rhashtable.h 	return (char *)he - ht->p.head_offset;
p                 133 include/linux/rhashtable.h 		hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
p                 144 include/linux/rhashtable.h 		unsigned int key_len = ht->p.key_len;
p                 172 include/linux/rhashtable.h 							    ht->p.key_len,
p                 187 include/linux/rhashtable.h 	       (!ht->p.max_size || tbl->size < ht->p.max_size);
p                 200 include/linux/rhashtable.h 	       tbl->size > ht->p.min_size;
p                 212 include/linux/rhashtable.h 		(!ht->p.max_size || tbl->size < ht->p.max_size);
p                 272 include/linux/rhashtable.h #define rht_dereference(p, ht) \
p                 273 include/linux/rhashtable.h 	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
p                 275 include/linux/rhashtable.h #define rht_dereference_rcu(p, ht) \
p                 276 include/linux/rhashtable.h 	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))
p                 278 include/linux/rhashtable.h #define rht_dereference_bucket(p, tbl, hash) \
p                 279 include/linux/rhashtable.h 	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
p                 281 include/linux/rhashtable.h #define rht_dereference_bucket_rcu(p, tbl, hash) \
p                 282 include/linux/rhashtable.h 	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
p                 370 include/linux/rhashtable.h 	struct rhash_head __rcu *p = __rht_ptr(bkt);
p                 372 include/linux/rhashtable.h 	return rcu_dereference(p);
p                 392 include/linux/rhashtable.h 	struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
p                 396 include/linux/rhashtable.h 	rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(0)));
p                 403 include/linux/rhashtable.h 	struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
p                 408 include/linux/rhashtable.h 	rcu_assign_pointer(*p, obj);
p                 584 include/linux/rhashtable.h 	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
p                 910 include/linux/rhashtable.h 	BUG_ON(ht->p.obj_hashfn);
p                 912 include/linux/rhashtable.h 	ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
p                 936 include/linux/rhashtable.h 	BUG_ON(ht->p.obj_hashfn);
p                 938 include/linux/rhashtable.h 	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
p                 963 include/linux/rhashtable.h 	BUG_ON(!ht->p.obj_hashfn || !key);
p                 987 include/linux/rhashtable.h 	BUG_ON(!ht->p.obj_hashfn || !key);
p                1064 include/linux/rhashtable.h 		if (unlikely(ht->p.automatic_shrinking &&
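The rhashtable entries above revolve around struct rhashtable_params (key offset/length plus the embedded rhash_head) and the *_fast insert/lookup helpers. A hedged sketch; struct obj is illustrative and rhashtable_init() with these params is assumed to have been called on the table beforehand:

	#include <linux/rhashtable.h>
	#include <linux/slab.h>

	struct obj {				/* hypothetical hashed object */
		u32 key;
		struct rhash_head node;
	};

	static const struct rhashtable_params obj_params = {
		.key_len	= sizeof(u32),
		.key_offset	= offsetof(struct obj, key),
		.head_offset	= offsetof(struct obj, node),
		.automatic_shrinking = true,
	};

	static int add_obj(struct rhashtable *ht, u32 key)
	{
		struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

		if (!o)
			return -ENOMEM;
		o->key = key;
		return rhashtable_insert_fast(ht, &o->node, obj_params);
	}

	static struct obj *find_obj(struct rhashtable *ht, u32 key)
	{
		return rhashtable_lookup_fast(ht, &key, obj_params);
	}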
p                  60 include/linux/rtnetlink.h #define rcu_dereference_rtnl(p)					\
p                  61 include/linux/rtnetlink.h 	rcu_dereference_check(p, lockdep_rtnl_is_held())
p                  70 include/linux/rtnetlink.h #define rcu_dereference_bh_rtnl(p)				\
p                  71 include/linux/rtnetlink.h 	rcu_dereference_bh_check(p, lockdep_rtnl_is_held())
p                  80 include/linux/rtnetlink.h #define rtnl_dereference(p)					\
p                  81 include/linux/rtnetlink.h 	rcu_dereference_protected(p, lockdep_rtnl_is_held())
p                1337 include/linux/sched.h static inline int pid_alive(const struct task_struct *p)
p                1339 include/linux/sched.h 	return p->thread_pid != NULL;
p                1500 include/linux/sched.h #define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
p                1524 include/linux/sched.h 	static inline bool task_##func(struct task_struct *p)		\
p                1525 include/linux/sched.h 	{ return test_bit(PFA_##name, &p->atomic_flags); }
p                1528 include/linux/sched.h 	static inline void task_set_##func(struct task_struct *p)	\
p                1529 include/linux/sched.h 	{ set_bit(PFA_##name, &p->atomic_flags); }
p                1532 include/linux/sched.h 	static inline void task_clear_##func(struct task_struct *p)	\
p                1533 include/linux/sched.h 	{ clear_bit(PFA_##name, &p->atomic_flags); }
p                1572 include/linux/sched.h extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
p                1574 include/linux/sched.h extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
p                1575 include/linux/sched.h extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
p                1577 include/linux/sched.h static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
p                1580 include/linux/sched.h static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
p                1588 include/linux/sched.h extern int yield_to(struct task_struct *p, bool preempt);
p                1589 include/linux/sched.h extern void set_user_nice(struct task_struct *p, long nice);
p                1590 include/linux/sched.h extern int task_prio(const struct task_struct *p);
p                1598 include/linux/sched.h static inline int task_nice(const struct task_struct *p)
p                1600 include/linux/sched.h 	return PRIO_TO_NICE((p)->static_prio);
p                1603 include/linux/sched.h extern int can_nice(const struct task_struct *p, const int nice);
p                1604 include/linux/sched.h extern int task_curr(const struct task_struct *p);
p                1619 include/linux/sched.h static inline bool is_idle_task(const struct task_struct *p)
p                1621 include/linux/sched.h 	return !!(p->flags & PF_IDLE);
p                1625 include/linux/sched.h extern void ia64_set_curr_task(int cpu, struct task_struct *p);
p                1701 include/linux/sched.h static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
p                1814 include/linux/sched.h static inline unsigned int task_cpu(const struct task_struct *p)
p                1817 include/linux/sched.h 	return READ_ONCE(p->cpu);
p                1819 include/linux/sched.h 	return READ_ONCE(task_thread_info(p)->cpu);
p                1823 include/linux/sched.h extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
p                1827 include/linux/sched.h static inline unsigned int task_cpu(const struct task_struct *p)
p                1832 include/linux/sched.h static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
p                  11 include/linux/sched/autogroup.h extern void sched_autogroup_create_attach(struct task_struct *p);
p                  12 include/linux/sched/autogroup.h extern void sched_autogroup_detach(struct task_struct *p);
p                  15 include/linux/sched/autogroup.h extern void sched_autogroup_exit_task(struct task_struct *p);
p                  17 include/linux/sched/autogroup.h extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
p                  18 include/linux/sched/autogroup.h extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
p                  21 include/linux/sched/autogroup.h static inline void sched_autogroup_create_attach(struct task_struct *p) { }
p                  22 include/linux/sched/autogroup.h static inline void sched_autogroup_detach(struct task_struct *p) { }
p                  25 include/linux/sched/autogroup.h static inline void sched_autogroup_exit_task(struct task_struct *p) { }
p                  55 include/linux/sched/cputime.h extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
p                  56 include/linux/sched/cputime.h extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
p                  18 include/linux/sched/deadline.h static inline int dl_task(struct task_struct *p)
p                  20 include/linux/sched/deadline.h 	return dl_prio(p->prio);
p                  31 include/linux/sched/deadline.h extern void dl_add_task_root_domain(struct task_struct *p);
p                  35 include/linux/sched/debug.h extern void sched_show_task(struct task_struct *p);
p                  39 include/linux/sched/debug.h extern void proc_sched_show_task(struct task_struct *p,
p                  41 include/linux/sched/debug.h extern void proc_sched_set_task(struct task_struct *p);
p                  20 include/linux/sched/numa_balancing.h extern pid_t task_numa_group_id(struct task_struct *p);
p                  22 include/linux/sched/numa_balancing.h extern void task_numa_free(struct task_struct *p, bool final);
p                  23 include/linux/sched/numa_balancing.h extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
p                  30 include/linux/sched/numa_balancing.h static inline pid_t task_numa_group_id(struct task_struct *p)
p                  37 include/linux/sched/numa_balancing.h static inline void task_numa_free(struct task_struct *p, bool final)
p                  40 include/linux/sched/numa_balancing.h static inline bool should_numa_migrate_memory(struct task_struct *p,
p                  41 include/linux/sched/prio.h #define USER_PRIO(p)		((p)-MAX_RT_PRIO)
p                  42 include/linux/sched/prio.h #define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
p                  16 include/linux/sched/rt.h static inline int rt_task(struct task_struct *p)
p                  18 include/linux/sched/rt.h 	return rt_prio(p->prio);
p                  36 include/linux/sched/rt.h static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p)
p                  38 include/linux/sched/rt.h 	return p->pi_top_task;
p                  40 include/linux/sched/rt.h extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task);
p                  41 include/linux/sched/rt.h extern void rt_mutex_adjust_pi(struct task_struct *p);
p                  51 include/linux/sched/rt.h # define rt_mutex_adjust_pi(p)		do { } while (0)
p                 332 include/linux/sched/signal.h extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
p                 335 include/linux/sched/signal.h extern int zap_other_threads(struct task_struct *p);
p                 347 include/linux/sched/signal.h static inline int signal_pending(struct task_struct *p)
p                 349 include/linux/sched/signal.h 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
p                 352 include/linux/sched/signal.h static inline int __fatal_signal_pending(struct task_struct *p)
p                 354 include/linux/sched/signal.h 	return unlikely(sigismember(&p->pending.signal, SIGKILL));
p                 357 include/linux/sched/signal.h static inline int fatal_signal_pending(struct task_struct *p)
p                 359 include/linux/sched/signal.h 	return signal_pending(p) && __fatal_signal_pending(p);
p                 362 include/linux/sched/signal.h static inline int signal_pending_state(long state, struct task_struct *p)
p                 366 include/linux/sched/signal.h 	if (!signal_pending(p))
p                 369 include/linux/sched/signal.h 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
p                 538 include/linux/sched/signal.h static inline void sas_ss_reset(struct task_struct *p)
p                 540 include/linux/sched/signal.h 	p->sas_ss_sp = 0;
p                 541 include/linux/sched/signal.h 	p->sas_ss_size = 0;
p                 542 include/linux/sched/signal.h 	p->sas_ss_flags = SS_DISABLE;
p                 562 include/linux/sched/signal.h #define next_task(p) \
p                 563 include/linux/sched/signal.h 	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
p                 565 include/linux/sched/signal.h #define for_each_process(p) \
p                 566 include/linux/sched/signal.h 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
p                 583 include/linux/sched/signal.h #define for_each_thread(p, t)		\
p                 584 include/linux/sched/signal.h 	__for_each_thread((p)->signal, t)
p                 587 include/linux/sched/signal.h #define for_each_process_thread(p, t)	\
p                 588 include/linux/sched/signal.h 	for_each_process(p) for_each_thread(p, t)
p                 590 include/linux/sched/signal.h typedef int (*proc_visitor)(struct task_struct *p, void *data);
p                 629 include/linux/sched/signal.h static inline bool thread_group_leader(struct task_struct *p)
p                 631 include/linux/sched/signal.h 	return p->exit_signal >= 0;
p                 640 include/linux/sched/signal.h static inline bool has_group_leader_pid(struct task_struct *p)
p                 642 include/linux/sched/signal.h 	return task_pid(p) == task_tgid(p);
p                 651 include/linux/sched/signal.h static inline struct task_struct *next_thread(const struct task_struct *p)
p                 653 include/linux/sched/signal.h 	return list_entry_rcu(p->thread_group.next,
p                 657 include/linux/sched/signal.h static inline int thread_group_empty(struct task_struct *p)
p                 659 include/linux/sched/signal.h 	return list_empty(&p->thread_group);
p                 662 include/linux/sched/signal.h #define delay_group_leader(p) \
p                 663 include/linux/sched/signal.h 		(thread_group_leader(p) && !thread_group_empty(p))
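The for_each_process()/for_each_thread() entries above iterate the task list and must run under RCU protection (or tasklist_lock). A minimal sketch that simply counts threads:

	#include <linux/rcupdate.h>
	#include <linux/sched/signal.h>

	static unsigned int count_threads(void)
	{
		struct task_struct *g, *t;
		unsigned int nr = 0;

		rcu_read_lock();			/* the traversal is RCU-protected */
		for_each_process_thread(g, t)
			nr++;
		rcu_read_unlock();
		return nr;
	}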
p                  50 include/linux/sched/task.h extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
p                  51 include/linux/sched/task.h extern void sched_dead(struct task_struct *p);
p                  59 include/linux/sched/task.h extern void release_task(struct task_struct * p);
p                  72 include/linux/sched/task.h 		struct task_struct *p, unsigned long tls)
p                  74 include/linux/sched/task.h 	return copy_thread(clone_flags, sp, arg, p);
p                 166 include/linux/sched/task.h static inline void task_lock(struct task_struct *p)
p                 168 include/linux/sched/task.h 	spin_lock(&p->alloc_lock);
p                 171 include/linux/sched/task.h static inline void task_unlock(struct task_struct *p)
p                 173 include/linux/sched/task.h 	spin_unlock(&p->alloc_lock);
p                  35 include/linux/sched/task_stack.h static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
p                  37 include/linux/sched/task_stack.h 	*task_thread_info(p) = *task_thread_info(org);
p                  38 include/linux/sched/task_stack.h 	task_thread_info(p)->task = p;
p                  50 include/linux/sched/task_stack.h static inline unsigned long *end_of_stack(struct task_struct *p)
p                  53 include/linux/sched/task_stack.h 	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
p                  55 include/linux/sched/task_stack.h 	return (unsigned long *)(task_thread_info(p) + 1);
p                  91 include/linux/sched/task_stack.h static inline unsigned long stack_not_used(struct task_struct *p)
p                  93 include/linux/sched/task_stack.h 	unsigned long *n = end_of_stack(p);
p                 104 include/linux/sched/task_stack.h 	return (unsigned long)end_of_stack(p) - (unsigned long)n;
p                 106 include/linux/sched/task_stack.h 	return (unsigned long)n - (unsigned long)end_of_stack(p);
p                 228 include/linux/sched/topology.h static inline int task_node(const struct task_struct *p)
p                 230 include/linux/sched/topology.h 	return cpu_to_node(task_cpu(p));
p                 352 include/linux/sctp.h 	struct sctp_paramhdr p;
p                 156 include/linux/security.h extern int cap_task_setscheduler(struct task_struct *p);
p                 157 include/linux/security.h extern int cap_task_setioprio(struct task_struct *p, int ioprio);
p                 158 include/linux/security.h extern int cap_task_setnice(struct task_struct *p, int nice);
p                 391 include/linux/security.h int security_task_setpgid(struct task_struct *p, pid_t pgid);
p                 392 include/linux/security.h int security_task_getpgid(struct task_struct *p);
p                 393 include/linux/security.h int security_task_getsid(struct task_struct *p);
p                 394 include/linux/security.h void security_task_getsecid(struct task_struct *p, u32 *secid);
p                 395 include/linux/security.h int security_task_setnice(struct task_struct *p, int nice);
p                 396 include/linux/security.h int security_task_setioprio(struct task_struct *p, int ioprio);
p                 397 include/linux/security.h int security_task_getioprio(struct task_struct *p);
p                 400 include/linux/security.h int security_task_setrlimit(struct task_struct *p, unsigned int resource,
p                 402 include/linux/security.h int security_task_setscheduler(struct task_struct *p);
p                 403 include/linux/security.h int security_task_getscheduler(struct task_struct *p);
p                 404 include/linux/security.h int security_task_movememory(struct task_struct *p);
p                 405 include/linux/security.h int security_task_kill(struct task_struct *p, struct kernel_siginfo *info,
p                 409 include/linux/security.h void security_task_to_inode(struct task_struct *p, struct inode *inode);
p                 434 include/linux/security.h int security_getprocattr(struct task_struct *p, const char *lsm, char *name,
p                1029 include/linux/security.h static inline int security_task_setpgid(struct task_struct *p, pid_t pgid)
p                1034 include/linux/security.h static inline int security_task_getpgid(struct task_struct *p)
p                1039 include/linux/security.h static inline int security_task_getsid(struct task_struct *p)
p                1044 include/linux/security.h static inline void security_task_getsecid(struct task_struct *p, u32 *secid)
p                1049 include/linux/security.h static inline int security_task_setnice(struct task_struct *p, int nice)
p                1051 include/linux/security.h 	return cap_task_setnice(p, nice);
p                1054 include/linux/security.h static inline int security_task_setioprio(struct task_struct *p, int ioprio)
p                1056 include/linux/security.h 	return cap_task_setioprio(p, ioprio);
p                1059 include/linux/security.h static inline int security_task_getioprio(struct task_struct *p)
p                1071 include/linux/security.h static inline int security_task_setrlimit(struct task_struct *p,
p                1078 include/linux/security.h static inline int security_task_setscheduler(struct task_struct *p)
p                1080 include/linux/security.h 	return cap_task_setscheduler(p);
p                1083 include/linux/security.h static inline int security_task_getscheduler(struct task_struct *p)
p                1088 include/linux/security.h static inline int security_task_movememory(struct task_struct *p)
p                1093 include/linux/security.h static inline int security_task_kill(struct task_struct *p,
p                1108 include/linux/security.h static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
p                1217 include/linux/security.h static inline int security_getprocattr(struct task_struct *p, const char *lsm,
p                  23 include/linux/selection.h extern int sel_loadlut(char __user *p);
p                  42 include/linux/selection.h extern void getconsxy(struct vc_data *vc, unsigned char *p);
p                  43 include/linux/selection.h extern void putconsxy(struct vc_data *vc, unsigned char *p);
p                 108 include/linux/seq_file.h char *mangle_path(char *s, const char *p, const char *esc);
p                 396 include/linux/serial_core.h int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
p                 271 include/linux/signal.h 				struct task_struct *p, enum pid_type type);
p                 273 include/linux/signal.h 			       struct task_struct *p, enum pid_type type);
p                 364 include/linux/skbuff.h static inline bool skb_frag_must_loop(struct page *p)
p                 367 include/linux/skbuff.h 	if (PageHighMem(p))
p                 390 include/linux/skbuff.h #define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied)	\
p                 391 include/linux/skbuff.h 	for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT),		\
p                 393 include/linux/skbuff.h 	     p_len = skb_frag_must_loop(p) ?				\
p                 397 include/linux/skbuff.h 	     copied += p_len, p++, p_off = 0,				\
p                 411 include/linux/slab.h static __always_inline void kfree_bulk(size_t size, void **p)
p                 413 include/linux/slab.h 	kmem_cache_free_bulk(NULL, size, p);
p                  56 include/linux/slub_def.h #define slub_set_percpu_partial(c, p)		\
p                  58 include/linux/slub_def.h 	slub_percpu_partial(c) = (p)->next;	\
p                  65 include/linux/slub_def.h #define slub_set_percpu_partial(c, p)
p                 172 include/linux/slub_def.h void *fixup_red_left(struct kmem_cache *s, void *p);
p                  12 include/linux/soc/qcom/smem.h phys_addr_t qcom_smem_virt_to_phys(void *p);
p                 110 include/linux/srcu.h #define srcu_dereference_check(p, ssp, c) \
p                 111 include/linux/srcu.h 	__rcu_dereference_check((p), (c) || srcu_read_lock_held(ssp), __rcu)
p                 123 include/linux/srcu.h #define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0)
p                 131 include/linux/srcu.h #define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1)
p                 121 include/linux/string.h static inline void *memset_l(unsigned long *p, unsigned long v,
p                 125 include/linux/string.h 		return memset32((uint32_t *)p, v, n);
p                 127 include/linux/string.h 		return memset64((uint64_t *)p, v, n);
p                 130 include/linux/string.h static inline void *memset_p(void **p, void *v, __kernel_size_t n)
p                 133 include/linux/string.h 		return memset32((uint32_t *)p, (uintptr_t)v, n);
p                 135 include/linux/string.h 		return memset64((uint64_t *)p, (uintptr_t)v, n);
p                 272 include/linux/string.h __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
p                 274 include/linux/string.h 	size_t p_size = __builtin_object_size(p, 0);
p                 279 include/linux/string.h 	return __builtin_strncpy(p, q, size);
p                 282 include/linux/string.h __FORTIFY_INLINE char *strcat(char *p, const char *q)
p                 284 include/linux/string.h 	size_t p_size = __builtin_object_size(p, 0);
p                 286 include/linux/string.h 		return __builtin_strcat(p, q);
p                 287 include/linux/string.h 	if (strlcat(p, q, p_size) >= p_size)
p                 289 include/linux/string.h 	return p;
p                 292 include/linux/string.h __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
p                 295 include/linux/string.h 	size_t p_size = __builtin_object_size(p, 0);
p                 299 include/linux/string.h 	    (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
p                 300 include/linux/string.h 		return __builtin_strlen(p);
p                 301 include/linux/string.h 	ret = strnlen(p, p_size);
p                 308 include/linux/string.h __FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
p                 310 include/linux/string.h 	size_t p_size = __builtin_object_size(p, 0);
p                 311 include/linux/string.h 	__kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
p                 319 include/linux/string.h __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
p                 322 include/linux/string.h 	size_t p_size = __builtin_object_size(p, 0);
p                 325 include/linux/string.h 		return __real_strlcpy(p, q, size);
p                 333 include/linux/string.h 		__builtin_memcpy(p, q, len);
p                 334 include/linux/string.h 		p[len] = '\0';
p                 340 include/linux/string.h __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
p                 343 include/linux/string.h 	size_t p_size = __builtin_object_size(p, 0);
p                 346 include/linux/string.h 		return __builtin_strncat(p, q, count);
p                 347 include/linux/string.h 	p_len = strlen(p);
p                 351 include/linux/string.h 	__builtin_memcpy(p + p_len, q, copy_len);
p                 352 include/linux/string.h 	p[p_len + copy_len] = '\0';
p                 353 include/linux/string.h 	return p;
p                 356 include/linux/string.h __FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
p                 358 include/linux/string.h 	size_t p_size = __builtin_object_size(p, 0);
p                 363 include/linux/string.h 	return __builtin_memset(p, c, size);
p                 366 include/linux/string.h __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
p                 368 include/linux/string.h 	size_t p_size = __builtin_object_size(p, 0);
p                 378 include/linux/string.h 	return __builtin_memcpy(p, q, size);
p                 381 include/linux/string.h __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
p                 383 include/linux/string.h 	size_t p_size = __builtin_object_size(p, 0);
p                 393 include/linux/string.h 	return __builtin_memmove(p, q, size);
p                 397 include/linux/string.h __FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size)
p                 399 include/linux/string.h 	size_t p_size = __builtin_object_size(p, 0);
p                 404 include/linux/string.h 	return __real_memscan(p, c, size);
p                 407 include/linux/string.h __FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
p                 409 include/linux/string.h 	size_t p_size = __builtin_object_size(p, 0);
p                 419 include/linux/string.h 	return __builtin_memcmp(p, q, size);
p                 422 include/linux/string.h __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
p                 424 include/linux/string.h 	size_t p_size = __builtin_object_size(p, 0);
p                 429 include/linux/string.h 	return __builtin_memchr(p, c, size);
p                 433 include/linux/string.h __FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size)
p                 435 include/linux/string.h 	size_t p_size = __builtin_object_size(p, 0);
p                 440 include/linux/string.h 	return __real_memchr_inv(p, c, size);
p                 444 include/linux/string.h __FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
p                 446 include/linux/string.h 	size_t p_size = __builtin_object_size(p, 0);
p                 451 include/linux/string.h 	return __real_kmemdup(p, size, gfp);
p                 455 include/linux/string.h __FORTIFY_INLINE char *strcpy(char *p, const char *q)
p                 457 include/linux/string.h 	size_t p_size = __builtin_object_size(p, 0);
p                 460 include/linux/string.h 		return __builtin_strcpy(p, q);
p                 461 include/linux/string.h 	memcpy(p, q, strlen(q) + 1);
p                 462 include/linux/string.h 	return p;
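A stand-alone sketch of the bounds check these fortified string helpers perform. It uses a GNU C statement-expression macro so __builtin_object_size() is evaluated in the caller, where the destination's size is visible; strcpy_checked() and its refusal-return are illustrative stand-ins for the kernel's inline helpers and fortify_panic(), not the real API.

#include <stdio.h>
#include <string.h>

/* Copy q into p only if it provably fits; return -1 where the kernel's
 * fortified strcpy() would call fortify_panic(). Written as a macro so
 * __builtin_object_size() sees the caller's destination object. */
#define strcpy_checked(p, q) ({						\
	size_t __p_size = __builtin_object_size(p, 0);			\
	int __ret = -1;							\
	if (__p_size == (size_t)-1 || strlen(q) + 1 <= __p_size) {	\
		memcpy(p, q, strlen(q) + 1);				\
		__ret = 0;						\
	}								\
	__ret;								\
})

int main(void)
{
	char buf[8];

	if (__builtin_object_size(buf, 0) == (size_t)-1) {
		puts("object size not visible here (try building with -O2)");
		return 0;
	}
	printf("short copy:    %d\n", strcpy_checked(buf, "ok"));		/* fits: 0 */
	printf("oversize copy: %d\n", strcpy_checked(buf, "far too long for buf")); /* rejected: -1 */
	return 0;
}

Like FORTIFY_SOURCE itself, the check only fires when the compiler can determine the destination's size at the call site; when it cannot, the copy proceeds unchecked.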
p                 233 include/linux/sunrpc/cache.h extern void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos);
p                 234 include/linux/sunrpc/cache.h extern void cache_seq_stop_rcu(struct seq_file *file, void *p);
p                 329 include/linux/sunrpc/gss_krb5.h gss_krb5_make_confounder(char *p, u32 conflen);
p                  74 include/linux/sunrpc/stats.h static inline void rpc_proc_unregister(struct net *net, const char *p) {}
p                  75 include/linux/sunrpc/stats.h static inline void rpc_proc_zero(const struct rpc_program *p) {}
p                  79 include/linux/sunrpc/stats.h static inline void svc_proc_unregister(struct net *net, const char *p) {}
p                 343 include/linux/sunrpc/svc.h xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
p                 345 include/linux/sunrpc/svc.h 	char *cp = (char *)p;
p                 352 include/linux/sunrpc/svc.h xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p)
p                 355 include/linux/sunrpc/svc.h 	char *cp = (char*)p;
p                 524 include/linux/sunrpc/svc.h 					     struct kvec *first, void *p,
p                 172 include/linux/sunrpc/svc_rdma.h 				    struct svc_rdma_recv_ctxt *head, __be32 *p);
p                 123 include/linux/sunrpc/xdr.h __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int len);
p                 124 include/linux/sunrpc/xdr.h __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int len);
p                 125 include/linux/sunrpc/xdr.h __be32 *xdr_encode_string(__be32 *p, const char *s);
p                 126 include/linux/sunrpc/xdr.h __be32 *xdr_decode_string_inplace(__be32 *p, char **sp, unsigned int *lenp,
p                 128 include/linux/sunrpc/xdr.h __be32 *xdr_encode_netobj(__be32 *p, const struct xdr_netobj *);
p                 129 include/linux/sunrpc/xdr.h __be32 *xdr_decode_netobj(__be32 *p, struct xdr_netobj *);
p                 138 include/linux/sunrpc/xdr.h static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int len)
p                 140 include/linux/sunrpc/xdr.h 	return xdr_encode_opaque(p, s, len);
p                 147 include/linux/sunrpc/xdr.h xdr_encode_hyper(__be32 *p, __u64 val)
p                 149 include/linux/sunrpc/xdr.h 	put_unaligned_be64(val, p);
p                 150 include/linux/sunrpc/xdr.h 	return p + 2;
p                 154 include/linux/sunrpc/xdr.h xdr_decode_hyper(__be32 *p, __u64 *valp)
p                 156 include/linux/sunrpc/xdr.h 	*valp = get_unaligned_be64(p);
p                 157 include/linux/sunrpc/xdr.h 	return p + 2;
p                 161 include/linux/sunrpc/xdr.h xdr_decode_opaque_fixed(__be32 *p, void *ptr, unsigned int len)
p                 163 include/linux/sunrpc/xdr.h 	memcpy(ptr, p, len);
p                 164 include/linux/sunrpc/xdr.h 	return p + XDR_QUADLEN(len);
p                 178 include/linux/sunrpc/xdr.h xdr_adjust_iovec(struct kvec *iov, __be32 *p)
p                 180 include/linux/sunrpc/xdr.h 	return iov->iov_len = ((u8 *) p - (u8 *) iov->iov_base);
p                 224 include/linux/sunrpc/xdr.h extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
p                 231 include/linux/sunrpc/xdr.h 	__be32 *p;		/* start of available buffer */
p                 252 include/linux/sunrpc/xdr.h 			    __be32 *p, struct rpc_rqst *rqst);
p                 261 include/linux/sunrpc/xdr.h 			    __be32 *p, struct rpc_rqst *rqst);
p                 319 include/linux/sunrpc/xdr.h 	__be32 *p = xdr_reserve_space(xdr, len);
p                 321 include/linux/sunrpc/xdr.h 	if (unlikely(!p))
p                 323 include/linux/sunrpc/xdr.h 	*p = cpu_to_be32(n);
p                 340 include/linux/sunrpc/xdr.h 	__be32 *p = xdr_reserve_space(xdr, len);
p                 342 include/linux/sunrpc/xdr.h 	if (unlikely(!p))
p                 344 include/linux/sunrpc/xdr.h 	xdr_encode_hyper(p, n);
p                 362 include/linux/sunrpc/xdr.h 	__be32 *p = xdr_reserve_space(xdr, count);
p                 364 include/linux/sunrpc/xdr.h 	if (unlikely(!p)) {
p                 368 include/linux/sunrpc/xdr.h 	xdr_encode_opaque(p, NULL, len);
p                 369 include/linux/sunrpc/xdr.h 	*ptr = ++p;
p                 386 include/linux/sunrpc/xdr.h 	__be32 *p = xdr_reserve_space(xdr, len);
p                 388 include/linux/sunrpc/xdr.h 	if (unlikely(!p))
p                 390 include/linux/sunrpc/xdr.h 	xdr_encode_opaque_fixed(p, ptr, len);
p                 408 include/linux/sunrpc/xdr.h 	__be32 *p = xdr_reserve_space(xdr, count);
p                 410 include/linux/sunrpc/xdr.h 	if (unlikely(!p))
p                 412 include/linux/sunrpc/xdr.h 	xdr_encode_opaque(p, ptr, len);
p                 431 include/linux/sunrpc/xdr.h 	__be32 *p = xdr_reserve_space(xdr, ret);
p                 433 include/linux/sunrpc/xdr.h 	if (unlikely(!p))
p                 435 include/linux/sunrpc/xdr.h 	*p++ = cpu_to_be32(array_size);
p                 436 include/linux/sunrpc/xdr.h 	for (; array_size > 0; p++, array++, array_size--)
p                 437 include/linux/sunrpc/xdr.h 		*p = cpu_to_be32p(array);
p                 454 include/linux/sunrpc/xdr.h 	__be32 *p = xdr_inline_decode(xdr, count);
p                 456 include/linux/sunrpc/xdr.h 	if (unlikely(!p))
p                 458 include/linux/sunrpc/xdr.h 	*ptr = be32_to_cpup(p);
p                 475 include/linux/sunrpc/xdr.h 	__be32 *p = xdr_inline_decode(xdr, len);
p                 477 include/linux/sunrpc/xdr.h 	if (unlikely(!p))
p                 479 include/linux/sunrpc/xdr.h 	xdr_decode_opaque_fixed(p, ptr, len);
p                 502 include/linux/sunrpc/xdr.h 	__be32 *p;
p                 509 include/linux/sunrpc/xdr.h 		p = xdr_inline_decode(xdr, len);
p                 510 include/linux/sunrpc/xdr.h 		if (unlikely(!p))
p                 514 include/linux/sunrpc/xdr.h 		*ptr = p;
p                 534 include/linux/sunrpc/xdr.h 	__be32 *p;
p                 540 include/linux/sunrpc/xdr.h 	p = xdr_inline_decode(xdr, len * sizeof(*p));
p                 541 include/linux/sunrpc/xdr.h 	if (unlikely(!p))
p                 552 include/linux/sunrpc/xdr.h 	for (; array_size > 0; p++, array++, array_size--)
p                 553 include/linux/sunrpc/xdr.h 		*array = be32_to_cpup(p);
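A small self-contained sketch of how a 64-bit XDR "hyper" maps onto two big-endian 32-bit words, mirroring the xdr_encode_hyper()/xdr_decode_hyper() excerpts above and their "return p + 2" convention. The demo_ helpers are stand-ins, not the kernel API.

#include <assert.h>
#include <stdint.h>

/* Store a 64-bit value in network byte order across two 32-bit XDR words. */
static uint32_t *demo_encode_hyper(uint32_t *p, uint64_t val)
{
	uint8_t *b = (uint8_t *)p;

	for (int i = 0; i < 8; i++)
		b[i] = (uint8_t)(val >> (56 - 8 * i));	/* most significant byte first */
	return p + 2;					/* a hyper consumes two quads */
}

static uint32_t *demo_decode_hyper(uint32_t *p, uint64_t *valp)
{
	const uint8_t *b = (const uint8_t *)p;
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | b[i];
	*valp = v;
	return p + 2;
}

int main(void)
{
	uint32_t buf[2];
	uint64_t out;

	demo_encode_hyper(buf, 0x0123456789abcdefULL);
	demo_decode_hyper(buf, &out);
	assert(out == 0x0123456789abcdefULL);
	return 0;
}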
p                  79 include/linux/superhyway.h #define superhyway_set_drvdata(d,p)	dev_set_drvdata(&(d)->dev, (p))
p                 457 include/linux/suspend.h static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
p                 458 include/linux/suspend.h static inline void swsusp_set_page_free(struct page *p) {}
p                 459 include/linux/suspend.h static inline void swsusp_unset_page_free(struct page *p) {}
p                 550 include/linux/swap.h static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
p                 190 include/linux/swapops.h 	struct page *p = pfn_to_page(swp_offset(entry));
p                 195 include/linux/swapops.h 	BUG_ON(!PageLocked(compound_head(p)));
p                 196 include/linux/swapops.h 	return p;
p                 288 include/linux/swapops.h static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }
p                 318 include/linux/syscalls.h 				struct io_uring_params __user *p);
p                 192 include/linux/sysctl.h extern void setup_sysctl_set(struct ctl_table_set *p,
p                 235 include/linux/sysctl.h static inline void setup_sysctl_set(struct ctl_table_set *p,
p                  20 include/linux/task_io_accounting_ops.h static inline unsigned long task_io_get_inblock(const struct task_struct *p)
p                  22 include/linux/task_io_accounting_ops.h 	return p->ioac.read_bytes >> 9;
p                  34 include/linux/task_io_accounting_ops.h static inline unsigned long task_io_get_oublock(const struct task_struct *p)
p                  36 include/linux/task_io_accounting_ops.h 	return p->ioac.write_bytes >> 9;
p                  63 include/linux/task_io_accounting_ops.h static inline unsigned long task_io_get_inblock(const struct task_struct *p)
p                  72 include/linux/task_io_accounting_ops.h static inline unsigned long task_io_get_oublock(const struct task_struct *p)
p                  19 include/linux/trace_events.h const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
p                  23 include/linux/trace_events.h const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
p                  27 include/linux/trace_events.h const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
p                  31 include/linux/trace_events.h const char *trace_print_symbols_seq_u64(struct trace_seq *p,
p                  37 include/linux/trace_events.h const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
p                  40 include/linux/trace_events.h const char *trace_print_hex_seq(struct trace_seq *p,
p                  44 include/linux/trace_events.h const char *trace_print_array_seq(struct trace_seq *p,
p                 507 include/linux/trace_events.h static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p)
p                 511 include/linux/trace_events.h static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p)
p                 101 include/linux/tracepoint.h static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
p                 103 include/linux/tracepoint.h 	return offset_to_ptr(p);
p                 112 include/linux/tracepoint.h static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
p                 114 include/linux/tracepoint.h 	return *p;
p                  25 include/linux/tsacct_kern.h extern void xacct_add_tsk(struct taskstats *stats, struct task_struct *p);
p                  30 include/linux/tsacct_kern.h static inline void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
p                 411 include/linux/tty.h extern void proc_clear_tty(struct task_struct *p);
p                 432 include/linux/tty.h static inline void proc_clear_tty(struct task_struct *p)
p                 717 include/linux/tty.h extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
p                 291 include/linux/tty_driver.h 	int  (*get_serial)(struct tty_struct *tty, struct serial_struct *p);
p                 292 include/linux/tty_driver.h 	int  (*set_serial)(struct tty_struct *tty, struct serial_struct *p);
p                 371 include/linux/uaccess.h #define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
p                 372 include/linux/uaccess.h #define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
p                   8 include/linux/unaligned/access_ok.h static __always_inline u16 get_unaligned_le16(const void *p)
p                  10 include/linux/unaligned/access_ok.h 	return le16_to_cpup((__le16 *)p);
p                  13 include/linux/unaligned/access_ok.h static __always_inline u32 get_unaligned_le32(const void *p)
p                  15 include/linux/unaligned/access_ok.h 	return le32_to_cpup((__le32 *)p);
p                  18 include/linux/unaligned/access_ok.h static __always_inline u64 get_unaligned_le64(const void *p)
p                  20 include/linux/unaligned/access_ok.h 	return le64_to_cpup((__le64 *)p);
p                  23 include/linux/unaligned/access_ok.h static __always_inline u16 get_unaligned_be16(const void *p)
p                  25 include/linux/unaligned/access_ok.h 	return be16_to_cpup((__be16 *)p);
p                  28 include/linux/unaligned/access_ok.h static __always_inline u32 get_unaligned_be32(const void *p)
p                  30 include/linux/unaligned/access_ok.h 	return be32_to_cpup((__be32 *)p);
p                  33 include/linux/unaligned/access_ok.h static __always_inline u64 get_unaligned_be64(const void *p)
p                  35 include/linux/unaligned/access_ok.h 	return be64_to_cpup((__be64 *)p);
p                  38 include/linux/unaligned/access_ok.h static __always_inline void put_unaligned_le16(u16 val, void *p)
p                  40 include/linux/unaligned/access_ok.h 	*((__le16 *)p) = cpu_to_le16(val);
p                  43 include/linux/unaligned/access_ok.h static __always_inline void put_unaligned_le32(u32 val, void *p)
p                  45 include/linux/unaligned/access_ok.h 	*((__le32 *)p) = cpu_to_le32(val);
p                  48 include/linux/unaligned/access_ok.h static __always_inline void put_unaligned_le64(u64 val, void *p)
p                  50 include/linux/unaligned/access_ok.h 	*((__le64 *)p) = cpu_to_le64(val);
p                  53 include/linux/unaligned/access_ok.h static __always_inline void put_unaligned_be16(u16 val, void *p)
p                  55 include/linux/unaligned/access_ok.h 	*((__be16 *)p) = cpu_to_be16(val);
p                  58 include/linux/unaligned/access_ok.h static __always_inline void put_unaligned_be32(u32 val, void *p)
p                  60 include/linux/unaligned/access_ok.h 	*((__be32 *)p) = cpu_to_be32(val);
p                  63 include/linux/unaligned/access_ok.h static __always_inline void put_unaligned_be64(u64 val, void *p)
p                  65 include/linux/unaligned/access_ok.h 	*((__be64 *)p) = cpu_to_be64(val);
p                   7 include/linux/unaligned/be_byteshift.h static inline u16 __get_unaligned_be16(const u8 *p)
p                   9 include/linux/unaligned/be_byteshift.h 	return p[0] << 8 | p[1];
p                  12 include/linux/unaligned/be_byteshift.h static inline u32 __get_unaligned_be32(const u8 *p)
p                  14 include/linux/unaligned/be_byteshift.h 	return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
p                  17 include/linux/unaligned/be_byteshift.h static inline u64 __get_unaligned_be64(const u8 *p)
p                  19 include/linux/unaligned/be_byteshift.h 	return (u64)__get_unaligned_be32(p) << 32 |
p                  20 include/linux/unaligned/be_byteshift.h 	       __get_unaligned_be32(p + 4);
p                  23 include/linux/unaligned/be_byteshift.h static inline void __put_unaligned_be16(u16 val, u8 *p)
p                  25 include/linux/unaligned/be_byteshift.h 	*p++ = val >> 8;
p                  26 include/linux/unaligned/be_byteshift.h 	*p++ = val;
p                  29 include/linux/unaligned/be_byteshift.h static inline void __put_unaligned_be32(u32 val, u8 *p)
p                  31 include/linux/unaligned/be_byteshift.h 	__put_unaligned_be16(val >> 16, p);
p                  32 include/linux/unaligned/be_byteshift.h 	__put_unaligned_be16(val, p + 2);
p                  35 include/linux/unaligned/be_byteshift.h static inline void __put_unaligned_be64(u64 val, u8 *p)
p                  37 include/linux/unaligned/be_byteshift.h 	__put_unaligned_be32(val >> 32, p);
p                  38 include/linux/unaligned/be_byteshift.h 	__put_unaligned_be32(val, p + 4);
p                  41 include/linux/unaligned/be_byteshift.h static inline u16 get_unaligned_be16(const void *p)
p                  43 include/linux/unaligned/be_byteshift.h 	return __get_unaligned_be16((const u8 *)p);
p                  46 include/linux/unaligned/be_byteshift.h static inline u32 get_unaligned_be32(const void *p)
p                  48 include/linux/unaligned/be_byteshift.h 	return __get_unaligned_be32((const u8 *)p);
p                  51 include/linux/unaligned/be_byteshift.h static inline u64 get_unaligned_be64(const void *p)
p                  53 include/linux/unaligned/be_byteshift.h 	return __get_unaligned_be64((const u8 *)p);
p                  56 include/linux/unaligned/be_byteshift.h static inline void put_unaligned_be16(u16 val, void *p)
p                  58 include/linux/unaligned/be_byteshift.h 	__put_unaligned_be16(val, p);
p                  61 include/linux/unaligned/be_byteshift.h static inline void put_unaligned_be32(u32 val, void *p)
p                  63 include/linux/unaligned/be_byteshift.h 	__put_unaligned_be32(val, p);
p                  66 include/linux/unaligned/be_byteshift.h static inline void put_unaligned_be64(u64 val, void *p)
p                  68 include/linux/unaligned/be_byteshift.h 	__put_unaligned_be64(val, p);
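The byteshift variant above is the fully portable fallback: it assembles the value one byte at a time, so it works regardless of host endianness or alignment rules. A self-contained round-trip check in the same style (demo_ names used to avoid clashing with the kernel helpers):

#include <assert.h>
#include <stdint.h>

static inline uint32_t demo_get_be32(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	       (uint32_t)p[2] << 8  | p[3];
}

static inline void demo_put_be32(uint32_t val, uint8_t *p)
{
	p[0] = val >> 24;
	p[1] = val >> 16;
	p[2] = val >> 8;
	p[3] = val;
}

int main(void)
{
	uint8_t buf[5];

	/* Works at any offset: no alignment requirement at all. */
	demo_put_be32(0xdeadbeefu, buf + 1);
	assert(demo_get_be32(buf + 1) == 0xdeadbeefu);
	assert(buf[1] == 0xde && buf[4] == 0xef);
	return 0;
}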
p                   7 include/linux/unaligned/be_memmove.h static inline u16 get_unaligned_be16(const void *p)
p                   9 include/linux/unaligned/be_memmove.h 	return __get_unaligned_memmove16((const u8 *)p);
p                  12 include/linux/unaligned/be_memmove.h static inline u32 get_unaligned_be32(const void *p)
p                  14 include/linux/unaligned/be_memmove.h 	return __get_unaligned_memmove32((const u8 *)p);
p                  17 include/linux/unaligned/be_memmove.h static inline u64 get_unaligned_be64(const void *p)
p                  19 include/linux/unaligned/be_memmove.h 	return __get_unaligned_memmove64((const u8 *)p);
p                  22 include/linux/unaligned/be_memmove.h static inline void put_unaligned_be16(u16 val, void *p)
p                  24 include/linux/unaligned/be_memmove.h 	__put_unaligned_memmove16(val, p);
p                  27 include/linux/unaligned/be_memmove.h static inline void put_unaligned_be32(u32 val, void *p)
p                  29 include/linux/unaligned/be_memmove.h 	__put_unaligned_memmove32(val, p);
p                  32 include/linux/unaligned/be_memmove.h static inline void put_unaligned_be64(u64 val, void *p)
p                  34 include/linux/unaligned/be_memmove.h 	__put_unaligned_memmove64(val, p);
p                   7 include/linux/unaligned/be_struct.h static inline u16 get_unaligned_be16(const void *p)
p                   9 include/linux/unaligned/be_struct.h 	return __get_unaligned_cpu16((const u8 *)p);
p                  12 include/linux/unaligned/be_struct.h static inline u32 get_unaligned_be32(const void *p)
p                  14 include/linux/unaligned/be_struct.h 	return __get_unaligned_cpu32((const u8 *)p);
p                  17 include/linux/unaligned/be_struct.h static inline u64 get_unaligned_be64(const void *p)
p                  19 include/linux/unaligned/be_struct.h 	return __get_unaligned_cpu64((const u8 *)p);
p                  22 include/linux/unaligned/be_struct.h static inline void put_unaligned_be16(u16 val, void *p)
p                  24 include/linux/unaligned/be_struct.h 	__put_unaligned_cpu16(val, p);
p                  27 include/linux/unaligned/be_struct.h static inline void put_unaligned_be32(u32 val, void *p)
p                  29 include/linux/unaligned/be_struct.h 	__put_unaligned_cpu32(val, p);
p                  32 include/linux/unaligned/be_struct.h static inline void put_unaligned_be64(u64 val, void *p)
p                  34 include/linux/unaligned/be_struct.h 	__put_unaligned_cpu64(val, p);
p                   7 include/linux/unaligned/le_byteshift.h static inline u16 __get_unaligned_le16(const u8 *p)
p                   9 include/linux/unaligned/le_byteshift.h 	return p[0] | p[1] << 8;
p                  12 include/linux/unaligned/le_byteshift.h static inline u32 __get_unaligned_le32(const u8 *p)
p                  14 include/linux/unaligned/le_byteshift.h 	return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
p                  17 include/linux/unaligned/le_byteshift.h static inline u64 __get_unaligned_le64(const u8 *p)
p                  19 include/linux/unaligned/le_byteshift.h 	return (u64)__get_unaligned_le32(p + 4) << 32 |
p                  20 include/linux/unaligned/le_byteshift.h 	       __get_unaligned_le32(p);
p                  23 include/linux/unaligned/le_byteshift.h static inline void __put_unaligned_le16(u16 val, u8 *p)
p                  25 include/linux/unaligned/le_byteshift.h 	*p++ = val;
p                  26 include/linux/unaligned/le_byteshift.h 	*p++ = val >> 8;
p                  29 include/linux/unaligned/le_byteshift.h static inline void __put_unaligned_le32(u32 val, u8 *p)
p                  31 include/linux/unaligned/le_byteshift.h 	__put_unaligned_le16(val >> 16, p + 2);
p                  32 include/linux/unaligned/le_byteshift.h 	__put_unaligned_le16(val, p);
p                  35 include/linux/unaligned/le_byteshift.h static inline void __put_unaligned_le64(u64 val, u8 *p)
p                  37 include/linux/unaligned/le_byteshift.h 	__put_unaligned_le32(val >> 32, p + 4);
p                  38 include/linux/unaligned/le_byteshift.h 	__put_unaligned_le32(val, p);
p                  41 include/linux/unaligned/le_byteshift.h static inline u16 get_unaligned_le16(const void *p)
p                  43 include/linux/unaligned/le_byteshift.h 	return __get_unaligned_le16((const u8 *)p);
p                  46 include/linux/unaligned/le_byteshift.h static inline u32 get_unaligned_le32(const void *p)
p                  48 include/linux/unaligned/le_byteshift.h 	return __get_unaligned_le32((const u8 *)p);
p                  51 include/linux/unaligned/le_byteshift.h static inline u64 get_unaligned_le64(const void *p)
p                  53 include/linux/unaligned/le_byteshift.h 	return __get_unaligned_le64((const u8 *)p);
p                  56 include/linux/unaligned/le_byteshift.h static inline void put_unaligned_le16(u16 val, void *p)
p                  58 include/linux/unaligned/le_byteshift.h 	__put_unaligned_le16(val, p);
p                  61 include/linux/unaligned/le_byteshift.h static inline void put_unaligned_le32(u32 val, void *p)
p                  63 include/linux/unaligned/le_byteshift.h 	__put_unaligned_le32(val, p);
p                  66 include/linux/unaligned/le_byteshift.h static inline void put_unaligned_le64(u64 val, void *p)
p                  68 include/linux/unaligned/le_byteshift.h 	__put_unaligned_le64(val, p);
p                   7 include/linux/unaligned/le_memmove.h static inline u16 get_unaligned_le16(const void *p)
p                   9 include/linux/unaligned/le_memmove.h 	return __get_unaligned_memmove16((const u8 *)p);
p                  12 include/linux/unaligned/le_memmove.h static inline u32 get_unaligned_le32(const void *p)
p                  14 include/linux/unaligned/le_memmove.h 	return __get_unaligned_memmove32((const u8 *)p);
p                  17 include/linux/unaligned/le_memmove.h static inline u64 get_unaligned_le64(const void *p)
p                  19 include/linux/unaligned/le_memmove.h 	return __get_unaligned_memmove64((const u8 *)p);
p                  22 include/linux/unaligned/le_memmove.h static inline void put_unaligned_le16(u16 val, void *p)
p                  24 include/linux/unaligned/le_memmove.h 	__put_unaligned_memmove16(val, p);
p                  27 include/linux/unaligned/le_memmove.h static inline void put_unaligned_le32(u32 val, void *p)
p                  29 include/linux/unaligned/le_memmove.h 	__put_unaligned_memmove32(val, p);
p                  32 include/linux/unaligned/le_memmove.h static inline void put_unaligned_le64(u64 val, void *p)
p                  34 include/linux/unaligned/le_memmove.h 	__put_unaligned_memmove64(val, p);
p                   7 include/linux/unaligned/le_struct.h static inline u16 get_unaligned_le16(const void *p)
p                   9 include/linux/unaligned/le_struct.h 	return __get_unaligned_cpu16((const u8 *)p);
p                  12 include/linux/unaligned/le_struct.h static inline u32 get_unaligned_le32(const void *p)
p                  14 include/linux/unaligned/le_struct.h 	return __get_unaligned_cpu32((const u8 *)p);
p                  17 include/linux/unaligned/le_struct.h static inline u64 get_unaligned_le64(const void *p)
p                  19 include/linux/unaligned/le_struct.h 	return __get_unaligned_cpu64((const u8 *)p);
p                  22 include/linux/unaligned/le_struct.h static inline void put_unaligned_le16(u16 val, void *p)
p                  24 include/linux/unaligned/le_struct.h 	__put_unaligned_cpu16(val, p);
p                  27 include/linux/unaligned/le_struct.h static inline void put_unaligned_le32(u32 val, void *p)
p                  29 include/linux/unaligned/le_struct.h 	__put_unaligned_cpu32(val, p);
p                  32 include/linux/unaligned/le_struct.h static inline void put_unaligned_le64(u64 val, void *p)
p                  34 include/linux/unaligned/le_struct.h 	__put_unaligned_cpu64(val, p);
p                  10 include/linux/unaligned/memmove.h static inline u16 __get_unaligned_memmove16(const void *p)
p                  13 include/linux/unaligned/memmove.h 	memmove(&tmp, p, 2);
p                  17 include/linux/unaligned/memmove.h static inline u32 __get_unaligned_memmove32(const void *p)
p                  20 include/linux/unaligned/memmove.h 	memmove(&tmp, p, 4);
p                  24 include/linux/unaligned/memmove.h static inline u64 __get_unaligned_memmove64(const void *p)
p                  27 include/linux/unaligned/memmove.h 	memmove(&tmp, p, 8);
p                  31 include/linux/unaligned/memmove.h static inline void __put_unaligned_memmove16(u16 val, void *p)
p                  33 include/linux/unaligned/memmove.h 	memmove(p, &val, 2);
p                  36 include/linux/unaligned/memmove.h static inline void __put_unaligned_memmove32(u32 val, void *p)
p                  38 include/linux/unaligned/memmove.h 	memmove(p, &val, 4);
p                  41 include/linux/unaligned/memmove.h static inline void __put_unaligned_memmove64(u64 val, void *p)
p                  43 include/linux/unaligned/memmove.h 	memmove(p, &val, 8);
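The memmove variant sidesteps alignment by copying through a properly aligned temporary; a fixed-size memmove typically compiles down to a single (possibly unaligned) load or store on modern compilers. A stand-alone sketch of the same pattern:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Copy through an aligned temporary; the copy size is a compile-time
 * constant, so the compiler is free to turn it into one machine access. */
static inline uint32_t demo_get_unaligned32(const void *p)
{
	uint32_t tmp;

	memmove(&tmp, p, sizeof(tmp));
	return tmp;
}

static inline void demo_put_unaligned32(uint32_t val, void *p)
{
	memmove(p, &val, sizeof(val));
}

int main(void)
{
	uint8_t buf[7] = { 0 };

	demo_put_unaligned32(0x11223344u, buf + 3);	/* deliberately misaligned */
	assert(demo_get_unaligned32(buf + 3) == 0x11223344u);
	return 0;
}

Unlike the byteshift variant, this round-trips in native byte order; the endian conversion is layered on top by the le_/be_memmove.h wrappers above.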
p                  10 include/linux/unaligned/packed_struct.h static inline u16 __get_unaligned_cpu16(const void *p)
p                  12 include/linux/unaligned/packed_struct.h 	const struct __una_u16 *ptr = (const struct __una_u16 *)p;
p                  16 include/linux/unaligned/packed_struct.h static inline u32 __get_unaligned_cpu32(const void *p)
p                  18 include/linux/unaligned/packed_struct.h 	const struct __una_u32 *ptr = (const struct __una_u32 *)p;
p                  22 include/linux/unaligned/packed_struct.h static inline u64 __get_unaligned_cpu64(const void *p)
p                  24 include/linux/unaligned/packed_struct.h 	const struct __una_u64 *ptr = (const struct __una_u64 *)p;
p                  28 include/linux/unaligned/packed_struct.h static inline void __put_unaligned_cpu16(u16 val, void *p)
p                  30 include/linux/unaligned/packed_struct.h 	struct __una_u16 *ptr = (struct __una_u16 *)p;
p                  34 include/linux/unaligned/packed_struct.h static inline void __put_unaligned_cpu32(u32 val, void *p)
p                  36 include/linux/unaligned/packed_struct.h 	struct __una_u32 *ptr = (struct __una_u32 *)p;
p                  40 include/linux/unaligned/packed_struct.h static inline void __put_unaligned_cpu64(u64 val, void *p)
p                  42 include/linux/unaligned/packed_struct.h 	struct __una_u64 *ptr = (struct __una_u64 *)p;
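The packed_struct variant lets the compiler itself emit whatever unaligned-access sequence the target needs, by reading the value through a one-member packed struct. A minimal GCC/Clang-style sketch of the idea (the kernel's __una_u* wrappers follow the same shape):

#include <assert.h>
#include <stdint.h>

/* A packed wrapper tells the compiler the field may sit at any address,
 * so it must not assume natural alignment when accessing it. */
struct demo_una_u32 {
	uint32_t x;
} __attribute__((packed));

static inline uint32_t demo_get_unaligned_cpu32(const void *p)
{
	const struct demo_una_u32 *ptr = (const struct demo_una_u32 *)p;

	return ptr->x;
}

static inline void demo_put_unaligned_cpu32(uint32_t val, void *p)
{
	struct demo_una_u32 *ptr = (struct demo_una_u32 *)p;

	ptr->x = val;
}

int main(void)
{
	uint8_t buf[6] = { 0 };

	demo_put_unaligned_cpu32(0xcafef00du, buf + 1);	/* odd address on purpose */
	assert(demo_get_unaligned_cpu32(buf + 1) == 0xcafef00du);
	return 0;
}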
p                  36 include/linux/usb/ehci_def.h #define HC_LENGTH(ehci, p)	(0x00ff&((p) >> /* bits 7:0 / offset 00h */ \
p                  38 include/linux/usb/ehci_def.h #define HC_VERSION(ehci, p)	(0xffff&((p) >> /* bits 31:16 / offset 02h */ \
p                  41 include/linux/usb/ehci_def.h #define HCS_DEBUG_PORT(p)	(((p)>>20)&0xf)	/* bits 23:20, debug port? */
p                  42 include/linux/usb/ehci_def.h #define HCS_INDICATOR(p)	((p)&(1 << 16))	/* true: has port indicators */
p                  43 include/linux/usb/ehci_def.h #define HCS_N_CC(p)		(((p)>>12)&0xf)	/* bits 15:12, #companion HCs */
p                  44 include/linux/usb/ehci_def.h #define HCS_N_PCC(p)		(((p)>>8)&0xf)	/* bits 11:8, ports per CC */
p                  45 include/linux/usb/ehci_def.h #define HCS_PORTROUTED(p)	((p)&(1 << 7))	/* true: port routing */
p                  46 include/linux/usb/ehci_def.h #define HCS_PPC(p)		((p)&(1 << 4))	/* true: port power control */
p                  47 include/linux/usb/ehci_def.h #define HCS_N_PORTS(p)		(((p)>>0)&0xf)	/* bits 3:0, ports on HC */
p                  51 include/linux/usb/ehci_def.h #define HCC_32FRAME_PERIODIC_LIST(p)	((p)&(1 << 19))
p                  52 include/linux/usb/ehci_def.h #define HCC_PER_PORT_CHANGE_EVENT(p)	((p)&(1 << 18))
p                  53 include/linux/usb/ehci_def.h #define HCC_LPM(p)			((p)&(1 << 17))
p                  54 include/linux/usb/ehci_def.h #define HCC_HW_PREFETCH(p)		((p)&(1 << 16))
p                  56 include/linux/usb/ehci_def.h #define HCC_EXT_CAPS(p)		(((p)>>8)&0xff)	/* for pci extended caps */
p                  57 include/linux/usb/ehci_def.h #define HCC_ISOC_CACHE(p)       ((p)&(1 << 7))  /* true: can cache isoc frame */
p                  58 include/linux/usb/ehci_def.h #define HCC_ISOC_THRES(p)       (((p)>>4)&0x7)  /* bits 6:4, uframes cached */
p                  59 include/linux/usb/ehci_def.h #define HCC_CANPARK(p)		((p)&(1 << 2))  /* true: can park on async qh */
p                  60 include/linux/usb/ehci_def.h #define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1))  /* true: periodic_size changes*/
p                  61 include/linux/usb/ehci_def.h #define HCC_64BIT_ADDR(p)       ((p)&(1))       /* true: can use 64-bit addr */
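A quick illustration of how the HCS_* accessor macros above decompose an EHCI structural-parameters register value into its bitfields; the register value below is fabricated purely for the demonstration.

#include <assert.h>
#include <stdint.h>

/* Same field layout as the excerpts above: N_PORTS in bits 3:0, PPC in
 * bit 4, N_PCC in bits 11:8, N_CC in bits 15:12. */
#define DEMO_HCS_N_PORTS(p)	(((p) >> 0) & 0xf)
#define DEMO_HCS_PPC(p)		((p) & (1 << 4))
#define DEMO_HCS_N_PCC(p)	(((p) >> 8) & 0xf)
#define DEMO_HCS_N_CC(p)	(((p) >> 12) & 0xf)

int main(void)
{
	uint32_t hcs_params = 0x00002314;	/* made-up HCSPARAMS value */

	assert(DEMO_HCS_N_PORTS(hcs_params) == 4);	/* 4 root ports */
	assert(DEMO_HCS_PPC(hcs_params));		/* port power control present */
	assert(DEMO_HCS_N_PCC(hcs_params) == 3);	/* 3 ports per companion HC */
	assert(DEMO_HCS_N_CC(hcs_params) == 2);		/* 2 companion controllers */
	return 0;
}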
p                  30 include/linux/user-return-notifier.h static inline void clear_user_return_notifier(struct task_struct *p)
p                  32 include/linux/user-return-notifier.h 	clear_tsk_thread_flag(p, TIF_USER_RETURN_NOTIFY);
p                  46 include/linux/user-return-notifier.h static inline void clear_user_return_notifier(struct task_struct *p) {}
p                  50 include/linux/virtio_ring.h 				   __virtio16 *p, __virtio16 v)
p                  53 include/linux/virtio_ring.h 		virt_store_mb(*p, v);
p                  55 include/linux/virtio_ring.h 		WRITE_ONCE(*p, v);
p                  79 include/linux/wait.h static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
p                  82 include/linux/wait.h 	wq_entry->private	= p;
p                 240 include/linux/wait_bit.h extern wait_queue_head_t *__var_waitqueue(void *p);
p                 168 include/linux/wm97xx.h     int p;
p                  95 include/linux/xarray.h static inline void *xa_tag_pointer(void *p, unsigned long tag)
p                  97 include/linux/xarray.h 	return (void *)((unsigned long)p | tag);
p                  86 include/media/drv-intf/cx2341x.h const char * const *cx2341x_ctrl_get_menu(const struct cx2341x_mpeg_params *p, u32 id);
p                  89 include/media/drv-intf/cx2341x.h void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p);
p                  90 include/media/drv-intf/cx2341x.h void cx2341x_log_status(const struct cx2341x_mpeg_params *p, const char *prefix);
p                 127 include/media/drv-intf/exynos-fimc.h 	int (*prepare)(struct exynos_media_pipeline *p,
p                 129 include/media/drv-intf/exynos-fimc.h 	int (*unprepare)(struct exynos_media_pipeline *p);
p                 130 include/media/drv-intf/exynos-fimc.h 	int (*open)(struct exynos_media_pipeline *p, struct media_entity *me,
p                 132 include/media/drv-intf/exynos-fimc.h 	int (*close)(struct exynos_media_pipeline *p);
p                 133 include/media/drv-intf/exynos-fimc.h 	int (*set_stream)(struct exynos_media_pipeline *p, bool state);
p                 238 include/media/dvb_frontend.h 	int (*set_analog_params)(struct dvb_frontend *fe, struct analog_parameters *p);
p                 246 include/media/tpg/v4l2-tpg.h 		u8 *basep[TPG_MAX_PLANES][2], unsigned p, u8 *vbuf);
p                 249 include/media/tpg/v4l2-tpg.h 			   unsigned p, u8 *vbuf);
p                 251 include/media/tpg/v4l2-tpg.h 		    unsigned p, u8 *vbuf);
p                 461 include/media/tpg/v4l2-tpg.h 	unsigned p;
p                 468 include/media/tpg/v4l2-tpg.h 	for (p = 0; p < tpg_g_planes(tpg); p++) {
p                 469 include/media/tpg/v4l2-tpg.h 		unsigned plane_w = bpl * tpg->twopixelsize[p] / tpg->twopixelsize[0];
p                 471 include/media/tpg/v4l2-tpg.h 		tpg->bytesperline[p] = plane_w / tpg->hdownsampling[p];
p                 481 include/media/tpg/v4l2-tpg.h 	unsigned p;
p                 485 include/media/tpg/v4l2-tpg.h 	for (p = 0; p < tpg_g_planes(tpg); p++) {
p                 486 include/media/tpg/v4l2-tpg.h 		unsigned plane_w = tpg_g_bytesperline(tpg, p);
p                 488 include/media/tpg/v4l2-tpg.h 		w += plane_w / tpg->vdownsampling[p];
p                 497 include/media/tpg/v4l2-tpg.h 	unsigned p;
p                 501 include/media/tpg/v4l2-tpg.h 	for (p = 0; p < tpg_g_planes(tpg); p++) {
p                 502 include/media/tpg/v4l2-tpg.h 		unsigned plane_w = bpl * tpg->twopixelsize[p] / tpg->twopixelsize[0];
p                 504 include/media/tpg/v4l2-tpg.h 		plane_w /= tpg->hdownsampling[p];
p                 505 include/media/tpg/v4l2-tpg.h 		w += plane_w / tpg->vdownsampling[p];
p                  71 include/media/v4l2-ctrls.h 	void *p;
p                  54 include/media/v4l2-subdev.h 	u8 *p;
p                 988 include/media/v4l2-subdev.h static inline void v4l2_set_subdevdata(struct v4l2_subdev *sd, void *p)
p                 990 include/media/v4l2-subdev.h 	sd->dev_priv = p;
p                1011 include/media/v4l2-subdev.h static inline void v4l2_set_subdev_hostdata(struct v4l2_subdev *sd, void *p)
p                1013 include/media/v4l2-subdev.h 	sd->host_priv = p;
p                  67 include/media/videobuf2-dvb.h int vb2_dvb_find_frontend(struct vb2_dvb_frontends *f, struct dvb_frontend *p);
p                 276 include/media/videobuf2-v4l2.h 			  struct v4l2_requestbuffers *p);
p                 278 include/media/videobuf2-v4l2.h 			  struct v4l2_create_buffers *p);
p                 280 include/media/videobuf2-v4l2.h 			  struct v4l2_buffer *p);
p                 281 include/media/videobuf2-v4l2.h int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p);
p                 282 include/media/videobuf2-v4l2.h int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p);
p                 283 include/media/videobuf2-v4l2.h int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p);
p                 287 include/media/videobuf2-v4l2.h 	struct v4l2_exportbuffer *p);
p                 440 include/net/addrconf.h 	__be64 *p = (__be64 *)addr;
p                 441 include/net/addrconf.h 	return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) | (p[1] ^ cpu_to_be64(1))) == 0UL;
p                 452 include/net/addrconf.h 	__be64 *p = (__be64 *)addr;
p                 453 include/net/addrconf.h 	return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) | (p[1] ^ cpu_to_be64(2))) == 0UL;
p                 469 include/net/addrconf.h 	__be64 *p = (__be64 *)addr;
p                 470 include/net/addrconf.h 	return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) |
p                 471 include/net/addrconf.h 		((p[1] ^ cpu_to_be64(0x00000001ff000000UL)) &
p                 484 include/net/addrconf.h 	__be64 *p = (__be64 *)addr;
p                 486 include/net/addrconf.h 	return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) |
p                 487 include/net/addrconf.h 		(p[1] ^ cpu_to_be64(0x6a))) == 0UL;
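The addrconf.h helpers above test well-known multicast addresses by treating the 128-bit address as two 64-bit words and OR-ing the XOR results, so the whole check is branch-free. A stand-alone sketch of the same trick for ff02::1 (the constant visible in the first excerpt); be64_at() is a portable stand-in for the kernel's cpu_to_be64() comparison.

#include <assert.h>
#include <stdint.h>

/* Read 8 bytes big-endian; keeps the example endian-independent. */
static uint64_t be64_at(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

/* True only for the link-local all-nodes address ff02::1. */
static int is_all_nodes(const uint8_t addr[16])
{
	return ((be64_at(addr) ^ 0xff02000000000000ULL) |
		(be64_at(addr + 8) ^ 1ULL)) == 0;
}

int main(void)
{
	uint8_t all_nodes[16] = { 0xff, 0x02, [15] = 0x01 };	/* ff02::1 */
	uint8_t all_routers[16] = { 0xff, 0x02, [15] = 0x02 };	/* ff02::2 */

	assert(is_all_nodes(all_nodes));
	assert(!is_all_nodes(all_routers));
	return 0;
}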
p                  42 include/net/busy_poll.h bool sk_busy_loop_end(void *p, unsigned long start_time);
p                  23 include/net/cls_cgroup.h struct cgroup_cls_state *task_cls_state(struct task_struct *p);
p                  25 include/net/cls_cgroup.h static inline u32 task_cls_classid(struct task_struct *p)
p                  33 include/net/cls_cgroup.h 	classid = container_of(task_css(p, net_cls_cgrp_id),
p                 283 include/net/dsa.h static inline const struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
p                 285 include/net/dsa.h 	return &ds->ports[p];
p                 288 include/net/dsa.h static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
p                 290 include/net/dsa.h 	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
p                 293 include/net/dsa.h static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
p                 295 include/net/dsa.h 	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_CPU;
p                 298 include/net/dsa.h static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
p                 300 include/net/dsa.h 	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_DSA;
p                 303 include/net/dsa.h static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
p                 305 include/net/dsa.h 	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_USER;
p                 311 include/net/dsa.h 	int p;
p                 313 include/net/dsa.h 	for (p = 0; p < ds->num_ports; p++)
p                 314 include/net/dsa.h 		if (dsa_is_user_port(ds, p))
p                 315 include/net/dsa.h 			mask |= BIT(p);
p                 456 include/net/dsa.h 			    struct ethtool_regs *regs, void *p);
p                 636 include/net/dsa.h #define BRCM_TAG_SET_PORT_QUEUE(p, q)	((p) << 8 | q)
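The user-port loop in the dsa.h excerpts above simply builds a bitmask with one bit per user-facing port. A minimal stand-alone version with a fabricated port-type table (the enum and the five-port layout are invented for the example):

#include <assert.h>
#include <stdint.h>

enum demo_port_type { DEMO_PORT_UNUSED, DEMO_PORT_CPU, DEMO_PORT_USER };

/* One bit set per user port, in the same spirit as the loop above. */
static uint32_t demo_user_ports(const enum demo_port_type *type, int num_ports)
{
	uint32_t mask = 0;

	for (int p = 0; p < num_ports; p++)
		if (type[p] == DEMO_PORT_USER)
			mask |= 1u << p;
	return mask;
}

int main(void)
{
	/* Ports 0-3 face users, port 4 is the CPU port. */
	enum demo_port_type layout[5] = {
		DEMO_PORT_USER, DEMO_PORT_USER, DEMO_PORT_USER,
		DEMO_PORT_USER, DEMO_PORT_CPU,
	};

	assert(demo_user_ports(layout, 5) == 0x0f);
	return 0;
}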
p                  47 include/net/dsfield.h 	__be16 *p = (__force __be16 *)ipv6h;
p                  49 include/net/dsfield.h 	*p = (*p & htons((((u16)mask << 4) | 0xf00f))) | htons((u16)value << 4);
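The dsfield excerpt above rewrites the 8-bit IPv6 traffic class, which straddles the first two header bytes: in the first 16-bit word the version sits in bits 15:12, the DS field in bits 11:4, and the top of the flow label in bits 3:0, so the mask is shifted into place and 0xf00f preserves the surrounding fields. A host-order sketch of the same arithmetic (values are made up):

#include <assert.h>
#include <stdint.h>

/* w: first 16 bits of an IPv6 header in host order.
 * mask: DS bits to keep; value: DS bits to set (its masked bits must be 0). */
static uint16_t demo_change_dsfield(uint16_t w, uint8_t mask, uint8_t value)
{
	return (w & (((uint16_t)mask << 4) | 0xf00f)) | ((uint16_t)value << 4);
}

int main(void)
{
	uint16_t w = 0x6abc;	/* version 6, traffic class 0xab, flow label nibble 0xc */

	/* Replace the DSCP bits but keep the two ECN bits (mask 0x03). */
	w = demo_change_dsfield(w, 0x03, 0xb8);
	assert((w >> 12) == 6);			/* version untouched */
	assert(((w >> 4) & 0xff) == 0xbb);	/* 0xb8 plus the preserved ECN bits */
	assert((w & 0xf) == 0xc);		/* flow label untouched */
	return 0;
}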
p                 113 include/net/dst.h 	unsigned long p = dst->_metrics;
p                 115 include/net/dst.h 	BUG_ON(!p);
p                 117 include/net/dst.h 	if (p & DST_METRICS_READ_ONLY)
p                 118 include/net/dst.h 		return dst->ops->cow_metrics(dst, p);
p                 119 include/net/dst.h 	return __DST_METRICS_PTR(p);
p                 152 include/net/dst.h 	u32 *p = DST_METRICS_PTR(dst);
p                 154 include/net/dst.h 	return p[metric-1];
p                 179 include/net/dst.h 	u32 *p = dst_metrics_write_ptr(dst);
p                 181 include/net/dst.h 	if (p)
p                 182 include/net/dst.h 		p[metric-1] = val;
p                 295 include/net/erspan.h 	md2->p = 1;
p                 147 include/net/inetpeer.h void inet_putpeer(struct inet_peer *p);
p                 488 include/net/ip.h 	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
p                 490 include/net/ip.h 	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
p                 491 include/net/ip.h 		kfree(p);
p                 476 include/net/ip6_fib.h 	struct seq_net_private p;
p                 272 include/net/ip_tunnels.h int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
p                 287 include/net/ip_tunnels.h 			 struct ip_tunnel_parm *p, __u32 fwmark);
p                 289 include/net/ip_tunnels.h 		      struct ip_tunnel_parm *p, __u32 fwmark);
p                 729 include/net/ip_vs.h 	int (*fill_param)(struct ip_vs_conn_param *p, struct sk_buff *skb);
p                 730 include/net/ip_vs.h 	bool (*ct_match)(const struct ip_vs_conn_param *p,
p                 732 include/net/ip_vs.h 	u32 (*hashkey_raw)(const struct ip_vs_conn_param *p, u32 initval,
p                1186 include/net/ip_vs.h 					 struct ip_vs_conn_param *p)
p                1188 include/net/ip_vs.h 	p->ipvs = ipvs;
p                1189 include/net/ip_vs.h 	p->af = af;
p                1190 include/net/ip_vs.h 	p->protocol = protocol;
p                1191 include/net/ip_vs.h 	p->caddr = caddr;
p                1192 include/net/ip_vs.h 	p->cport = cport;
p                1193 include/net/ip_vs.h 	p->vaddr = vaddr;
p                1194 include/net/ip_vs.h 	p->vport = vport;
p                1195 include/net/ip_vs.h 	p->pe = NULL;
p                1196 include/net/ip_vs.h 	p->pe_data = NULL;
p                1199 include/net/ip_vs.h struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p);
p                1200 include/net/ip_vs.h struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p);
p                1206 include/net/ip_vs.h struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);
p                1230 include/net/ip_vs.h struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
p                 353 include/net/ndisc.h static inline u8 *__ndisc_opt_addr_data(struct nd_opt_hdr *p,
p                 356 include/net/ndisc.h 	u8 *lladdr = (u8 *)(p + 1);
p                 357 include/net/ndisc.h 	int lladdrlen = p->nd_opt_len << 3;
p                 363 include/net/ndisc.h static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p,
p                 366 include/net/ndisc.h 	return __ndisc_opt_addr_data(p, dev->addr_len,
p                  88 include/net/neighbour.h static inline void neigh_var_set(struct neigh_parms *p, int index, int val)
p                  90 include/net/neighbour.h 	set_bit(index, p->data_state);
p                  91 include/net/neighbour.h 	p->data[index] = val;
p                  94 include/net/neighbour.h #define NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr])
p                  99 include/net/neighbour.h #define NEIGH_VAR_INIT(p, attr, val) (NEIGH_VAR(p, attr) = val)
p                 100 include/net/neighbour.h #define NEIGH_VAR_SET(p, attr, val) neigh_var_set(p, NEIGH_VAR_ ## attr, val)
p                 102 include/net/neighbour.h static inline void neigh_parms_data_state_setall(struct neigh_parms *p)
p                 104 include/net/neighbour.h 	bitmap_fill(p->data_state, NEIGH_VAR_DATA_MAX);
p                 107 include/net/neighbour.h static inline void neigh_parms_data_state_cleanall(struct neigh_parms *p)
p                 109 include/net/neighbour.h 	bitmap_zero(p->data_state, NEIGH_VAR_DATA_MAX);
p                 238 include/net/neighbour.h static inline int neigh_parms_family(struct neigh_parms *p)
p                 240 include/net/neighbour.h 	return p->tbl->family;
p                 353 include/net/neighbour.h void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
p                 378 include/net/neighbour.h 	struct seq_net_private p;
p                 403 include/net/neighbour.h int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
p                 405 include/net/neighbour.h void neigh_sysctl_unregister(struct neigh_parms *p);
p                  22 include/net/netprio_cgroup.h static inline u32 task_netprioidx(struct task_struct *p)
p                  28 include/net/netprio_cgroup.h 	css = task_css(p, net_prio_cgrp_id);
p                  44 include/net/netprio_cgroup.h static inline u32 task_netprioidx(struct task_struct *p)
p                  71 include/net/page_pool.h 	struct page_pool_params p;
p                 132 include/net/page_pool.h 	return pool->p.dma_dir;
p                  46 include/net/ping.h 	struct seq_net_private  p;
p                 109 include/net/protocol.h void inet_register_protosw(struct inet_protosw *p);
p                 110 include/net/protocol.h void inet_unregister_protosw(struct inet_protosw *p);
p                 115 include/net/protocol.h int inet6_register_protosw(struct inet_protosw *p);
p                 116 include/net/protocol.h void inet6_unregister_protosw(struct inet_protosw *p);
p                  45 include/net/raw.h 	struct seq_net_private p;
p                 182 include/net/red.h static inline void red_set_parms(struct red_parms *p,
p                 189 include/net/red.h 	p->qth_min	= qth_min << Wlog;
p                 190 include/net/red.h 	p->qth_max	= qth_max << Wlog;
p                 191 include/net/red.h 	p->Wlog		= Wlog;
p                 192 include/net/red.h 	p->Plog		= Plog;
p                 195 include/net/red.h 	p->qth_delta	= delta;
p                 200 include/net/red.h 	p->max_P = max_P;
p                 203 include/net/red.h 	p->max_P_reciprocal  = reciprocal_value(max_p_delta);
p                 210 include/net/red.h 	p->target_min = qth_min + 2*delta;
p                 211 include/net/red.h 	p->target_max = qth_min + 3*delta;
p                 213 include/net/red.h 	p->Scell_log	= Scell_log;
p                 214 include/net/red.h 	p->Scell_max	= (255 << Scell_log);
p                 217 include/net/red.h 		memcpy(p->Stab, stab, sizeof(p->Stab));
p                 242 include/net/red.h static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms *p,
p                 246 include/net/red.h 	long us_idle = min_t(s64, delta, p->Scell_max);
p                 269 include/net/red.h 	shift = p->Stab[(us_idle >> p->Scell_log) & RED_STAB_MASK];
p                 281 include/net/red.h 		us_idle = (v->qavg * (u64)us_idle) >> p->Scell_log;
p                 290 include/net/red.h static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p,
p                 303 include/net/red.h 	return v->qavg + (backlog - (v->qavg >> p->Wlog));
p                 306 include/net/red.h static inline unsigned long red_calc_qavg(const struct red_parms *p,
p                 311 include/net/red.h 		return red_calc_qavg_no_idle_time(p, v, backlog);
p                 313 include/net/red.h 		return red_calc_qavg_from_idle_time(p, v);
p                 317 include/net/red.h static inline u32 red_random(const struct red_parms *p)
p                 319 include/net/red.h 	return reciprocal_divide(prandom_u32(), p->max_P_reciprocal);
p                 322 include/net/red.h static inline int red_mark_probability(const struct red_parms *p,
p                 342 include/net/red.h 	return !(((qavg - p->qth_min) >> p->Wlog) * v->qcount < v->qR);
p                 351 include/net/red.h static inline int red_cmp_thresh(const struct red_parms *p, unsigned long qavg)
p                 353 include/net/red.h 	if (qavg < p->qth_min)
p                 355 include/net/red.h 	else if (qavg >= p->qth_max)
p                 367 include/net/red.h static inline int red_action(const struct red_parms *p,
p                 371 include/net/red.h 	switch (red_cmp_thresh(p, qavg)) {
p                 378 include/net/red.h 				if (red_mark_probability(p, v, qavg)) {
p                 380 include/net/red.h 					v->qR = red_random(p);
p                 384 include/net/red.h 				v->qR = red_random(p);
p                 397 include/net/red.h static inline void red_adaptative_algo(struct red_parms *p, struct red_vars *v)
p                 404 include/net/red.h 		qavg = red_calc_qavg_from_idle_time(p, v);
p                 407 include/net/red.h 	qavg >>= p->Wlog;
p                 409 include/net/red.h 	if (qavg > p->target_max && p->max_P <= MAX_P_MAX)
p                 410 include/net/red.h 		p->max_P += MAX_P_ALPHA(p->max_P); /* maxp = maxp + alpha */
p                 411 include/net/red.h 	else if (qavg < p->target_min && p->max_P >= MAX_P_MIN)
p                 412 include/net/red.h 		p->max_P = (p->max_P/10)*9; /* maxp = maxp * Beta */
p                 414 include/net/red.h 	max_p_delta = DIV_ROUND_CLOSEST(p->max_P, p->qth_delta);
p                 416 include/net/red.h 	p->max_P_reciprocal = reciprocal_value(max_p_delta);
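The average in red_calc_qavg_no_idle_time() above is a fixed-point EWMA: qavg is kept scaled by 2^Wlog so the update "qavg + (backlog - (qavg >> Wlog))" needs only an add, a subtract, and a shift, and is equivalent to mixing in the instantaneous backlog with weight 1/2^Wlog. A small sketch showing the update converging toward a steady backlog (Wlog and the backlog value are made up):

#include <assert.h>
#include <stdio.h>

/* qavg is stored scaled by 2^Wlog; one EWMA step as in the excerpt above. */
static unsigned long red_qavg_step(unsigned long qavg, unsigned long backlog,
				   unsigned int Wlog)
{
	return qavg + (backlog - (qavg >> Wlog));
}

int main(void)
{
	const unsigned int Wlog = 9;		/* averaging weight 1/512 */
	unsigned long qavg = 0;

	/* Hold the queue at 1000 units; the unscaled average creeps toward it. */
	for (int i = 0; i < 5000; i++)
		qavg = red_qavg_step(qavg, 1000, Wlog);

	printf("average backlog ~ %lu\n", qavg >> Wlog);
	assert((qavg >> Wlog) > 990 && (qavg >> Wlog) <= 1000);
	return 0;
}

Keeping qavg pre-scaled is what lets the marking probability test above compare "(qavg - qth_min) >> Wlog" against thresholds without any division.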
p                 452 include/net/sch_generic.h #define tcf_chain_dereference(p, chain)					\
p                 453 include/net/sch_generic.h 	rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))
p                 455 include/net/sch_generic.h #define tcf_proto_dereference(p, tp)					\
p                 456 include/net/sch_generic.h 	rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))
p                 115 include/net/sctp/sctp.h 				  const union sctp_addr *paddr, void *p);
p                 118 include/net/sctp/sctp.h 			    struct net *net, int *pos, void *p);
p                 119 include/net/sctp/sctp.h int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
p                 433 include/net/sctp/sctp.h      (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
p                 435 include/net/sctp/sctp.h      pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
p                 436 include/net/sctp/sctp.h      ntohs(pos.p->length) >= sizeof(struct sctp_paramhdr);\
p                 437 include/net/sctp/sctp.h      pos.v += SCTP_PAD4(ntohs(pos.p->length)))
p                 343 include/net/sctp/structs.h 	struct sctp_paramhdr p;
p                 353 include/net/sctp/structs.h 	struct sctp_paramhdr *p;
p                2110 include/net/sock.h 				  poll_table *p)
p                2112 include/net/sock.h 	if (!poll_does_not_wait(p)) {
p                2113 include/net/sock.h 		poll_wait(filp, &sock->wq.wait, p);
p                1912 include/net/tcp.h 	struct seq_net_private	p;
p                2148 include/net/tcp.h 	void (*update)(struct sock *sk, struct proto *p,
p                2164 include/net/tcp.h void tcp_update_ulp(struct sock *sk, struct proto *p,
p                 441 include/net/udp.h 	struct seq_net_private  p;
p                 994 include/net/xfrm.h 	struct xfrm_if_parms p;		/* interface parms */
p                  74 include/ras/ras_event.h 		  cper_mem_err_unpack(p, &__entry->data),
p                 152 include/rdma/ib_hdrs.h static inline u64 ib_u64_get(__be64 *p)
p                 154 include/rdma/ib_hdrs.h 	return get_unaligned_be64(p);
p                 157 include/rdma/ib_hdrs.h static inline void ib_u64_put(u64 val, __be64 *p)
p                 159 include/rdma/ib_hdrs.h 	put_unaligned_be64(val, p);
p                2818 include/rdma/ib_verbs.h static inline bool ib_is_buffer_cleared(const void __user *p,
p                2827 include/rdma/ib_verbs.h 	buf = memdup_user(p, len);
p                  26 include/scsi/fc_frame.h static inline u32 ntoh24(const u8 *p)
p                  28 include/scsi/fc_frame.h 	return (p[0] << 16) | (p[1] << 8) | p[2];
p                  31 include/scsi/fc_frame.h static inline void hton24(u8 *p, u32 v)
p                  33 include/scsi/fc_frame.h 	p[0] = (v >> 16) & 0xff;
p                  34 include/scsi/fc_frame.h 	p[1] = (v >> 8) & 0xff;
p                  35 include/scsi/fc_frame.h 	p[2] = v & 0xff;
p                  57 include/scsi/iscsi_proto.h #define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2]))
p                  58 include/scsi/iscsi_proto.h #define hton24(p, v) { \
p                  59 include/scsi/iscsi_proto.h         p[0] = (((v) >> 16) & 0xFF); \
p                  60 include/scsi/iscsi_proto.h         p[1] = (((v) >> 8) & 0xFF); \
p                  61 include/scsi/iscsi_proto.h         p[2] = ((v) & 0xFF); \
p                  63 include/scsi/iscsi_proto.h #define zero_data(p) {p[0]=0;p[1]=0;p[2]=0;}
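
Both fc_frame.h (inline functions) and iscsi_proto.h (macros) index 24-bit big-endian helpers: ntoh24() assembles three network-order bytes into a host u32 and hton24() scatters one back out. A standalone userspace re-implementation, shown here only to illustrate the byte layout:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static uint32_t ntoh24(const uint8_t *p)
{
	return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
}

static void hton24(uint8_t *p, uint32_t v)
{
	p[0] = (v >> 16) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = v & 0xff;
}

int main(void)
{
	uint8_t buf[3];

	hton24(buf, 0x123456);			/* most significant byte first */
	assert(buf[0] == 0x12 && buf[2] == 0x56);
	assert(ntoh24(buf) == 0x123456);	/* round trip */
	printf("0x%06x\n", ntoh24(buf));
	return 0;
}
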
p                 271 include/soc/fsl/qman.h #define qm_fqid_set(p, v) ((p)->fqid = cpu_to_be32((v) & QM_FQID_MASK))
p                 272 include/soc/fsl/qman.h #define qm_fqid_get(p)    (be32_to_cpu((p)->fqid) & QM_FQID_MASK)
p                  44 include/sound/control.h 		const unsigned int *p;
p                  63 include/sound/control.h 		const unsigned int *p;
p                 133 include/sound/core.h #define dev_to_snd_card(p)	container_of(p, struct snd_card, card_dev)
p                  73 include/sound/info.h 	struct proc_dir_entry *p;
p                 908 include/sound/pcm.h static inline unsigned int params_channels(const struct snd_pcm_hw_params *p)
p                 910 include/sound/pcm.h 	return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_CHANNELS)->min;
p                 917 include/sound/pcm.h static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
p                 919 include/sound/pcm.h 	return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_RATE)->min;
p                 926 include/sound/pcm.h static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
p                 928 include/sound/pcm.h 	return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_PERIOD_SIZE)->min;
p                 935 include/sound/pcm.h static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
p                 937 include/sound/pcm.h 	return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_PERIODS)->min;
p                 944 include/sound/pcm.h static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
p                 946 include/sound/pcm.h 	return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_BUFFER_SIZE)->min;
p                 953 include/sound/pcm.h static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p)
p                 955 include/sound/pcm.h 	return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_BUFFER_BYTES)->min;
p                 301 include/sound/pcm_params.h static inline snd_pcm_access_t params_access(const struct snd_pcm_hw_params *p)
p                 303 include/sound/pcm_params.h 	return (__force snd_pcm_access_t)snd_mask_min(hw_param_mask_c(p,
p                 311 include/sound/pcm_params.h static inline snd_pcm_format_t params_format(const struct snd_pcm_hw_params *p)
p                 313 include/sound/pcm_params.h 	return (__force snd_pcm_format_t)snd_mask_min(hw_param_mask_c(p,
p                 322 include/sound/pcm_params.h params_subformat(const struct snd_pcm_hw_params *p)
p                 324 include/sound/pcm_params.h 	return (__force snd_pcm_subformat_t)snd_mask_min(hw_param_mask_c(p,
p                 333 include/sound/pcm_params.h params_period_bytes(const struct snd_pcm_hw_params *p)
p                 335 include/sound/pcm_params.h 	return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_PERIOD_BYTES)->min;
p                 345 include/sound/pcm_params.h static inline int params_width(const struct snd_pcm_hw_params *p)
p                 347 include/sound/pcm_params.h 	return snd_pcm_format_width(params_format(p));
p                 358 include/sound/pcm_params.h static inline int params_physical_width(const struct snd_pcm_hw_params *p)
p                 360 include/sound/pcm_params.h 	return snd_pcm_format_physical_width(params_format(p));
p                 364 include/sound/pcm_params.h params_set_format(struct snd_pcm_hw_params *p, snd_pcm_format_t fmt)
p                 366 include/sound/pcm_params.h 	snd_mask_set_format(hw_param_mask(p, SNDRV_PCM_HW_PARAM_FORMAT), fmt);
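
The params_*() accessors indexed above each read a single negotiated field out of a filled-in snd_pcm_hw_params. A hedged sketch of how a driver's hw_params callback typically consumes them; my_dev and my_dev_hw_params are hypothetical, only the ALSA accessors come from the entries above:

#include <sound/pcm.h>
#include <sound/pcm_params.h>

struct my_dev {
	unsigned int rate;
	unsigned int channels;
	int sample_bits;
};

/* Pull the negotiated stream parameters out of @params. */
static int my_dev_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *params)
{
	struct my_dev *dev = snd_pcm_substream_chip(substream);

	dev->rate        = params_rate(params);
	dev->channels    = params_channels(params);
	dev->sample_bits = params_width(params);

	return 0;
}
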
p                  33 include/sound/sb16_csp.h 	int (*csp_use) (struct snd_sb_csp * p);
p                  34 include/sound/sb16_csp.h 	int (*csp_unuse) (struct snd_sb_csp * p);
p                  35 include/sound/sb16_csp.h 	int (*csp_autoload) (struct snd_sb_csp * p, snd_pcm_format_t pcm_sfmt, int play_rec_mode);
p                  36 include/sound/sb16_csp.h 	int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
p                  37 include/sound/sb16_csp.h 	int (*csp_stop) (struct snd_sb_csp * p);
p                  38 include/sound/sb16_csp.h 	int (*csp_qsound_transfer) (struct snd_sb_csp * p);
p                  80 include/sound/seq_kernel.h void snd_port_init_callback(struct snd_seq_port_callback *p);
p                  39 include/sound/seq_oss.h 	int (*open)(struct snd_seq_oss_arg *p, void *closure);
p                  40 include/sound/seq_oss.h 	int (*close)(struct snd_seq_oss_arg *p);
p                  41 include/sound/seq_oss.h 	int (*ioctl)(struct snd_seq_oss_arg *p, unsigned int cmd, unsigned long arg);
p                  42 include/sound/seq_oss.h 	int (*load_patch)(struct snd_seq_oss_arg *p, int format, const char __user *buf, int offs, int count);
p                  43 include/sound/seq_oss.h 	int (*reset)(struct snd_seq_oss_arg *p);
p                  44 include/sound/seq_oss.h 	int (*raw_event)(struct snd_seq_oss_arg *p, unsigned char *data);
p                 305 include/sound/soc-dapm.h 	.tlv.p = (tlv_array), \
p                 312 include/sound/soc-dapm.h 	.tlv.p = (tlv_array), \
p                 753 include/sound/soc-dapm.h #define snd_soc_dapm_widget_for_each_path(w, dir, p) \
p                 754 include/sound/soc-dapm.h 	list_for_each_entry(p, &w->edges[dir], list_node[dir])
p                 768 include/sound/soc-dapm.h #define snd_soc_dapm_widget_for_each_path_safe(w, dir, p, next_p) \
p                 769 include/sound/soc-dapm.h 	list_for_each_entry_safe(p, next_p, &w->edges[dir], list_node[dir])
p                 777 include/sound/soc-dapm.h #define snd_soc_dapm_widget_for_each_sink_path(w, p) \
p                 778 include/sound/soc-dapm.h 	snd_soc_dapm_widget_for_each_path(w, SND_SOC_DAPM_DIR_IN, p)
p                 786 include/sound/soc-dapm.h #define snd_soc_dapm_widget_for_each_source_path(w, p) \
p                 787 include/sound/soc-dapm.h 	snd_soc_dapm_widget_for_each_path(w, SND_SOC_DAPM_DIR_OUT, p)
p                  76 include/sound/soc.h 	.tlv.p = (tlv_array), \
p                  84 include/sound/soc.h 	.tlv.p  = (tlv_array),\
p                  96 include/sound/soc.h 	.tlv.p = (tlv_array), \
p                 134 include/sound/soc.h 	.tlv.p = (tlv_array), \
p                 143 include/sound/soc.h 	.tlv.p = (tlv_array), \
p                 153 include/sound/soc.h 	.tlv.p = (tlv_array), \
p                 162 include/sound/soc.h 	.tlv.p  = (tlv_array), \
p                 174 include/sound/soc.h 	.tlv.p = (tlv_array), \
p                 183 include/sound/soc.h 	.tlv.p  = (tlv_array), \
p                 194 include/sound/soc.h 	.tlv.p  = (tlv_array), \
p                 247 include/sound/soc.h 	.tlv.p = (tlv_array), \
p                 256 include/sound/soc.h 	.tlv.p = (tlv_array), \
p                 268 include/sound/soc.h 	.tlv.p = (tlv_array), \
p                 278 include/sound/soc.h 	.tlv.p = (tlv_array), \
p                 504 include/sound/wavefront.h     wavefront_patch p;
p                 197 include/sound/wss.h   .tlv = { .p = (xtlv) } }
p                 210 include/sound/wss.h   .tlv = { .p = (xtlv) } }
p                 912 include/target/iscsi/iscsi_target_core.h 	struct task_struct *p,
p                 933 include/target/iscsi/iscsi_target_core.h 	set_cpus_allowed_ptr(p, conn->conn_cpumask);
p                 120 include/target/target_core_backend.h static inline uint32_t get_unaligned_be24(const uint8_t *const p)
p                 122 include/target/target_core_backend.h 	return get_unaligned_be32(p - 1) & 0xffffffU;
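
target_core_backend.h's get_unaligned_be24(), indexed above, loads a 32-bit big-endian word starting one byte before p and masks it to 24 bits, which is fast but relies on that preceding byte being readable. A portable shift-based equivalent (not the kernel's implementation) that touches only p[0..2]:

#include <stdint.h>
#include <stdio.h>

/* Portable 24-bit big-endian load: no access outside p[0..2]. */
static uint32_t be24_load(const uint8_t *p)
{
	return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
}

int main(void)
{
	/* Three big-endian bytes, e.g. a 24-bit length field. */
	const uint8_t buf[] = { 0x01, 0xe2, 0x40 };

	printf("value = %u\n", be24_load(buf));	/* 0x01e240 = 123456 */
	return 0;
}
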
p                  44 include/trace/events/bridge.h 	TP_PROTO(struct net_bridge *br, struct net_bridge_port *p,
p                  47 include/trace/events/bridge.h 	TP_ARGS(br, p, addr, vid),
p                  51 include/trace/events/bridge.h 		__string(dev, p ? p->dev->name : "null")
p                  58 include/trace/events/bridge.h 		__assign_str(dev, p ? p->dev->name : "null");
p                 738 include/trace/events/f2fs.h 			struct victim_sel_policy *p, unsigned int pre_victim,
p                 741 include/trace/events/f2fs.h 	TP_ARGS(sb, type, gc_type, p, pre_victim, prefree, free),
p                 761 include/trace/events/f2fs.h 		__entry->alloc_mode	= p->alloc_mode;
p                 762 include/trace/events/f2fs.h 		__entry->gc_mode	= p->gc_mode;
p                 763 include/trace/events/f2fs.h 		__entry->victim		= p->min_segno;
p                 764 include/trace/events/f2fs.h 		__entry->cost		= p->min_cost;
p                 765 include/trace/events/f2fs.h 		__entry->ofs_unit	= p->ofs_unit;
p                 136 include/trace/events/libata.h #define __parse_status(s) libata_trace_parse_status(p, s)
p                 139 include/trace/events/libata.h #define __parse_eh_action(a) libata_trace_parse_eh_action(p, a)
p                 142 include/trace/events/libata.h #define __parse_eh_err_mask(m) libata_trace_parse_eh_err_mask(p, m)
p                 145 include/trace/events/libata.h #define __parse_qc_flags(f) libata_trace_parse_qc_flags(p, f)
p                 149 include/trace/events/libata.h #define __parse_subcmd(c,f,h) libata_trace_parse_subcmd(p, c, f, h)
p                1317 include/trace/events/rpcrdma.h 		__be32 *p,
p                1321 include/trace/events/rpcrdma.h 	TP_ARGS(p, hdrlen),
p                1332 include/trace/events/rpcrdma.h 		__entry->xid = be32_to_cpup(p++);
p                1333 include/trace/events/rpcrdma.h 		__entry->vers = be32_to_cpup(p++);
p                1334 include/trace/events/rpcrdma.h 		__entry->credits = be32_to_cpup(p++);
p                1335 include/trace/events/rpcrdma.h 		__entry->proc = be32_to_cpup(p);
p                1364 include/trace/events/rpcrdma.h 		__be32 *p
p                1367 include/trace/events/rpcrdma.h 	TP_ARGS(p),
p                1377 include/trace/events/rpcrdma.h 		__entry->xid = be32_to_cpup(p++);
p                1378 include/trace/events/rpcrdma.h 		__entry->vers = be32_to_cpup(p++);
p                1379 include/trace/events/rpcrdma.h 		__entry->credits = be32_to_cpup(p++);
p                1380 include/trace/events/rpcrdma.h 		__entry->proc = be32_to_cpup(p);
p                1390 include/trace/events/rpcrdma.h 					__be32 *p			\
p                1392 include/trace/events/rpcrdma.h 				TP_ARGS(p))
p                  59 include/trace/events/sched.h 	TP_PROTO(struct task_struct *p),
p                  61 include/trace/events/sched.h 	TP_ARGS(__perf_task(p)),
p                  72 include/trace/events/sched.h 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
p                  73 include/trace/events/sched.h 		__entry->pid		= p->pid;
p                  74 include/trace/events/sched.h 		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
p                  76 include/trace/events/sched.h 		__entry->target_cpu	= task_cpu(p);
p                  89 include/trace/events/sched.h 	     TP_PROTO(struct task_struct *p),
p                  90 include/trace/events/sched.h 	     TP_ARGS(p));
p                  97 include/trace/events/sched.h 	     TP_PROTO(struct task_struct *p),
p                  98 include/trace/events/sched.h 	     TP_ARGS(p));
p                 104 include/trace/events/sched.h 	     TP_PROTO(struct task_struct *p),
p                 105 include/trace/events/sched.h 	     TP_ARGS(p));
p                 108 include/trace/events/sched.h static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
p                 113 include/trace/events/sched.h 	BUG_ON(p != current);
p                 129 include/trace/events/sched.h 	state = task_state_index(p);
p                 191 include/trace/events/sched.h 	TP_PROTO(struct task_struct *p, int dest_cpu),
p                 193 include/trace/events/sched.h 	TP_ARGS(p, dest_cpu),
p                 204 include/trace/events/sched.h 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
p                 205 include/trace/events/sched.h 		__entry->pid		= p->pid;
p                 206 include/trace/events/sched.h 		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
p                 207 include/trace/events/sched.h 		__entry->orig_cpu	= task_cpu(p);
p                 218 include/trace/events/sched.h 	TP_PROTO(struct task_struct *p),
p                 220 include/trace/events/sched.h 	TP_ARGS(p),
p                 229 include/trace/events/sched.h 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
p                 230 include/trace/events/sched.h 		__entry->pid		= p->pid;
p                 231 include/trace/events/sched.h 		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
p                 242 include/trace/events/sched.h 	     TP_PROTO(struct task_struct *p),
p                 243 include/trace/events/sched.h 	     TP_ARGS(p));
p                 249 include/trace/events/sched.h 	     TP_PROTO(struct task_struct *p),
p                 250 include/trace/events/sched.h 	     TP_ARGS(p));
p                 256 include/trace/events/sched.h 	TP_PROTO(struct task_struct *p),
p                 257 include/trace/events/sched.h 	TP_ARGS(p));
p                 317 include/trace/events/sched.h 	TP_PROTO(struct task_struct *p, pid_t old_pid,
p                 320 include/trace/events/sched.h 	TP_ARGS(p, old_pid, bprm),
p                 330 include/trace/events/sched.h 		__entry->pid		= p->pid;
p                 198 include/trace/events/scsi.h #define __parse_cdb(cdb, len) scsi_trace_parse_cdb(p, cdb, len)
p                 352 include/trace/events/sunrpc.h 		__field(const void *, p)
p                 384 include/trace/events/sunrpc.h 		__entry->p = xdr->p;
p                 397 include/trace/events/sunrpc.h 		__entry->requested, __entry->p, __entry->end,
p                  60 include/trace/events/thermal_power_allocator.h 		 s64 p, s64 i, s64 d, s32 output),
p                  61 include/trace/events/thermal_power_allocator.h 	TP_ARGS(tz, err, err_integral, p, i, d, output),
p                  66 include/trace/events/thermal_power_allocator.h 		__field(s64, p           )
p                  75 include/trace/events/thermal_power_allocator.h 		__entry->p = p;
p                  83 include/trace/events/thermal_power_allocator.h 		  __entry->p, __entry->i, __entry->d, __entry->output)
p                  38 include/trace/syscall.h static inline void syscall_tracepoint_update(struct task_struct *p)
p                  41 include/trace/syscall.h 		set_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
p                  43 include/trace/syscall.h 		clear_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
p                  46 include/trace/syscall.h static inline void syscall_tracepoint_update(struct task_struct *p)
p                 284 include/trace/trace_events.h 		trace_print_bitmask_seq(p, __bitmask, __bitmask_size);	\
p                 292 include/trace/trace_events.h 		trace_print_flags_seq(p, delim, flag, __flags);	\
p                 300 include/trace/trace_events.h 		trace_print_symbols_seq(p, value, symbols);		\
p                 310 include/trace/trace_events.h 		trace_print_flags_seq_u64(p, delim, flag, __flags);	\
p                 317 include/trace/trace_events.h 		trace_print_symbols_seq_u64(p, value, symbols);	\
p                 329 include/trace/trace_events.h 	trace_print_hex_seq(p, buf, buf_len, false)
p                 333 include/trace/trace_events.h 	trace_print_hex_seq(p, buf, buf_len, true)
p                 340 include/trace/trace_events.h 		trace_print_array_seq(p, array, count, el_size);	\
p                 350 include/trace/trace_events.h 	struct trace_seq __maybe_unused *p = &iter->tmp_seq;		\
p                 376 include/trace/trace_events.h 	struct trace_seq *p = &iter->tmp_seq;				\
p                 387 include/trace/trace_events.h 	trace_seq_init(p);						\
p                  44 include/uapi/linux/byteorder/big_endian.h static __always_inline __le64 __cpu_to_le64p(const __u64 *p)
p                  46 include/uapi/linux/byteorder/big_endian.h 	return (__force __le64)__swab64p(p);
p                  48 include/uapi/linux/byteorder/big_endian.h static __always_inline __u64 __le64_to_cpup(const __le64 *p)
p                  50 include/uapi/linux/byteorder/big_endian.h 	return __swab64p((__u64 *)p);
p                  52 include/uapi/linux/byteorder/big_endian.h static __always_inline __le32 __cpu_to_le32p(const __u32 *p)
p                  54 include/uapi/linux/byteorder/big_endian.h 	return (__force __le32)__swab32p(p);
p                  56 include/uapi/linux/byteorder/big_endian.h static __always_inline __u32 __le32_to_cpup(const __le32 *p)
p                  58 include/uapi/linux/byteorder/big_endian.h 	return __swab32p((__u32 *)p);
p                  60 include/uapi/linux/byteorder/big_endian.h static __always_inline __le16 __cpu_to_le16p(const __u16 *p)
p                  62 include/uapi/linux/byteorder/big_endian.h 	return (__force __le16)__swab16p(p);
p                  64 include/uapi/linux/byteorder/big_endian.h static __always_inline __u16 __le16_to_cpup(const __le16 *p)
p                  66 include/uapi/linux/byteorder/big_endian.h 	return __swab16p((__u16 *)p);
p                  68 include/uapi/linux/byteorder/big_endian.h static __always_inline __be64 __cpu_to_be64p(const __u64 *p)
p                  70 include/uapi/linux/byteorder/big_endian.h 	return (__force __be64)*p;
p                  72 include/uapi/linux/byteorder/big_endian.h static __always_inline __u64 __be64_to_cpup(const __be64 *p)
p                  74 include/uapi/linux/byteorder/big_endian.h 	return (__force __u64)*p;
p                  76 include/uapi/linux/byteorder/big_endian.h static __always_inline __be32 __cpu_to_be32p(const __u32 *p)
p                  78 include/uapi/linux/byteorder/big_endian.h 	return (__force __be32)*p;
p                  80 include/uapi/linux/byteorder/big_endian.h static __always_inline __u32 __be32_to_cpup(const __be32 *p)
p                  82 include/uapi/linux/byteorder/big_endian.h 	return (__force __u32)*p;
p                  84 include/uapi/linux/byteorder/big_endian.h static __always_inline __be16 __cpu_to_be16p(const __u16 *p)
p                  86 include/uapi/linux/byteorder/big_endian.h 	return (__force __be16)*p;
p                  88 include/uapi/linux/byteorder/big_endian.h static __always_inline __u16 __be16_to_cpup(const __be16 *p)
p                  90 include/uapi/linux/byteorder/big_endian.h 	return (__force __u16)*p;
p                  44 include/uapi/linux/byteorder/little_endian.h static __always_inline __le64 __cpu_to_le64p(const __u64 *p)
p                  46 include/uapi/linux/byteorder/little_endian.h 	return (__force __le64)*p;
p                  48 include/uapi/linux/byteorder/little_endian.h static __always_inline __u64 __le64_to_cpup(const __le64 *p)
p                  50 include/uapi/linux/byteorder/little_endian.h 	return (__force __u64)*p;
p                  52 include/uapi/linux/byteorder/little_endian.h static __always_inline __le32 __cpu_to_le32p(const __u32 *p)
p                  54 include/uapi/linux/byteorder/little_endian.h 	return (__force __le32)*p;
p                  56 include/uapi/linux/byteorder/little_endian.h static __always_inline __u32 __le32_to_cpup(const __le32 *p)
p                  58 include/uapi/linux/byteorder/little_endian.h 	return (__force __u32)*p;
p                  60 include/uapi/linux/byteorder/little_endian.h static __always_inline __le16 __cpu_to_le16p(const __u16 *p)
p                  62 include/uapi/linux/byteorder/little_endian.h 	return (__force __le16)*p;
p                  64 include/uapi/linux/byteorder/little_endian.h static __always_inline __u16 __le16_to_cpup(const __le16 *p)
p                  66 include/uapi/linux/byteorder/little_endian.h 	return (__force __u16)*p;
p                  68 include/uapi/linux/byteorder/little_endian.h static __always_inline __be64 __cpu_to_be64p(const __u64 *p)
p                  70 include/uapi/linux/byteorder/little_endian.h 	return (__force __be64)__swab64p(p);
p                  72 include/uapi/linux/byteorder/little_endian.h static __always_inline __u64 __be64_to_cpup(const __be64 *p)
p                  74 include/uapi/linux/byteorder/little_endian.h 	return __swab64p((__u64 *)p);
p                  76 include/uapi/linux/byteorder/little_endian.h static __always_inline __be32 __cpu_to_be32p(const __u32 *p)
p                  78 include/uapi/linux/byteorder/little_endian.h 	return (__force __be32)__swab32p(p);
p                  80 include/uapi/linux/byteorder/little_endian.h static __always_inline __u32 __be32_to_cpup(const __be32 *p)
p                  82 include/uapi/linux/byteorder/little_endian.h 	return __swab32p((__u32 *)p);
p                  84 include/uapi/linux/byteorder/little_endian.h static __always_inline __be16 __cpu_to_be16p(const __u16 *p)
p                  86 include/uapi/linux/byteorder/little_endian.h 	return (__force __be16)__swab16p(p);
p                  88 include/uapi/linux/byteorder/little_endian.h static __always_inline __u16 __be16_to_cpup(const __be16 *p)
p                  90 include/uapi/linux/byteorder/little_endian.h 	return __swab16p((__u16 *)p);
p                 865 include/uapi/linux/cec-funcs.h 	const __u8 *p = &msg->msg[4];
p                 869 include/uapi/linux/cec-funcs.h 	*rc_profile = p;
p                 871 include/uapi/linux/cec-funcs.h 	while (p < &msg->msg[14] && (*p & CEC_OP_FEAT_EXT))
p                 872 include/uapi/linux/cec-funcs.h 		p++;
p                 873 include/uapi/linux/cec-funcs.h 	if (!(*p & CEC_OP_FEAT_EXT)) {
p                 874 include/uapi/linux/cec-funcs.h 		*dev_features = p + 1;
p                 875 include/uapi/linux/cec-funcs.h 		while (p < &msg->msg[15] && (*p & CEC_OP_FEAT_EXT))
p                 876 include/uapi/linux/cec-funcs.h 			p++;
p                 878 include/uapi/linux/cec-funcs.h 	if (*p & CEC_OP_FEAT_EXT)
p                  26 include/uapi/linux/erspan.h 		p:1;
p                  32 include/uapi/linux/erspan.h 	__u8	p:1,
p                  63 include/uapi/linux/mroute6.h #define IF_SET(n, p)    ((p)->ifs_bits[(n)/NIFBITS] |= (1 << ((n) % NIFBITS)))
p                  64 include/uapi/linux/mroute6.h #define IF_CLR(n, p)    ((p)->ifs_bits[(n)/NIFBITS] &= ~(1 << ((n) % NIFBITS)))
p                  65 include/uapi/linux/mroute6.h #define IF_ISSET(n, p)  ((p)->ifs_bits[(n)/NIFBITS] & (1 << ((n) % NIFBITS)))
p                  67 include/uapi/linux/mroute6.h #define IF_ZERO(p)      bzero(p, sizeof(*(p)))
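
The mroute6.h IF_SET/IF_CLR/IF_ISSET/IF_ZERO entries are a plain word-array bitmap: interface n lives in bit n % NIFBITS of word n / NIFBITS. A standalone sketch of the same arithmetic with 32-bit words, using made-up names so it does not collide with the UAPI macros:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BITS_PER_WORD	32
#define MAX_IFS		256

struct if_set_demo {
	uint32_t bits[MAX_IFS / BITS_PER_WORD];
};

static void ifs_set(struct if_set_demo *s, unsigned int n)
{
	s->bits[n / BITS_PER_WORD] |= 1u << (n % BITS_PER_WORD);
}

static void ifs_clr(struct if_set_demo *s, unsigned int n)
{
	s->bits[n / BITS_PER_WORD] &= ~(1u << (n % BITS_PER_WORD));
}

static int ifs_isset(const struct if_set_demo *s, unsigned int n)
{
	return !!(s->bits[n / BITS_PER_WORD] & (1u << (n % BITS_PER_WORD)));
}

int main(void)
{
	struct if_set_demo s;

	memset(&s, 0, sizeof(s));	/* the IF_ZERO equivalent */
	ifs_set(&s, 37);
	printf("%d %d\n", ifs_isset(&s, 37), ifs_isset(&s, 38));	/* 1 0 */
	ifs_clr(&s, 37);
	printf("%d\n", ifs_isset(&s, 37));				/* 0 */
	return 0;
}
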
p                 107 include/uapi/linux/netfilter/x_tables.h #define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
p                 108 include/uapi/linux/netfilter/x_tables.h #define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)
p                 651 include/uapi/linux/pkt_sched.h 	__u32 p;
p                  23 include/uapi/linux/ppp_defs.h #define PPP_ADDRESS(p)	(((__u8 *)(p))[0])
p                  24 include/uapi/linux/ppp_defs.h #define PPP_CONTROL(p)	(((__u8 *)(p))[1])
p                  25 include/uapi/linux/ppp_defs.h #define PPP_PROTOCOL(p)	((((__u8 *)(p))[2] << 8) + ((__u8 *)(p))[3])
p                 133 include/uapi/linux/ppp_defs.h     struct pppstat	p;	/* basic PPP statistics */
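
ppp_defs.h addresses the PPP header purely by byte offset: address in byte 0, control in byte 1, and the 16-bit protocol field big-endian in bytes 2 and 3. A minimal sketch decoding a raw header the same way:

#include <stdint.h>
#include <stdio.h>

static void decode_ppp_header(const uint8_t *p)
{
	unsigned int addr  = p[0];			/* normally 0xff (all stations) */
	unsigned int ctrl  = p[1];			/* normally 0x03 (UI frame) */
	unsigned int proto = (p[2] << 8) | p[3];	/* e.g. 0x0021 = IPv4 */

	printf("addr=0x%02x ctrl=0x%02x proto=0x%04x\n", addr, ctrl, proto);
}

int main(void)
{
	const uint8_t hdr[] = { 0xff, 0x03, 0x00, 0x21 };

	decode_ppp_header(hdr);
	return 0;
}
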
p                 171 include/uapi/linux/swab.h static __always_inline __u16 __swab16p(const __u16 *p)
p                 174 include/uapi/linux/swab.h 	return __arch_swab16p(p);
p                 176 include/uapi/linux/swab.h 	return __swab16(*p);
p                 184 include/uapi/linux/swab.h static __always_inline __u32 __swab32p(const __u32 *p)
p                 187 include/uapi/linux/swab.h 	return __arch_swab32p(p);
p                 189 include/uapi/linux/swab.h 	return __swab32(*p);
p                 197 include/uapi/linux/swab.h static __always_inline __u64 __swab64p(const __u64 *p)
p                 200 include/uapi/linux/swab.h 	return __arch_swab64p(p);
p                 202 include/uapi/linux/swab.h 	return __swab64(*p);
p                 212 include/uapi/linux/swab.h static inline __u32 __swahw32p(const __u32 *p)
p                 215 include/uapi/linux/swab.h 	return __arch_swahw32p(p);
p                 217 include/uapi/linux/swab.h 	return __swahw32(*p);
p                 227 include/uapi/linux/swab.h static inline __u32 __swahb32p(const __u32 *p)
p                 230 include/uapi/linux/swab.h 	return __arch_swahb32p(p);
p                 232 include/uapi/linux/swab.h 	return __swahb32(*p);
p                 240 include/uapi/linux/swab.h static inline void __swab16s(__u16 *p)
p                 243 include/uapi/linux/swab.h 	__arch_swab16s(p);
p                 245 include/uapi/linux/swab.h 	*p = __swab16p(p);
p                 252 include/uapi/linux/swab.h static __always_inline void __swab32s(__u32 *p)
p                 255 include/uapi/linux/swab.h 	__arch_swab32s(p);
p                 257 include/uapi/linux/swab.h 	*p = __swab32p(p);
p                 265 include/uapi/linux/swab.h static __always_inline void __swab64s(__u64 *p)
p                 268 include/uapi/linux/swab.h 	__arch_swab64s(p);
p                 270 include/uapi/linux/swab.h 	*p = __swab64p(p);
p                 280 include/uapi/linux/swab.h static inline void __swahw32s(__u32 *p)
p                 283 include/uapi/linux/swab.h 	__arch_swahw32s(p);
p                 285 include/uapi/linux/swab.h 	*p = __swahw32p(p);
p                 295 include/uapi/linux/swab.h static inline void __swahb32s(__u32 *p)
p                 298 include/uapi/linux/swab.h 	__arch_swahb32s(p);
p                 300 include/uapi/linux/swab.h 	*p = __swahb32p(p);
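
The big_endian.h and little_endian.h entries are mirror images: every __cpu_to_* / *_to_cpup helper is either an identity cast or a __swabNNp() byte reversal depending on the build's byte order, and swab.h supplies that reversal (falling back to the generic shift form when no __arch_swab* override exists). A standalone sketch of the 32-bit case as seen from a little-endian host; the names here are illustrative, not the UAPI ones:

#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) << 8)  |
	       ((x & 0x00ff0000u) >> 8)  |
	       ((x & 0xff000000u) >> 24);
}

/* On a little-endian CPU: to-LE is the identity, to-BE is a byte swap. */
static uint32_t cpu_to_le32_demo(uint32_t x) { return x; }
static uint32_t cpu_to_be32_demo(uint32_t x) { return swab32(x); }

int main(void)
{
	uint32_t v = 0x12345678;

	printf("le=0x%08x be=0x%08x\n",
	       cpu_to_le32_demo(v), cpu_to_be32_demo(v));
	return 0;
}
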
p                  91 include/uapi/linux/ultrasound.h #define GUS_VOICEFREQ(chn, voice, p)		_GUS_CMD(chn, voice, _GUS_VOICEFREQ, \
p                  92 include/uapi/linux/ultrasound.h 							(p) & 0xffff, ((p) >> 16) & 0xffff)
p                 101 include/uapi/linux/ultrasound.h #define GUS_VOICE_POS(chn, voice, p)		_GUS_CMD(chn, voice, _GUS_VOICE_POS, \
p                 102 include/uapi/linux/ultrasound.h 							(p) & 0xffff, ((p) >> 16) & 0xffff)
p                 102 include/uapi/linux/usb/ch11.h #define USB_PORT_LPM_TIMEOUT(p)			(((p) & 0xff) << 8)
p                 200 include/uapi/linux/usb/ch11.h #define USB_EXT_PORT_RX_LANES(p) \
p                 201 include/uapi/linux/usb/ch11.h 			(((p) & USB_EXT_PORT_STAT_RX_LANES) >> 8)
p                 202 include/uapi/linux/usb/ch11.h #define USB_EXT_PORT_TX_LANES(p) \
p                 203 include/uapi/linux/usb/ch11.h 			(((p) & USB_EXT_PORT_STAT_TX_LANES) >> 12)
p                 713 include/uapi/linux/usb/ch9.h #define USB_SS_MULT(p)			(1 + ((p) & 0x3))
p                 715 include/uapi/linux/usb/ch9.h #define USB_SS_SSP_ISOC_COMP(p)		((p) & (1 << 7))
p                 897 include/uapi/linux/usb/ch9.h #define USB_SET_BESL_BASELINE(p)	(((p) & 0xf) << 8)
p                 898 include/uapi/linux/usb/ch9.h #define USB_SET_BESL_DEEP(p)		(((p) & 0xf) << 12)
p                 899 include/uapi/linux/usb/ch9.h #define USB_GET_BESL_BASELINE(p)	(((p) & (0xf << 8)) >> 8)
p                 900 include/uapi/linux/usb/ch9.h #define USB_GET_BESL_DEEP(p)		(((p) & (0xf << 12)) >> 12)
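
ch9.h packs the two BESL values into a single 16-bit word, baseline in bits 8-11 and deep in bits 12-15, with SET/GET macro pairs that shift and mask. A standalone round-trip sketch of the same packing (local macro names, not the UAPI ones):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define SET_BESL_BASELINE(v)	(((v) & 0xf) << 8)
#define SET_BESL_DEEP(v)	(((v) & 0xf) << 12)
#define GET_BESL_BASELINE(w)	(((w) >> 8) & 0xf)
#define GET_BESL_DEEP(w)	(((w) >> 12) & 0xf)

int main(void)
{
	uint16_t w = SET_BESL_BASELINE(3) | SET_BESL_DEEP(10);

	assert(GET_BESL_BASELINE(w) == 3);
	assert(GET_BESL_DEEP(w) == 10);
	printf("packed=0x%04x\n", w);	/* 0xa300 */
	return 0;
}
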
p                  77 include/uapi/linux/usb/midi.h #define USB_DT_MIDI_OUT_SIZE(p)	(7 + 2 * (p))
p                  80 include/uapi/linux/usb/midi.h #define DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(p)			\
p                  81 include/uapi/linux/usb/midi.h struct usb_midi_out_jack_descriptor_##p {			\
p                  88 include/uapi/linux/usb/midi.h 	struct usb_midi_source_pin pins[p];			\
p                 324 include/uapi/linux/usb/video.h #define UVC_DT_EXTENSION_UNIT_SIZE(p, n)		(24+(p)+(n))
p                 326 include/uapi/linux/usb/video.h #define UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) \
p                 329 include/uapi/linux/usb/video.h #define DECLARE_UVC_EXTENSION_UNIT_DESCRIPTOR(p, n)	\
p                 330 include/uapi/linux/usb/video.h struct UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) {		\
p                 338 include/uapi/linux/usb/video.h 	__u8  baSourceID[p];				\
p                 371 include/uapi/linux/usb/video.h #define UVC_DT_INPUT_HEADER_SIZE(n, p)			(13+(n*p))
p                 373 include/uapi/linux/usb/video.h #define UVC_INPUT_HEADER_DESCRIPTOR(n, p) \
p                 374 include/uapi/linux/usb/video.h 	uvc_input_header_descriptor_##n_##p
p                 376 include/uapi/linux/usb/video.h #define DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(n, p)	\
p                 377 include/uapi/linux/usb/video.h struct UVC_INPUT_HEADER_DESCRIPTOR(n, p) {		\
p                 390 include/uapi/linux/usb/video.h 	__u8   bmaControls[p][n];			\
p                 406 include/uapi/linux/usb/video.h #define UVC_DT_OUTPUT_HEADER_SIZE(n, p)			(9+(n*p))
p                 408 include/uapi/linux/usb/video.h #define UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) \
p                 409 include/uapi/linux/usb/video.h 	uvc_output_header_descriptor_##n_##p
p                 411 include/uapi/linux/usb/video.h #define DECLARE_UVC_OUTPUT_HEADER_DESCRIPTOR(n, p)	\
p                 412 include/uapi/linux/usb/video.h struct UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) {		\
p                 421 include/uapi/linux/usb/video.h 	__u8   bmaControls[p][n];			\
p                 167 include/uapi/linux/virtio_ring.h static inline void vring_init(struct vring *vr, unsigned int num, void *p,
p                 171 include/uapi/linux/virtio_ring.h 	vr->desc = p;
p                 172 include/uapi/linux/virtio_ring.h 	vr->avail = p + num*sizeof(struct vring_desc);
p                 467 include/video/pm3fb.h #define PM3RD_CursorPalette(p)				(0x303 + (p))
p                 468 include/video/pm3fb.h #define PM3RD_CursorPattern(p)				(0x400 + (p))
p                 654 include/video/pm3fb.h 	#define PM3FBWriteMode_StripePitch(p)		(((p) & 0x7) << 6)
p                 684 include/video/pm3fb.h 	#define PM3LBDestReadMode_StripePitch(p)	(((p) & 0x7) << 2)
p                 694 include/video/pm3fb.h 	#define PM3LBReadFormat_StencilPosition(p)	(((p) & 0x1f) << 6)
p                 696 include/video/pm3fb.h 	#define PM3LBReadFormat_FCPPosition(p)		(((p) & 0x1f) << 15)
p                 698 include/video/pm3fb.h 	#define PM3LBReadFormat_GIDPosition(p)		(((p) & 0x1f) << 23)
p                 705 include/video/pm3fb.h 	#define PM3LBSourceReadMode_StripePitch(p)	(((p) & 0x7) << 2)
p                 717 include/video/pm3fb.h 	#define PM3LBWriteFormat_StencilPosition(p)	(((p) & 0x1f) << 6)
p                 719 include/video/pm3fb.h 	#define PM3LBWriteFormat_GIDPosition(p)		(((p) & 0x1f) << 23)
p                 725 include/video/pm3fb.h 	#define PM3LBWriteMode_StripePitch(p)		(((p) & 0x7) << 3)
p                 321 include/video/sstfb.h 	unsigned int p;
p                  16 include/xen/arm/interface.h 	typedef struct { union { type *p; uint64_aligned_t q; }; }  \
p                  28 include/xen/arm/interface.h 		(hnd).p = val;				\
p                 131 include/xen/interface/vcpu.h 				uint64_t p;
p                 218 include/xen/interface/vcpu.h 		uint64_t p;
p                  11 include/xen/page.h #define xen_offset_in_page(p)	((unsigned long)(p) & ~XEN_PAGE_MASK)
p                 225 init/do_mounts.c 	char *p;
p                 260 init/do_mounts.c 			res = new_decode_dev(simple_strtoul(name, &p, 16));
p                 261 init/do_mounts.c 			if (*p)
p                 278 init/do_mounts.c 	for (p = s; *p; p++)
p                 279 init/do_mounts.c 		if (*p == '/')
p                 280 init/do_mounts.c 			*p = '!';
p                 289 init/do_mounts.c 	while (p > s && isdigit(p[-1]))
p                 290 init/do_mounts.c 		p--;
p                 291 init/do_mounts.c 	if (p == s || !*p || *p == '0')
p                 295 init/do_mounts.c 	part = simple_strtoul(p, NULL, 10);
p                 296 init/do_mounts.c 	*p = '\0';
p                 302 init/do_mounts.c 	if (p < s + 2 || !isdigit(p[-2]) || p[-1] != 'p')
p                 304 init/do_mounts.c 	p[-1] = '\0';
p                 371 init/do_mounts.c 		char *p, *next;
p                 374 init/do_mounts.c 		for (p = page-1; p; p = next) {
p                 375 init/do_mounts.c 			next = strchr(++p, '\n');
p                 376 init/do_mounts.c 			if (*p++ != '\t')
p                 378 init/do_mounts.c 			while ((*s++ = *p++) != '\n')
p                 408 init/do_mounts.c 	char *p;
p                 417 init/do_mounts.c 	for (p = fs_names; *p; p += strlen(p)+1) {
p                 418 init/do_mounts.c 		int err = do_mount_root(name, p, flags, root_mount_data);
p                 453 init/do_mounts.c 	for (p = fs_names; *p; p += strlen(p)+1)
p                 454 init/do_mounts.c 		printk(" %s", p);
p                  31 init/do_mounts_initrd.c static int __init early_initrd(char *p)
p                  37 init/do_mounts_initrd.c 	start = memparse(p, &endp);
p                 141 init/do_mounts_md.c 			char *p;
p                 145 init/do_mounts_md.c 			p = strchr(devname, ',');
p                 146 init/do_mounts_md.c 			if (p)
p                 147 init/do_mounts_md.c 				*p++ = 0;
p                 163 init/do_mounts_md.c 			devname = p;
p                  14 init/initramfs.c static ssize_t __init xwrite(int fd, const char *p, size_t count)
p                  20 init/initramfs.c 		ssize_t rv = ksys_write(fd, p, count);
p                  29 init/initramfs.c 		p += rv;
p                  65 init/initramfs.c 	struct hash **p, *q;
p                  66 init/initramfs.c 	for (p = head + hash(major, minor, ino); *p; p = &(*p)->next) {
p                  67 init/initramfs.c 		if ((*p)->ino != ino)
p                  69 init/initramfs.c 		if ((*p)->minor != minor)
p                  71 init/initramfs.c 		if ((*p)->major != major)
p                  73 init/initramfs.c 		if (((*p)->mode ^ mode) & S_IFMT)
p                  75 init/initramfs.c 		return (*p)->name;
p                  86 init/initramfs.c 	*p = q;
p                  92 init/initramfs.c 	struct hash **p, *q;
p                  93 init/initramfs.c 	for (p = head; p < head + 32; p++) {
p                  94 init/initramfs.c 		while (*p) {
p                  95 init/initramfs.c 			q = *p;
p                  96 init/initramfs.c 			*p = q->next;
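
The initramfs.c entries show the kernel's pointer-to-pointer idiom for hashed singly linked lists: because the walk carries struct hash **p rather than a node pointer, the same loop can test existing entries through *p and then splice a new node in with a single *p = q, with no special case for an empty bucket. A standalone sketch of the idiom with hypothetical types:

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int key;
	char name[32];
};

static struct node *buckets[32];

static unsigned int hash_key(int key)
{
	return (unsigned int)key % 32;
}

/* Return the name stored for @key, inserting @name first if the key is
 * not present. The double pointer lets insertion reuse the traversal. */
static const char *find_or_add(int key, const char *name)
{
	struct node **p, *q;

	for (p = &buckets[hash_key(key)]; *p; p = &(*p)->next)
		if ((*p)->key == key)
			return (*p)->name;

	q = calloc(1, sizeof(*q));
	if (!q) {
		perror("calloc");
		exit(1);
	}
	q->key = key;
	snprintf(q->name, sizeof(q->name), "%s", name);
	*p = q;			/* splice onto the end of the chain */
	return q->name;
}

int main(void)
{
	printf("%s\n", find_or_add(7, "first"));
	printf("%s\n", find_or_add(7, "second"));	/* still "first" */
	return 0;
}
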
p                 180 init/main.c    	const struct obs_kernel_param *p;
p                 183 init/main.c    	p = __setup_start;
p                 185 init/main.c    		int n = strlen(p->str);
p                 186 init/main.c    		if (parameqn(line, p->str, n)) {
p                 187 init/main.c    			if (p->early) {
p                 194 init/main.c    			} else if (!p->setup_func) {
p                 196 init/main.c    					p->str);
p                 198 init/main.c    			} else if (p->setup_func(line + n))
p                 201 init/main.c    		p++;
p                 202 init/main.c    	} while (p < __setup_end);
p                 458 init/main.c    	const struct obs_kernel_param *p;
p                 460 init/main.c    	for (p = __setup_start; p < __setup_end; p++) {
p                 461 init/main.c    		if ((p->early && parameq(param, p->str)) ||
p                 463 init/main.c    		     strcmp(p->str, "earlycon") == 0)
p                 465 init/main.c    			if (p->setup_func(val) != 0)
p                 134 ipc/mqueue.c   	struct rb_node **p, *parent = NULL;
p                 138 ipc/mqueue.c   	p = &info->msg_tree.rb_node;
p                 139 ipc/mqueue.c   	while (*p) {
p                 140 ipc/mqueue.c   		parent = *p;
p                 146 ipc/mqueue.c   			p = &(*p)->rb_left;
p                 149 ipc/mqueue.c   			p = &(*p)->rb_right;
p                 165 ipc/mqueue.c   	rb_link_node(&leaf->rb_node, parent, p);
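
The ipc/mqueue.c lines above follow the standard kernel rbtree insertion pattern: descend through a struct rb_node **p link while remembering the parent, then rb_link_node() plus rb_insert_color() splice and rebalance the new node. A hedged kernel-context sketch of the same pattern for a hypothetical structure keyed by an integer; my_node and my_insert are made up, only the <linux/rbtree.h> API is real:

#include <linux/rbtree.h>

struct my_node {
	struct rb_node rb;
	int key;
};

/* Insert @new into @root ordered by key; equal keys go to the right,
 * mirroring how msg_insert() keeps FIFO order within a priority. */
static void my_insert(struct rb_root *root, struct my_node *new)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct my_node *cur = rb_entry(*p, struct my_node, rb);

		parent = *p;
		if (new->key < cur->key)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&new->rb, parent, p);
	rb_insert_color(&new->rb, root);
}
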
p                 418 ipc/mqueue.c   	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;
p                 420 ipc/mqueue.c   	inode_init_once(&p->vfs_inode);
p                1204 ipc/mqueue.c   	struct timespec64 ts, *p = NULL;
p                1209 ipc/mqueue.c   		p = &ts;
p                1211 ipc/mqueue.c   	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
p                1218 ipc/mqueue.c   	struct timespec64 ts, *p = NULL;
p                1223 ipc/mqueue.c   		p = &ts;
p                1225 ipc/mqueue.c   	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
p                1359 ipc/mqueue.c   	struct sigevent n, *p = NULL;
p                1363 ipc/mqueue.c   		p = &n;
p                1365 ipc/mqueue.c   	return do_mq_notify(mqdes, p);
p                1482 ipc/mqueue.c   	struct mq_attr attr, *p = NULL;
p                1484 ipc/mqueue.c   		p = &attr;
p                1488 ipc/mqueue.c   	return do_mq_open(u_name, oflag, mode, p);
p                1494 ipc/mqueue.c   	struct sigevent n, *p = NULL;
p                1500 ipc/mqueue.c   		p = &n;
p                1502 ipc/mqueue.c   	return do_mq_notify(mqdes, p);
p                1532 ipc/mqueue.c   static int compat_prepare_timeout(const struct old_timespec32 __user *p,
p                1535 ipc/mqueue.c   	if (get_old_timespec32(ts, p))
p                1547 ipc/mqueue.c   	struct timespec64 ts, *p = NULL;
p                1552 ipc/mqueue.c   		p = &ts;
p                1554 ipc/mqueue.c   	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
p                1562 ipc/mqueue.c   	struct timespec64 ts, *p = NULL;
p                1567 ipc/mqueue.c   		p = &ts;
p                1569 ipc/mqueue.c   	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
p                 119 ipc/msg.c      	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
p                 120 ipc/msg.c      	struct msg_queue *msq = container_of(p, struct msg_queue, q_perm);
p                 492 ipc/msg.c      			 int cmd, struct msqid64_ds *p)
p                 497 ipc/msg.c      	memset(p, 0, sizeof(*p));
p                 535 ipc/msg.c      	kernel_to_ipc64_perm(&msq->q_perm, &p->msg_perm);
p                 536 ipc/msg.c      	p->msg_stime  = msq->q_stime;
p                 537 ipc/msg.c      	p->msg_rtime  = msq->q_rtime;
p                 538 ipc/msg.c      	p->msg_ctime  = msq->q_ctime;
p                 540 ipc/msg.c      	p->msg_stime_high = msq->q_stime >> 32;
p                 541 ipc/msg.c      	p->msg_rtime_high = msq->q_rtime >> 32;
p                 542 ipc/msg.c      	p->msg_ctime_high = msq->q_ctime >> 32;
p                 544 ipc/msg.c      	p->msg_cbytes = msq->q_cbytes;
p                 545 ipc/msg.c      	p->msg_qnum   = msq->q_qnum;
p                 546 ipc/msg.c      	p->msg_qbytes = msq->q_qbytes;
p                 547 ipc/msg.c      	p->msg_lspid  = pid_vnr(msq->q_lspid);
p                 548 ipc/msg.c      	p->msg_lrpid  = pid_vnr(msq->q_lrpid);
p                 655 ipc/msg.c      		struct compat_msqid64_ds __user *p = buf;
p                 656 ipc/msg.c      		if (get_compat_ipc64_perm(&out->msg_perm, &p->msg_perm))
p                 658 ipc/msg.c      		if (get_user(out->msg_qbytes, &p->msg_qbytes))
p                 661 ipc/msg.c      		struct compat_msqid_ds __user *p = buf;
p                 662 ipc/msg.c      		if (get_compat_ipc_perm(&out->msg_perm, &p->msg_perm))
p                 664 ipc/msg.c      		if (get_user(out->msg_qbytes, &p->msg_qbytes))
p                 300 ipc/sem.c      	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
p                 301 ipc/sem.c      	struct sem_array *sma = container_of(p, struct sem_array, sem_perm);
p                1290 ipc/sem.c      			 int cmd, void __user *p)
p                1319 ipc/sem.c      	if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
p                1387 ipc/sem.c      		int cmd, void __user *p)
p                1417 ipc/sem.c      		ushort __user *array = p;
p                1475 ipc/sem.c      		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
p                1636 ipc/sem.c      	void __user *p = (void __user *)arg;
p                1648 ipc/sem.c      		return semctl_info(ns, semid, cmd, p);
p                1655 ipc/sem.c      		if (copy_semid_to_user(p, &semid64, version))
p                1664 ipc/sem.c      		return semctl_main(ns, semid, semnum, cmd, p);
p                1677 ipc/sem.c      		if (copy_semid_from_user(&semid64, p, version))
p                1724 ipc/sem.c      		struct compat_semid64_ds __user *p = buf;
p                1725 ipc/sem.c      		return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
p                1727 ipc/sem.c      		struct compat_semid_ds __user *p = buf;
p                1728 ipc/sem.c      		return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
p                1758 ipc/sem.c      	void __user *p = compat_ptr(arg);
p                1771 ipc/sem.c      		return semctl_info(ns, semid, cmd, p);
p                1778 ipc/sem.c      		if (copy_compat_semid_to_user(p, &semid64, version))
p                1787 ipc/sem.c      		return semctl_main(ns, semid, semnum, cmd, p);
p                1791 ipc/sem.c      		if (copy_compat_semid_from_user(&semid64, p, version))
p                 352 ipc/shm.c      static int shm_try_destroy_orphaned(int id, void *p, void *data)
p                 355 ipc/shm.c      	struct kern_ipc_perm *ipcp = p;
p                1327 ipc/shm.c      		struct compat_shmid64_ds __user *p = buf;
p                1328 ipc/shm.c      		return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm);
p                1330 ipc/shm.c      		struct compat_shmid_ds __user *p = buf;
p                1331 ipc/shm.c      		return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm);
p                 133 kernel/acct.c  static void acct_put(struct bsd_acct_struct *p)
p                 135 kernel/acct.c  	if (atomic_long_dec_and_test(&p->count))
p                 136 kernel/acct.c  		kfree_rcu(p, rcu);
p                 139 kernel/acct.c  static inline struct bsd_acct_struct *to_acct(struct fs_pin *p)
p                 141 kernel/acct.c  	return p ? container_of(p, struct bsd_acct_struct, pin) : NULL;
p                1977 kernel/audit.c 	const unsigned char *p;
p                1978 kernel/audit.c 	for (p = string; p < (const unsigned char *)string + len; p++) {
p                1979 kernel/audit.c 		if (*p == '"' || *p < 0x21 || *p > 0x7e)
p                2025 kernel/audit.c 	char *p, *pathname;
p                2036 kernel/audit.c 	p = d_path(path, pathname, PATH_MAX+11);
p                2037 kernel/audit.c 	if (IS_ERR(p)) { /* Should never happen since we send PATH_MAX */
p                2041 kernel/audit.c 		audit_log_untrustedstring(ab, p);
p                 297 kernel/audit.h #define audit_to_watch(k, p, l, o) (-EINVAL)
p                 303 kernel/audit.h #define audit_alloc_mark(k, p, l) (ERR_PTR(-EINVAL))
p                 248 kernel/audit_tree.c 	struct audit_chunk *p;
p                 250 kernel/audit_tree.c 	list_for_each_entry_rcu(p, list, hash) {
p                 255 kernel/audit_tree.c 		if (READ_ONCE(p->key) == key) {
p                 256 kernel/audit_tree.c 			atomic_long_inc(&p->refs);
p                 257 kernel/audit_tree.c 			return p;
p                 274 kernel/audit_tree.c static struct audit_chunk *find_chunk(struct node *p)
p                 276 kernel/audit_tree.c 	int index = p->index & ~(1U<<31);
p                 277 kernel/audit_tree.c 	p -= index;
p                 278 kernel/audit_tree.c 	return container_of(p, struct audit_chunk, owners[0]);
p                 327 kernel/audit_tree.c static void remove_chunk_node(struct audit_chunk *chunk, struct node *p)
p                 329 kernel/audit_tree.c 	struct audit_tree *owner = p->owner;
p                 335 kernel/audit_tree.c 	list_del_init(&p->list);
p                 336 kernel/audit_tree.c 	p->owner = NULL;
p                 464 kernel/audit_tree.c 	struct node *p;
p                 505 kernel/audit_tree.c 	p = &chunk->owners[chunk->count - 1];
p                 506 kernel/audit_tree.c 	p->index = (chunk->count - 1) | (1U<<31);
p                 507 kernel/audit_tree.c 	p->owner = tree;
p                 509 kernel/audit_tree.c 	list_add(&p->list, &tree->chunks);
p                 575 kernel/audit_tree.c 		struct node *p;
p                 579 kernel/audit_tree.c 		p = list_first_entry(&victim->chunks, struct node, list);
p                 581 kernel/audit_tree.c 		if (tagged && !(p->index & (1U<<31)))
p                 583 kernel/audit_tree.c 		chunk = find_chunk(p);
p                 585 kernel/audit_tree.c 		remove_chunk_node(chunk, p);
p                 613 kernel/audit_tree.c 	struct list_head *p, *q;
p                 620 kernel/audit_tree.c 	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
p                 621 kernel/audit_tree.c 		struct node *node = list_entry(p, struct node, list);
p                 622 kernel/audit_tree.c 		q = p->next;
p                 624 kernel/audit_tree.c 			list_del_init(p);
p                 625 kernel/audit_tree.c 			list_add(p, &tree->chunks);
p                 167 kernel/auditfilter.c 	__u32 *p = kcalloc(AUDIT_BITMASK_SIZE, sizeof(__u32), GFP_KERNEL);
p                 168 kernel/auditfilter.c 	if (!p)
p                 173 kernel/auditfilter.c 			kfree(p);
p                 176 kernel/auditfilter.c 		p[AUDIT_WORD(n)] |= AUDIT_BIT(n);
p                 179 kernel/auditfilter.c 		kfree(p);
p                 182 kernel/auditfilter.c 	classes[class] = p;
p                 282 kernel/auditfilter.c 		__u32 *p = &entry->rule.mask[AUDIT_WORD(bit)];
p                 285 kernel/auditfilter.c 		if (!(*p & AUDIT_BIT(bit)))
p                 287 kernel/auditfilter.c 		*p &= ~AUDIT_BIT(bit);
p                 892 kernel/auditfilter.c 					   struct list_head **p)
p                 900 kernel/auditfilter.c 		*p = list = &audit_inode_hash[h];
p                 913 kernel/auditfilter.c 		*p = list = &audit_filter_list[entry->rule.listnr];
p                1272 kernel/auditfilter.c 	const char *p;
p                1280 kernel/auditfilter.c 	p = path + plen - 1;
p                1281 kernel/auditfilter.c 	while ((*p == '/') && (p > path))
p                1282 kernel/auditfilter.c 		p--;
p                1285 kernel/auditfilter.c 	while ((*p != '/') && (p > path))
p                1286 kernel/auditfilter.c 		p--;
p                1289 kernel/auditfilter.c 	if (*p == '/')
p                1290 kernel/auditfilter.c 		p++;
p                1292 kernel/auditfilter.c 	return p - path;
p                1306 kernel/auditfilter.c 	const char *p;
p                1317 kernel/auditfilter.c 	p = path + parentlen;
p                1319 kernel/auditfilter.c 	return strncmp(p, dname->name, dlen);
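
The auditfilter.c entries compute how much of a watch path is its parent directory (parent_len) and then compare only the final component against a dentry name. The scan skips trailing slashes, walks back over the last path component, and keeps everything up to and including the remaining '/'. A standalone sketch of that scan:

#include <stdio.h>
#include <string.h>

/* Length of the parent-directory prefix of @path, including the trailing
 * '/'; 0 when the path has no directory part at all. */
static size_t parent_prefix_len(const char *path)
{
	size_t plen = strlen(path);
	const char *p;

	if (!plen)
		return 0;

	p = path + plen - 1;
	while (*p == '/' && p > path)	/* ignore trailing slashes */
		p--;
	while (*p != '/' && p > path)	/* walk back over the last name */
		p--;
	if (*p == '/')			/* keep the separator itself */
		p++;

	return (size_t)(p - path);
}

int main(void)
{
	printf("%zu\n", parent_prefix_len("/usr/bin/ls"));	/* 9: "/usr/bin/" */
	printf("%zu\n", parent_prefix_len("/var/log/"));	/* 5: "/var/" */
	return 0;
}
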
p                 213 kernel/auditsc.c 	struct audit_tree_refs *p = ctx->trees;
p                 216 kernel/auditsc.c 		p->c[--left] = chunk;
p                 220 kernel/auditsc.c 	if (!p)
p                 222 kernel/auditsc.c 	p = p->next;
p                 223 kernel/auditsc.c 	if (p) {
p                 224 kernel/auditsc.c 		p->c[30] = chunk;
p                 225 kernel/auditsc.c 		ctx->trees = p;
p                 234 kernel/auditsc.c 	struct audit_tree_refs *p = ctx->trees;
p                 237 kernel/auditsc.c 		ctx->trees = p;
p                 240 kernel/auditsc.c 	if (p)
p                 241 kernel/auditsc.c 		p->next = ctx->trees;
p                 249 kernel/auditsc.c 		      struct audit_tree_refs *p, int count)
p                 253 kernel/auditsc.c 	if (!p) {
p                 255 kernel/auditsc.c 		p = ctx->first_trees;
p                 258 kernel/auditsc.c 		if (!p)
p                 262 kernel/auditsc.c 	for (q = p; q != ctx->trees; q = q->next, n = 31) {
p                 272 kernel/auditsc.c 	ctx->trees = p;
p                 278 kernel/auditsc.c 	struct audit_tree_refs *p, *q;
p                 279 kernel/auditsc.c 	for (p = ctx->first_trees; p; p = q) {
p                 280 kernel/auditsc.c 		q = p->next;
p                 281 kernel/auditsc.c 		kfree(p);
p                 287 kernel/auditsc.c 	struct audit_tree_refs *p;
p                 292 kernel/auditsc.c 	for (p = ctx->first_trees; p != ctx->trees; p = p->next) {
p                 294 kernel/auditsc.c 			if (audit_tree_match(p->c[n], tree))
p                 298 kernel/auditsc.c 	if (p) {
p                 300 kernel/auditsc.c 			if (audit_tree_match(p->c[n], tree))
p                1002 kernel/auditsc.c 	const char __user *p = (const char __user *)current->mm->arg_start;
p                1042 kernel/auditsc.c 			len_full = strnlen_user(p, MAX_ARG_STRLEN) - 1;
p                1053 kernel/auditsc.c 			len_tmp = strncpy_from_user(&buf_head[len_buf], p,
p                1068 kernel/auditsc.c 				p += len_tmp;
p                1078 kernel/auditsc.c 				p += len_tmp + 1;
p                1739 kernel/auditsc.c 	struct audit_tree_refs *p;
p                1745 kernel/auditsc.c 	p = context->trees;
p                1758 kernel/auditsc.c 		unroll_tree_refs(context, p, count);
p                1767 kernel/auditsc.c 	struct audit_tree_refs *p;
p                1774 kernel/auditsc.c 	p = context->trees;
p                1802 kernel/auditsc.c 			unroll_tree_refs(context, p, count);
p                1808 kernel/auditsc.c 			unroll_tree_refs(context, p, count);
p                1813 kernel/auditsc.c 		unroll_tree_refs(context, p, count);
p                2220 kernel/auditsc.c 	struct timespec64 *p = &context->mq_sendrecv.abs_timeout;
p                2223 kernel/auditsc.c 		memcpy(p, abs_timeout, sizeof(*p));
p                2225 kernel/auditsc.c 		memset(p, 0, sizeof(*p));
p                2356 kernel/auditsc.c 		void *p = kmalloc(sizeof(struct sockaddr_storage), GFP_KERNEL);
p                2357 kernel/auditsc.c 		if (!p)
p                2359 kernel/auditsc.c 		context->sockaddr = p;
p                  38 kernel/bpf/cgroup.c 	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
p                  68 kernel/bpf/cgroup.c 	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
p                  69 kernel/bpf/cgroup.c 		cgroup_bpf_put(p);
p                 112 kernel/bpf/cgroup.c 	struct cgroup *p;
p                 114 kernel/bpf/cgroup.c 	p = cgroup_parent(cgrp);
p                 115 kernel/bpf/cgroup.c 	if (!p)
p                 118 kernel/bpf/cgroup.c 		u32 flags = p->bpf.flags[type];
p                 123 kernel/bpf/cgroup.c 		cnt = prog_list_length(&p->bpf.progs[type]);
p                 127 kernel/bpf/cgroup.c 		p = cgroup_parent(p);
p                 128 kernel/bpf/cgroup.c 	} while (p);
p                 145 kernel/bpf/cgroup.c 	struct cgroup *p = cgrp;
p                 150 kernel/bpf/cgroup.c 		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
p                 151 kernel/bpf/cgroup.c 			cnt += prog_list_length(&p->bpf.progs[type]);
p                 152 kernel/bpf/cgroup.c 		p = cgroup_parent(p);
p                 153 kernel/bpf/cgroup.c 	} while (p);
p                 161 kernel/bpf/cgroup.c 	p = cgrp;
p                 163 kernel/bpf/cgroup.c 		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
p                 166 kernel/bpf/cgroup.c 		list_for_each_entry(pl, &p->bpf.progs[type], node) {
p                 176 kernel/bpf/cgroup.c 	} while ((p = cgroup_parent(p)));
p                 205 kernel/bpf/cgroup.c 	struct cgroup *p;
p                 213 kernel/bpf/cgroup.c 	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
p                 214 kernel/bpf/cgroup.c 		cgroup_bpf_get(p);
p                 231 kernel/bpf/cgroup.c 	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
p                 232 kernel/bpf/cgroup.c 		cgroup_bpf_put(p);
p                1089 kernel/bpf/verifier.c 	struct bpf_subprog_info *p;
p                1091 kernel/bpf/verifier.c 	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
p                1093 kernel/bpf/verifier.c 	if (!p)
p                1095 kernel/bpf/verifier.c 	return p - env->subprog_info;
p                1408 kernel/bpf/verifier.c 	struct bpf_idx_pair *p;
p                1411 kernel/bpf/verifier.c 	p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
p                1412 kernel/bpf/verifier.c 	if (!p)
p                1414 kernel/bpf/verifier.c 	p[cnt - 1].idx = env->insn_idx;
p                1415 kernel/bpf/verifier.c 	p[cnt - 1].prev_idx = env->prev_insn_idx;
p                1416 kernel/bpf/verifier.c 	cur->jmp_history = p;
p                 468 kernel/cgroup/cgroup-v1.c 	pid_t *p = v;
p                 474 kernel/cgroup/cgroup-v1.c 	p++;
p                 475 kernel/cgroup/cgroup-v1.c 	if (p >= end) {
p                 479 kernel/cgroup/cgroup-v1.c 		*pos = *p;
p                 480 kernel/cgroup/cgroup-v1.c 		return p;
p                1896 kernel/cgroup/cgroup.c 	struct task_struct *p, *g;
p                1913 kernel/cgroup/cgroup.c 	do_each_thread(g, p) {
p                1914 kernel/cgroup/cgroup.c 		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
p                1915 kernel/cgroup/cgroup.c 			     task_css_set(p) != &init_css_set);
p                1928 kernel/cgroup/cgroup.c 		spin_lock(&p->sighand->siglock);
p                1929 kernel/cgroup/cgroup.c 		if (!(p->flags & PF_EXITING)) {
p                1930 kernel/cgroup/cgroup.c 			struct css_set *cset = task_css_set(p);
p                1934 kernel/cgroup/cgroup.c 			list_add_tail(&p->cg_list, &cset->tasks);
p                1938 kernel/cgroup/cgroup.c 		spin_unlock(&p->sighand->siglock);
p                1939 kernel/cgroup/cgroup.c 	} while_each_thread(g, p);
p                 444 kernel/cgroup/cpuset.c static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
p                 446 kernel/cgroup/cpuset.c 	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
p                 447 kernel/cgroup/cpuset.c 		nodes_subset(p->mems_allowed, q->mems_allowed) &&
p                 448 kernel/cgroup/cpuset.c 		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
p                 449 kernel/cgroup/cpuset.c 		is_mem_exclusive(p) <= is_mem_exclusive(q);
p                 111 kernel/cgroup/pids.c 	struct pids_cgroup *p;
p                 113 kernel/cgroup/pids.c 	for (p = pids; parent_pids(p); p = parent_pids(p))
p                 114 kernel/cgroup/pids.c 		pids_cancel(p, num);
p                 128 kernel/cgroup/pids.c 	struct pids_cgroup *p;
p                 130 kernel/cgroup/pids.c 	for (p = pids; parent_pids(p); p = parent_pids(p))
p                 131 kernel/cgroup/pids.c 		atomic64_add(num, &p->counter);
p                 145 kernel/cgroup/pids.c 	struct pids_cgroup *p, *q;
p                 147 kernel/cgroup/pids.c 	for (p = pids; parent_pids(p); p = parent_pids(p)) {
p                 148 kernel/cgroup/pids.c 		int64_t new = atomic64_add_return(num, &p->counter);
p                 149 kernel/cgroup/pids.c 		int64_t limit = atomic64_read(&p->limit);
p                 163 kernel/cgroup/pids.c 	for (q = pids; q != p; q = parent_pids(q))
p                 165 kernel/cgroup/pids.c 	pids_cancel(p, num);
p                 210 kernel/cgroup/rdma.c 	struct rdma_cgroup *p;
p                 214 kernel/cgroup/rdma.c 	for (p = cg; p != stop_cg; p = parent_rdmacg(p))
p                 215 kernel/cgroup/rdma.c 		uncharge_cg_locked(p, device, index);
p                 263 kernel/cgroup/rdma.c 	struct rdma_cgroup *cg, *p;
p                 278 kernel/cgroup/rdma.c 	for (p = cg; p; p = parent_rdmacg(p)) {
p                 279 kernel/cgroup/rdma.c 		rpool = get_cg_rpool_locked(p, device);
p                 301 kernel/cgroup/rdma.c 	rdmacg_uncharge_hierarchy(cg, device, p, index);
p                 816 kernel/cpu.c   	struct task_struct *p;
p                 827 kernel/cpu.c   	for_each_process(p) {
p                 834 kernel/cpu.c   		t = find_lock_task_mm(p);
p                 190 kernel/crash_core.c 	char *p = cmdline, *ck_cmdline = NULL;
p                 193 kernel/crash_core.c 	p = strstr(p, name);
p                 194 kernel/crash_core.c 	while (p) {
p                 195 kernel/crash_core.c 		char *end_p = strchr(p, ' ');
p                 199 kernel/crash_core.c 			end_p = p + strlen(p);
p                 211 kernel/crash_core.c 			ck_cmdline = p;
p                 215 kernel/crash_core.c 				ck_cmdline = p;
p                 218 kernel/crash_core.c 		p = strstr(p+1, name);
p                 330 kernel/cred.c  int copy_creds(struct task_struct *p, unsigned long clone_flags)
p                 336 kernel/cred.c  	p->cached_requested_key = NULL;
p                 341 kernel/cred.c  		!p->cred->thread_keyring &&
p                 345 kernel/cred.c  		p->real_cred = get_cred(p->cred);
p                 346 kernel/cred.c  		get_cred(p->cred);
p                 347 kernel/cred.c  		alter_cred_subscribers(p->cred, 2);
p                 349 kernel/cred.c  		       p->cred, atomic_read(&p->cred->usage),
p                 350 kernel/cred.c  		       read_cred_subscribers(p->cred));
p                 351 kernel/cred.c  		atomic_inc(&p->cred->user->processes);
p                 385 kernel/cred.c  	p->cred = p->real_cred = get_cred(new);
p                 701 kernel/debug/gdbstub.c 	struct task_struct *p;
p                 728 kernel/debug/gdbstub.c 		do_each_thread(g, p) {
p                 730 kernel/debug/gdbstub.c 				int_to_threadref(thref, p->pid);
p                 738 kernel/debug/gdbstub.c 		} while_each_thread(g, p);
p                  22 kernel/debug/kdb/kdb_bt.c static void kdb_show_stack(struct task_struct *p, void *addr)
p                  27 kernel/debug/kdb/kdb_bt.c 	kdb_set_current_task(p);
p                  29 kernel/debug/kdb/kdb_bt.c 		show_stack((struct task_struct *)p, addr);
p                  32 kernel/debug/kdb/kdb_bt.c 		show_stack(p, &kdb_current_regs->sp);
p                  34 kernel/debug/kdb/kdb_bt.c 		show_stack(p, NULL);
p                  37 kernel/debug/kdb/kdb_bt.c 		show_stack(p, NULL);
p                  81 kernel/debug/kdb/kdb_bt.c kdb_bt1(struct task_struct *p, unsigned long mask,
p                  85 kernel/debug/kdb/kdb_bt.c 	if (kdb_getarea(buffer[0], (unsigned long)p) ||
p                  86 kernel/debug/kdb/kdb_bt.c 	    kdb_getarea(buffer[0], (unsigned long)(p+1)-1))
p                  88 kernel/debug/kdb/kdb_bt.c 	if (!kdb_task_state(p, mask))
p                  90 kernel/debug/kdb/kdb_bt.c 	kdb_printf("Stack traceback for pid %d\n", p->pid);
p                  91 kernel/debug/kdb/kdb_bt.c 	kdb_ps1(p);
p                  92 kernel/debug/kdb/kdb_bt.c 	kdb_show_stack(p, NULL);
p                 119 kernel/debug/kdb/kdb_bt.c 		struct task_struct *g, *p;
p                 127 kernel/debug/kdb/kdb_bt.c 			p = kdb_curr_task(cpu);
p                 128 kernel/debug/kdb/kdb_bt.c 			if (kdb_bt1(p, mask, argcount, btaprompt))
p                 132 kernel/debug/kdb/kdb_bt.c 		kdb_do_each_thread(g, p) {
p                 135 kernel/debug/kdb/kdb_bt.c 			if (task_curr(p))
p                 137 kernel/debug/kdb/kdb_bt.c 			if (kdb_bt1(p, mask, argcount, btaprompt))
p                 139 kernel/debug/kdb/kdb_bt.c 		} kdb_while_each_thread(g, p);
p                 141 kernel/debug/kdb/kdb_bt.c 		struct task_struct *p;
p                 148 kernel/debug/kdb/kdb_bt.c 		p = find_task_by_pid_ns(pid, &init_pid_ns);
p                 149 kernel/debug/kdb/kdb_bt.c 		if (p) {
p                 150 kernel/debug/kdb/kdb_bt.c 			kdb_set_current_task(p);
p                 151 kernel/debug/kdb/kdb_bt.c 			return kdb_bt1(p, ~0UL, argcount, 0);
p                 192 kernel/debug/kdb/kdb_main.c 	struct task_struct *p = curr_task(cpu);
p                 194 kernel/debug/kdb/kdb_main.c 	if ((task_thread_info(p)->flags & _TIF_MCA_INIT) && KDB_TSK(cpu))
p                 195 kernel/debug/kdb/kdb_main.c 		p = krp->p;
p                 197 kernel/debug/kdb/kdb_main.c 	return p;
p                1142 kernel/debug/kdb/kdb_main.c void kdb_set_current_task(struct task_struct *p)
p                1144 kernel/debug/kdb/kdb_main.c 	kdb_current_task = p;
p                1146 kernel/debug/kdb/kdb_main.c 	if (kdb_task_has_cpu(p)) {
p                1147 kernel/debug/kdb/kdb_main.c 		kdb_current_regs = KDB_TSKREGS(kdb_process_cpu(p));
p                1591 kernel/debug/kdb/kdb_main.c 			char *p;
p                1592 kernel/debug/kdb/kdb_main.c 			repeat = simple_strtoul(argv[0] + 4, &p, 10);
p                1594 kernel/debug/kdb/kdb_main.c 			valid = !*p;
p                2292 kernel/debug/kdb/kdb_main.c 	const struct task_struct *p, *g;
p                2294 kernel/debug/kdb/kdb_main.c 		p = kdb_curr_task(cpu);
p                2295 kernel/debug/kdb/kdb_main.c 		if (kdb_task_state(p, mask_I))
p                2298 kernel/debug/kdb/kdb_main.c 	kdb_do_each_thread(g, p) {
p                2299 kernel/debug/kdb/kdb_main.c 		if (kdb_task_state(p, mask_M))
p                2301 kernel/debug/kdb/kdb_main.c 	} kdb_while_each_thread(g, p);
p                2320 kernel/debug/kdb/kdb_main.c void kdb_ps1(const struct task_struct *p)
p                2325 kernel/debug/kdb/kdb_main.c 	if (!p || probe_kernel_read(&tmp, (char *)p, sizeof(unsigned long)))
p                2328 kernel/debug/kdb/kdb_main.c 	cpu = kdb_process_cpu(p);
p                2330 kernel/debug/kdb/kdb_main.c 		   (void *)p, p->pid, p->parent->pid,
p                2331 kernel/debug/kdb/kdb_main.c 		   kdb_task_has_cpu(p), kdb_process_cpu(p),
p                2332 kernel/debug/kdb/kdb_main.c 		   kdb_task_state_char(p),
p                2333 kernel/debug/kdb/kdb_main.c 		   (void *)(&p->thread),
p                2334 kernel/debug/kdb/kdb_main.c 		   p == kdb_curr_task(raw_smp_processor_id()) ? '*' : ' ',
p                2335 kernel/debug/kdb/kdb_main.c 		   p->comm);
p                2336 kernel/debug/kdb/kdb_main.c 	if (kdb_task_has_cpu(p)) {
p                2340 kernel/debug/kdb/kdb_main.c 			if (KDB_TSK(cpu) != p)
p                2349 kernel/debug/kdb/kdb_main.c 	struct task_struct *g, *p;
p                2362 kernel/debug/kdb/kdb_main.c 		p = kdb_curr_task(cpu);
p                2363 kernel/debug/kdb/kdb_main.c 		if (kdb_task_state(p, mask))
p                2364 kernel/debug/kdb/kdb_main.c 			kdb_ps1(p);
p                2368 kernel/debug/kdb/kdb_main.c 	kdb_do_each_thread(g, p) {
p                2371 kernel/debug/kdb/kdb_main.c 		if (kdb_task_state(p, mask))
p                2372 kernel/debug/kdb/kdb_main.c 			kdb_ps1(p);
p                2373 kernel/debug/kdb/kdb_main.c 	} kdb_while_each_thread(g, p);
p                2385 kernel/debug/kdb/kdb_main.c 	struct task_struct *p;
p                2394 kernel/debug/kdb/kdb_main.c 			p = KDB_TSK(kdb_initial_cpu);
p                2400 kernel/debug/kdb/kdb_main.c 			p = find_task_by_pid_ns((pid_t)val,	&init_pid_ns);
p                2401 kernel/debug/kdb/kdb_main.c 			if (!p) {
p                2406 kernel/debug/kdb/kdb_main.c 		kdb_set_current_task(p);
p                2454 kernel/debug/kdb/kdb_main.c 	struct task_struct *p;
p                2477 kernel/debug/kdb/kdb_main.c 	p = find_task_by_pid_ns(pid, &init_pid_ns);
p                2478 kernel/debug/kdb/kdb_main.c 	if (!p) {
p                2482 kernel/debug/kdb/kdb_main.c 	p = p->group_leader;
p                2483 kernel/debug/kdb/kdb_main.c 	kdb_send_sig(p, sig);
p                 206 kernel/debug/kdb/kdb_private.h extern unsigned long kdb_task_state(const struct task_struct *p,
p                 209 kernel/debug/kdb/kdb_private.h extern void kdb_ps1(const struct task_struct *p);
p                 211 kernel/debug/kdb/kdb_private.h extern void kdb_send_sig(struct task_struct *p, int sig);
p                 230 kernel/debug/kdb/kdb_private.h #define kdb_task_has_cpu(p) (task_curr(p))
p                 233 kernel/debug/kdb/kdb_private.h #define	kdb_do_each_thread(g, p) do_each_thread(g, p)
p                 234 kernel/debug/kdb/kdb_private.h #define	kdb_while_each_thread(g, p) while_each_thread(g, p)
p                 621 kernel/debug/kdb/kdb_support.c char kdb_task_state_char (const struct task_struct *p)
p                 627 kernel/debug/kdb/kdb_support.c 	if (!p || probe_kernel_read(&tmp, (char *)p, sizeof(unsigned long)))
p                 630 kernel/debug/kdb/kdb_support.c 	cpu = kdb_process_cpu(p);
p                 631 kernel/debug/kdb/kdb_support.c 	state = (p->state == 0) ? 'R' :
p                 632 kernel/debug/kdb/kdb_support.c 		(p->state < 0) ? 'U' :
p                 633 kernel/debug/kdb/kdb_support.c 		(p->state & TASK_UNINTERRUPTIBLE) ? 'D' :
p                 634 kernel/debug/kdb/kdb_support.c 		(p->state & TASK_STOPPED) ? 'T' :
p                 635 kernel/debug/kdb/kdb_support.c 		(p->state & TASK_TRACED) ? 'C' :
p                 636 kernel/debug/kdb/kdb_support.c 		(p->exit_state & EXIT_ZOMBIE) ? 'Z' :
p                 637 kernel/debug/kdb/kdb_support.c 		(p->exit_state & EXIT_DEAD) ? 'E' :
p                 638 kernel/debug/kdb/kdb_support.c 		(p->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
p                 639 kernel/debug/kdb/kdb_support.c 	if (is_idle_task(p)) {
p                 642 kernel/debug/kdb/kdb_support.c 		if (!kdb_task_has_cpu(p) || kgdb_info[cpu].irq_depth == 1) {
p                 646 kernel/debug/kdb/kdb_support.c 	} else if (!p->mm && state == 'S') {
p                 661 kernel/debug/kdb/kdb_support.c unsigned long kdb_task_state(const struct task_struct *p, unsigned long mask)
p                 663 kernel/debug/kdb/kdb_support.c 	char state[] = { kdb_task_state_char(p), '\0' };
p                 749 kernel/debug/kdb/kdb_support.c 	void *p = NULL;
p                 795 kernel/debug/kdb/kdb_support.c 	p = (char *)best + dah_overhead;
p                 796 kernel/debug/kdb/kdb_support.c 	memset(p, POISON_INUSE, best->size - 1);
p                 797 kernel/debug/kdb/kdb_support.c 	*((char *)p + best->size - 1) = POISON_END;
p                 800 kernel/debug/kdb/kdb_support.c 	return p;
p                 803 kernel/debug/kdb/kdb_support.c void debug_kfree(void *p)
p                 807 kernel/debug/kdb/kdb_support.c 	if (!p)
p                 809 kernel/debug/kdb/kdb_support.c 	if ((char *)p < debug_alloc_pool ||
p                 810 kernel/debug/kdb/kdb_support.c 	    (char *)p >= debug_alloc_pool + sizeof(debug_alloc_pool_aligned)) {
p                 811 kernel/debug/kdb/kdb_support.c 		kfree(p);
p                 818 kernel/debug/kdb/kdb_support.c 	h = (struct debug_alloc_header *)((char *)p - dah_overhead);
p                 819 kernel/debug/kdb/kdb_support.c 	memset(p, POISON_FREE, h->size - 1);
p                 820 kernel/debug/kdb/kdb_support.c 	*((char *)p + h->size - 1) = POISON_END;
p                  68 kernel/delayacct.c void __delayacct_blkio_end(struct task_struct *p)
p                  70 kernel/delayacct.c 	struct task_delay_info *delays = p->delays;
p                  74 kernel/delayacct.c 	if (p->delays->flags & DELAYACCT_PF_SWAPIN) {
p                  50 kernel/dma/contiguous.c static int __init early_cma(char *p)
p                  52 kernel/dma/contiguous.c 	if (!p) {
p                  57 kernel/dma/contiguous.c 	size_cmdline = memparse(p, &p);
p                  58 kernel/dma/contiguous.c 	if (*p != '@')
p                  60 kernel/dma/contiguous.c 	base_cmdline = memparse(p + 1, &p);
p                  61 kernel/dma/contiguous.c 	if (*p != '-') {
p                  65 kernel/dma/contiguous.c 	limit_cmdline = memparse(p + 1, &p);
p                 107 kernel/dma/remap.c static int __init early_coherent_pool(char *p)
p                 109 kernel/dma/remap.c 	atomic_pool_size = memparse(p, &p);
p                  60 kernel/events/core.c 	struct task_struct	*p;
p                  69 kernel/events/core.c 	struct task_struct *p = tfc->p;
p                  71 kernel/events/core.c 	if (p) {
p                  73 kernel/events/core.c 		if (task_cpu(p) != smp_processor_id())
p                  82 kernel/events/core.c 		if (p != current)
p                 103 kernel/events/core.c task_function_call(struct task_struct *p, remote_function_f func, void *info)
p                 106 kernel/events/core.c 		.p	= p,
p                 114 kernel/events/core.c 		ret = smp_call_function_single(task_cpu(p), remote_function,
p                 139 kernel/events/core.c 		.p	= NULL,
p                1325 kernel/events/core.c static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p,
p                1335 kernel/events/core.c 	nr = __task_pid_nr_ns(p, type, event->ns);
p                1337 kernel/events/core.c 	if (!nr && !pid_alive(p))
p                1342 kernel/events/core.c static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
p                1344 kernel/events/core.c 	return perf_event_pid_type(event, p, PIDTYPE_TGID);
p                1347 kernel/events/core.c static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
p                1349 kernel/events/core.c 	return perf_event_pid_type(event, p, PIDTYPE_PID);
p                5143 kernel/events/core.c static inline int perf_fget_light(int fd, struct fd *p)
p                5153 kernel/events/core.c 	*p = f;
p                6527 kernel/events/core.c 	struct page *p = NULL;
p                6547 kernel/events/core.c 			if (__get_user_pages_fast(virt, 1, 0, &p) == 1)
p                6548 kernel/events/core.c 				phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
p                6552 kernel/events/core.c 		if (p)
p                6553 kernel/events/core.c 			put_page(p);
p                 677 kernel/events/uprobes.c 	struct rb_node **p = &uprobes_tree.rb_node;
p                 682 kernel/events/uprobes.c 	while (*p) {
p                 683 kernel/events/uprobes.c 		parent = *p;
p                 690 kernel/events/uprobes.c 			p = &parent->rb_left;
p                 692 kernel/events/uprobes.c 			p = &parent->rb_right;
p                 697 kernel/events/uprobes.c 	rb_link_node(&uprobe->rb_node, parent, p);
p                1761 kernel/events/uprobes.c 	struct return_instance **p, *o, *n;
p                1768 kernel/events/uprobes.c 	p = &n_utask->return_instances;
p                1778 kernel/events/uprobes.c 		*p = n;
p                1779 kernel/events/uprobes.c 		p = &n->next;
p                  72 kernel/exit.c  static void __unhash_process(struct task_struct *p, bool group_dead)
p                  75 kernel/exit.c  	detach_pid(p, PIDTYPE_PID);
p                  77 kernel/exit.c  		detach_pid(p, PIDTYPE_TGID);
p                  78 kernel/exit.c  		detach_pid(p, PIDTYPE_PGID);
p                  79 kernel/exit.c  		detach_pid(p, PIDTYPE_SID);
p                  81 kernel/exit.c  		list_del_rcu(&p->tasks);
p                  82 kernel/exit.c  		list_del_init(&p->sibling);
p                  85 kernel/exit.c  	list_del_rcu(&p->thread_group);
p                  86 kernel/exit.c  	list_del_rcu(&p->thread_node);
p                 191 kernel/exit.c  void release_task(struct task_struct *p)
p                 199 kernel/exit.c  	atomic_dec(&__task_cred(p)->user->processes);
p                 202 kernel/exit.c  	proc_flush_task(p);
p                 203 kernel/exit.c  	cgroup_release(p);
p                 206 kernel/exit.c  	ptrace_release_task(p);
p                 207 kernel/exit.c  	__exit_signal(p);
p                 215 kernel/exit.c  	leader = p->group_leader;
p                 216 kernel/exit.c  	if (leader != p && thread_group_empty(leader)
p                 229 kernel/exit.c  	release_thread(p);
p                 230 kernel/exit.c  	put_task_struct_rcu_user(p);
p                 232 kernel/exit.c  	p = leader;
p                 273 kernel/exit.c  	struct task_struct *p;
p                 275 kernel/exit.c  	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
p                 276 kernel/exit.c  		if ((p == ignored_task) ||
p                 277 kernel/exit.c  		    (p->exit_state && thread_group_empty(p)) ||
p                 278 kernel/exit.c  		    is_global_init(p->real_parent))
p                 281 kernel/exit.c  		if (task_pgrp(p->real_parent) != pgrp &&
p                 282 kernel/exit.c  		    task_session(p->real_parent) == task_session(p))
p                 284 kernel/exit.c  	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
p                 302 kernel/exit.c  	struct task_struct *p;
p                 304 kernel/exit.c  	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
p                 305 kernel/exit.c  		if (p->signal->flags & SIGNAL_STOP_STOPPED)
p                 307 kernel/exit.c  	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
p                 349 kernel/exit.c  	struct task_struct *c, *g, *p = current;
p                 356 kernel/exit.c  	if (mm->owner != p)
p                 372 kernel/exit.c  	list_for_each_entry(c, &p->children, sibling) {
p                 380 kernel/exit.c  	list_for_each_entry(c, &p->real_parent->children, sibling) {
p                 408 kernel/exit.c  	BUG_ON(c == p);
p                 490 kernel/exit.c  static struct task_struct *find_alive_thread(struct task_struct *p)
p                 494 kernel/exit.c  	for_each_thread(p, t) {
p                 508 kernel/exit.c  	struct task_struct *p, *n;
p                 521 kernel/exit.c  	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
p                 522 kernel/exit.c  		list_del_init(&p->ptrace_entry);
p                 523 kernel/exit.c  		release_task(p);
p                 577 kernel/exit.c  static void reparent_leader(struct task_struct *father, struct task_struct *p,
p                 580 kernel/exit.c  	if (unlikely(p->exit_state == EXIT_DEAD))
p                 584 kernel/exit.c  	p->exit_signal = SIGCHLD;
p                 587 kernel/exit.c  	if (!p->ptrace &&
p                 588 kernel/exit.c  	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
p                 589 kernel/exit.c  		if (do_notify_parent(p, p->exit_signal)) {
p                 590 kernel/exit.c  			p->exit_state = EXIT_DEAD;
p                 591 kernel/exit.c  			list_add(&p->ptrace_entry, dead);
p                 595 kernel/exit.c  	kill_orphaned_pgrp(p, father);
p                 609 kernel/exit.c  	struct task_struct *p, *t, *reaper;
p                 620 kernel/exit.c  	list_for_each_entry(p, &father->children, sibling) {
p                 621 kernel/exit.c  		for_each_thread(p, t) {
p                 636 kernel/exit.c  			reparent_leader(father, p, dead);
p                 648 kernel/exit.c  	struct task_struct *p, *n;
p                 681 kernel/exit.c  	list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
p                 682 kernel/exit.c  		list_del_init(&p->ptrace_entry);
p                 683 kernel/exit.c  		release_task(p);
p                 935 kernel/exit.c  static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
p                 938 kernel/exit.c  		task_pid_type(p, wo->wo_type) == wo->wo_pid;
p                 942 kernel/exit.c  eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
p                 944 kernel/exit.c  	if (!eligible_pid(wo, p))
p                 962 kernel/exit.c  	if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
p                 974 kernel/exit.c  static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
p                 977 kernel/exit.c  	pid_t pid = task_pid_vnr(p);
p                 978 kernel/exit.c  	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
p                 985 kernel/exit.c  		status = p->exit_code;
p                 986 kernel/exit.c  		get_task_struct(p);
p                 990 kernel/exit.c  			getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
p                 991 kernel/exit.c  		put_task_struct(p);
p                 997 kernel/exit.c  	state = (ptrace_reparented(p) && thread_group_leader(p)) ?
p                 999 kernel/exit.c  	if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
p                1010 kernel/exit.c  	if (state == EXIT_DEAD && thread_group_leader(p)) {
p                1011 kernel/exit.c  		struct signal_struct *sig = p->signal;
p                1036 kernel/exit.c  		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
p                1041 kernel/exit.c  		psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
p                1043 kernel/exit.c  			p->min_flt + sig->min_flt + sig->cmin_flt;
p                1045 kernel/exit.c  			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
p                1047 kernel/exit.c  			p->nvcsw + sig->nvcsw + sig->cnvcsw;
p                1049 kernel/exit.c  			p->nivcsw + sig->nivcsw + sig->cnivcsw;
p                1051 kernel/exit.c  			task_io_get_inblock(p) +
p                1054 kernel/exit.c  			task_io_get_oublock(p) +
p                1059 kernel/exit.c  		task_io_accounting_add(&psig->ioac, &p->ioac);
p                1066 kernel/exit.c  		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
p                1067 kernel/exit.c  	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
p                1068 kernel/exit.c  		? p->signal->group_exit_code : p->exit_code;
p                1074 kernel/exit.c  		ptrace_unlink(p);
p                1078 kernel/exit.c  		if (do_notify_parent(p, p->exit_signal))
p                1080 kernel/exit.c  		p->exit_state = state;
p                1084 kernel/exit.c  		release_task(p);
p                1103 kernel/exit.c  static int *task_stopped_code(struct task_struct *p, bool ptrace)
p                1106 kernel/exit.c  		if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
p                1107 kernel/exit.c  			return &p->exit_code;
p                1109 kernel/exit.c  		if (p->signal->flags & SIGNAL_STOP_STOPPED)
p                1110 kernel/exit.c  			return &p->signal->group_exit_code;
p                1134 kernel/exit.c  				int ptrace, struct task_struct *p)
p                1147 kernel/exit.c  	if (!task_stopped_code(p, ptrace))
p                1151 kernel/exit.c  	spin_lock_irq(&p->sighand->siglock);
p                1153 kernel/exit.c  	p_code = task_stopped_code(p, ptrace);
p                1164 kernel/exit.c  	uid = from_kuid_munged(current_user_ns(), task_uid(p));
p                1166 kernel/exit.c  	spin_unlock_irq(&p->sighand->siglock);
p                1177 kernel/exit.c  	get_task_struct(p);
p                1178 kernel/exit.c  	pid = task_pid_vnr(p);
p                1183 kernel/exit.c  		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
p                1184 kernel/exit.c  	put_task_struct(p);
p                1205 kernel/exit.c  static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
p                1214 kernel/exit.c  	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
p                1217 kernel/exit.c  	spin_lock_irq(&p->sighand->siglock);
p                1219 kernel/exit.c  	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
p                1220 kernel/exit.c  		spin_unlock_irq(&p->sighand->siglock);
p                1224 kernel/exit.c  		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
p                1225 kernel/exit.c  	uid = from_kuid_munged(current_user_ns(), task_uid(p));
p                1226 kernel/exit.c  	spin_unlock_irq(&p->sighand->siglock);
p                1228 kernel/exit.c  	pid = task_pid_vnr(p);
p                1229 kernel/exit.c  	get_task_struct(p);
p                1233 kernel/exit.c  		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
p                1234 kernel/exit.c  	put_task_struct(p);
p                1258 kernel/exit.c  				struct task_struct *p)
p                1265 kernel/exit.c  	int exit_state = READ_ONCE(p->exit_state);
p                1271 kernel/exit.c  	ret = eligible_child(wo, ptrace, p);
p                1285 kernel/exit.c  	if (likely(!ptrace) && unlikely(p->ptrace)) {
p                1297 kernel/exit.c  		if (!ptrace_reparented(p))
p                1304 kernel/exit.c  		if (!delay_group_leader(p)) {
p                1310 kernel/exit.c  			if (unlikely(ptrace) || likely(!p->ptrace))
p                1311 kernel/exit.c  				return wait_task_zombie(wo, p);
p                1348 kernel/exit.c  	ret = wait_task_stopped(wo, ptrace, p);
p                1357 kernel/exit.c  	return wait_task_continued(wo, p);
p                1371 kernel/exit.c  	struct task_struct *p;
p                1373 kernel/exit.c  	list_for_each_entry(p, &tsk->children, sibling) {
p                1374 kernel/exit.c  		int ret = wait_consider_task(wo, 0, p);
p                1385 kernel/exit.c  	struct task_struct *p;
p                1387 kernel/exit.c  	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
p                1388 kernel/exit.c  		int ret = wait_consider_task(wo, 1, p);
p                1402 kernel/exit.c  	struct task_struct *p = key;
p                1404 kernel/exit.c  	if (!eligible_pid(wo, p))
p                1407 kernel/exit.c  	if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
p                1413 kernel/exit.c  void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
p                1416 kernel/exit.c  				TASK_INTERRUPTIBLE, 1, p);
p                 981 kernel/fork.c  					   struct task_struct *p)
p                 984 kernel/fork.c  	if (mm->owner == p)
p                 989 kernel/fork.c  static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
p                 992 kernel/fork.c  	mm->owner = p;
p                1003 kernel/fork.c  static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
p                1023 kernel/fork.c  	mm_init_owner(mm, p);
p                1043 kernel/fork.c  	if (init_new_context(p, mm))
p                1593 kernel/fork.c  static void copy_seccomp(struct task_struct *p)
p                1606 kernel/fork.c  	p->seccomp = current->seccomp;
p                1614 kernel/fork.c  		task_set_no_new_privs(p);
p                1621 kernel/fork.c  	if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
p                1622 kernel/fork.c  		set_tsk_thread_flag(p, TIF_SECCOMP);
p                1633 kernel/fork.c  static void rt_mutex_init_task(struct task_struct *p)
p                1635 kernel/fork.c  	raw_spin_lock_init(&p->pi_lock);
p                1637 kernel/fork.c  	p->pi_waiters = RB_ROOT_CACHED;
p                1638 kernel/fork.c  	p->pi_top_task = NULL;
p                1639 kernel/fork.c  	p->pi_blocked_on = NULL;
p                1661 kernel/fork.c  static inline void rcu_copy_process(struct task_struct *p)
p                1664 kernel/fork.c  	p->rcu_read_lock_nesting = 0;
p                1665 kernel/fork.c  	p->rcu_read_unlock_special.s = 0;
p                1666 kernel/fork.c  	p->rcu_blocked_node = NULL;
p                1667 kernel/fork.c  	INIT_LIST_HEAD(&p->rcu_node_entry);
p                1670 kernel/fork.c  	p->rcu_tasks_holdout = false;
p                1671 kernel/fork.c  	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
p                1672 kernel/fork.c  	p->rcu_tasks_idle_cpu = -1;
p                1767 kernel/fork.c  	struct task_struct *p;
p                1847 kernel/fork.c  	p = dup_task_struct(current, node);
p                1848 kernel/fork.c  	if (!p)
p                1857 kernel/fork.c  	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL;
p                1861 kernel/fork.c  	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? args->child_tid : NULL;
p                1863 kernel/fork.c  	ftrace_graph_init_task(p);
p                1865 kernel/fork.c  	rt_mutex_init_task(p);
p                1868 kernel/fork.c  	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
p                1869 kernel/fork.c  	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
p                1872 kernel/fork.c  	if (atomic_read(&p->real_cred->user->processes) >=
p                1873 kernel/fork.c  			task_rlimit(p, RLIMIT_NPROC)) {
p                1874 kernel/fork.c  		if (p->real_cred->user != INIT_USER &&
p                1880 kernel/fork.c  	retval = copy_creds(p, clone_flags);
p                1893 kernel/fork.c  	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
p                1894 kernel/fork.c  	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
p                1895 kernel/fork.c  	p->flags |= PF_FORKNOEXEC;
p                1896 kernel/fork.c  	INIT_LIST_HEAD(&p->children);
p                1897 kernel/fork.c  	INIT_LIST_HEAD(&p->sibling);
p                1898 kernel/fork.c  	rcu_copy_process(p);
p                1899 kernel/fork.c  	p->vfork_done = NULL;
p                1900 kernel/fork.c  	spin_lock_init(&p->alloc_lock);
p                1902 kernel/fork.c  	init_sigpending(&p->pending);
p                1904 kernel/fork.c  	p->utime = p->stime = p->gtime = 0;
p                1906 kernel/fork.c  	p->utimescaled = p->stimescaled = 0;
p                1908 kernel/fork.c  	prev_cputime_init(&p->prev_cputime);
p                1911 kernel/fork.c  	seqcount_init(&p->vtime.seqcount);
p                1912 kernel/fork.c  	p->vtime.starttime = 0;
p                1913 kernel/fork.c  	p->vtime.state = VTIME_INACTIVE;
p                1917 kernel/fork.c  	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
p                1920 kernel/fork.c  	p->default_timer_slack_ns = current->timer_slack_ns;
p                1923 kernel/fork.c  	p->psi_flags = 0;
p                1926 kernel/fork.c  	task_io_accounting_init(&p->ioac);
p                1927 kernel/fork.c  	acct_clear_integrals(p);
p                1929 kernel/fork.c  	posix_cputimers_init(&p->posix_cputimers);
p                1931 kernel/fork.c  	p->io_context = NULL;
p                1932 kernel/fork.c  	audit_set_context(p, NULL);
p                1933 kernel/fork.c  	cgroup_fork(p);
p                1935 kernel/fork.c  	p->mempolicy = mpol_dup(p->mempolicy);
p                1936 kernel/fork.c  	if (IS_ERR(p->mempolicy)) {
p                1937 kernel/fork.c  		retval = PTR_ERR(p->mempolicy);
p                1938 kernel/fork.c  		p->mempolicy = NULL;
p                1943 kernel/fork.c  	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
p                1944 kernel/fork.c  	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
p                1945 kernel/fork.c  	seqcount_init(&p->mems_allowed_seq);
p                1948 kernel/fork.c  	p->irq_events = 0;
p                1949 kernel/fork.c  	p->hardirqs_enabled = 0;
p                1950 kernel/fork.c  	p->hardirq_enable_ip = 0;
p                1951 kernel/fork.c  	p->hardirq_enable_event = 0;
p                1952 kernel/fork.c  	p->hardirq_disable_ip = _THIS_IP_;
p                1953 kernel/fork.c  	p->hardirq_disable_event = 0;
p                1954 kernel/fork.c  	p->softirqs_enabled = 1;
p                1955 kernel/fork.c  	p->softirq_enable_ip = _THIS_IP_;
p                1956 kernel/fork.c  	p->softirq_enable_event = 0;
p                1957 kernel/fork.c  	p->softirq_disable_ip = 0;
p                1958 kernel/fork.c  	p->softirq_disable_event = 0;
p                1959 kernel/fork.c  	p->hardirq_context = 0;
p                1960 kernel/fork.c  	p->softirq_context = 0;
p                1963 kernel/fork.c  	p->pagefault_disabled = 0;
p                1966 kernel/fork.c  	lockdep_init_task(p);
p                1970 kernel/fork.c  	p->blocked_on = NULL; /* not blocked yet */
p                1973 kernel/fork.c  	p->sequential_io	= 0;
p                1974 kernel/fork.c  	p->sequential_io_avg	= 0;
p                1978 kernel/fork.c  	retval = sched_fork(clone_flags, p);
p                1982 kernel/fork.c  	retval = perf_event_init_task(p);
p                1985 kernel/fork.c  	retval = audit_alloc(p);
p                1989 kernel/fork.c  	shm_init_task(p);
p                1990 kernel/fork.c  	retval = security_task_alloc(p, clone_flags);
p                1993 kernel/fork.c  	retval = copy_semundo(clone_flags, p);
p                1996 kernel/fork.c  	retval = copy_files(clone_flags, p);
p                1999 kernel/fork.c  	retval = copy_fs(clone_flags, p);
p                2002 kernel/fork.c  	retval = copy_sighand(clone_flags, p);
p                2005 kernel/fork.c  	retval = copy_signal(clone_flags, p);
p                2008 kernel/fork.c  	retval = copy_mm(clone_flags, p);
p                2011 kernel/fork.c  	retval = copy_namespaces(clone_flags, p);
p                2014 kernel/fork.c  	retval = copy_io(clone_flags, p);
p                2017 kernel/fork.c  	retval = copy_thread_tls(clone_flags, args->stack, args->stack_size, p,
p                2022 kernel/fork.c  	stackleak_task_init(p);
p                2025 kernel/fork.c  		pid = alloc_pid(p->nsproxy->pid_ns_for_children);
p                2059 kernel/fork.c  	p->plug = NULL;
p                2061 kernel/fork.c  	futex_init_task(p);
p                2067 kernel/fork.c  		sas_ss_reset(p);
p                2073 kernel/fork.c  	user_disable_single_step(p);
p                2074 kernel/fork.c  	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
p                2076 kernel/fork.c  	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
p                2078 kernel/fork.c  	clear_tsk_latency_tracing(p);
p                2081 kernel/fork.c  	p->pid = pid_nr(pid);
p                2083 kernel/fork.c  		p->exit_signal = -1;
p                2084 kernel/fork.c  		p->group_leader = current->group_leader;
p                2085 kernel/fork.c  		p->tgid = current->tgid;
p                2088 kernel/fork.c  			p->exit_signal = current->group_leader->exit_signal;
p                2090 kernel/fork.c  			p->exit_signal = args->exit_signal;
p                2091 kernel/fork.c  		p->group_leader = p;
p                2092 kernel/fork.c  		p->tgid = p->pid;
p                2095 kernel/fork.c  	p->nr_dirtied = 0;
p                2096 kernel/fork.c  	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
p                2097 kernel/fork.c  	p->dirty_paused_when = 0;
p                2099 kernel/fork.c  	p->pdeath_signal = 0;
p                2100 kernel/fork.c  	INIT_LIST_HEAD(&p->thread_group);
p                2101 kernel/fork.c  	p->task_works = NULL;
p                2110 kernel/fork.c  	retval = cgroup_can_fork(p);
p                2122 kernel/fork.c  	p->start_time = ktime_get_ns();
p                2123 kernel/fork.c  	p->real_start_time = ktime_get_boottime_ns();
p                2133 kernel/fork.c  		p->real_parent = current->real_parent;
p                2134 kernel/fork.c  		p->parent_exec_id = current->parent_exec_id;
p                2136 kernel/fork.c  		p->real_parent = current;
p                2137 kernel/fork.c  		p->parent_exec_id = current->self_exec_id;
p                2140 kernel/fork.c  	klp_copy_process(p);
p                2148 kernel/fork.c  	copy_seccomp(p);
p                2150 kernel/fork.c  	rseq_fork(p, clone_flags);
p                2168 kernel/fork.c  	init_task_pid_links(p);
p                2169 kernel/fork.c  	if (likely(p->pid)) {
p                2170 kernel/fork.c  		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
p                2172 kernel/fork.c  		init_task_pid(p, PIDTYPE_PID, pid);
p                2173 kernel/fork.c  		if (thread_group_leader(p)) {
p                2174 kernel/fork.c  			init_task_pid(p, PIDTYPE_TGID, pid);
p                2175 kernel/fork.c  			init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
p                2176 kernel/fork.c  			init_task_pid(p, PIDTYPE_SID, task_session(current));
p                2179 kernel/fork.c  				ns_of_pid(pid)->child_reaper = p;
p                2180 kernel/fork.c  				p->signal->flags |= SIGNAL_UNKILLABLE;
p                2182 kernel/fork.c  			p->signal->shared_pending.signal = delayed.signal;
p                2183 kernel/fork.c  			p->signal->tty = tty_kref_get(current->signal->tty);
p                2189 kernel/fork.c  			p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
p                2190 kernel/fork.c  							 p->real_parent->signal->is_child_subreaper;
p                2191 kernel/fork.c  			list_add_tail(&p->sibling, &p->real_parent->children);
p                2192 kernel/fork.c  			list_add_tail_rcu(&p->tasks, &init_task.tasks);
p                2193 kernel/fork.c  			attach_pid(p, PIDTYPE_TGID);
p                2194 kernel/fork.c  			attach_pid(p, PIDTYPE_PGID);
p                2195 kernel/fork.c  			attach_pid(p, PIDTYPE_SID);
p                2201 kernel/fork.c  			task_join_group_stop(p);
p                2202 kernel/fork.c  			list_add_tail_rcu(&p->thread_group,
p                2203 kernel/fork.c  					  &p->group_leader->thread_group);
p                2204 kernel/fork.c  			list_add_tail_rcu(&p->thread_node,
p                2205 kernel/fork.c  					  &p->signal->thread_head);
p                2207 kernel/fork.c  		attach_pid(p, PIDTYPE_PID);
p                2213 kernel/fork.c  	syscall_tracepoint_update(p);
p                2216 kernel/fork.c  	proc_fork_connector(p);
p                2217 kernel/fork.c  	cgroup_post_fork(p);
p                2219 kernel/fork.c  	perf_event_fork(p);
p                2221 kernel/fork.c  	trace_task_newtask(p, clone_flags);
p                2222 kernel/fork.c  	uprobe_copy_process(p, clone_flags);
p                2224 kernel/fork.c  	return p;
p                2229 kernel/fork.c  	cgroup_cancel_fork(p);
p                2241 kernel/fork.c  	exit_thread(p);
p                2243 kernel/fork.c  	if (p->io_context)
p                2244 kernel/fork.c  		exit_io_context(p);
p                2246 kernel/fork.c  	exit_task_namespaces(p);
p                2248 kernel/fork.c  	if (p->mm) {
p                2249 kernel/fork.c  		mm_clear_owner(p->mm, p);
p                2250 kernel/fork.c  		mmput(p->mm);
p                2254 kernel/fork.c  		free_signal_struct(p->signal);
p                2256 kernel/fork.c  	__cleanup_sighand(p->sighand);
p                2258 kernel/fork.c  	exit_fs(p); /* blocking */
p                2260 kernel/fork.c  	exit_files(p); /* blocking */
p                2262 kernel/fork.c  	exit_sem(p);
p                2264 kernel/fork.c  	security_task_free(p);
p                2266 kernel/fork.c  	audit_free(p);
p                2268 kernel/fork.c  	perf_event_free_task(p);
p                2270 kernel/fork.c  	lockdep_free_task(p);
p                2272 kernel/fork.c  	mpol_put(p->mempolicy);
p                2275 kernel/fork.c  	delayacct_tsk_free(p);
p                2277 kernel/fork.c  	atomic_dec(&p->cred->user->processes);
p                2278 kernel/fork.c  	exit_creds(p);
p                2280 kernel/fork.c  	p->state = TASK_DEAD;
p                2281 kernel/fork.c  	put_task_stack(p);
p                2282 kernel/fork.c  	delayed_free_task(p);
p                2334 kernel/fork.c  	struct task_struct *p;
p                2356 kernel/fork.c  	p = copy_process(NULL, trace, NUMA_NO_NODE, args);
p                2359 kernel/fork.c  	if (IS_ERR(p))
p                2360 kernel/fork.c  		return PTR_ERR(p);
p                2366 kernel/fork.c  	trace_sched_process_fork(current, p);
p                2368 kernel/fork.c  	pid = get_task_pid(p, PIDTYPE_PID);
p                2375 kernel/fork.c  		p->vfork_done = &vfork;
p                2377 kernel/fork.c  		get_task_struct(p);
p                2380 kernel/fork.c  	wake_up_new_task(p);
p                2387 kernel/fork.c  		if (!wait_for_vfork_done(p, &vfork))
p                  37 kernel/freezer.c bool freezing_slow_path(struct task_struct *p)
p                  39 kernel/freezer.c 	if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
p                  42 kernel/freezer.c 	if (test_tsk_thread_flag(p, TIF_MEMDIE))
p                  45 kernel/freezer.c 	if (pm_nosig_freezing || cgroup_freezing(p))
p                  48 kernel/freezer.c 	if (pm_freezing && !(p->flags & PF_KTHREAD))
p                  94 kernel/freezer.c static void fake_signal_wake_up(struct task_struct *p)
p                  98 kernel/freezer.c 	if (lock_task_sighand(p, &flags)) {
p                  99 kernel/freezer.c 		signal_wake_up(p, 0);
p                 100 kernel/freezer.c 		unlock_task_sighand(p, &flags);
p                 115 kernel/freezer.c bool freeze_task(struct task_struct *p)
p                 128 kernel/freezer.c 	if (freezer_should_skip(p))
p                 132 kernel/freezer.c 	if (!freezing(p) || frozen(p)) {
p                 137 kernel/freezer.c 	if (!(p->flags & PF_KTHREAD))
p                 138 kernel/freezer.c 		fake_signal_wake_up(p);
p                 140 kernel/freezer.c 		wake_up_state(p, TASK_INTERRUPTIBLE);
p                 146 kernel/freezer.c void __thaw_task(struct task_struct *p)
p                 151 kernel/freezer.c 	if (frozen(p))
p                 152 kernel/freezer.c 		wake_up_process(p);
p                1292 kernel/futex.c 	struct task_struct *p;
p                1303 kernel/futex.c 	p = find_get_task_by_vpid(pid);
p                1304 kernel/futex.c 	if (!p)
p                1307 kernel/futex.c 	if (unlikely(p->flags & PF_KTHREAD)) {
p                1308 kernel/futex.c 		put_task_struct(p);
p                1317 kernel/futex.c 	raw_spin_lock_irq(&p->pi_lock);
p                1318 kernel/futex.c 	if (unlikely(p->futex_state != FUTEX_STATE_OK)) {
p                1324 kernel/futex.c 		int ret = handle_exit_race(uaddr, uval, p);
p                1326 kernel/futex.c 		raw_spin_unlock_irq(&p->pi_lock);
p                1337 kernel/futex.c 			*exiting = p;
p                1339 kernel/futex.c 			put_task_struct(p);
p                1355 kernel/futex.c 	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
p                1361 kernel/futex.c 	list_add(&pi_state->list, &p->pi_state_list);
p                1366 kernel/futex.c 	pi_state->owner = p;
p                1367 kernel/futex.c 	raw_spin_unlock_irq(&p->pi_lock);
p                1369 kernel/futex.c 	put_task_struct(p);
p                1542 kernel/futex.c 	struct task_struct *p = q->task;
p                1547 kernel/futex.c 	get_task_struct(p);
p                1562 kernel/futex.c 	wake_q_add_safe(wake_q, p);
p                3525 kernel/futex.c 	struct task_struct *p;
p                3534 kernel/futex.c 		p = current;
p                3536 kernel/futex.c 		p = find_task_by_vpid(pid);
p                3537 kernel/futex.c 		if (!p)
p                3542 kernel/futex.c 	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
p                3545 kernel/futex.c 	head = p->robust_list;
p                4086 kernel/futex.c 	struct task_struct *p;
p                4095 kernel/futex.c 		p = current;
p                4097 kernel/futex.c 		p = find_task_by_vpid(pid);
p                4098 kernel/futex.c 		if (!p)
p                4103 kernel/futex.c 	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
p                4106 kernel/futex.c 	head = p->compat_robust_list;
p                 149 kernel/irq/debugfs.c static int irq_debug_show(struct seq_file *m, void *p)
p                 152 kernel/irq/irqdesc.c 	char *p = "";
p                 157 kernel/irq/irqdesc.c 		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
p                 158 kernel/irq/irqdesc.c 		p = ",";
p                 251 kernel/irq/irqdesc.c 	char *p = "";
p                 256 kernel/irq/irqdesc.c 				 p, action->name);
p                 257 kernel/irq/irqdesc.c 		p = ",";
p                1758 kernel/irq/irqdomain.c static int irq_domain_debug_show(struct seq_file *m, void *p)
p                 451 kernel/irq/proc.c int __weak arch_show_interrupts(struct seq_file *p, int prec)
p                 460 kernel/irq/proc.c int show_interrupts(struct seq_file *p, void *v)
p                 473 kernel/irq/proc.c 		return arch_show_interrupts(p, prec);
p                 480 kernel/irq/proc.c 		seq_printf(p, "%*s", prec + 8, "");
p                 482 kernel/irq/proc.c 			seq_printf(p, "CPU%-8d", j);
p                 483 kernel/irq/proc.c 		seq_putc(p, '\n');
p                 498 kernel/irq/proc.c 	seq_printf(p, "%*d: ", prec, i);
p                 500 kernel/irq/proc.c 		seq_printf(p, "%10u ", desc->kstat_irqs ?
p                 506 kernel/irq/proc.c 			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
p                 508 kernel/irq/proc.c 			seq_printf(p, " %8s", desc->irq_data.chip->name);
p                 510 kernel/irq/proc.c 			seq_printf(p, " %8s", "-");
p                 512 kernel/irq/proc.c 		seq_printf(p, " %8s", "None");
p                 515 kernel/irq/proc.c 		seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
p                 517 kernel/irq/proc.c 		seq_printf(p, " %*s", prec, "");
p                 519 kernel/irq/proc.c 	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
p                 522 kernel/irq/proc.c 		seq_printf(p, "-%-8s", desc->name);
p                 526 kernel/irq/proc.c 		seq_printf(p, "  %s", action->name);
p                 528 kernel/irq/proc.c 			seq_printf(p, ", %s", action->name);
p                 531 kernel/irq/proc.c 	seq_putc(p, '\n');
p                 575 kernel/kallsyms.c static void *s_next(struct seq_file *m, void *p, loff_t *pos)
p                 581 kernel/kallsyms.c 	return p;
p                 591 kernel/kallsyms.c static void s_stop(struct seq_file *m, void *p)
p                 595 kernel/kallsyms.c static int s_show(struct seq_file *m, void *p)
p                  73 kernel/kexec_core.c int kexec_should_crash(struct task_struct *p)
p                  86 kernel/kexec_core.c 	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
p                 326 kernel/kprobes.c 	struct kprobe *p;
p                 329 kernel/kprobes.c 	hlist_for_each_entry_rcu(p, head, hlist) {
p                 330 kernel/kprobes.c 		if (p->addr == addr)
p                 331 kernel/kprobes.c 			return p;
p                 338 kernel/kprobes.c static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
p                 341 kernel/kprobes.c static inline int kprobe_aggrprobe(struct kprobe *p)
p                 343 kernel/kprobes.c 	return p->pre_handler == aggr_pre_handler;
p                 347 kernel/kprobes.c static inline int kprobe_unused(struct kprobe *p)
p                 349 kernel/kprobes.c 	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
p                 350 kernel/kprobes.c 	       list_empty(&p->list);
p                 356 kernel/kprobes.c static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
p                 358 kernel/kprobes.c 	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
p                 359 kernel/kprobes.c 	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
p                 370 kernel/kprobes.c void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
p                 374 kernel/kprobes.c 	list_for_each_entry_rcu(kp, &p->list, list) {
p                 385 kernel/kprobes.c static void free_aggr_kprobe(struct kprobe *p)
p                 389 kernel/kprobes.c 	op = container_of(p, struct optimized_kprobe, kp);
p                 391 kernel/kprobes.c 	arch_remove_kprobe(p);
p                 396 kernel/kprobes.c static inline int kprobe_optready(struct kprobe *p)
p                 400 kernel/kprobes.c 	if (kprobe_aggrprobe(p)) {
p                 401 kernel/kprobes.c 		op = container_of(p, struct optimized_kprobe, kp);
p                 409 kernel/kprobes.c static inline int kprobe_disarmed(struct kprobe *p)
p                 414 kernel/kprobes.c 	if (!kprobe_aggrprobe(p))
p                 415 kernel/kprobes.c 		return kprobe_disabled(p);
p                 417 kernel/kprobes.c 	op = container_of(p, struct optimized_kprobe, kp);
p                 419 kernel/kprobes.c 	return kprobe_disabled(p) && list_empty(&op->list);
p                 423 kernel/kprobes.c static int kprobe_queued(struct kprobe *p)
p                 427 kernel/kprobes.c 	if (kprobe_aggrprobe(p)) {
p                 428 kernel/kprobes.c 		op = container_of(p, struct optimized_kprobe, kp);
p                 442 kernel/kprobes.c 	struct kprobe *p = NULL;
p                 446 kernel/kprobes.c 	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
p                 447 kernel/kprobes.c 		p = get_kprobe((void *)(addr - i));
p                 449 kernel/kprobes.c 	if (p && kprobe_optready(p)) {
p                 450 kernel/kprobes.c 		op = container_of(p, struct optimized_kprobe, kp);
p                 452 kernel/kprobes.c 			return p;
p                 628 kernel/kprobes.c static void optimize_kprobe(struct kprobe *p)
p                 633 kernel/kprobes.c 	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
p                 634 kernel/kprobes.c 	    (kprobe_disabled(p) || kprobes_all_disarmed))
p                 638 kernel/kprobes.c 	if (p->post_handler)
p                 641 kernel/kprobes.c 	op = container_of(p, struct optimized_kprobe, kp);
p                 676 kernel/kprobes.c static void unoptimize_kprobe(struct kprobe *p, bool force)
p                 680 kernel/kprobes.c 	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
p                 683 kernel/kprobes.c 	op = container_of(p, struct optimized_kprobe, kp);
p                 684 kernel/kprobes.c 	if (!kprobe_optimized(p))
p                 738 kernel/kprobes.c static void kill_optimized_kprobe(struct kprobe *p)
p                 742 kernel/kprobes.c 	op = container_of(p, struct optimized_kprobe, kp);
p                 748 kernel/kprobes.c 	if (kprobe_unused(p)) {
p                 764 kernel/kprobes.c void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
p                 766 kernel/kprobes.c 	if (!kprobe_ftrace(p))
p                 767 kernel/kprobes.c 		arch_prepare_optimized_kprobe(op, p);
p                 771 kernel/kprobes.c static void prepare_optimized_kprobe(struct kprobe *p)
p                 775 kernel/kprobes.c 	op = container_of(p, struct optimized_kprobe, kp);
p                 776 kernel/kprobes.c 	__prepare_optimized_kprobe(op, p);
p                 780 kernel/kprobes.c static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
p                 789 kernel/kprobes.c 	op->kp.addr = p->addr;
p                 790 kernel/kprobes.c 	__prepare_optimized_kprobe(op, p);
p                 795 kernel/kprobes.c static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
p                 801 kernel/kprobes.c static void try_to_optimize_kprobe(struct kprobe *p)
p                 807 kernel/kprobes.c 	if (kprobe_ftrace(p))
p                 815 kernel/kprobes.c 	ap = alloc_aggr_kprobe(p);
p                 827 kernel/kprobes.c 	init_aggr_kprobe(ap, p);
p                 840 kernel/kprobes.c 	struct kprobe *p;
p                 852 kernel/kprobes.c 		hlist_for_each_entry_rcu(p, head, hlist)
p                 853 kernel/kprobes.c 			if (!kprobe_disabled(p))
p                 854 kernel/kprobes.c 				optimize_kprobe(p);
p                 865 kernel/kprobes.c 	struct kprobe *p;
p                 879 kernel/kprobes.c 		hlist_for_each_entry_rcu(p, head, hlist) {
p                 880 kernel/kprobes.c 			if (!kprobe_disabled(p))
p                 881 kernel/kprobes.c 				unoptimize_kprobe(p, false);
p                 915 kernel/kprobes.c static void __arm_kprobe(struct kprobe *p)
p                 920 kernel/kprobes.c 	_p = get_optimized_kprobe((unsigned long)p->addr);
p                 925 kernel/kprobes.c 	arch_arm_kprobe(p);
p                 926 kernel/kprobes.c 	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
p                 930 kernel/kprobes.c static void __disarm_kprobe(struct kprobe *p, bool reopt)
p                 935 kernel/kprobes.c 	unoptimize_kprobe(p, kprobes_all_disarmed);
p                 937 kernel/kprobes.c 	if (!kprobe_queued(p)) {
p                 938 kernel/kprobes.c 		arch_disarm_kprobe(p);
p                 940 kernel/kprobes.c 		_p = get_optimized_kprobe((unsigned long)p->addr);
p                 949 kernel/kprobes.c #define optimize_kprobe(p)			do {} while (0)
p                 950 kernel/kprobes.c #define unoptimize_kprobe(p, f)			do {} while (0)
p                 951 kernel/kprobes.c #define kill_optimized_kprobe(p)		do {} while (0)
p                 952 kernel/kprobes.c #define prepare_optimized_kprobe(p)		do {} while (0)
p                 953 kernel/kprobes.c #define try_to_optimize_kprobe(p)		do {} while (0)
p                 954 kernel/kprobes.c #define __arm_kprobe(p)				arch_arm_kprobe(p)
p                 955 kernel/kprobes.c #define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
p                 956 kernel/kprobes.c #define kprobe_disarmed(p)			kprobe_disabled(p)
p                 971 kernel/kprobes.c static void free_aggr_kprobe(struct kprobe *p)
p                 973 kernel/kprobes.c 	arch_remove_kprobe(p);
p                 974 kernel/kprobes.c 	kfree(p);
p                 977 kernel/kprobes.c static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
p                 998 kernel/kprobes.c static int prepare_kprobe(struct kprobe *p)
p                1000 kernel/kprobes.c 	if (!kprobe_ftrace(p))
p                1001 kernel/kprobes.c 		return arch_prepare_kprobe(p);
p                1003 kernel/kprobes.c 	return arch_prepare_kprobe_ftrace(p);
p                1007 kernel/kprobes.c static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
p                1012 kernel/kprobes.c 	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
p                1015 kernel/kprobes.c 			 p->addr, ret);
p                1035 kernel/kprobes.c 	ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
p                1039 kernel/kprobes.c static int arm_kprobe_ftrace(struct kprobe *p)
p                1041 kernel/kprobes.c 	bool ipmodify = (p->post_handler != NULL);
p                1043 kernel/kprobes.c 	return __arm_kprobe_ftrace(p,
p                1049 kernel/kprobes.c static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
p                1062 kernel/kprobes.c 	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
p                1064 kernel/kprobes.c 		  p->addr, ret);
p                1068 kernel/kprobes.c static int disarm_kprobe_ftrace(struct kprobe *p)
p                1070 kernel/kprobes.c 	bool ipmodify = (p->post_handler != NULL);
p                1072 kernel/kprobes.c 	return __disarm_kprobe_ftrace(p,
p                1077 kernel/kprobes.c #define prepare_kprobe(p)	arch_prepare_kprobe(p)
p                1078 kernel/kprobes.c #define arm_kprobe_ftrace(p)	(-ENODEV)
p                1079 kernel/kprobes.c #define disarm_kprobe_ftrace(p)	(-ENODEV)
p                1116 kernel/kprobes.c static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
p                1120 kernel/kprobes.c 	list_for_each_entry_rcu(kp, &p->list, list) {
p                1132 kernel/kprobes.c static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
p                1137 kernel/kprobes.c 	list_for_each_entry_rcu(kp, &p->list, list) {
p                1147 kernel/kprobes.c static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
p                1165 kernel/kprobes.c void kprobes_inc_nmissed_count(struct kprobe *p)
p                1168 kernel/kprobes.c 	if (!kprobe_aggrprobe(p)) {
p                1169 kernel/kprobes.c 		p->nmissed++;
p                1171 kernel/kprobes.c 		list_for_each_entry_rcu(kp, &p->list, list)
p                1305 kernel/kprobes.c static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
p                1307 kernel/kprobes.c 	if (p->post_handler)
p                1310 kernel/kprobes.c 	list_add_rcu(&p->list, &ap->list);
p                1311 kernel/kprobes.c 	if (p->post_handler && !ap->post_handler)
p                1321 kernel/kprobes.c static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
p                1324 kernel/kprobes.c 	copy_kprobe(p, ap);
p                1326 kernel/kprobes.c 	ap->addr = p->addr;
p                1327 kernel/kprobes.c 	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
p                1331 kernel/kprobes.c 	if (p->post_handler && !kprobe_gone(p))
p                1337 kernel/kprobes.c 	list_add_rcu(&p->list, &ap->list);
p                1338 kernel/kprobes.c 	hlist_replace_rcu(&p->hlist, &ap->hlist);
p                1345 kernel/kprobes.c static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
p                1399 kernel/kprobes.c 	copy_kprobe(ap, p);
p                1400 kernel/kprobes.c 	ret = add_new_kprobe(ap, p);
p                1407 kernel/kprobes.c 	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
p                1414 kernel/kprobes.c 				list_del_rcu(&p->list);
p                1448 kernel/kprobes.c 	char symname[KSYM_NAME_LEN], *p;
p                1455 kernel/kprobes.c 		p = strchr(symname, '.');
p                1456 kernel/kprobes.c 		if (!p)
p                1458 kernel/kprobes.c 		*p = '\0';
p                1492 kernel/kprobes.c static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
p                1494 kernel/kprobes.c 	return _kprobe_addr(p->addr, p->symbol_name, p->offset);
p                1498 kernel/kprobes.c static struct kprobe *__get_valid_kprobe(struct kprobe *p)
p                1502 kernel/kprobes.c 	ap = get_kprobe(p->addr);
p                1506 kernel/kprobes.c 	if (p != ap) {
p                1508 kernel/kprobes.c 			if (list_p == p)
p                1518 kernel/kprobes.c static inline int check_kprobe_rereg(struct kprobe *p)
p                1523 kernel/kprobes.c 	if (__get_valid_kprobe(p))
p                1530 kernel/kprobes.c int __weak arch_check_ftrace_location(struct kprobe *p)
p                1534 kernel/kprobes.c 	ftrace_addr = ftrace_location((unsigned long)p->addr);
p                1538 kernel/kprobes.c 		if ((unsigned long)p->addr != ftrace_addr)
p                1540 kernel/kprobes.c 		p->flags |= KPROBE_FLAG_FTRACE;
p                1548 kernel/kprobes.c static int check_kprobe_address_safe(struct kprobe *p,
p                1553 kernel/kprobes.c 	ret = arch_check_ftrace_location(p);
p                1560 kernel/kprobes.c 	if (!kernel_text_address((unsigned long) p->addr) ||
p                1561 kernel/kprobes.c 	    within_kprobe_blacklist((unsigned long) p->addr) ||
p                1562 kernel/kprobes.c 	    jump_label_text_reserved(p->addr, p->addr) ||
p                1563 kernel/kprobes.c 	    find_bug((unsigned long)p->addr)) {
p                1569 kernel/kprobes.c 	*probed_mod = __module_text_address((unsigned long) p->addr);
p                1584 kernel/kprobes.c 		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
p                1598 kernel/kprobes.c int register_kprobe(struct kprobe *p)
p                1606 kernel/kprobes.c 	addr = kprobe_addr(p);
p                1609 kernel/kprobes.c 	p->addr = addr;
p                1611 kernel/kprobes.c 	ret = check_kprobe_rereg(p);
p                1616 kernel/kprobes.c 	p->flags &= KPROBE_FLAG_DISABLED;
p                1617 kernel/kprobes.c 	p->nmissed = 0;
p                1618 kernel/kprobes.c 	INIT_LIST_HEAD(&p->list);
p                1620 kernel/kprobes.c 	ret = check_kprobe_address_safe(p, &probed_mod);
p                1626 kernel/kprobes.c 	old_p = get_kprobe(p->addr);
p                1629 kernel/kprobes.c 		ret = register_aggr_kprobe(old_p, p);
p                1636 kernel/kprobes.c 	ret = prepare_kprobe(p);
p                1642 kernel/kprobes.c 	INIT_HLIST_NODE(&p->hlist);
p                1643 kernel/kprobes.c 	hlist_add_head_rcu(&p->hlist,
p                1644 kernel/kprobes.c 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
p                1646 kernel/kprobes.c 	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
p                1647 kernel/kprobes.c 		ret = arm_kprobe(p);
p                1649 kernel/kprobes.c 			hlist_del_rcu(&p->hlist);
p                1656 kernel/kprobes.c 	try_to_optimize_kprobe(p);
p                1684 kernel/kprobes.c static struct kprobe *__disable_kprobe(struct kprobe *p)
p                1690 kernel/kprobes.c 	orig_p = __get_valid_kprobe(p);
p                1694 kernel/kprobes.c 	if (!kprobe_disabled(p)) {
p                1696 kernel/kprobes.c 		if (p != orig_p)
p                1697 kernel/kprobes.c 			p->flags |= KPROBE_FLAG_DISABLED;
p                1700 kernel/kprobes.c 		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
p                1709 kernel/kprobes.c 					p->flags &= ~KPROBE_FLAG_DISABLED;
p                1723 kernel/kprobes.c static int __unregister_kprobe_top(struct kprobe *p)
p                1728 kernel/kprobes.c 	ap = __disable_kprobe(p);
p                1732 kernel/kprobes.c 	if (ap == p)
p                1750 kernel/kprobes.c 		if (p->post_handler && !kprobe_gone(p)) {
p                1752 kernel/kprobes.c 				if ((list_p != p) && (list_p->post_handler))
p                1762 kernel/kprobes.c 		list_del_rcu(&p->list);
p                1777 kernel/kprobes.c static void __unregister_kprobe_bottom(struct kprobe *p)
p                1781 kernel/kprobes.c 	if (list_empty(&p->list))
p                1783 kernel/kprobes.c 		arch_remove_kprobe(p);
p                1784 kernel/kprobes.c 	else if (list_is_singular(&p->list)) {
p                1786 kernel/kprobes.c 		ap = list_entry(p->list.next, struct kprobe, list);
p                1787 kernel/kprobes.c 		list_del(&p->list);
p                1811 kernel/kprobes.c void unregister_kprobe(struct kprobe *p)
p                1813 kernel/kprobes.c 	unregister_kprobes(&p, 1);
p                1858 kernel/kprobes.c static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
p                1860 kernel/kprobes.c 	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
p                2052 kernel/kprobes.c static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
p                2061 kernel/kprobes.c static void kill_kprobe(struct kprobe *p)
p                2065 kernel/kprobes.c 	p->flags |= KPROBE_FLAG_GONE;
p                2066 kernel/kprobes.c 	if (kprobe_aggrprobe(p)) {
p                2071 kernel/kprobes.c 		list_for_each_entry_rcu(kp, &p->list, list)
p                2073 kernel/kprobes.c 		p->post_handler = NULL;
p                2074 kernel/kprobes.c 		kill_optimized_kprobe(p);
p                2080 kernel/kprobes.c 	arch_remove_kprobe(p);
p                2087 kernel/kprobes.c 	struct kprobe *p;
p                2092 kernel/kprobes.c 	p = __disable_kprobe(kp);
p                2093 kernel/kprobes.c 	if (IS_ERR(p))
p                2094 kernel/kprobes.c 		ret = PTR_ERR(p);
p                2105 kernel/kprobes.c 	struct kprobe *p;
p                2110 kernel/kprobes.c 	p = __get_valid_kprobe(kp);
p                2111 kernel/kprobes.c 	if (unlikely(p == NULL)) {
p                2122 kernel/kprobes.c 	if (p != kp)
p                2125 kernel/kprobes.c 	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
p                2126 kernel/kprobes.c 		p->flags &= ~KPROBE_FLAG_DISABLED;
p                2127 kernel/kprobes.c 		ret = arm_kprobe(p);
p                2129 kernel/kprobes.c 			p->flags |= KPROBE_FLAG_DISABLED;
p                2224 kernel/kprobes.c 	struct kprobe *p;
p                2240 kernel/kprobes.c 		hlist_for_each_entry_rcu(p, head, hlist)
p                2241 kernel/kprobes.c 			if (within_module_init((unsigned long)p->addr, mod) ||
p                2243 kernel/kprobes.c 			     within_module_core((unsigned long)p->addr, mod))) {
p                2255 kernel/kprobes.c 				kill_kprobe(p);
p                2328 kernel/kprobes.c static void report_probe(struct seq_file *pi, struct kprobe *p,
p                2332 kernel/kprobes.c 	void *addr = p->addr;
p                2334 kernel/kprobes.c 	if (p->pre_handler == pre_handler_kretprobe)
p                2348 kernel/kprobes.c 			addr, kprobe_type, p->addr);
p                2351 kernel/kprobes.c 		pp = p;
p                2353 kernel/kprobes.c 		(kprobe_gone(p) ? "[GONE]" : ""),
p                2354 kernel/kprobes.c 		((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
p                2380 kernel/kprobes.c 	struct kprobe *p, *kp;
p                2388 kernel/kprobes.c 	hlist_for_each_entry_rcu(p, head, hlist) {
p                2389 kernel/kprobes.c 		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
p                2391 kernel/kprobes.c 		if (kprobe_aggrprobe(p)) {
p                2392 kernel/kprobes.c 			list_for_each_entry_rcu(kp, &p->list, list)
p                2393 kernel/kprobes.c 				report_probe(pi, kp, sym, offset, modname, p);
p                2395 kernel/kprobes.c 			report_probe(pi, p, sym, offset, modname, NULL);
p                2471 kernel/kprobes.c 	struct kprobe *p;
p                2491 kernel/kprobes.c 		hlist_for_each_entry_rcu(p, head, hlist) {
p                2492 kernel/kprobes.c 			if (!kprobe_disabled(p)) {
p                2493 kernel/kprobes.c 				err = arm_kprobe(p);
p                2517 kernel/kprobes.c 	struct kprobe *p;
p                2534 kernel/kprobes.c 		hlist_for_each_entry_rcu(p, head, hlist) {
p                2535 kernel/kprobes.c 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
p                2536 kernel/kprobes.c 				err = disarm_kprobe(p, false);
p                 395 kernel/kthread.c static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
p                 399 kernel/kthread.c 	if (!wait_task_inactive(p, state)) {
p                 405 kernel/kthread.c 	raw_spin_lock_irqsave(&p->pi_lock, flags);
p                 406 kernel/kthread.c 	do_set_cpus_allowed(p, mask);
p                 407 kernel/kthread.c 	p->flags |= PF_NO_SETAFFINITY;
p                 408 kernel/kthread.c 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
p                 411 kernel/kthread.c static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
p                 413 kernel/kthread.c 	__kthread_bind_mask(p, cpumask_of(cpu), state);
p                 416 kernel/kthread.c void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
p                 418 kernel/kthread.c 	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
p                 430 kernel/kthread.c void kthread_bind(struct task_struct *p, unsigned int cpu)
p                 432 kernel/kthread.c 	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
p                 451 kernel/kthread.c 	struct task_struct *p;
p                 453 kernel/kthread.c 	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
p                 455 kernel/kthread.c 	if (IS_ERR(p))
p                 456 kernel/kthread.c 		return p;
p                 457 kernel/kthread.c 	kthread_bind(p, cpu);
p                 459 kernel/kthread.c 	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
p                 460 kernel/kthread.c 	to_kthread(p)->cpu = cpu;
p                 461 kernel/kthread.c 	return p;
p                  66 kernel/latencytop.c void clear_tsk_latency_tracing(struct task_struct *p)
p                  71 kernel/latencytop.c 	memset(&p->latency_record, 0, sizeof(p->latency_record));
p                  72 kernel/latencytop.c 	p->latency_record_count = 0;
p                 695 kernel/locking/lockdep.c static void lockdep_print_held_locks(struct task_struct *p)
p                 697 kernel/locking/lockdep.c 	int i, depth = READ_ONCE(p->lockdep_depth);
p                 700 kernel/locking/lockdep.c 		printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
p                 703 kernel/locking/lockdep.c 		       depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
p                 708 kernel/locking/lockdep.c 	if (p->state == TASK_RUNNING && p != current)
p                 712 kernel/locking/lockdep.c 		print_lock(p->held_locks + i);
p                4888 kernel/locking/lockdep.c 	void *const p = class;
p                4894 kernel/locking/lockdep.c 	memset(p + offset, 0, sizeof(*class) - offset);
p                5336 kernel/locking/lockdep.c 	struct task_struct *g, *p;
p                5345 kernel/locking/lockdep.c 	for_each_process_thread(g, p) {
p                5346 kernel/locking/lockdep.c 		if (!p->lockdep_depth)
p                5348 kernel/locking/lockdep.c 		lockdep_print_held_locks(p);
p                 134 kernel/locking/qspinlock_stat.h #define pv_wait(p, v)	__pv_wait(p, v)
p                  35 kernel/locking/rtmutex-debug.c static void printk_task(struct task_struct *p)
p                  37 kernel/locking/rtmutex-debug.c 	if (p)
p                  38 kernel/locking/rtmutex-debug.c 		printk("%16s:%5d [%p, %3d]", p->comm, task_pid_nr(p), p, p->prio);
p                  71 kernel/locking/rtmutex.c 	unsigned long owner, *p = (unsigned long *) &lock->owner;
p                 134 kernel/locking/rtmutex.c 	owner = READ_ONCE(*p);
p                 136 kernel/locking/rtmutex.c 		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
p                 155 kernel/locking/rtmutex.c 	unsigned long owner, *p = (unsigned long *) &lock->owner;
p                 158 kernel/locking/rtmutex.c 		owner = *p;
p                 159 kernel/locking/rtmutex.c 	} while (cmpxchg_relaxed(p, owner,
p                 231 kernel/locking/rtmutex.c #define task_to_waiter(p)	\
p                 232 kernel/locking/rtmutex.c 	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
p                 338 kernel/locking/rtmutex.c static void rt_mutex_adjust_prio(struct task_struct *p)
p                 342 kernel/locking/rtmutex.c 	lockdep_assert_held(&p->pi_lock);
p                 344 kernel/locking/rtmutex.c 	if (task_has_pi_waiters(p))
p                 345 kernel/locking/rtmutex.c 		pi_task = task_top_pi_waiter(p)->task;
p                 347 kernel/locking/rtmutex.c 	rt_mutex_setprio(p, pi_task);
p                 381 kernel/locking/rtmutex.c static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
p                 383 kernel/locking/rtmutex.c 	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
p                  18 kernel/locking/rtmutex.h #define debug_rt_mutex_proxy_lock(l,p)			do { } while (0)
p                  65 kernel/locking/rtmutex_common.h static inline int task_has_pi_waiters(struct task_struct *p)
p                  67 kernel/locking/rtmutex_common.h 	return !RB_EMPTY_ROOT(&p->pi_waiters.rb_root);
p                  71 kernel/locking/rtmutex_common.h task_top_pi_waiter(struct task_struct *p)
p                  73 kernel/locking/rtmutex_common.h 	return rb_entry(p->pi_waiters.rb_leftmost,
p                  90 kernel/locking/rtmutex_common.h static inline int task_has_pi_waiters(struct task_struct *p)
p                  96 kernel/locking/rtmutex_common.h task_top_pi_waiter(struct task_struct *p)
p                2544 kernel/module.c 	char *p;
p                2560 kernel/module.c 	for (p = modinfo; p; p = next_string(p, &size)) {
p                2561 kernel/module.c 		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
p                2562 kernel/module.c 			return p + taglen + 1;
p                3391 kernel/module.c 	const char *p;
p                3397 kernel/module.c 	for (p = module_blacklist; *p; p += len) {
p                3398 kernel/module.c 		len = strcspn(p, ",");
p                3399 kernel/module.c 		if (strlen(module_name) == len && !memcmp(module_name, p, len))
p                3401 kernel/module.c 		if (p[len] == ',')
p                4328 kernel/module.c static void *m_next(struct seq_file *m, void *p, loff_t *pos)
p                4330 kernel/module.c 	return seq_list_next(p, &modules, pos);
p                4333 kernel/module.c static void m_stop(struct seq_file *m, void *p)
p                4338 kernel/module.c static int m_show(struct seq_file *m, void *p)
p                4340 kernel/module.c 	struct module *mod = list_entry(p, struct module, list);
p                 213 kernel/nsproxy.c void switch_task_namespaces(struct task_struct *p, struct nsproxy *new)
p                 219 kernel/nsproxy.c 	task_lock(p);
p                 220 kernel/nsproxy.c 	ns = p->nsproxy;
p                 221 kernel/nsproxy.c 	p->nsproxy = new;
p                 222 kernel/nsproxy.c 	task_unlock(p);
p                 228 kernel/nsproxy.c void exit_task_namespaces(struct task_struct *p)
p                 230 kernel/nsproxy.c 	switch_task_namespaces(p, NULL);
p                  48 kernel/params.c 	struct kmalloced_param *p;
p                  50 kernel/params.c 	p = kmalloc(sizeof(*p) + size, GFP_KERNEL);
p                  51 kernel/params.c 	if (!p)
p                  55 kernel/params.c 	list_add(&p->list, &kmalloced_params);
p                  58 kernel/params.c 	return p->val;
p                  64 kernel/params.c 	struct kmalloced_param *p;
p                  67 kernel/params.c 	list_for_each_entry(p, &kmalloced_params, list) {
p                  68 kernel/params.c 		if (p->val == param) {
p                  69 kernel/params.c 			list_del(&p->list);
p                  70 kernel/params.c 			kfree(p);
p                 457 kernel/params.c 	struct kernel_param p = *kp;
p                 463 kernel/params.c 		p.arg = arr->elem + arr->elemsize * i;
p                 464 kernel/params.c 		check_kparam_locked(p.mod);
p                 465 kernel/params.c 		ret = arr->ops->get(buffer + off, &p);
p                 850 kernel/params.c 	const struct module_version_attribute **p;
p                 854 kernel/params.c 	for (p = __start___modver; p < __stop___modver; p++) {
p                 855 kernel/params.c 		const struct module_version_attribute *vattr = *p;
p                 501 kernel/pid.c   	struct pid *p;
p                 509 kernel/pid.c   	p = find_get_pid(pid);
p                 510 kernel/pid.c   	if (!p)
p                 515 kernel/pid.c   	if (!pid_task(p, PIDTYPE_TGID))
p                 519 kernel/pid.c   	fd = ret ?: pidfd_create(p);
p                 520 kernel/pid.c   	put_pid(p);
p                 132 kernel/pid_namespace.c static void delayed_free_pidns(struct rcu_head *p)
p                 134 kernel/pid_namespace.c 	struct pid_namespace *ns = container_of(p, struct pid_namespace, rcu);
p                 417 kernel/pid_namespace.c 	struct pid_namespace *pid_ns, *p;
p                 420 kernel/pid_namespace.c 	pid_ns = p = to_pid_ns(ns)->parent;
p                 422 kernel/pid_namespace.c 		if (!p)
p                 424 kernel/pid_namespace.c 		if (p == active)
p                 426 kernel/pid_namespace.c 		p = p->parent;
p                1005 kernel/power/hibernate.c 	char *p;
p                1011 kernel/power/hibernate.c 	p = memchr(buf, '\n', n);
p                1012 kernel/power/hibernate.c 	len = p ? p - buf : n;
p                 150 kernel/power/main.c 	char *p;
p                 153 kernel/power/main.c 	p = memchr(buf, '\n', n);
p                 154 kernel/power/main.c 	len = p ? p - buf : n;
p                 233 kernel/power/main.c 	char *p;
p                 237 kernel/power/main.c 	p = memchr(buf, '\n', n);
p                 238 kernel/power/main.c 	len = p ? p - buf : n;
p                 577 kernel/power/main.c 	char *p;
p                 580 kernel/power/main.c 	p = memchr(buf, '\n', n);
p                 581 kernel/power/main.c 	len = p ? p - buf : n;
p                  33 kernel/power/process.c 	struct task_struct *g, *p;
p                  52 kernel/power/process.c 		for_each_process_thread(g, p) {
p                  53 kernel/power/process.c 			if (p == current || !freeze_task(p))
p                  56 kernel/power/process.c 			if (!freezer_should_skip(p))
p                 101 kernel/power/process.c 			for_each_process_thread(g, p) {
p                 102 kernel/power/process.c 				if (p != current && !freezer_should_skip(p)
p                 103 kernel/power/process.c 				    && freezing(p) && !frozen(p))
p                 104 kernel/power/process.c 					sched_show_task(p);
p                 191 kernel/power/process.c 	struct task_struct *g, *p;
p                 210 kernel/power/process.c 	for_each_process_thread(g, p) {
p                 212 kernel/power/process.c 		WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
p                 213 kernel/power/process.c 		__thaw_task(p);
p                 229 kernel/power/process.c 	struct task_struct *g, *p;
p                 237 kernel/power/process.c 	for_each_process_thread(g, p) {
p                 238 kernel/power/process.c 		if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
p                 239 kernel/power/process.c 			__thaw_task(p);
p                1267 kernel/power/snapshot.c static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
p                 751 kernel/printk/printk.c 	char *p = buf, *e = buf + size;
p                 759 kernel/printk/printk.c 			p += scnprintf(p, e - p, "\\x%02x", c);
p                 761 kernel/printk/printk.c 			append_char(&p, e, c);
p                 763 kernel/printk/printk.c 	append_char(&p, e, '\n');
p                 772 kernel/printk/printk.c 				append_char(&p, e, ' ');
p                 777 kernel/printk/printk.c 				append_char(&p, e, '\n');
p                 783 kernel/printk/printk.c 				p += scnprintf(p, e - p, "\\x%02x", c);
p                 787 kernel/printk/printk.c 			append_char(&p, e, c);
p                 789 kernel/printk/printk.c 		append_char(&p, e, '\n');
p                 792 kernel/printk/printk.c 	return p - buf;
p                 468 kernel/profile.c 	unsigned long p = *ppos;
p                 474 kernel/profile.c 	if (p >= (prof_len+1)*sizeof(unsigned int))
p                 476 kernel/profile.c 	if (count > (prof_len+1)*sizeof(unsigned int) - p)
p                 477 kernel/profile.c 		count = (prof_len+1)*sizeof(unsigned int) - p;
p                 480 kernel/profile.c 	while (p < sizeof(unsigned int) && count > 0) {
p                 481 kernel/profile.c 		if (put_user(*((char *)(&sample_step)+p), buf))
p                 483 kernel/profile.c 		buf++; p++; count--; read++;
p                 485 kernel/profile.c 	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
p                 524 kernel/ptrace.c static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
p                 528 kernel/ptrace.c 	__ptrace_unlink(p);
p                 530 kernel/ptrace.c 	if (p->exit_state != EXIT_ZOMBIE)
p                 533 kernel/ptrace.c 	dead = !thread_group_leader(p);
p                 535 kernel/ptrace.c 	if (!dead && thread_group_empty(p)) {
p                 536 kernel/ptrace.c 		if (!same_thread_group(p->real_parent, tracer))
p                 537 kernel/ptrace.c 			dead = do_notify_parent(p, p->exit_signal);
p                 539 kernel/ptrace.c 			__wake_up_parent(p, tracer);
p                 545 kernel/ptrace.c 		p->exit_state = EXIT_DEAD;
p                 582 kernel/ptrace.c 	struct task_struct *p, *n;
p                 584 kernel/ptrace.c 	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
p                 585 kernel/ptrace.c 		if (unlikely(p->ptrace & PT_EXITKILL))
p                 586 kernel/ptrace.c 			send_sig_info(SIGKILL, SEND_SIG_PRIV, p);
p                 588 kernel/ptrace.c 		if (__ptrace_detach(tracer, p))
p                 589 kernel/ptrace.c 			list_add(&p->ptrace_entry, dead);
p                 381 kernel/rcu/rcu.h #define raw_spin_lock_rcu_node(p)					\
p                 383 kernel/rcu/rcu.h 	raw_spin_lock(&ACCESS_PRIVATE(p, lock));			\
p                 387 kernel/rcu/rcu.h #define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))
p                 389 kernel/rcu/rcu.h #define raw_spin_lock_irq_rcu_node(p)					\
p                 391 kernel/rcu/rcu.h 	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
p                 395 kernel/rcu/rcu.h #define raw_spin_unlock_irq_rcu_node(p)					\
p                 396 kernel/rcu/rcu.h 	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
p                 398 kernel/rcu/rcu.h #define raw_spin_lock_irqsave_rcu_node(p, flags)			\
p                 400 kernel/rcu/rcu.h 	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
p                 404 kernel/rcu/rcu.h #define raw_spin_unlock_irqrestore_rcu_node(p, flags)			\
p                 405 kernel/rcu/rcu.h 	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
p                 407 kernel/rcu/rcu.h #define raw_spin_trylock_rcu_node(p)					\
p                 409 kernel/rcu/rcu.h 	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock));	\
p                 416 kernel/rcu/rcu.h #define raw_lockdep_assert_held_rcu_node(p)				\
p                 417 kernel/rcu/rcu.h 	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
p                 258 kernel/rcu/rcutorture.c 	struct list_head *p;
p                 267 kernel/rcu/rcutorture.c 	p = rcu_torture_freelist.next;
p                 268 kernel/rcu/rcutorture.c 	list_del_init(p);
p                 270 kernel/rcu/rcutorture.c 	return container_of(p, struct rcu_torture, rtort_free);
p                 277 kernel/rcu/rcutorture.c rcu_torture_free(struct rcu_torture *p)
p                 281 kernel/rcu/rcutorture.c 	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
p                 299 kernel/rcu/rcutorture.c 	void (*deferred_free)(struct rcu_torture *p);
p                 409 kernel/rcu/rcutorture.c rcu_torture_cb(struct rcu_head *p)
p                 411 kernel/rcu/rcutorture.c 	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
p                 429 kernel/rcu/rcutorture.c static void rcu_torture_deferred_free(struct rcu_torture *p)
p                 431 kernel/rcu/rcutorture.c 	call_rcu(&p->rtort_rcu, rcu_torture_cb);
p                 470 kernel/rcu/rcutorture.c static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
p                 473 kernel/rcu/rcutorture.c 	rcu_torture_cb(&p->rtort_rcu);
p                 661 kernel/rcu/rcutorture.c static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
p                 663 kernel/rcu/rcutorture.c 	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
p                1266 kernel/rcu/rcutorture.c 	struct rcu_torture *p;
p                1278 kernel/rcu/rcutorture.c 	p = rcu_dereference_check(rcu_torture_current,
p                1283 kernel/rcu/rcutorture.c 	if (p == NULL) {
p                1288 kernel/rcu/rcutorture.c 	if (p->rtort_mbtest == 0)
p                1292 kernel/rcu/rcutorture.c 	pipe_count = p->rtort_pipe_count;
p                1299 kernel/rcu/rcutorture.c 		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
p                  51 kernel/rcu/srcutree.c #define spin_lock_rcu_node(p)					\
p                  53 kernel/rcu/srcutree.c 	spin_lock(&ACCESS_PRIVATE(p, lock));			\
p                  57 kernel/rcu/srcutree.c #define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))
p                  59 kernel/rcu/srcutree.c #define spin_lock_irq_rcu_node(p)					\
p                  61 kernel/rcu/srcutree.c 	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
p                  65 kernel/rcu/srcutree.c #define spin_unlock_irq_rcu_node(p)					\
p                  66 kernel/rcu/srcutree.c 	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
p                  68 kernel/rcu/srcutree.c #define spin_lock_irqsave_rcu_node(p, flags)			\
p                  70 kernel/rcu/srcutree.c 	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
p                  74 kernel/rcu/srcutree.c #define spin_unlock_irqrestore_rcu_node(p, flags)			\
p                  75 kernel/rcu/srcutree.c 	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)	\
p                 634 kernel/relay.c 	struct rchan_percpu_buf_dispatcher *p = info;
p                 636 kernel/relay.c 	relay_set_buf_dentry(p->buf, p->dentry);
p                  64 kernel/resource.c static struct resource *next_resource(struct resource *p, bool sibling_only)
p                  68 kernel/resource.c 		return p->sibling;
p                  70 kernel/resource.c 	if (p->child)
p                  71 kernel/resource.c 		return p->child;
p                  72 kernel/resource.c 	while (!p->sibling && p->parent)
p                  73 kernel/resource.c 		p = p->parent;
p                  74 kernel/resource.c 	return p->sibling;
p                  79 kernel/resource.c 	struct resource *p = v;
p                  81 kernel/resource.c 	return (void *)next_resource(p, false);
p                  91 kernel/resource.c 	struct resource *p = PDE_DATA(file_inode(m->file));
p                  94 kernel/resource.c 	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
p                  96 kernel/resource.c 	return p;
p                 108 kernel/resource.c 	struct resource *r = v, *p;
p                 113 kernel/resource.c 	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
p                 114 kernel/resource.c 		if (p->parent == root)
p                 189 kernel/resource.c 	struct resource *tmp, **p;
p                 197 kernel/resource.c 	p = &root->child;
p                 199 kernel/resource.c 		tmp = *p;
p                 202 kernel/resource.c 			*p = new;
p                 206 kernel/resource.c 		p = &tmp->sibling;
p                 215 kernel/resource.c 	struct resource *tmp, **p, *chd;
p                 217 kernel/resource.c 	p = &old->parent->child;
p                 219 kernel/resource.c 		tmp = *p;
p                 224 kernel/resource.c 				*p = tmp->sibling;
p                 231 kernel/resource.c 				*p = tmp->child;
p                 237 kernel/resource.c 		p = &tmp->sibling;
p                 244 kernel/resource.c 	struct resource *tmp, *p;
p                 247 kernel/resource.c 	p = r->child;
p                 249 kernel/resource.c 	while (p) {
p                 250 kernel/resource.c 		tmp = p;
p                 251 kernel/resource.c 		p = p->sibling;
p                 346 kernel/resource.c 	struct resource *p;
p                 356 kernel/resource.c 	for (p = iomem_resource.child; p; p = next_resource(p, siblings_only)) {
p                 358 kernel/resource.c 		if (p->start > end) {
p                 359 kernel/resource.c 			p = NULL;
p                 364 kernel/resource.c 		if (p->end < start)
p                 374 kernel/resource.c 		if ((p->flags & flags) != flags)
p                 376 kernel/resource.c 		if ((desc != IORES_DESC_NONE) && (desc != p->desc))
p                 383 kernel/resource.c 	if (p) {
p                 385 kernel/resource.c 		res->start = max(start, p->start);
p                 386 kernel/resource.c 		res->end = min(end, p->end);
p                 387 kernel/resource.c 		res->flags = p->flags;
p                 388 kernel/resource.c 		res->desc = p->desc;
p                 392 kernel/resource.c 	return p ? 0 : -ENODEV;
p                 541 kernel/resource.c 	struct resource *p;
p                 547 kernel/resource.c 	for (p = iomem_resource.child; p ; p = p->sibling) {
p                 548 kernel/resource.c 		bool is_type = (((p->flags & flags) == flags) &&
p                 550 kernel/resource.c 				 (desc == p->desc)));
p                 552 kernel/resource.c 		if (resource_overlaps(p, &res))
p                1194 kernel/resource.c 	struct resource **p;
p                1197 kernel/resource.c 	p = &parent->child;
p                1203 kernel/resource.c 		struct resource *res = *p;
p                1209 kernel/resource.c 				p = &res->child;
p                1214 kernel/resource.c 			*p = res->sibling;
p                1221 kernel/resource.c 		p = &res->sibling;
p                1256 kernel/resource.c 	struct resource **p;
p                1269 kernel/resource.c 	p = &parent->child;
p                1272 kernel/resource.c 	while ((res = *p)) {
p                1278 kernel/resource.c 			p = &res->sibling;
p                1301 kernel/resource.c 			p = &res->child;
p                1308 kernel/resource.c 			*p = res->sibling;
p                1534 kernel/resource.c 	struct resource *p = &iomem_resource;
p                1539 kernel/resource.c 	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
p                1544 kernel/resource.c 		if (p->start >= addr + size)
p                1546 kernel/resource.c 		if (p->end < addr)
p                1548 kernel/resource.c 		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
p                1549 kernel/resource.c 		    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
p                1557 kernel/resource.c 		if (p->flags & IORESOURCE_BUSY)
p                1563 kernel/resource.c 		       p->name, p);
p                1584 kernel/resource.c 	struct resource *p = &iomem_resource;
p                1595 kernel/resource.c 	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
p                1600 kernel/resource.c 		if (p->start >= addr + size)
p                1602 kernel/resource.c 		if (p->end < addr)
p                1609 kernel/resource.c 		if ((p->flags & IORESOURCE_BUSY) == 0)
p                1612 kernel/resource.c 				|| p->flags & IORESOURCE_EXCLUSIVE) {
p                  49 kernel/sched/autogroup.c static inline struct autogroup *autogroup_task_get(struct task_struct *p)
p                  54 kernel/sched/autogroup.c 	if (!lock_task_sighand(p, &flags))
p                  57 kernel/sched/autogroup.c 	ag = autogroup_kref_get(p->signal->autogroup);
p                  58 kernel/sched/autogroup.c 	unlock_task_sighand(p, &flags);
p                 107 kernel/sched/autogroup.c bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
p                 119 kernel/sched/autogroup.c 	if (p->flags & PF_EXITING)
p                 125 kernel/sched/autogroup.c void sched_autogroup_exit_task(struct task_struct *p)
p                 132 kernel/sched/autogroup.c 	sched_move_task(p);
p                 136 kernel/sched/autogroup.c autogroup_move_group(struct task_struct *p, struct autogroup *ag)
p                 142 kernel/sched/autogroup.c 	BUG_ON(!lock_task_sighand(p, &flags));
p                 144 kernel/sched/autogroup.c 	prev = p->signal->autogroup;
p                 146 kernel/sched/autogroup.c 		unlock_task_sighand(p, &flags);
p                 150 kernel/sched/autogroup.c 	p->signal->autogroup = autogroup_kref_get(ag);
p                 162 kernel/sched/autogroup.c 	for_each_thread(p, t)
p                 165 kernel/sched/autogroup.c 	unlock_task_sighand(p, &flags);
p                 170 kernel/sched/autogroup.c void sched_autogroup_create_attach(struct task_struct *p)
p                 174 kernel/sched/autogroup.c 	autogroup_move_group(p, ag);
p                 182 kernel/sched/autogroup.c void sched_autogroup_detach(struct task_struct *p)
p                 184 kernel/sched/autogroup.c 	autogroup_move_group(p, &autogroup_default);
p                 208 kernel/sched/autogroup.c int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
p                 230 kernel/sched/autogroup.c 	ag = autogroup_task_get(p);
p                 246 kernel/sched/autogroup.c void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
p                 248 kernel/sched/autogroup.c 	struct autogroup *ag = autogroup_task_get(p);
p                  25 kernel/sched/autogroup.h extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);
p                  28 kernel/sched/autogroup.h autogroup_task_group(struct task_struct *p, struct task_group *tg)
p                  32 kernel/sched/autogroup.h 	if (enabled && task_wants_autogroup(p, tg))
p                  33 kernel/sched/autogroup.h 		return p->signal->autogroup->tg;
p                  50 kernel/sched/autogroup.h autogroup_task_group(struct task_struct *p, struct task_group *tg)
p                  78 kernel/sched/core.c struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
p                  83 kernel/sched/core.c 	lockdep_assert_held(&p->pi_lock);
p                  86 kernel/sched/core.c 		rq = task_rq(p);
p                  88 kernel/sched/core.c 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
p                  94 kernel/sched/core.c 		while (unlikely(task_on_rq_migrating(p)))
p                 102 kernel/sched/core.c struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
p                 103 kernel/sched/core.c 	__acquires(p->pi_lock)
p                 109 kernel/sched/core.c 		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
p                 110 kernel/sched/core.c 		rq = task_rq(p);
p                 129 kernel/sched/core.c 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
p                 134 kernel/sched/core.c 		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
p                 136 kernel/sched/core.c 		while (unlikely(task_on_rq_migrating(p)))
p                 368 kernel/sched/core.c static bool set_nr_and_not_polling(struct task_struct *p)
p                 370 kernel/sched/core.c 	struct thread_info *ti = task_thread_info(p);
p                 380 kernel/sched/core.c static bool set_nr_if_polling(struct task_struct *p)
p                 382 kernel/sched/core.c 	struct thread_info *ti = task_thread_info(p);
p                 399 kernel/sched/core.c static bool set_nr_and_not_polling(struct task_struct *p)
p                 401 kernel/sched/core.c 	set_tsk_need_resched(p);
p                 406 kernel/sched/core.c static bool set_nr_if_polling(struct task_struct *p)
p                 747 kernel/sched/core.c static void set_load_weight(struct task_struct *p, bool update_load)
p                 749 kernel/sched/core.c 	int prio = p->static_prio - MAX_RT_PRIO;
p                 750 kernel/sched/core.c 	struct load_weight *load = &p->se.load;
p                 755 kernel/sched/core.c 	if (task_has_idle_policy(p)) {
p                 758 kernel/sched/core.c 		p->se.runnable_weight = load->weight;
p                 766 kernel/sched/core.c 	if (update_load && p->sched_class == &fair_sched_class) {
p                 767 kernel/sched/core.c 		reweight_task(p, prio);
p                 771 kernel/sched/core.c 		p->se.runnable_weight = load->weight;
p                 877 kernel/sched/core.c uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
p                 879 kernel/sched/core.c 	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
p                 887 kernel/sched/core.c 	if (task_group_is_autogroup(task_group(p)))
p                 889 kernel/sched/core.c 	if (task_group(p) == &root_task_group)
p                 892 kernel/sched/core.c 	uc_max = task_group(p)->uclamp[clamp_id];
p                 909 kernel/sched/core.c uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
p                 911 kernel/sched/core.c 	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
p                 921 kernel/sched/core.c unsigned int uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
p                 926 kernel/sched/core.c 	if (p->uclamp[clamp_id].active)
p                 927 kernel/sched/core.c 		return p->uclamp[clamp_id].value;
p                 929 kernel/sched/core.c 	uc_eff = uclamp_eff_get(p, clamp_id);
p                 944 kernel/sched/core.c static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
p                 948 kernel/sched/core.c 	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
p                 954 kernel/sched/core.c 	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
p                 982 kernel/sched/core.c static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
p                 986 kernel/sched/core.c 	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
p                1020 kernel/sched/core.c static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
p                1024 kernel/sched/core.c 	if (unlikely(!p->sched_class->uclamp_enabled))
p                1028 kernel/sched/core.c 		uclamp_rq_inc_id(rq, p, clamp_id);
p                1035 kernel/sched/core.c static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
p                1039 kernel/sched/core.c 	if (unlikely(!p->sched_class->uclamp_enabled))
p                1043 kernel/sched/core.c 		uclamp_rq_dec_id(rq, p, clamp_id);
p                1047 kernel/sched/core.c uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
p                1060 kernel/sched/core.c 	rq = task_rq_lock(p, &rf);
p                1068 kernel/sched/core.c 	if (p->uclamp[clamp_id].active) {
p                1069 kernel/sched/core.c 		uclamp_rq_dec_id(rq, p, clamp_id);
p                1070 kernel/sched/core.c 		uclamp_rq_inc_id(rq, p, clamp_id);
p                1073 kernel/sched/core.c 	task_rq_unlock(rq, p, &rf);
p                1083 kernel/sched/core.c 	struct task_struct *p;
p                1086 kernel/sched/core.c 	while ((p = css_task_iter_next(&it))) {
p                1089 kernel/sched/core.c 				uclamp_update_active(p, clamp_id);
p                1168 kernel/sched/core.c static int uclamp_validate(struct task_struct *p,
p                1171 kernel/sched/core.c 	unsigned int lower_bound = p->uclamp_req[UCLAMP_MIN].value;
p                1172 kernel/sched/core.c 	unsigned int upper_bound = p->uclamp_req[UCLAMP_MAX].value;
p                1187 kernel/sched/core.c static void __setscheduler_uclamp(struct task_struct *p,
p                1197 kernel/sched/core.c 		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
p                1205 kernel/sched/core.c 		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
p                1215 kernel/sched/core.c 		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
p                1220 kernel/sched/core.c 		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
p                1225 kernel/sched/core.c static void uclamp_fork(struct task_struct *p)
p                1230 kernel/sched/core.c 		p->uclamp[clamp_id].active = false;
p                1232 kernel/sched/core.c 	if (likely(!p->sched_reset_on_fork))
p                1236 kernel/sched/core.c 		uclamp_se_set(&p->uclamp_req[clamp_id],
p                1272 kernel/sched/core.c static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
p                1273 kernel/sched/core.c static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
p                1274 kernel/sched/core.c static inline int uclamp_validate(struct task_struct *p,
p                1279 kernel/sched/core.c static void __setscheduler_uclamp(struct task_struct *p,
p                1281 kernel/sched/core.c static inline void uclamp_fork(struct task_struct *p) { }
p                1285 kernel/sched/core.c static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
p                1291 kernel/sched/core.c 		sched_info_queued(rq, p);
p                1292 kernel/sched/core.c 		psi_enqueue(p, flags & ENQUEUE_WAKEUP);
p                1295 kernel/sched/core.c 	uclamp_rq_inc(rq, p);
p                1296 kernel/sched/core.c 	p->sched_class->enqueue_task(rq, p, flags);
p                1299 kernel/sched/core.c static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
p                1305 kernel/sched/core.c 		sched_info_dequeued(rq, p);
p                1306 kernel/sched/core.c 		psi_dequeue(p, flags & DEQUEUE_SLEEP);
p                1309 kernel/sched/core.c 	uclamp_rq_dec(rq, p);
p                1310 kernel/sched/core.c 	p->sched_class->dequeue_task(rq, p, flags);
p                1313 kernel/sched/core.c void activate_task(struct rq *rq, struct task_struct *p, int flags)
p                1315 kernel/sched/core.c 	if (task_contributes_to_load(p))
p                1318 kernel/sched/core.c 	enqueue_task(rq, p, flags);
p                1320 kernel/sched/core.c 	p->on_rq = TASK_ON_RQ_QUEUED;
p                1323 kernel/sched/core.c void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
p                1325 kernel/sched/core.c 	p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
p                1327 kernel/sched/core.c 	if (task_contributes_to_load(p))
p                1330 kernel/sched/core.c 	dequeue_task(rq, p, flags);
p                1336 kernel/sched/core.c static inline int __normal_prio(struct task_struct *p)
p                1338 kernel/sched/core.c 	return p->static_prio;
p                1348 kernel/sched/core.c static inline int normal_prio(struct task_struct *p)
p                1352 kernel/sched/core.c 	if (task_has_dl_policy(p))
p                1354 kernel/sched/core.c 	else if (task_has_rt_policy(p))
p                1355 kernel/sched/core.c 		prio = MAX_RT_PRIO-1 - p->rt_priority;
p                1357 kernel/sched/core.c 		prio = __normal_prio(p);
p                1368 kernel/sched/core.c static int effective_prio(struct task_struct *p)
p                1370 kernel/sched/core.c 	p->normal_prio = normal_prio(p);
p                1376 kernel/sched/core.c 	if (!rt_prio(p->prio))
p                1377 kernel/sched/core.c 		return p->normal_prio;
p                1378 kernel/sched/core.c 	return p->prio;
p                1387 kernel/sched/core.c inline int task_curr(const struct task_struct *p)
p                1389 kernel/sched/core.c 	return cpu_curr(task_cpu(p)) == p;
p                1399 kernel/sched/core.c static inline void check_class_changed(struct rq *rq, struct task_struct *p,
p                1403 kernel/sched/core.c 	if (prev_class != p->sched_class) {
p                1405 kernel/sched/core.c 			prev_class->switched_from(rq, p);
p                1407 kernel/sched/core.c 		p->sched_class->switched_to(rq, p);
p                1408 kernel/sched/core.c 	} else if (oldprio != p->prio || dl_task(p))
p                1409 kernel/sched/core.c 		p->sched_class->prio_changed(rq, p, oldprio);
p                1412 kernel/sched/core.c void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
p                1416 kernel/sched/core.c 	if (p->sched_class == rq->curr->sched_class) {
p                1417 kernel/sched/core.c 		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
p                1422 kernel/sched/core.c 			if (class == p->sched_class) {
p                1439 kernel/sched/core.c static inline bool is_per_cpu_kthread(struct task_struct *p)
p                1441 kernel/sched/core.c 	if (!(p->flags & PF_KTHREAD))
p                1444 kernel/sched/core.c 	if (p->nr_cpus_allowed != 1)
p                1454 kernel/sched/core.c static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
p                1456 kernel/sched/core.c 	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
p                1459 kernel/sched/core.c 	if (is_per_cpu_kthread(p))
p                1485 kernel/sched/core.c 				   struct task_struct *p, int new_cpu)
p                1489 kernel/sched/core.c 	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
p                1490 kernel/sched/core.c 	dequeue_task(rq, p, DEQUEUE_NOCLOCK);
p                1491 kernel/sched/core.c 	set_task_cpu(p, new_cpu);
p                1497 kernel/sched/core.c 	BUG_ON(task_cpu(p) != new_cpu);
p                1498 kernel/sched/core.c 	enqueue_task(rq, p, 0);
p                1499 kernel/sched/core.c 	p->on_rq = TASK_ON_RQ_QUEUED;
p                1500 kernel/sched/core.c 	check_preempt_curr(rq, p, 0);
p                1520 kernel/sched/core.c 				 struct task_struct *p, int dest_cpu)
p                1523 kernel/sched/core.c 	if (!is_cpu_allowed(p, dest_cpu))
p                1527 kernel/sched/core.c 	rq = move_queued_task(rq, rf, p, dest_cpu);
p                1540 kernel/sched/core.c 	struct task_struct *p = arg->task;
p                1556 kernel/sched/core.c 	raw_spin_lock(&p->pi_lock);
p                1563 kernel/sched/core.c 	if (task_rq(p) == rq) {
p                1564 kernel/sched/core.c 		if (task_on_rq_queued(p))
p                1565 kernel/sched/core.c 			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
p                1567 kernel/sched/core.c 			p->wake_cpu = arg->dest_cpu;
p                1570 kernel/sched/core.c 	raw_spin_unlock(&p->pi_lock);
p                1580 kernel/sched/core.c void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
p                1582 kernel/sched/core.c 	cpumask_copy(&p->cpus_mask, new_mask);
p                1583 kernel/sched/core.c 	p->nr_cpus_allowed = cpumask_weight(new_mask);
p                1586 kernel/sched/core.c void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
p                1588 kernel/sched/core.c 	struct rq *rq = task_rq(p);
p                1591 kernel/sched/core.c 	lockdep_assert_held(&p->pi_lock);
p                1593 kernel/sched/core.c 	queued = task_on_rq_queued(p);
p                1594 kernel/sched/core.c 	running = task_current(rq, p);
p                1602 kernel/sched/core.c 		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
p                1605 kernel/sched/core.c 		put_prev_task(rq, p);
p                1607 kernel/sched/core.c 	p->sched_class->set_cpus_allowed(p, new_mask);
p                1610 kernel/sched/core.c 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
p                1612 kernel/sched/core.c 		set_next_task(rq, p);
p                1624 kernel/sched/core.c static int __set_cpus_allowed_ptr(struct task_struct *p,
p                1633 kernel/sched/core.c 	rq = task_rq_lock(p, &rf);
p                1636 kernel/sched/core.c 	if (p->flags & PF_KTHREAD) {
p                1647 kernel/sched/core.c 	if (check && (p->flags & PF_NO_SETAFFINITY)) {
p                1652 kernel/sched/core.c 	if (cpumask_equal(p->cpus_ptr, new_mask))
p                1661 kernel/sched/core.c 	do_set_cpus_allowed(p, new_mask);
p                1663 kernel/sched/core.c 	if (p->flags & PF_KTHREAD) {
p                1670 kernel/sched/core.c 			p->nr_cpus_allowed != 1);
p                1674 kernel/sched/core.c 	if (cpumask_test_cpu(task_cpu(p), new_mask))
p                1677 kernel/sched/core.c 	if (task_running(rq, p) || p->state == TASK_WAKING) {
p                1678 kernel/sched/core.c 		struct migration_arg arg = { p, dest_cpu };
p                1680 kernel/sched/core.c 		task_rq_unlock(rq, p, &rf);
p                1683 kernel/sched/core.c 	} else if (task_on_rq_queued(p)) {
p                1688 kernel/sched/core.c 		rq = move_queued_task(rq, &rf, p, dest_cpu);
p                1691 kernel/sched/core.c 	task_rq_unlock(rq, p, &rf);
p                1696 kernel/sched/core.c int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
p                1698 kernel/sched/core.c 	return __set_cpus_allowed_ptr(p, new_mask, false);
p                1702 kernel/sched/core.c void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
p                1709 kernel/sched/core.c 	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
p                1710 kernel/sched/core.c 			!p->on_rq);
p                1717 kernel/sched/core.c 	WARN_ON_ONCE(p->state == TASK_RUNNING &&
p                1718 kernel/sched/core.c 		     p->sched_class == &fair_sched_class &&
p                1719 kernel/sched/core.c 		     (p->on_rq && !task_on_rq_migrating(p)));
p                1732 kernel/sched/core.c 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
p                1733 kernel/sched/core.c 				      lockdep_is_held(&task_rq(p)->lock)));
p                1741 kernel/sched/core.c 	trace_sched_migrate_task(p, new_cpu);
p                1743 kernel/sched/core.c 	if (task_cpu(p) != new_cpu) {
p                1744 kernel/sched/core.c 		if (p->sched_class->migrate_task_rq)
p                1745 kernel/sched/core.c 			p->sched_class->migrate_task_rq(p, new_cpu);
p                1746 kernel/sched/core.c 		p->se.nr_migrations++;
p                1747 kernel/sched/core.c 		rseq_migrate(p);
p                1748 kernel/sched/core.c 		perf_event_task_migrate(p);
p                1751 kernel/sched/core.c 	__set_task_cpu(p, new_cpu);
p                1755 kernel/sched/core.c static void __migrate_swap_task(struct task_struct *p, int cpu)
p                1757 kernel/sched/core.c 	if (task_on_rq_queued(p)) {
p                1761 kernel/sched/core.c 		src_rq = task_rq(p);
p                1767 kernel/sched/core.c 		deactivate_task(src_rq, p, 0);
p                1768 kernel/sched/core.c 		set_task_cpu(p, cpu);
p                1769 kernel/sched/core.c 		activate_task(dst_rq, p, 0);
p                1770 kernel/sched/core.c 		check_preempt_curr(dst_rq, p, 0);
p                1781 kernel/sched/core.c 		p->wake_cpu = cpu;
p                1834 kernel/sched/core.c int migrate_swap(struct task_struct *cur, struct task_struct *p,
p                1843 kernel/sched/core.c 		.dst_task = p,
p                1863 kernel/sched/core.c 	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
p                1887 kernel/sched/core.c unsigned long wait_task_inactive(struct task_struct *p, long match_state)
p                1901 kernel/sched/core.c 		rq = task_rq(p);
p                1914 kernel/sched/core.c 		while (task_running(rq, p)) {
p                1915 kernel/sched/core.c 			if (match_state && unlikely(p->state != match_state))
p                1925 kernel/sched/core.c 		rq = task_rq_lock(p, &rf);
p                1926 kernel/sched/core.c 		trace_sched_wait_task(p);
p                1927 kernel/sched/core.c 		running = task_running(rq, p);
p                1928 kernel/sched/core.c 		queued = task_on_rq_queued(p);
p                1930 kernel/sched/core.c 		if (!match_state || p->state == match_state)
p                1931 kernel/sched/core.c 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
p                1932 kernel/sched/core.c 		task_rq_unlock(rq, p, &rf);
p                1992 kernel/sched/core.c void kick_process(struct task_struct *p)
p                1997 kernel/sched/core.c 	cpu = task_cpu(p);
p                1998 kernel/sched/core.c 	if ((cpu != smp_processor_id()) && task_curr(p))
p                2026 kernel/sched/core.c static int select_fallback_rq(int cpu, struct task_struct *p)
p                2045 kernel/sched/core.c 			if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
p                2052 kernel/sched/core.c 		for_each_cpu(dest_cpu, p->cpus_ptr) {
p                2053 kernel/sched/core.c 			if (!is_cpu_allowed(p, dest_cpu))
p                2063 kernel/sched/core.c 				cpuset_cpus_allowed_fallback(p);
p                2069 kernel/sched/core.c 			do_set_cpus_allowed(p, cpu_possible_mask);
p                2086 kernel/sched/core.c 		if (p->mm && printk_ratelimit()) {
p                2088 kernel/sched/core.c 					task_pid_nr(p), p->comm, cpu);
p                2099 kernel/sched/core.c int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
p                2101 kernel/sched/core.c 	lockdep_assert_held(&p->pi_lock);
p                2103 kernel/sched/core.c 	if (p->nr_cpus_allowed > 1)
p                2104 kernel/sched/core.c 		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
p                2106 kernel/sched/core.c 		cpu = cpumask_any(p->cpus_ptr);
p                2118 kernel/sched/core.c 	if (unlikely(!is_cpu_allowed(p, cpu)))
p                2119 kernel/sched/core.c 		cpu = select_fallback_rq(task_cpu(p), p);
p                2162 kernel/sched/core.c static inline int __set_cpus_allowed_ptr(struct task_struct *p,
p                2165 kernel/sched/core.c 	return set_cpus_allowed_ptr(p, new_mask);
p                2171 kernel/sched/core.c ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
p                2183 kernel/sched/core.c 		__schedstat_inc(p->se.statistics.nr_wakeups_local);
p                2187 kernel/sched/core.c 		__schedstat_inc(p->se.statistics.nr_wakeups_remote);
p                2199 kernel/sched/core.c 		__schedstat_inc(p->se.statistics.nr_wakeups_migrate);
p                2203 kernel/sched/core.c 	__schedstat_inc(p->se.statistics.nr_wakeups);
p                2206 kernel/sched/core.c 		__schedstat_inc(p->se.statistics.nr_wakeups_sync);
p                2212 kernel/sched/core.c static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
p                2215 kernel/sched/core.c 	check_preempt_curr(rq, p, wake_flags);
p                2216 kernel/sched/core.c 	p->state = TASK_RUNNING;
p                2217 kernel/sched/core.c 	trace_sched_wakeup(p);
p                2220 kernel/sched/core.c 	if (p->sched_class->task_woken) {
p                2226 kernel/sched/core.c 		p->sched_class->task_woken(rq, p);
p                2245 kernel/sched/core.c ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
p                2253 kernel/sched/core.c 	if (p->sched_contributes_to_load)
p                2260 kernel/sched/core.c 	activate_task(rq, p, en_flags);
p                2261 kernel/sched/core.c 	ttwu_do_wakeup(rq, p, wake_flags, rf);
p                2270 kernel/sched/core.c static int ttwu_remote(struct task_struct *p, int wake_flags)
p                2276 kernel/sched/core.c 	rq = __task_rq_lock(p, &rf);
p                2277 kernel/sched/core.c 	if (task_on_rq_queued(p)) {
p                2280 kernel/sched/core.c 		ttwu_do_wakeup(rq, p, wake_flags, &rf);
p                2293 kernel/sched/core.c 	struct task_struct *p, *t;
p                2302 kernel/sched/core.c 	llist_for_each_entry_safe(p, t, llist, wake_entry)
p                2303 kernel/sched/core.c 		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
p                2346 kernel/sched/core.c static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
p                2350 kernel/sched/core.c 	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
p                2352 kernel/sched/core.c 	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
p                2390 kernel/sched/core.c static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
p                2398 kernel/sched/core.c 		ttwu_queue_remote(p, cpu, wake_flags);
p                2405 kernel/sched/core.c 	ttwu_do_activate(rq, p, wake_flags, &rf);
p                2512 kernel/sched/core.c try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
p                2518 kernel/sched/core.c 	if (p == current) {
p                2530 kernel/sched/core.c 		if (!(p->state & state))
p                2534 kernel/sched/core.c 		cpu = task_cpu(p);
p                2535 kernel/sched/core.c 		trace_sched_waking(p);
p                2536 kernel/sched/core.c 		p->state = TASK_RUNNING;
p                2537 kernel/sched/core.c 		trace_sched_wakeup(p);
p                2547 kernel/sched/core.c 	raw_spin_lock_irqsave(&p->pi_lock, flags);
p                2549 kernel/sched/core.c 	if (!(p->state & state))
p                2552 kernel/sched/core.c 	trace_sched_waking(p);
p                2556 kernel/sched/core.c 	cpu = task_cpu(p);
p                2579 kernel/sched/core.c 	if (p->on_rq && ttwu_remote(p, wake_flags))
p                2613 kernel/sched/core.c 	smp_cond_load_acquire(&p->on_cpu, !VAL);
p                2615 kernel/sched/core.c 	p->sched_contributes_to_load = !!task_contributes_to_load(p);
p                2616 kernel/sched/core.c 	p->state = TASK_WAKING;
p                2618 kernel/sched/core.c 	if (p->in_iowait) {
p                2619 kernel/sched/core.c 		delayacct_blkio_end(p);
p                2620 kernel/sched/core.c 		atomic_dec(&task_rq(p)->nr_iowait);
p                2623 kernel/sched/core.c 	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
p                2624 kernel/sched/core.c 	if (task_cpu(p) != cpu) {
p                2626 kernel/sched/core.c 		psi_ttwu_dequeue(p);
p                2627 kernel/sched/core.c 		set_task_cpu(p, cpu);
p                2632 kernel/sched/core.c 	if (p->in_iowait) {
p                2633 kernel/sched/core.c 		delayacct_blkio_end(p);
p                2634 kernel/sched/core.c 		atomic_dec(&task_rq(p)->nr_iowait);
p                2639 kernel/sched/core.c 	ttwu_queue(p, cpu, wake_flags);
p                2641 kernel/sched/core.c 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
p                2644 kernel/sched/core.c 		ttwu_stat(p, cpu, wake_flags);
p                2661 kernel/sched/core.c int wake_up_process(struct task_struct *p)
p                2663 kernel/sched/core.c 	return try_to_wake_up(p, TASK_NORMAL, 0);
p                2667 kernel/sched/core.c int wake_up_state(struct task_struct *p, unsigned int state)
p                2669 kernel/sched/core.c 	return try_to_wake_up(p, state, 0);
p                2678 kernel/sched/core.c static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p                2680 kernel/sched/core.c 	p->on_rq			= 0;
p                2682 kernel/sched/core.c 	p->se.on_rq			= 0;
p                2683 kernel/sched/core.c 	p->se.exec_start		= 0;
p                2684 kernel/sched/core.c 	p->se.sum_exec_runtime		= 0;
p                2685 kernel/sched/core.c 	p->se.prev_sum_exec_runtime	= 0;
p                2686 kernel/sched/core.c 	p->se.nr_migrations		= 0;
p                2687 kernel/sched/core.c 	p->se.vruntime			= 0;
p                2688 kernel/sched/core.c 	INIT_LIST_HEAD(&p->se.group_node);
p                2691 kernel/sched/core.c 	p->se.cfs_rq			= NULL;
p                2696 kernel/sched/core.c 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
p                2699 kernel/sched/core.c 	RB_CLEAR_NODE(&p->dl.rb_node);
p                2700 kernel/sched/core.c 	init_dl_task_timer(&p->dl);
p                2701 kernel/sched/core.c 	init_dl_inactive_task_timer(&p->dl);
p                2702 kernel/sched/core.c 	__dl_clear_params(p);
p                2704 kernel/sched/core.c 	INIT_LIST_HEAD(&p->rt.run_list);
p                2705 kernel/sched/core.c 	p->rt.timeout		= 0;
p                2706 kernel/sched/core.c 	p->rt.time_slice	= sched_rr_timeslice;
p                2707 kernel/sched/core.c 	p->rt.on_rq		= 0;
p                2708 kernel/sched/core.c 	p->rt.on_list		= 0;
p                2711 kernel/sched/core.c 	INIT_HLIST_HEAD(&p->preempt_notifiers);
p                2715 kernel/sched/core.c 	p->capture_control = NULL;
p                2717 kernel/sched/core.c 	init_numa_balancing(clone_flags, p);
p                2835 kernel/sched/core.c int sched_fork(unsigned long clone_flags, struct task_struct *p)
p                2839 kernel/sched/core.c 	__sched_fork(clone_flags, p);
p                2845 kernel/sched/core.c 	p->state = TASK_NEW;
p                2850 kernel/sched/core.c 	p->prio = current->normal_prio;
p                2852 kernel/sched/core.c 	uclamp_fork(p);
p                2857 kernel/sched/core.c 	if (unlikely(p->sched_reset_on_fork)) {
p                2858 kernel/sched/core.c 		if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
p                2859 kernel/sched/core.c 			p->policy = SCHED_NORMAL;
p                2860 kernel/sched/core.c 			p->static_prio = NICE_TO_PRIO(0);
p                2861 kernel/sched/core.c 			p->rt_priority = 0;
p                2862 kernel/sched/core.c 		} else if (PRIO_TO_NICE(p->static_prio) < 0)
p                2863 kernel/sched/core.c 			p->static_prio = NICE_TO_PRIO(0);
p                2865 kernel/sched/core.c 		p->prio = p->normal_prio = __normal_prio(p);
p                2866 kernel/sched/core.c 		set_load_weight(p, false);
p                2872 kernel/sched/core.c 		p->sched_reset_on_fork = 0;
p                2875 kernel/sched/core.c 	if (dl_prio(p->prio))
p                2877 kernel/sched/core.c 	else if (rt_prio(p->prio))
p                2878 kernel/sched/core.c 		p->sched_class = &rt_sched_class;
p                2880 kernel/sched/core.c 		p->sched_class = &fair_sched_class;
p                2882 kernel/sched/core.c 	init_entity_runnable_average(&p->se);
p                2891 kernel/sched/core.c 	raw_spin_lock_irqsave(&p->pi_lock, flags);
p                2896 kernel/sched/core.c 	__set_task_cpu(p, smp_processor_id());
p                2897 kernel/sched/core.c 	if (p->sched_class->task_fork)
p                2898 kernel/sched/core.c 		p->sched_class->task_fork(p);
p                2899 kernel/sched/core.c 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
p                2903 kernel/sched/core.c 		memset(&p->sched_info, 0, sizeof(p->sched_info));
p                2906 kernel/sched/core.c 	p->on_cpu = 0;
p                2908 kernel/sched/core.c 	init_task_preempt_count(p);
p                2910 kernel/sched/core.c 	plist_node_init(&p->pushable_tasks, MAX_PRIO);
p                2911 kernel/sched/core.c 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
p                2939 kernel/sched/core.c void wake_up_new_task(struct task_struct *p)
p                2944 kernel/sched/core.c 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
p                2945 kernel/sched/core.c 	p->state = TASK_RUNNING;
p                2955 kernel/sched/core.c 	p->recent_used_cpu = task_cpu(p);
p                2956 kernel/sched/core.c 	__set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
p                2958 kernel/sched/core.c 	rq = __task_rq_lock(p, &rf);
p                2960 kernel/sched/core.c 	post_init_entity_util_avg(p);
p                2962 kernel/sched/core.c 	activate_task(rq, p, ENQUEUE_NOCLOCK);
p                2963 kernel/sched/core.c 	trace_sched_wakeup_new(p);
p                2964 kernel/sched/core.c 	check_preempt_curr(rq, p, WF_FORK);
p                2966 kernel/sched/core.c 	if (p->sched_class->task_woken) {
p                2972 kernel/sched/core.c 		p->sched_class->task_woken(rq, p);
p                2976 kernel/sched/core.c 	task_rq_unlock(rq, p, &rf);
p                3492 kernel/sched/core.c 	struct task_struct *p = current;
p                3496 kernel/sched/core.c 	raw_spin_lock_irqsave(&p->pi_lock, flags);
p                3497 kernel/sched/core.c 	dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
p                3502 kernel/sched/core.c 		struct migration_arg arg = { p, dest_cpu };
p                3504 kernel/sched/core.c 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
p                3505 kernel/sched/core.c 		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
p                3509 kernel/sched/core.c 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
p                3526 kernel/sched/core.c static inline void prefetch_curr_exec_start(struct task_struct *p)
p                3529 kernel/sched/core.c 	struct sched_entity *curr = (&p->se)->cfs_rq->curr;
p                3531 kernel/sched/core.c 	struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
p                3542 kernel/sched/core.c unsigned long long task_sched_runtime(struct task_struct *p)
p                3560 kernel/sched/core.c 	if (!p->on_cpu || !task_on_rq_queued(p))
p                3561 kernel/sched/core.c 		return p->se.sum_exec_runtime;
p                3564 kernel/sched/core.c 	rq = task_rq_lock(p, &rf);
p                3570 kernel/sched/core.c 	if (task_current(rq, p) && task_on_rq_queued(p)) {
p                3571 kernel/sched/core.c 		prefetch_curr_exec_start(p);
p                3573 kernel/sched/core.c 		p->sched_class->update_curr(rq);
p                3575 kernel/sched/core.c 	ns = p->se.sum_exec_runtime;
p                3576 kernel/sched/core.c 	task_rq_unlock(rq, p, &rf);
p                3830 kernel/sched/core.c static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
p                3833 kernel/sched/core.c 	return p->preempt_disable_ip;
p                3907 kernel/sched/core.c 	struct task_struct *p;
p                3919 kernel/sched/core.c 		p = fair_sched_class.pick_next_task(rq, prev, rf);
p                3920 kernel/sched/core.c 		if (unlikely(p == RETRY_TASK))
p                3924 kernel/sched/core.c 		if (unlikely(!p))
p                3925 kernel/sched/core.c 			p = idle_sched_class.pick_next_task(rq, prev, rf);
p                3927 kernel/sched/core.c 		return p;
p                3949 kernel/sched/core.c 		p = class->pick_next_task(rq, NULL, NULL);
p                3950 kernel/sched/core.c 		if (p)
p                3951 kernel/sched/core.c 			return p;
p                4352 kernel/sched/core.c static inline int rt_effective_prio(struct task_struct *p, int prio)
p                4354 kernel/sched/core.c 	struct task_struct *pi_task = rt_mutex_get_top_task(p);
p                4370 kernel/sched/core.c void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
p                4379 kernel/sched/core.c 	prio = __rt_effective_prio(pi_task, p->normal_prio);
p                4384 kernel/sched/core.c 	if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
p                4387 kernel/sched/core.c 	rq = __task_rq_lock(p, &rf);
p                4399 kernel/sched/core.c 	p->pi_top_task = pi_task;
p                4404 kernel/sched/core.c 	if (prio == p->prio && !dl_prio(prio))
p                4419 kernel/sched/core.c 	if (unlikely(p == rq->idle)) {
p                4420 kernel/sched/core.c 		WARN_ON(p != rq->curr);
p                4421 kernel/sched/core.c 		WARN_ON(p->pi_blocked_on);
p                4425 kernel/sched/core.c 	trace_sched_pi_setprio(p, pi_task);
p                4426 kernel/sched/core.c 	oldprio = p->prio;
p                4431 kernel/sched/core.c 	prev_class = p->sched_class;
p                4432 kernel/sched/core.c 	queued = task_on_rq_queued(p);
p                4433 kernel/sched/core.c 	running = task_current(rq, p);
p                4435 kernel/sched/core.c 		dequeue_task(rq, p, queue_flag);
p                4437 kernel/sched/core.c 		put_prev_task(rq, p);
p                4449 kernel/sched/core.c 		if (!dl_prio(p->normal_prio) ||
p                4450 kernel/sched/core.c 		    (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
p                4451 kernel/sched/core.c 			p->dl.dl_boosted = 1;
p                4454 kernel/sched/core.c 			p->dl.dl_boosted = 0;
p                4455 kernel/sched/core.c 		p->sched_class = &dl_sched_class;
p                4458 kernel/sched/core.c 			p->dl.dl_boosted = 0;
p                4461 kernel/sched/core.c 		p->sched_class = &rt_sched_class;
p                4464 kernel/sched/core.c 			p->dl.dl_boosted = 0;
p                4466 kernel/sched/core.c 			p->rt.timeout = 0;
p                4467 kernel/sched/core.c 		p->sched_class = &fair_sched_class;
p                4470 kernel/sched/core.c 	p->prio = prio;
p                4473 kernel/sched/core.c 		enqueue_task(rq, p, queue_flag);
p                4475 kernel/sched/core.c 		set_next_task(rq, p);
p                4477 kernel/sched/core.c 	check_class_changed(rq, p, prev_class, oldprio);
p                4487 kernel/sched/core.c static inline int rt_effective_prio(struct task_struct *p, int prio)
p                4493 kernel/sched/core.c void set_user_nice(struct task_struct *p, long nice)
p                4500 kernel/sched/core.c 	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
p                4506 kernel/sched/core.c 	rq = task_rq_lock(p, &rf);
p                4515 kernel/sched/core.c 	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
p                4516 kernel/sched/core.c 		p->static_prio = NICE_TO_PRIO(nice);
p                4519 kernel/sched/core.c 	queued = task_on_rq_queued(p);
p                4520 kernel/sched/core.c 	running = task_current(rq, p);
p                4522 kernel/sched/core.c 		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
p                4524 kernel/sched/core.c 		put_prev_task(rq, p);
p                4526 kernel/sched/core.c 	p->static_prio = NICE_TO_PRIO(nice);
p                4527 kernel/sched/core.c 	set_load_weight(p, true);
p                4528 kernel/sched/core.c 	old_prio = p->prio;
p                4529 kernel/sched/core.c 	p->prio = effective_prio(p);
p                4530 kernel/sched/core.c 	delta = p->prio - old_prio;
p                4533 kernel/sched/core.c 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
p                4538 kernel/sched/core.c 		if (delta < 0 || (delta > 0 && task_running(rq, p)))
p                4542 kernel/sched/core.c 		set_next_task(rq, p);
p                4544 kernel/sched/core.c 	task_rq_unlock(rq, p, &rf);
p                4553 kernel/sched/core.c int can_nice(const struct task_struct *p, const int nice)
p                4558 kernel/sched/core.c 	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
p                4605 kernel/sched/core.c int task_prio(const struct task_struct *p)
p                4607 kernel/sched/core.c 	return p->prio - MAX_RT_PRIO;
p                4679 kernel/sched/core.c static void __setscheduler_params(struct task_struct *p,
p                4685 kernel/sched/core.c 		policy = p->policy;
p                4687 kernel/sched/core.c 	p->policy = policy;
p                4690 kernel/sched/core.c 		__setparam_dl(p, attr);
p                4692 kernel/sched/core.c 		p->static_prio = NICE_TO_PRIO(attr->sched_nice);
p                4699 kernel/sched/core.c 	p->rt_priority = attr->sched_priority;
p                4700 kernel/sched/core.c 	p->normal_prio = normal_prio(p);
p                4701 kernel/sched/core.c 	set_load_weight(p, true);
p                4705 kernel/sched/core.c static void __setscheduler(struct rq *rq, struct task_struct *p,
p                4715 kernel/sched/core.c 	__setscheduler_params(p, attr);
p                4721 kernel/sched/core.c 	p->prio = normal_prio(p);
p                4723 kernel/sched/core.c 		p->prio = rt_effective_prio(p, p->prio);
p                4725 kernel/sched/core.c 	if (dl_prio(p->prio))
p                4726 kernel/sched/core.c 		p->sched_class = &dl_sched_class;
p                4727 kernel/sched/core.c 	else if (rt_prio(p->prio))
p                4728 kernel/sched/core.c 		p->sched_class = &rt_sched_class;
p                4730 kernel/sched/core.c 		p->sched_class = &fair_sched_class;
p                4736 kernel/sched/core.c static bool check_same_owner(struct task_struct *p)
p                4742 kernel/sched/core.c 	pcred = __task_cred(p);
p                4749 kernel/sched/core.c static int __sched_setscheduler(struct task_struct *p,
p                4768 kernel/sched/core.c 		reset_on_fork = p->sched_reset_on_fork;
p                4769 kernel/sched/core.c 		policy = oldpolicy = p->policy;
p                4785 kernel/sched/core.c 	if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
p                4786 kernel/sched/core.c 	    (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
p                4797 kernel/sched/core.c 			if (attr->sched_nice < task_nice(p) &&
p                4798 kernel/sched/core.c 			    !can_nice(p, attr->sched_nice))
p                4804 kernel/sched/core.c 					task_rlimit(p, RLIMIT_RTPRIO);
p                4807 kernel/sched/core.c 			if (policy != p->policy && !rlim_rtprio)
p                4811 kernel/sched/core.c 			if (attr->sched_priority > p->rt_priority &&
p                4829 kernel/sched/core.c 		if (task_has_idle_policy(p) && !idle_policy(policy)) {
p                4830 kernel/sched/core.c 			if (!can_nice(p, task_nice(p)))
p                4835 kernel/sched/core.c 		if (!check_same_owner(p))
p                4839 kernel/sched/core.c 		if (p->sched_reset_on_fork && !reset_on_fork)
p                4847 kernel/sched/core.c 		retval = security_task_setscheduler(p);
p                4854 kernel/sched/core.c 		retval = uclamp_validate(p, attr);
p                4869 kernel/sched/core.c 	rq = task_rq_lock(p, &rf);
p                4875 kernel/sched/core.c 	if (p == rq->stop) {
p                4884 kernel/sched/core.c 	if (unlikely(policy == p->policy)) {
p                4885 kernel/sched/core.c 		if (fair_policy(policy) && attr->sched_nice != task_nice(p))
p                4887 kernel/sched/core.c 		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
p                4889 kernel/sched/core.c 		if (dl_policy(policy) && dl_param_changed(p, attr))
p                4894 kernel/sched/core.c 		p->sched_reset_on_fork = reset_on_fork;
p                4907 kernel/sched/core.c 				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
p                4908 kernel/sched/core.c 				!task_group_is_autogroup(task_group(p))) {
p                4923 kernel/sched/core.c 			if (!cpumask_subset(span, p->cpus_ptr) ||
p                4933 kernel/sched/core.c 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
p                4935 kernel/sched/core.c 		task_rq_unlock(rq, p, &rf);
p                4946 kernel/sched/core.c 	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
p                4951 kernel/sched/core.c 	p->sched_reset_on_fork = reset_on_fork;
p                4952 kernel/sched/core.c 	oldprio = p->prio;
p                4962 kernel/sched/core.c 		new_effective_prio = rt_effective_prio(p, newprio);
p                4967 kernel/sched/core.c 	queued = task_on_rq_queued(p);
p                4968 kernel/sched/core.c 	running = task_current(rq, p);
p                4970 kernel/sched/core.c 		dequeue_task(rq, p, queue_flags);
p                4972 kernel/sched/core.c 		put_prev_task(rq, p);
p                4974 kernel/sched/core.c 	prev_class = p->sched_class;
p                4976 kernel/sched/core.c 	__setscheduler(rq, p, attr, pi);
p                4977 kernel/sched/core.c 	__setscheduler_uclamp(p, attr);
p                4984 kernel/sched/core.c 		if (oldprio < p->prio)
p                4987 kernel/sched/core.c 		enqueue_task(rq, p, queue_flags);
p                4990 kernel/sched/core.c 		set_next_task(rq, p);
p                4992 kernel/sched/core.c 	check_class_changed(rq, p, prev_class, oldprio);
p                4996 kernel/sched/core.c 	task_rq_unlock(rq, p, &rf);
p                5000 kernel/sched/core.c 		rt_mutex_adjust_pi(p);
p                5010 kernel/sched/core.c 	task_rq_unlock(rq, p, &rf);
p                5016 kernel/sched/core.c static int _sched_setscheduler(struct task_struct *p, int policy,
p                5022 kernel/sched/core.c 		.sched_nice	= PRIO_TO_NICE(p->static_prio),
p                5032 kernel/sched/core.c 	return __sched_setscheduler(p, &attr, check, true);
p                5044 kernel/sched/core.c int sched_setscheduler(struct task_struct *p, int policy,
p                5047 kernel/sched/core.c 	return _sched_setscheduler(p, policy, param, true);
p                5051 kernel/sched/core.c int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
p                5053 kernel/sched/core.c 	return __sched_setscheduler(p, attr, true, true);
p                5057 kernel/sched/core.c int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
p                5059 kernel/sched/core.c 	return __sched_setscheduler(p, attr, false, true);
p                5075 kernel/sched/core.c int sched_setscheduler_nocheck(struct task_struct *p, int policy,
p                5078 kernel/sched/core.c 	return _sched_setscheduler(p, policy, param, false);
p                5086 kernel/sched/core.c 	struct task_struct *p;
p                5096 kernel/sched/core.c 	p = find_process_by_pid(pid);
p                5097 kernel/sched/core.c 	if (likely(p))
p                5098 kernel/sched/core.c 		get_task_struct(p);
p                5101 kernel/sched/core.c 	if (likely(p)) {
p                5102 kernel/sched/core.c 		retval = sched_setscheduler(p, policy, &lparam);
p                5103 kernel/sched/core.c 		put_task_struct(p);
p                5192 kernel/sched/core.c 	struct task_struct *p;
p                5209 kernel/sched/core.c 	p = find_process_by_pid(pid);
p                5210 kernel/sched/core.c 	if (likely(p))
p                5211 kernel/sched/core.c 		get_task_struct(p);
p                5214 kernel/sched/core.c 	if (likely(p)) {
p                5215 kernel/sched/core.c 		retval = sched_setattr(p, &attr);
p                5216 kernel/sched/core.c 		put_task_struct(p);
p                5231 kernel/sched/core.c 	struct task_struct *p;
p                5239 kernel/sched/core.c 	p = find_process_by_pid(pid);
p                5240 kernel/sched/core.c 	if (p) {
p                5241 kernel/sched/core.c 		retval = security_task_getscheduler(p);
p                5243 kernel/sched/core.c 			retval = p->policy
p                5244 kernel/sched/core.c 				| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
p                5261 kernel/sched/core.c 	struct task_struct *p;
p                5268 kernel/sched/core.c 	p = find_process_by_pid(pid);
p                5270 kernel/sched/core.c 	if (!p)
p                5273 kernel/sched/core.c 	retval = security_task_getscheduler(p);
p                5277 kernel/sched/core.c 	if (task_has_rt_policy(p))
p                5278 kernel/sched/core.c 		lp.sched_priority = p->rt_priority;
p                5343 kernel/sched/core.c 	struct task_struct *p;
p                5351 kernel/sched/core.c 	p = find_process_by_pid(pid);
p                5353 kernel/sched/core.c 	if (!p)
p                5356 kernel/sched/core.c 	retval = security_task_getscheduler(p);
p                5360 kernel/sched/core.c 	kattr.sched_policy = p->policy;
p                5361 kernel/sched/core.c 	if (p->sched_reset_on_fork)
p                5363 kernel/sched/core.c 	if (task_has_dl_policy(p))
p                5364 kernel/sched/core.c 		__getparam_dl(p, &kattr);
p                5365 kernel/sched/core.c 	else if (task_has_rt_policy(p))
p                5366 kernel/sched/core.c 		kattr.sched_priority = p->rt_priority;
p                5368 kernel/sched/core.c 		kattr.sched_nice = task_nice(p);
p                5371 kernel/sched/core.c 	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
p                5372 kernel/sched/core.c 	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
p                5387 kernel/sched/core.c 	struct task_struct *p;
p                5392 kernel/sched/core.c 	p = find_process_by_pid(pid);
p                5393 kernel/sched/core.c 	if (!p) {
p                5399 kernel/sched/core.c 	get_task_struct(p);
p                5402 kernel/sched/core.c 	if (p->flags & PF_NO_SETAFFINITY) {
p                5415 kernel/sched/core.c 	if (!check_same_owner(p)) {
p                5417 kernel/sched/core.c 		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
p                5424 kernel/sched/core.c 	retval = security_task_setscheduler(p);
p                5429 kernel/sched/core.c 	cpuset_cpus_allowed(p, cpus_allowed);
p                5439 kernel/sched/core.c 	if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
p                5441 kernel/sched/core.c 		if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
p                5450 kernel/sched/core.c 	retval = __set_cpus_allowed_ptr(p, new_mask, true);
p                5453 kernel/sched/core.c 		cpuset_cpus_allowed(p, cpus_allowed);
p                5469 kernel/sched/core.c 	put_task_struct(p);
p                5510 kernel/sched/core.c 	struct task_struct *p;
p                5517 kernel/sched/core.c 	p = find_process_by_pid(pid);
p                5518 kernel/sched/core.c 	if (!p)
p                5521 kernel/sched/core.c 	retval = security_task_getscheduler(p);
p                5525 kernel/sched/core.c 	raw_spin_lock_irqsave(&p->pi_lock, flags);
p                5526 kernel/sched/core.c 	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
p                5527 kernel/sched/core.c 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
p                5692 kernel/sched/core.c int __sched yield_to(struct task_struct *p, bool preempt)
p                5703 kernel/sched/core.c 	p_rq = task_rq(p);
p                5714 kernel/sched/core.c 	if (task_rq(p) != p_rq) {
p                5722 kernel/sched/core.c 	if (curr->sched_class != p->sched_class)
p                5725 kernel/sched/core.c 	if (task_running(p_rq, p) || p->state)
p                5728 kernel/sched/core.c 	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
p                5848 kernel/sched/core.c 	struct task_struct *p;
p                5859 kernel/sched/core.c 	p = find_process_by_pid(pid);
p                5860 kernel/sched/core.c 	if (!p)
p                5863 kernel/sched/core.c 	retval = security_task_getscheduler(p);
p                5867 kernel/sched/core.c 	rq = task_rq_lock(p, &rf);
p                5869 kernel/sched/core.c 	if (p->sched_class->get_rr_interval)
p                5870 kernel/sched/core.c 		time_slice = p->sched_class->get_rr_interval(rq, p);
p                5871 kernel/sched/core.c 	task_rq_unlock(rq, p, &rf);
p                5918 kernel/sched/core.c void sched_show_task(struct task_struct *p)
p                5923 kernel/sched/core.c 	if (!try_get_task_stack(p))
p                5926 kernel/sched/core.c 	printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p));
p                5928 kernel/sched/core.c 	if (p->state == TASK_RUNNING)
p                5931 kernel/sched/core.c 	free = stack_not_used(p);
p                5935 kernel/sched/core.c 	if (pid_alive(p))
p                5936 kernel/sched/core.c 		ppid = task_pid_nr(rcu_dereference(p->real_parent));
p                5939 kernel/sched/core.c 		task_pid_nr(p), ppid,
p                5940 kernel/sched/core.c 		(unsigned long)task_thread_info(p)->flags);
p                5942 kernel/sched/core.c 	print_worker_info(KERN_INFO, p);
p                5943 kernel/sched/core.c 	show_stack(p, NULL);
p                5944 kernel/sched/core.c 	put_task_stack(p);
p                5949 kernel/sched/core.c state_filter_match(unsigned long state_filter, struct task_struct *p)
p                5956 kernel/sched/core.c 	if (!(p->state & state_filter))
p                5963 kernel/sched/core.c 	if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
p                5972 kernel/sched/core.c 	struct task_struct *g, *p;
p                5982 kernel/sched/core.c 	for_each_process_thread(g, p) {
p                5992 kernel/sched/core.c 		if (state_filter_match(state_filter, p))
p                5993 kernel/sched/core.c 			sched_show_task(p);
p                6093 kernel/sched/core.c int task_can_attach(struct task_struct *p,
p                6107 kernel/sched/core.c 	if (p->flags & PF_NO_SETAFFINITY) {
p                6112 kernel/sched/core.c 	if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
p                6114 kernel/sched/core.c 		ret = dl_task_can_attach(p, cs_cpus_allowed);
p                6124 kernel/sched/core.c int migrate_task_to(struct task_struct *p, int target_cpu)
p                6126 kernel/sched/core.c 	struct migration_arg arg = { p, target_cpu };
p                6127 kernel/sched/core.c 	int curr_cpu = task_cpu(p);
p                6132 kernel/sched/core.c 	if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
p                6137 kernel/sched/core.c 	trace_sched_move_numa(p, curr_cpu, target_cpu);
p                6145 kernel/sched/core.c void sched_setnuma(struct task_struct *p, int nid)
p                6151 kernel/sched/core.c 	rq = task_rq_lock(p, &rf);
p                6152 kernel/sched/core.c 	queued = task_on_rq_queued(p);
p                6153 kernel/sched/core.c 	running = task_current(rq, p);
p                6156 kernel/sched/core.c 		dequeue_task(rq, p, DEQUEUE_SAVE);
p                6158 kernel/sched/core.c 		put_prev_task(rq, p);
p                6160 kernel/sched/core.c 	p->numa_preferred_nid = nid;
p                6163 kernel/sched/core.c 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
p                6165 kernel/sched/core.c 		set_next_task(rq, p);
p                6166 kernel/sched/core.c 	task_rq_unlock(rq, p, &rf);
p                6826 kernel/sched/core.c 	struct task_struct *g, *p;
p                6832 kernel/sched/core.c 	for_each_process_thread(g, p) {
p                6836 kernel/sched/core.c 		if (p->flags & PF_KTHREAD)
p                6839 kernel/sched/core.c 		p->se.exec_start = 0;
p                6840 kernel/sched/core.c 		schedstat_set(p->se.statistics.wait_start,  0);
p                6841 kernel/sched/core.c 		schedstat_set(p->se.statistics.sleep_start, 0);
p                6842 kernel/sched/core.c 		schedstat_set(p->se.statistics.block_start, 0);
p                6844 kernel/sched/core.c 		if (!dl_task(p) && !rt_task(p)) {
p                6849 kernel/sched/core.c 			if (task_nice(p) < 0)
p                6850 kernel/sched/core.c 				set_user_nice(p, 0);
p                6854 kernel/sched/core.c 		__sched_setscheduler(p, &attr, false, false);
p                6903 kernel/sched/core.c void ia64_set_curr_task(int cpu, struct task_struct *p)
p                6905 kernel/sched/core.c 	cpu_curr(cpu) = p;
p                  71 kernel/sched/cpudeadline.c 	int p;
p                  80 kernel/sched/cpudeadline.c 		p = parent(idx);
p                  81 kernel/sched/cpudeadline.c 		if (dl_time_before(orig_dl, cp->elements[p].dl))
p                  84 kernel/sched/cpudeadline.c 		cp->elements[idx].cpu = cp->elements[p].cpu;
p                  85 kernel/sched/cpudeadline.c 		cp->elements[idx].dl = cp->elements[p].dl;
p                  87 kernel/sched/cpudeadline.c 		idx = p;
p                 117 kernel/sched/cpudeadline.c int cpudl_find(struct cpudl *cp, struct task_struct *p,
p                 120 kernel/sched/cpudeadline.c 	const struct sched_dl_entity *dl_se = &p->dl;
p                 123 kernel/sched/cpudeadline.c 	    cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) {
p                 130 kernel/sched/cpudeadline.c 		if (cpumask_test_cpu(best_cpu, p->cpus_ptr) &&
p                  19 kernel/sched/cpudeadline.h int  cpudl_find(struct cpudl *cp, struct task_struct *p, struct cpumask *later_mask);
p                 208 kernel/sched/cpufreq_schedutil.c 				 struct task_struct *p)
p                 241 kernel/sched/cpufreq_schedutil.c 		util = uclamp_util_with(rq, util, p);
p                  59 kernel/sched/cpupri.c int cpupri_find(struct cpupri *cp, struct task_struct *p,
p                  63 kernel/sched/cpupri.c 	int task_pri = convert_prio(p->prio);
p                  97 kernel/sched/cpupri.c 		if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
p                 101 kernel/sched/cpupri.c 			cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);
p                  21 kernel/sched/cpupri.h int  cpupri_find(struct cpupri *cp, struct task_struct *p, struct cpumask *lowest_mask);
p                  98 kernel/sched/cputime.c static inline void task_group_account_field(struct task_struct *p, int index,
p                 109 kernel/sched/cputime.c 	cgroup_account_cputime_field(p, index, tmp);
p                 117 kernel/sched/cputime.c void account_user_time(struct task_struct *p, u64 cputime)
p                 122 kernel/sched/cputime.c 	p->utime += cputime;
p                 123 kernel/sched/cputime.c 	account_group_user_time(p, cputime);
p                 125 kernel/sched/cputime.c 	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
p                 128 kernel/sched/cputime.c 	task_group_account_field(p, index, cputime);
p                 131 kernel/sched/cputime.c 	acct_account_cputime(p);
p                 139 kernel/sched/cputime.c void account_guest_time(struct task_struct *p, u64 cputime)
p                 144 kernel/sched/cputime.c 	p->utime += cputime;
p                 145 kernel/sched/cputime.c 	account_group_user_time(p, cputime);
p                 146 kernel/sched/cputime.c 	p->gtime += cputime;
p                 149 kernel/sched/cputime.c 	if (task_nice(p) > 0) {
p                 164 kernel/sched/cputime.c void account_system_index_time(struct task_struct *p,
p                 168 kernel/sched/cputime.c 	p->stime += cputime;
p                 169 kernel/sched/cputime.c 	account_group_system_time(p, cputime);
p                 172 kernel/sched/cputime.c 	task_group_account_field(p, index, cputime);
p                 175 kernel/sched/cputime.c 	acct_account_cputime(p);
p                 184 kernel/sched/cputime.c void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
p                 188 kernel/sched/cputime.c 	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
p                 189 kernel/sched/cputime.c 		account_guest_time(p, cputime);
p                 200 kernel/sched/cputime.c 	account_system_index_time(p, cputime, index);
p                 357 kernel/sched/cputime.c static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
p                 375 kernel/sched/cputime.c 	if (this_cpu_ksoftirqd() == p) {
p                 381 kernel/sched/cputime.c 		account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
p                 383 kernel/sched/cputime.c 		account_user_time(p, cputime);
p                 384 kernel/sched/cputime.c 	} else if (p == rq->idle) {
p                 386 kernel/sched/cputime.c 	} else if (p->flags & PF_VCPU) { /* System time or guest time */
p                 387 kernel/sched/cputime.c 		account_guest_time(p, cputime);
p                 389 kernel/sched/cputime.c 		account_system_index_time(p, cputime, CPUTIME_SYSTEM);
p                 401 kernel/sched/cputime.c static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
p                 451 kernel/sched/cputime.c void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
p                 453 kernel/sched/cputime.c 	*ut = p->utime;
p                 454 kernel/sched/cputime.c 	*st = p->stime;
p                 458 kernel/sched/cputime.c void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
p                 462 kernel/sched/cputime.c 	thread_group_cputime(p, &cputime);
p                 475 kernel/sched/cputime.c void account_process_tick(struct task_struct *p, int user_tick)
p                 484 kernel/sched/cputime.c 		irqtime_account_process_tick(p, user_tick, rq, 1);
p                 497 kernel/sched/cputime.c 		account_user_time(p, cputime);
p                 498 kernel/sched/cputime.c 	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
p                 499 kernel/sched/cputime.c 		account_system_time(p, HARDIRQ_OFFSET, cputime);
p                 663 kernel/sched/cputime.c void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
p                 666 kernel/sched/cputime.c 		.sum_exec_runtime = p->se.sum_exec_runtime,
p                 669 kernel/sched/cputime.c 	task_cputime(p, &cputime.utime, &cputime.stime);
p                 670 kernel/sched/cputime.c 	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
p                 674 kernel/sched/cputime.c void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
p                 678 kernel/sched/cputime.c 	thread_group_cputime(p, &cputime);
p                 679 kernel/sched/cputime.c 	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
p                  35 kernel/sched/deadline.c 	struct task_struct *p = dl_task_of(dl_se);
p                  36 kernel/sched/deadline.c 	struct rq *rq = task_rq(p);
p                 156 kernel/sched/deadline.c void dl_change_utilization(struct task_struct *p, u64 new_bw)
p                 160 kernel/sched/deadline.c 	BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);
p                 162 kernel/sched/deadline.c 	if (task_on_rq_queued(p))
p                 165 kernel/sched/deadline.c 	rq = task_rq(p);
p                 166 kernel/sched/deadline.c 	if (p->dl.dl_non_contending) {
p                 167 kernel/sched/deadline.c 		sub_running_bw(&p->dl, &rq->dl);
p                 168 kernel/sched/deadline.c 		p->dl.dl_non_contending = 0;
p                 176 kernel/sched/deadline.c 		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
p                 177 kernel/sched/deadline.c 			put_task_struct(p);
p                 179 kernel/sched/deadline.c 	__sub_rq_bw(p->dl.dl_bw, &rq->dl);
p                 237 kernel/sched/deadline.c static void task_non_contending(struct task_struct *p)
p                 239 kernel/sched/deadline.c 	struct sched_dl_entity *dl_se = &p->dl;
p                 272 kernel/sched/deadline.c 		if (dl_task(p))
p                 274 kernel/sched/deadline.c 		if (!dl_task(p) || p->state == TASK_DEAD) {
p                 275 kernel/sched/deadline.c 			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
p                 277 kernel/sched/deadline.c 			if (p->state == TASK_DEAD)
p                 278 kernel/sched/deadline.c 				sub_rq_bw(&p->dl, &rq->dl);
p                 280 kernel/sched/deadline.c 			__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
p                 281 kernel/sched/deadline.c 			__dl_clear_params(p);
p                 289 kernel/sched/deadline.c 	get_task_struct(p);
p                 330 kernel/sched/deadline.c static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
p                 332 kernel/sched/deadline.c 	struct sched_dl_entity *dl_se = &p->dl;
p                 423 kernel/sched/deadline.c 	struct task_struct *p = dl_task_of(dl_se);
p                 425 kernel/sched/deadline.c 	if (p->nr_cpus_allowed > 1)
p                 433 kernel/sched/deadline.c 	struct task_struct *p = dl_task_of(dl_se);
p                 435 kernel/sched/deadline.c 	if (p->nr_cpus_allowed > 1)
p                 445 kernel/sched/deadline.c static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
p                 453 kernel/sched/deadline.c 	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
p                 459 kernel/sched/deadline.c 		if (dl_entity_preempt(&p->dl, &entry->dl))
p                 468 kernel/sched/deadline.c 		dl_rq->earliest_dl.next = p->dl.deadline;
p                 470 kernel/sched/deadline.c 	rb_link_node(&p->pushable_dl_tasks, parent, link);
p                 471 kernel/sched/deadline.c 	rb_insert_color_cached(&p->pushable_dl_tasks,
p                 475 kernel/sched/deadline.c static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
p                 479 kernel/sched/deadline.c 	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
p                 482 kernel/sched/deadline.c 	if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
p                 485 kernel/sched/deadline.c 		next_node = rb_next(&p->pushable_dl_tasks);
p                 492 kernel/sched/deadline.c 	rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
p                 493 kernel/sched/deadline.c 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
p                 529 kernel/sched/deadline.c static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
p                 534 kernel/sched/deadline.c 	later_rq = find_lock_later_rq(p, rq);
p                 542 kernel/sched/deadline.c 		cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
p                 561 kernel/sched/deadline.c 	if (p->dl.dl_non_contending || p->dl.dl_throttled) {
p                 568 kernel/sched/deadline.c 		sub_running_bw(&p->dl, &rq->dl);
p                 569 kernel/sched/deadline.c 		sub_rq_bw(&p->dl, &rq->dl);
p                 571 kernel/sched/deadline.c 		add_rq_bw(&p->dl, &later_rq->dl);
p                 572 kernel/sched/deadline.c 		add_running_bw(&p->dl, &later_rq->dl);
p                 574 kernel/sched/deadline.c 		sub_rq_bw(&p->dl, &rq->dl);
p                 575 kernel/sched/deadline.c 		add_rq_bw(&p->dl, &later_rq->dl);
p                 585 kernel/sched/deadline.c 	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
p                 590 kernel/sched/deadline.c 	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
p                 593 kernel/sched/deadline.c 	set_task_cpu(p, later_rq->cpu);
p                 602 kernel/sched/deadline.c void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
p                 607 kernel/sched/deadline.c void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
p                 639 kernel/sched/deadline.c static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
p                 640 kernel/sched/deadline.c static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
p                 641 kernel/sched/deadline.c static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
p                 920 kernel/sched/deadline.c static int start_dl_timer(struct task_struct *p)
p                 922 kernel/sched/deadline.c 	struct sched_dl_entity *dl_se = &p->dl;
p                 924 kernel/sched/deadline.c 	struct rq *rq = task_rq(p);
p                 958 kernel/sched/deadline.c 		get_task_struct(p);
p                 983 kernel/sched/deadline.c 	struct task_struct *p = dl_task_of(dl_se);
p                 987 kernel/sched/deadline.c 	rq = task_rq_lock(p, &rf);
p                 993 kernel/sched/deadline.c 	if (!dl_task(p))
p                1027 kernel/sched/deadline.c 	if (!task_on_rq_queued(p)) {
p                1039 kernel/sched/deadline.c 		rq = dl_task_offline_migration(rq, p);
p                1051 kernel/sched/deadline.c 	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
p                1053 kernel/sched/deadline.c 		check_preempt_curr_dl(rq, p, 0);
p                1074 kernel/sched/deadline.c 	task_rq_unlock(rq, p, &rf);
p                1080 kernel/sched/deadline.c 	put_task_struct(p);
p                1113 kernel/sched/deadline.c 	struct task_struct *p = dl_task_of(dl_se);
p                1118 kernel/sched/deadline.c 		if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
p                1287 kernel/sched/deadline.c 	struct task_struct *p = dl_task_of(dl_se);
p                1291 kernel/sched/deadline.c 	rq = task_rq_lock(p, &rf);
p                1296 kernel/sched/deadline.c 	if (!dl_task(p) || p->state == TASK_DEAD) {
p                1297 kernel/sched/deadline.c 		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
p                1299 kernel/sched/deadline.c 		if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
p                1300 kernel/sched/deadline.c 			sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
p                1301 kernel/sched/deadline.c 			sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
p                1306 kernel/sched/deadline.c 		__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
p                1308 kernel/sched/deadline.c 		__dl_clear_params(p);
p                1318 kernel/sched/deadline.c 	task_rq_unlock(rq, p, &rf);
p                1319 kernel/sched/deadline.c 	put_task_struct(p);
p                1472 kernel/sched/deadline.c static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
p                1474 kernel/sched/deadline.c 	struct task_struct *pi_task = rt_mutex_get_top_task(p);
p                1475 kernel/sched/deadline.c 	struct sched_dl_entity *pi_se = &p->dl;
p                1485 kernel/sched/deadline.c 	if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
p                1487 kernel/sched/deadline.c 	} else if (!dl_prio(p->normal_prio)) {
p                1495 kernel/sched/deadline.c 		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
p                1505 kernel/sched/deadline.c 	if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
p                1506 kernel/sched/deadline.c 		dl_check_constrained_dl(&p->dl);
p                1508 kernel/sched/deadline.c 	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
p                1509 kernel/sched/deadline.c 		add_rq_bw(&p->dl, &rq->dl);
p                1510 kernel/sched/deadline.c 		add_running_bw(&p->dl, &rq->dl);
p                1525 kernel/sched/deadline.c 	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
p                1527 kernel/sched/deadline.c 			task_contending(&p->dl, flags);
p                1532 kernel/sched/deadline.c 	enqueue_dl_entity(&p->dl, pi_se, flags);
p                1534 kernel/sched/deadline.c 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
p                1535 kernel/sched/deadline.c 		enqueue_pushable_dl_task(rq, p);
p                1538 kernel/sched/deadline.c static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
p                1540 kernel/sched/deadline.c 	dequeue_dl_entity(&p->dl);
p                1541 kernel/sched/deadline.c 	dequeue_pushable_dl_task(rq, p);
p                1544 kernel/sched/deadline.c static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
p                1547 kernel/sched/deadline.c 	__dequeue_task_dl(rq, p, flags);
p                1549 kernel/sched/deadline.c 	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
p                1550 kernel/sched/deadline.c 		sub_running_bw(&p->dl, &rq->dl);
p                1551 kernel/sched/deadline.c 		sub_rq_bw(&p->dl, &rq->dl);
p                1564 kernel/sched/deadline.c 		task_non_contending(p);
p                1602 kernel/sched/deadline.c select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
p                1626 kernel/sched/deadline.c 	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
p                1627 kernel/sched/deadline.c 	    (p->nr_cpus_allowed > 1)) {
p                1628 kernel/sched/deadline.c 		int target = find_later_rq(p);
p                1631 kernel/sched/deadline.c 				(dl_time_before(p->dl.deadline,
p                1642 kernel/sched/deadline.c static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
p                1646 kernel/sched/deadline.c 	if (p->state != TASK_WAKING)
p                1649 kernel/sched/deadline.c 	rq = task_rq(p);
p                1656 kernel/sched/deadline.c 	if (p->dl.dl_non_contending) {
p                1657 kernel/sched/deadline.c 		sub_running_bw(&p->dl, &rq->dl);
p                1658 kernel/sched/deadline.c 		p->dl.dl_non_contending = 0;
p                1666 kernel/sched/deadline.c 		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
p                1667 kernel/sched/deadline.c 			put_task_struct(p);
p                1669 kernel/sched/deadline.c 	sub_rq_bw(&p->dl, &rq->dl);
p                1673 kernel/sched/deadline.c static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
p                1687 kernel/sched/deadline.c 	if (p->nr_cpus_allowed != 1 &&
p                1688 kernel/sched/deadline.c 	    cpudl_find(&rq->rd->cpudl, p, NULL))
p                1694 kernel/sched/deadline.c static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
p                1696 kernel/sched/deadline.c 	if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
p                1716 kernel/sched/deadline.c static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
p                1719 kernel/sched/deadline.c 	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
p                1729 kernel/sched/deadline.c 	if ((p->dl.deadline == rq->curr->dl.deadline) &&
p                1731 kernel/sched/deadline.c 		check_preempt_equal_dl(rq, p);
p                1736 kernel/sched/deadline.c static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
p                1738 kernel/sched/deadline.c 	hrtick_start(rq, p->dl.runtime);
p                1741 kernel/sched/deadline.c static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
p                1746 kernel/sched/deadline.c static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
p                1748 kernel/sched/deadline.c 	p->se.exec_start = rq_clock_task(rq);
p                1751 kernel/sched/deadline.c 	dequeue_pushable_dl_task(rq, p);
p                1757 kernel/sched/deadline.c 		start_hrtick_dl(rq, p);
p                1781 kernel/sched/deadline.c 	struct task_struct *p;
p                1790 kernel/sched/deadline.c 	p = dl_task_of(dl_se);
p                1791 kernel/sched/deadline.c 	set_next_task_dl(rq, p, true);
p                1792 kernel/sched/deadline.c 	return p;
p                1795 kernel/sched/deadline.c static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
p                1800 kernel/sched/deadline.c 	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
p                1801 kernel/sched/deadline.c 		enqueue_pushable_dl_task(rq, p);
p                1812 kernel/sched/deadline.c static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
p                1822 kernel/sched/deadline.c 	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
p                1823 kernel/sched/deadline.c 	    is_leftmost(p, &rq->dl))
p                1824 kernel/sched/deadline.c 		start_hrtick_dl(rq, p);
p                1827 kernel/sched/deadline.c static void task_fork_dl(struct task_struct *p)
p                1840 kernel/sched/deadline.c static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
p                1842 kernel/sched/deadline.c 	if (!task_running(rq, p) &&
p                1843 kernel/sched/deadline.c 	    cpumask_test_cpu(cpu, p->cpus_ptr))
p                1855 kernel/sched/deadline.c 	struct task_struct *p = NULL;
p                1862 kernel/sched/deadline.c 		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
p                1864 kernel/sched/deadline.c 		if (pick_dl_task(rq, p, cpu))
p                1865 kernel/sched/deadline.c 			return p;
p                2023 kernel/sched/deadline.c 	struct task_struct *p;
p                2028 kernel/sched/deadline.c 	p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
p                2031 kernel/sched/deadline.c 	BUG_ON(rq->cpu != task_cpu(p));
p                2032 kernel/sched/deadline.c 	BUG_ON(task_current(rq, p));
p                2033 kernel/sched/deadline.c 	BUG_ON(p->nr_cpus_allowed <= 1);
p                2035 kernel/sched/deadline.c 	BUG_ON(!task_on_rq_queued(p));
p                2036 kernel/sched/deadline.c 	BUG_ON(!dl_task(p));
p                2038 kernel/sched/deadline.c 	return p;
p                2137 kernel/sched/deadline.c 	struct task_struct *p;
p                2176 kernel/sched/deadline.c 		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
p                2183 kernel/sched/deadline.c 		if (p && dl_time_before(p->dl.deadline, dmin) &&
p                2185 kernel/sched/deadline.c 		     dl_time_before(p->dl.deadline,
p                2187 kernel/sched/deadline.c 			WARN_ON(p == src_rq->curr);
p                2188 kernel/sched/deadline.c 			WARN_ON(!task_on_rq_queued(p));
p                2194 kernel/sched/deadline.c 			if (dl_time_before(p->dl.deadline,
p                2200 kernel/sched/deadline.c 			deactivate_task(src_rq, p, 0);
p                2201 kernel/sched/deadline.c 			set_task_cpu(p, this_cpu);
p                2202 kernel/sched/deadline.c 			activate_task(this_rq, p, 0);
p                2203 kernel/sched/deadline.c 			dmin = p->dl.deadline;
p                2219 kernel/sched/deadline.c static void task_woken_dl(struct rq *rq, struct task_struct *p)
p                2221 kernel/sched/deadline.c 	if (!task_running(rq, p) &&
p                2223 kernel/sched/deadline.c 	    p->nr_cpus_allowed > 1 &&
p                2226 kernel/sched/deadline.c 	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
p                2231 kernel/sched/deadline.c static void set_cpus_allowed_dl(struct task_struct *p,
p                2237 kernel/sched/deadline.c 	BUG_ON(!dl_task(p));
p                2239 kernel/sched/deadline.c 	rq = task_rq(p);
p                2257 kernel/sched/deadline.c 		__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
p                2261 kernel/sched/deadline.c 	set_cpus_allowed_common(p, new_mask);
p                2294 kernel/sched/deadline.c void dl_add_task_root_domain(struct task_struct *p)
p                2300 kernel/sched/deadline.c 	rq = task_rq_lock(p, &rf);
p                2301 kernel/sched/deadline.c 	if (!dl_task(p))
p                2307 kernel/sched/deadline.c 	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
p                2312 kernel/sched/deadline.c 	task_rq_unlock(rq, p, &rf);
p                2326 kernel/sched/deadline.c static void switched_from_dl(struct rq *rq, struct task_struct *p)
p                2336 kernel/sched/deadline.c 	if (task_on_rq_queued(p) && p->dl.dl_runtime)
p                2337 kernel/sched/deadline.c 		task_non_contending(p);
p                2339 kernel/sched/deadline.c 	if (!task_on_rq_queued(p)) {
p                2346 kernel/sched/deadline.c 		if (p->dl.dl_non_contending)
p                2347 kernel/sched/deadline.c 			sub_running_bw(&p->dl, &rq->dl);
p                2348 kernel/sched/deadline.c 		sub_rq_bw(&p->dl, &rq->dl);
p                2356 kernel/sched/deadline.c 	if (p->dl.dl_non_contending)
p                2357 kernel/sched/deadline.c 		p->dl.dl_non_contending = 0;
p                2364 kernel/sched/deadline.c 	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
p                2374 kernel/sched/deadline.c static void switched_to_dl(struct rq *rq, struct task_struct *p)
p                2376 kernel/sched/deadline.c 	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
p                2377 kernel/sched/deadline.c 		put_task_struct(p);
p                2380 kernel/sched/deadline.c 	if (!task_on_rq_queued(p)) {
p                2381 kernel/sched/deadline.c 		add_rq_bw(&p->dl, &rq->dl);
p                2386 kernel/sched/deadline.c 	if (rq->curr != p) {
p                2388 kernel/sched/deadline.c 		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
p                2392 kernel/sched/deadline.c 			check_preempt_curr_dl(rq, p, 0);
p                2402 kernel/sched/deadline.c static void prio_changed_dl(struct rq *rq, struct task_struct *p,
p                2405 kernel/sched/deadline.c 	if (task_on_rq_queued(p) || rq->curr == p) {
p                2421 kernel/sched/deadline.c 		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
p                2552 kernel/sched/deadline.c int sched_dl_overflow(struct task_struct *p, int policy,
p                2555 kernel/sched/deadline.c 	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
p                2565 kernel/sched/deadline.c 	if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
p                2574 kernel/sched/deadline.c 	cpus = dl_bw_cpus(task_cpu(p));
p                2575 kernel/sched/deadline.c 	if (dl_policy(policy) && !task_has_dl_policy(p) &&
p                2577 kernel/sched/deadline.c 		if (hrtimer_active(&p->dl.inactive_timer))
p                2578 kernel/sched/deadline.c 			__dl_sub(dl_b, p->dl.dl_bw, cpus);
p                2581 kernel/sched/deadline.c 	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
p                2582 kernel/sched/deadline.c 		   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
p                2590 kernel/sched/deadline.c 		__dl_sub(dl_b, p->dl.dl_bw, cpus);
p                2592 kernel/sched/deadline.c 		dl_change_utilization(p, new_bw);
p                2594 kernel/sched/deadline.c 	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
p                2615 kernel/sched/deadline.c void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
p                2617 kernel/sched/deadline.c 	struct sched_dl_entity *dl_se = &p->dl;
p                2627 kernel/sched/deadline.c void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
p                2629 kernel/sched/deadline.c 	struct sched_dl_entity *dl_se = &p->dl;
p                2631 kernel/sched/deadline.c 	attr->sched_priority = p->rt_priority;
p                2685 kernel/sched/deadline.c void __dl_clear_params(struct task_struct *p)
p                2687 kernel/sched/deadline.c 	struct sched_dl_entity *dl_se = &p->dl;
p                2702 kernel/sched/deadline.c bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
p                2704 kernel/sched/deadline.c 	struct sched_dl_entity *dl_se = &p->dl;
p                2716 kernel/sched/deadline.c int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
p                2730 kernel/sched/deadline.c 	overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
p                2740 kernel/sched/deadline.c 		__dl_add(dl_b, p->dl.dl_bw, cpus);
p                 434 kernel/sched/debug.c print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
p                 436 kernel/sched/debug.c 	if (rq->curr == p)
p                 439 kernel/sched/debug.c 		SEQ_printf(m, " %c", task_state_to_char(p));
p                 442 kernel/sched/debug.c 		p->comm, task_pid_nr(p),
p                 443 kernel/sched/debug.c 		SPLIT_NS(p->se.vruntime),
p                 444 kernel/sched/debug.c 		(long long)(p->nvcsw + p->nivcsw),
p                 445 kernel/sched/debug.c 		p->prio);
p                 448 kernel/sched/debug.c 		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
p                 449 kernel/sched/debug.c 		SPLIT_NS(p->se.sum_exec_runtime),
p                 450 kernel/sched/debug.c 		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));
p                 453 kernel/sched/debug.c 	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
p                 456 kernel/sched/debug.c 	SEQ_printf(m, " %s", task_group_path(task_group(p)));
p                 464 kernel/sched/debug.c 	struct task_struct *g, *p;
p                 474 kernel/sched/debug.c 	for_each_process_thread(g, p) {
p                 475 kernel/sched/debug.c 		if (task_cpu(p) != rq_cpu)
p                 478 kernel/sched/debug.c 		print_task(m, rq, p);
p                 815 kernel/sched/debug.c #define   P(F)	SEQ_printf(m, "%-45s:%21Ld\n",	     #F, (long long)p->F)
p                 817 kernel/sched/debug.c #define   PN(F)	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
p                 831 kernel/sched/debug.c static void sched_show_numa(struct task_struct *p, struct seq_file *m)
p                 836 kernel/sched/debug.c 	if (p->mm)
p                 839 kernel/sched/debug.c 	task_lock(p);
p                 840 kernel/sched/debug.c 	pol = p->mempolicy;
p                 844 kernel/sched/debug.c 	task_unlock(p);
p                 850 kernel/sched/debug.c 			task_node(p), task_numa_group_id(p));
p                 851 kernel/sched/debug.c 	show_numa_stats(p, m);
p                 856 kernel/sched/debug.c void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
p                 861 kernel/sched/debug.c 	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
p                 862 kernel/sched/debug.c 						get_nr_threads(p));
p                 869 kernel/sched/debug.c 	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
p                 871 kernel/sched/debug.c 	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
p                 875 kernel/sched/debug.c 	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
p                 877 kernel/sched/debug.c 	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))
p                 883 kernel/sched/debug.c 	nr_switches = p->nvcsw + p->nivcsw;
p                 918 kernel/sched/debug.c 		avg_atom = p->se.sum_exec_runtime;
p                 924 kernel/sched/debug.c 		avg_per_cpu = p->se.sum_exec_runtime;
p                 925 kernel/sched/debug.c 		if (p->se.nr_migrations) {
p                 927 kernel/sched/debug.c 						p->se.nr_migrations);
p                 938 kernel/sched/debug.c 		   "nr_voluntary_switches", (long long)p->nvcsw);
p                 940 kernel/sched/debug.c 		   "nr_involuntary_switches", (long long)p->nivcsw);
p                 957 kernel/sched/debug.c 	if (task_has_dl_policy(p)) {
p                 978 kernel/sched/debug.c 	sched_show_numa(p, m);
p                 981 kernel/sched/debug.c void proc_sched_set_task(struct task_struct *p)
p                 984 kernel/sched/debug.c 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
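
The proc_sched_show_task() excerpts above derive two averages from the raw counters they print: avg_atom divides the task's total runtime by its number of context switches (nvcsw + nivcsw), and avg_per_cpu divides it by the number of migrations. A small sketch of that arithmetic, with hypothetical field names and a divide-by-zero guard in the same spirit:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical mirror of the counters printed by proc_sched_show_task(). */
    struct task_stats {
            uint64_t sum_exec_runtime;  /* ns on CPU */
            uint64_t nvcsw, nivcsw;     /* voluntary / involuntary switches */
            uint64_t nr_migrations;
    };

    int main(void)
    {
            struct task_stats t = {
                    .sum_exec_runtime = 1200000000ULL, /* 1.2 s on CPU */
                    .nvcsw = 300, .nivcsw = 100,
                    .nr_migrations = 8,
            };
            uint64_t nr_switches = t.nvcsw + t.nivcsw;
            uint64_t avg_atom = nr_switches ?
                    t.sum_exec_runtime / nr_switches : t.sum_exec_runtime;
            uint64_t avg_per_cpu = t.nr_migrations ?
                    t.sum_exec_runtime / t.nr_migrations : t.sum_exec_runtime;

            printf("avg_atom    = %llu ns\n", (unsigned long long)avg_atom);
            printf("avg_per_cpu = %llu ns\n", (unsigned long long)avg_per_cpu);
            return 0;
    }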
p                 261 kernel/sched/fair.c static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
p                 263 kernel/sched/fair.c 	return p->se.cfs_rq;
p                 446 kernel/sched/fair.c static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
p                 448 kernel/sched/fair.c 	return &task_rq(p)->cfs;
p                 453 kernel/sched/fair.c 	struct task_struct *p = task_of(se);
p                 454 kernel/sched/fair.c 	struct rq *rq = task_rq(p);
p                 727 kernel/sched/fair.c static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
p                 728 kernel/sched/fair.c static unsigned long task_h_load(struct task_struct *p);
p                 780 kernel/sched/fair.c void post_init_entity_util_avg(struct task_struct *p)
p                 782 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
p                 800 kernel/sched/fair.c 	if (p->sched_class != &fair_sched_class) {
p                 822 kernel/sched/fair.c void post_init_entity_util_avg(struct task_struct *p)
p                 894 kernel/sched/fair.c 	struct task_struct *p;
p                 903 kernel/sched/fair.c 		p = task_of(se);
p                 904 kernel/sched/fair.c 		if (task_on_rq_migrating(p)) {
p                 913 kernel/sched/fair.c 		trace_sched_stat_wait(p, delta);
p                1092 kernel/sched/fair.c static struct numa_group *deref_task_numa_group(struct task_struct *p)
p                1094 kernel/sched/fair.c 	return rcu_dereference_check(p->numa_group, p == current ||
p                1095 kernel/sched/fair.c 		(lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu)));
p                1098 kernel/sched/fair.c static struct numa_group *deref_curr_numa_group(struct task_struct *p)
p                1100 kernel/sched/fair.c 	return rcu_dereference_protected(p->numa_group, p == current);
p                1106 kernel/sched/fair.c static unsigned int task_nr_scan_windows(struct task_struct *p)
p                1117 kernel/sched/fair.c 	rss = get_mm_rss(p->mm);
p                1128 kernel/sched/fair.c static unsigned int task_scan_min(struct task_struct *p)
p                1138 kernel/sched/fair.c 	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
p                1142 kernel/sched/fair.c static unsigned int task_scan_start(struct task_struct *p)
p                1144 kernel/sched/fair.c 	unsigned long smin = task_scan_min(p);
p                1150 kernel/sched/fair.c 	ng = rcu_dereference(p->numa_group);
p                1164 kernel/sched/fair.c static unsigned int task_scan_max(struct task_struct *p)
p                1166 kernel/sched/fair.c 	unsigned long smin = task_scan_min(p);
p                1171 kernel/sched/fair.c 	smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
p                1174 kernel/sched/fair.c 	ng = deref_curr_numa_group(p);
p                1190 kernel/sched/fair.c static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
p                1192 kernel/sched/fair.c 	rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
p                1193 kernel/sched/fair.c 	rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
p                1196 kernel/sched/fair.c static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
p                1198 kernel/sched/fair.c 	rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
p                1199 kernel/sched/fair.c 	rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
p                1211 kernel/sched/fair.c pid_t task_numa_group_id(struct task_struct *p)
p                1217 kernel/sched/fair.c 	ng = rcu_dereference(p->numa_group);
p                1236 kernel/sched/fair.c static inline unsigned long task_faults(struct task_struct *p, int nid)
p                1238 kernel/sched/fair.c 	if (!p->numa_faults)
p                1241 kernel/sched/fair.c 	return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
p                1242 kernel/sched/fair.c 		p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
p                1245 kernel/sched/fair.c static inline unsigned long group_faults(struct task_struct *p, int nid)
p                1247 kernel/sched/fair.c 	struct numa_group *ng = deref_task_numa_group(p);
p                1299 kernel/sched/fair.c static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
p                1340 kernel/sched/fair.c 			faults = task_faults(p, node);
p                1342 kernel/sched/fair.c 			faults = group_faults(p, node);
p                1369 kernel/sched/fair.c static inline unsigned long task_weight(struct task_struct *p, int nid,
p                1374 kernel/sched/fair.c 	if (!p->numa_faults)
p                1377 kernel/sched/fair.c 	total_faults = p->total_numa_faults;
p                1382 kernel/sched/fair.c 	faults = task_faults(p, nid);
p                1383 kernel/sched/fair.c 	faults += score_nearby_nodes(p, nid, dist, true);
p                1388 kernel/sched/fair.c static inline unsigned long group_weight(struct task_struct *p, int nid,
p                1391 kernel/sched/fair.c 	struct numa_group *ng = deref_task_numa_group(p);
p                1402 kernel/sched/fair.c 	faults = group_faults(p, nid);
p                1403 kernel/sched/fair.c 	faults += score_nearby_nodes(p, nid, dist, false);
p                1408 kernel/sched/fair.c bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
p                1411 kernel/sched/fair.c 	struct numa_group *ng = deref_curr_numa_group(p);
p                1424 kernel/sched/fair.c 	if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) &&
p                1425 kernel/sched/fair.c 	    (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
p                1450 kernel/sched/fair.c 	if (cpupid_match_pid(p, last_cpupid))
p                1473 kernel/sched/fair.c 	return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
p                1474 kernel/sched/fair.c 	       group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
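
The final test in should_numa_migrate_memory() above is a cross-multiplied ratio comparison: it weighs the group's faults on the destination node against those on the source node with a 3:4 bias, so the page only moves when the destination clearly wins, giving hysteresis without any division. A tiny sketch of the same comparison (names are illustrative):

    #include <stdio.h>

    /* Cross-multiplied "dst/src > 4/3" test in the spirit of the listed line:
     * dst_cpu_faults * src_mem_faults * 3 > src_cpu_faults * dst_mem_faults * 4 */
    static int prefer_dst_node(unsigned long dst_cpu_faults,
                               unsigned long src_cpu_faults,
                               unsigned long dst_mem_faults,
                               unsigned long src_mem_faults)
    {
            return dst_cpu_faults * src_mem_faults * 3 >
                   src_cpu_faults * dst_mem_faults * 4;
    }

    int main(void)
    {
            /* destination sees twice the CPU faults: 2/1 beats 4/3, migrate */
            printf("%d\n", prefer_dst_node(200, 100, 100, 100));
            /* roughly equal nodes: 1/1 does not beat 4/3, stay put */
            printf("%d\n", prefer_dst_node(100, 100, 100, 100));
            return 0;
    }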
p                1505 kernel/sched/fair.c 	struct task_struct *p;
p                1521 kernel/sched/fair.c 			     struct task_struct *p, long imp)
p                1540 kernel/sched/fair.c 	if (p)
p                1541 kernel/sched/fair.c 		get_task_struct(p);
p                1543 kernel/sched/fair.c 	env->best_task = p;
p                1592 kernel/sched/fair.c 	struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p);
p                1613 kernel/sched/fair.c 	if (cur == env->p)
p                1679 kernel/sched/fair.c 	load = task_h_load(env->p) - task_h_load(cur);
p                1700 kernel/sched/fair.c 		env->dst_cpu = select_idle_sibling(env->p, env->src_cpu,
p                1717 kernel/sched/fair.c 	load = task_h_load(env->p);
p                1729 kernel/sched/fair.c 		if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
p                1737 kernel/sched/fair.c static int task_numa_migrate(struct task_struct *p)
p                1740 kernel/sched/fair.c 		.p = p,
p                1742 kernel/sched/fair.c 		.src_cpu = task_cpu(p),
p                1743 kernel/sched/fair.c 		.src_nid = task_node(p),
p                1779 kernel/sched/fair.c 		sched_setnuma(p, task_node(p));
p                1783 kernel/sched/fair.c 	env.dst_nid = p->numa_preferred_nid;
p                1785 kernel/sched/fair.c 	taskweight = task_weight(p, env.src_nid, dist);
p                1786 kernel/sched/fair.c 	groupweight = group_weight(p, env.src_nid, dist);
p                1788 kernel/sched/fair.c 	taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
p                1789 kernel/sched/fair.c 	groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
p                1802 kernel/sched/fair.c 	ng = deref_curr_numa_group(p);
p                1805 kernel/sched/fair.c 			if (nid == env.src_nid || nid == p->numa_preferred_nid)
p                1811 kernel/sched/fair.c 				taskweight = task_weight(p, env.src_nid, dist);
p                1812 kernel/sched/fair.c 				groupweight = group_weight(p, env.src_nid, dist);
p                1816 kernel/sched/fair.c 			taskimp = task_weight(p, nid, dist) - taskweight;
p                1817 kernel/sched/fair.c 			groupimp = group_weight(p, nid, dist) - groupweight;
p                1842 kernel/sched/fair.c 		if (nid != p->numa_preferred_nid)
p                1843 kernel/sched/fair.c 			sched_setnuma(p, nid);
p                1852 kernel/sched/fair.c 		ret = migrate_task_to(p, env.best_cpu);
p                1855 kernel/sched/fair.c 			trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
p                1859 kernel/sched/fair.c 	ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
p                1863 kernel/sched/fair.c 		trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
p                1869 kernel/sched/fair.c static void numa_migrate_preferred(struct task_struct *p)
p                1874 kernel/sched/fair.c 	if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults))
p                1878 kernel/sched/fair.c 	interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
p                1879 kernel/sched/fair.c 	p->numa_migrate_retry = jiffies + interval;
p                1882 kernel/sched/fair.c 	if (task_node(p) == p->numa_preferred_nid)
p                1886 kernel/sched/fair.c 	task_numa_migrate(p);
p                1932 kernel/sched/fair.c static void update_task_scan_period(struct task_struct *p,
p                1939 kernel/sched/fair.c 	unsigned long remote = p->numa_faults_locality[0];
p                1940 kernel/sched/fair.c 	unsigned long local = p->numa_faults_locality[1];
p                1949 kernel/sched/fair.c 	if (local + shared == 0 || p->numa_faults_locality[2]) {
p                1950 kernel/sched/fair.c 		p->numa_scan_period = min(p->numa_scan_period_max,
p                1951 kernel/sched/fair.c 			p->numa_scan_period << 1);
p                1953 kernel/sched/fair.c 		p->mm->numa_next_scan = jiffies +
p                1954 kernel/sched/fair.c 			msecs_to_jiffies(p->numa_scan_period);
p                1965 kernel/sched/fair.c 	period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
p                1998 kernel/sched/fair.c 	p->numa_scan_period = clamp(p->numa_scan_period + diff,
p                1999 kernel/sched/fair.c 			task_scan_min(p), task_scan_max(p));
p                2000 kernel/sched/fair.c 	memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
p                2010 kernel/sched/fair.c static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
p                2014 kernel/sched/fair.c 	now = p->se.exec_start;
p                2015 kernel/sched/fair.c 	runtime = p->se.sum_exec_runtime;
p                2017 kernel/sched/fair.c 	if (p->last_task_numa_placement) {
p                2018 kernel/sched/fair.c 		delta = runtime - p->last_sum_exec_runtime;
p                2019 kernel/sched/fair.c 		*period = now - p->last_task_numa_placement;
p                2025 kernel/sched/fair.c 		delta = p->se.avg.load_sum;
p                2029 kernel/sched/fair.c 	p->last_sum_exec_runtime = runtime;
p                2030 kernel/sched/fair.c 	p->last_task_numa_placement = now;
p                2040 kernel/sched/fair.c static int preferred_group_nid(struct task_struct *p, int nid)
p                2061 kernel/sched/fair.c 			score = group_weight(p, node, dist);
p                2097 kernel/sched/fair.c 					faults += group_faults(p, b);
p                2123 kernel/sched/fair.c static void task_numa_placement(struct task_struct *p)
p                2138 kernel/sched/fair.c 	seq = READ_ONCE(p->mm->numa_scan_seq);
p                2139 kernel/sched/fair.c 	if (p->numa_scan_seq == seq)
p                2141 kernel/sched/fair.c 	p->numa_scan_seq = seq;
p                2142 kernel/sched/fair.c 	p->numa_scan_period_max = task_scan_max(p);
p                2144 kernel/sched/fair.c 	total_faults = p->numa_faults_locality[0] +
p                2145 kernel/sched/fair.c 		       p->numa_faults_locality[1];
p                2146 kernel/sched/fair.c 	runtime = numa_get_avg_runtime(p, &period);
p                2149 kernel/sched/fair.c 	ng = deref_curr_numa_group(p);
p                2171 kernel/sched/fair.c 			diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
p                2172 kernel/sched/fair.c 			fault_types[priv] += p->numa_faults[membuf_idx];
p                2173 kernel/sched/fair.c 			p->numa_faults[membuf_idx] = 0;
p                2183 kernel/sched/fair.c 			f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
p                2185 kernel/sched/fair.c 			f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
p                2186 kernel/sched/fair.c 			p->numa_faults[cpubuf_idx] = 0;
p                2188 kernel/sched/fair.c 			p->numa_faults[mem_idx] += diff;
p                2189 kernel/sched/fair.c 			p->numa_faults[cpu_idx] += f_diff;
p                2190 kernel/sched/fair.c 			faults += p->numa_faults[mem_idx];
p                2191 kernel/sched/fair.c 			p->total_numa_faults += diff;
p                2221 kernel/sched/fair.c 		max_nid = preferred_group_nid(p, max_nid);
p                2226 kernel/sched/fair.c 		if (max_nid != p->numa_preferred_nid)
p                2227 kernel/sched/fair.c 			sched_setnuma(p, max_nid);
p                2230 kernel/sched/fair.c 	update_task_scan_period(p, fault_types[0], fault_types[1]);
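
task_numa_placement() folds each scan window's fault buffer into the long-term per-node counters with an exponential decay: the running count is halved and the freshly buffered faults are added on top (diff = buf - old / 2; old += diff), so recent behaviour dominates while history fades. A tiny standalone sketch of that decay with made-up arrays:

    #include <stdio.h>

    #define NR_NODES 4

    int main(void)
    {
            /* long-term counters vs. the per-scan buffer, per node */
            unsigned long faults[NR_NODES]     = { 400, 100, 0, 0 };
            unsigned long faults_buf[NR_NODES] = {  10, 300, 0, 0 };

            for (int nid = 0; nid < NR_NODES; nid++) {
                    /* halve the history, then add the new samples */
                    long diff = (long)faults_buf[nid] - (long)(faults[nid] / 2);

                    faults_buf[nid] = 0;
                    faults[nid] += diff;
                    printf("node %d: %lu faults\n", nid, faults[nid]);
            }
            return 0;
    }

Node 0 decays from 400 to 210 (half of 400 plus the 10 new faults), while node 1 grows from 100 to 350, which is how the placement code tracks a workload whose memory-access pattern is shifting between nodes.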
p                2244 kernel/sched/fair.c static void task_numa_group(struct task_struct *p, int cpupid, int flags,
p                2253 kernel/sched/fair.c 	if (unlikely(!deref_curr_numa_group(p))) {
p                2265 kernel/sched/fair.c 		grp->gid = p->pid;
p                2271 kernel/sched/fair.c 			grp->faults[i] = p->numa_faults[i];
p                2273 kernel/sched/fair.c 		grp->total_faults = p->total_numa_faults;
p                2276 kernel/sched/fair.c 		rcu_assign_pointer(p->numa_group, grp);
p                2289 kernel/sched/fair.c 	my_grp = deref_curr_numa_group(p);
p                2329 kernel/sched/fair.c 		my_grp->faults[i] -= p->numa_faults[i];
p                2330 kernel/sched/fair.c 		grp->faults[i] += p->numa_faults[i];
p                2332 kernel/sched/fair.c 	my_grp->total_faults -= p->total_numa_faults;
p                2333 kernel/sched/fair.c 	grp->total_faults += p->total_numa_faults;
p                2341 kernel/sched/fair.c 	rcu_assign_pointer(p->numa_group, grp);
p                2358 kernel/sched/fair.c void task_numa_free(struct task_struct *p, bool final)
p                2361 kernel/sched/fair.c 	struct numa_group *grp = rcu_dereference_raw(p->numa_group);
p                2362 kernel/sched/fair.c 	unsigned long *numa_faults = p->numa_faults;
p                2372 kernel/sched/fair.c 			grp->faults[i] -= p->numa_faults[i];
p                2373 kernel/sched/fair.c 		grp->total_faults -= p->total_numa_faults;
p                2377 kernel/sched/fair.c 		RCU_INIT_POINTER(p->numa_group, NULL);
p                2382 kernel/sched/fair.c 		p->numa_faults = NULL;
p                2385 kernel/sched/fair.c 		p->total_numa_faults = 0;
p                2396 kernel/sched/fair.c 	struct task_struct *p = current;
p                2407 kernel/sched/fair.c 	if (!p->mm)
p                2411 kernel/sched/fair.c 	if (unlikely(!p->numa_faults)) {
p                2412 kernel/sched/fair.c 		int size = sizeof(*p->numa_faults) *
p                2415 kernel/sched/fair.c 		p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
p                2416 kernel/sched/fair.c 		if (!p->numa_faults)
p                2419 kernel/sched/fair.c 		p->total_numa_faults = 0;
p                2420 kernel/sched/fair.c 		memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
p                2430 kernel/sched/fair.c 		priv = cpupid_match_pid(p, last_cpupid);
p                2432 kernel/sched/fair.c 			task_numa_group(p, last_cpupid, flags, &priv);
p                2441 kernel/sched/fair.c 	ng = deref_curr_numa_group(p);
p                2451 kernel/sched/fair.c 	if (time_after(jiffies, p->numa_migrate_retry)) {
p                2452 kernel/sched/fair.c 		task_numa_placement(p);
p                2453 kernel/sched/fair.c 		numa_migrate_preferred(p);
p                2457 kernel/sched/fair.c 		p->numa_pages_migrated += pages;
p                2459 kernel/sched/fair.c 		p->numa_faults_locality[2] += pages;
p                2461 kernel/sched/fair.c 	p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
p                2462 kernel/sched/fair.c 	p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
p                2463 kernel/sched/fair.c 	p->numa_faults_locality[local] += pages;
p                2466 kernel/sched/fair.c static void reset_ptenuma_scan(struct task_struct *p)
p                2476 kernel/sched/fair.c 	WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
p                2477 kernel/sched/fair.c 	p->mm->numa_scan_offset = 0;
p                2487 kernel/sched/fair.c 	struct task_struct *p = current;
p                2488 kernel/sched/fair.c 	struct mm_struct *mm = p->mm;
p                2489 kernel/sched/fair.c 	u64 runtime = p->se.sum_exec_runtime;
p                2495 kernel/sched/fair.c 	SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
p                2506 kernel/sched/fair.c 	if (p->flags & PF_EXITING)
p                2521 kernel/sched/fair.c 	if (p->numa_scan_period == 0) {
p                2522 kernel/sched/fair.c 		p->numa_scan_period_max = task_scan_max(p);
p                2523 kernel/sched/fair.c 		p->numa_scan_period = task_scan_start(p);
p                2526 kernel/sched/fair.c 	next_scan = now + msecs_to_jiffies(p->numa_scan_period);
p                2534 kernel/sched/fair.c 	p->node_stamp += 2 * TICK_NSEC;
p                2548 kernel/sched/fair.c 		reset_ptenuma_scan(p);
p                2611 kernel/sched/fair.c 		reset_ptenuma_scan(p);
p                2620 kernel/sched/fair.c 	if (unlikely(p->se.sum_exec_runtime != runtime)) {
p                2621 kernel/sched/fair.c 		u64 diff = p->se.sum_exec_runtime - runtime;
p                2622 kernel/sched/fair.c 		p->node_stamp += 32 * diff;
p                2626 kernel/sched/fair.c void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
p                2629 kernel/sched/fair.c 	struct mm_struct *mm = p->mm;
p                2638 kernel/sched/fair.c 	p->node_stamp			= 0;
p                2639 kernel/sched/fair.c 	p->numa_scan_seq		= mm ? mm->numa_scan_seq : 0;
p                2640 kernel/sched/fair.c 	p->numa_scan_period		= sysctl_numa_balancing_scan_delay;
p                2642 kernel/sched/fair.c 	p->numa_work.next		= &p->numa_work;
p                2643 kernel/sched/fair.c 	p->numa_faults			= NULL;
p                2644 kernel/sched/fair.c 	RCU_INIT_POINTER(p->numa_group, NULL);
p                2645 kernel/sched/fair.c 	p->last_task_numa_placement	= 0;
p                2646 kernel/sched/fair.c 	p->last_sum_exec_runtime	= 0;
p                2648 kernel/sched/fair.c 	init_task_work(&p->numa_work, task_numa_work);
p                2652 kernel/sched/fair.c 		p->numa_preferred_nid = NUMA_NO_NODE;
p                2666 kernel/sched/fair.c 		p->node_stamp = delay;
p                2703 kernel/sched/fair.c static void update_scan_period(struct task_struct *p, int new_cpu)
p                2705 kernel/sched/fair.c 	int src_nid = cpu_to_node(task_cpu(p));
p                2711 kernel/sched/fair.c 	if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
p                2722 kernel/sched/fair.c 	if (p->numa_scan_seq) {
p                2728 kernel/sched/fair.c 		if (dst_nid == p->numa_preferred_nid ||
p                2729 kernel/sched/fair.c 		    (p->numa_preferred_nid != NUMA_NO_NODE &&
p                2730 kernel/sched/fair.c 			src_nid != p->numa_preferred_nid))
p                2734 kernel/sched/fair.c 	p->numa_scan_period = task_scan_start(p);
p                2742 kernel/sched/fair.c static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
p                2746 kernel/sched/fair.c static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
p                2750 kernel/sched/fair.c static inline void update_scan_period(struct task_struct *p, int new_cpu)
p                2908 kernel/sched/fair.c void reweight_task(struct task_struct *p, int prio)
p                2910 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
p                3693 kernel/sched/fair.c static inline unsigned long task_util(struct task_struct *p)
p                3695 kernel/sched/fair.c 	return READ_ONCE(p->se.avg.util_avg);
p                3698 kernel/sched/fair.c static inline unsigned long _task_util_est(struct task_struct *p)
p                3700 kernel/sched/fair.c 	struct util_est ue = READ_ONCE(p->se.avg.util_est);
p                3705 kernel/sched/fair.c static inline unsigned long task_util_est(struct task_struct *p)
p                3707 kernel/sched/fair.c 	return max(task_util(p), _task_util_est(p));
p                3711 kernel/sched/fair.c 				    struct task_struct *p)
p                3720 kernel/sched/fair.c 	enqueued += _task_util_est(p);
p                3738 kernel/sched/fair.c util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
p                3749 kernel/sched/fair.c 	ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p));
p                3763 kernel/sched/fair.c 	ue = p->se.avg.util_est;
p                3771 kernel/sched/fair.c 	ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
p                3781 kernel/sched/fair.c 	if (task_util(p) > capacity_orig_of(cpu))
p                3804 kernel/sched/fair.c 	WRITE_ONCE(p->se.avg.util_est, ue);
p                3807 kernel/sched/fair.c static inline int task_fits_capacity(struct task_struct *p, long capacity)
p                3809 kernel/sched/fair.c 	return fits_capacity(task_util_est(p), capacity);
p                3812 kernel/sched/fair.c static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
p                3817 kernel/sched/fair.c 	if (!p) {
p                3822 kernel/sched/fair.c 	if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
p                3827 kernel/sched/fair.c 	rq->misfit_task_load = task_h_load(p);
p                3854 kernel/sched/fair.c util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
p                3857 kernel/sched/fair.c util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p,
p                3859 kernel/sched/fair.c static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
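
task_fits_capacity() above reduces to a single margin check between a task's estimated utilization and a CPU's capacity; in kernels of roughly this vintage the margin is a fixed 1280/1024 factor, so a task "fits" only if it would occupy about 80% of the CPU or less. A standalone sketch of that check, with the constants taken on that assumption:

    #include <stdio.h>

    /* "util fits in capacity" with ~20% headroom: util * 1280 < capacity * 1024,
     * i.e. the task must stay below roughly 80% of the CPU. */
    static int fits_capacity_sketch(unsigned long util, unsigned long capacity)
    {
            return util * 1280 < capacity * 1024;
    }

    int main(void)
    {
            /* capacities on the usual 0..1024 scale */
            printf("util 300 on a 512-capacity CPU: %d\n",
                   fits_capacity_sketch(300, 512));   /* fits */
            printf("util 450 on a 512-capacity CPU: %d\n",
                   fits_capacity_sketch(450, 512));   /* misfit */
            return 0;
    }

When the check fails, update_misfit_status() records the task's load in rq->misfit_task_load, which is what later nudges the load balancer to move the task to a bigger CPU.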
p                5135 kernel/sched/fair.c static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
p                5137 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
p                5140 kernel/sched/fair.c 	SCHED_WARN_ON(task_rq(p) != rq);
p                5148 kernel/sched/fair.c 			if (rq->curr == p)
p                5173 kernel/sched/fair.c hrtick_start_fair(struct rq *rq, struct task_struct *p)
p                5207 kernel/sched/fair.c enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
p                5210 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
p                5211 kernel/sched/fair.c 	int idle_h_nr_running = task_has_idle_policy(p);
p                5219 kernel/sched/fair.c 	util_est_enqueue(&rq->cfs, p);
p                5226 kernel/sched/fair.c 	if (p->in_iowait)
p                5315 kernel/sched/fair.c static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
p                5318 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
p                5320 kernel/sched/fair.c 	int idle_h_nr_running = task_has_idle_policy(p);
p                5367 kernel/sched/fair.c 	util_est_dequeue(&rq->cfs, p, task_sleep);
p                5420 kernel/sched/fair.c static void record_wakee(struct task_struct *p)
p                5431 kernel/sched/fair.c 	if (current->last_wakee != p) {
p                5432 kernel/sched/fair.c 		current->last_wakee = p;
p                5454 kernel/sched/fair.c static int wake_wide(struct task_struct *p)
p                5457 kernel/sched/fair.c 	unsigned int slave = p->wakee_flips;
p                5504 kernel/sched/fair.c wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
p                5521 kernel/sched/fair.c 	task_load = task_h_load(p);
p                5546 kernel/sched/fair.c static int wake_affine(struct sched_domain *sd, struct task_struct *p,
p                5555 kernel/sched/fair.c 		target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
p                5557 kernel/sched/fair.c 	schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
p                5562 kernel/sched/fair.c 	schedstat_inc(p->se.statistics.nr_wakeups_affine);
p                5566 kernel/sched/fair.c static unsigned long cpu_util_without(int cpu, struct task_struct *p);
p                5568 kernel/sched/fair.c static unsigned long capacity_spare_without(int cpu, struct task_struct *p)
p                5570 kernel/sched/fair.c 	return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0);
p                5580 kernel/sched/fair.c find_idlest_group(struct sched_domain *sd, struct task_struct *p,
p                5601 kernel/sched/fair.c 					p->cpus_ptr))
p                5621 kernel/sched/fair.c 			spare_cap = capacity_spare_without(i, p);
p                5677 kernel/sched/fair.c 	if (this_spare > task_util(p) / 2 &&
p                5681 kernel/sched/fair.c 	if (most_spare > task_util(p) / 2)
p                5714 kernel/sched/fair.c find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
p                5728 kernel/sched/fair.c 	for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
p                5772 kernel/sched/fair.c static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
p                5777 kernel/sched/fair.c 	if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
p                5785 kernel/sched/fair.c 		sync_entity_load_avg(&p->se);
p                5797 kernel/sched/fair.c 		group = find_idlest_group(sd, p, cpu, sd_flag);
p                5803 kernel/sched/fair.c 		new_cpu = find_idlest_group_cpu(group, p, cpu);
p                5883 kernel/sched/fair.c static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
p                5894 kernel/sched/fair.c 	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
p                5920 kernel/sched/fair.c static int select_idle_smt(struct task_struct *p, int target)
p                5928 kernel/sched/fair.c 		if (!cpumask_test_cpu(cpu, p->cpus_ptr))
p                5941 kernel/sched/fair.c static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
p                5946 kernel/sched/fair.c static inline int select_idle_smt(struct task_struct *p, int target)
p                5958 kernel/sched/fair.c static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
p                5992 kernel/sched/fair.c 	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
p                6014 kernel/sched/fair.c static int select_idle_sibling(struct task_struct *p, int prev, int target)
p                6030 kernel/sched/fair.c 	recent_used_cpu = p->recent_used_cpu;
p                6035 kernel/sched/fair.c 	    cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr)) {
p                6040 kernel/sched/fair.c 		p->recent_used_cpu = prev;
p                6048 kernel/sched/fair.c 	i = select_idle_core(p, sd, target);
p                6052 kernel/sched/fair.c 	i = select_idle_cpu(p, sd, target);
p                6056 kernel/sched/fair.c 	i = select_idle_smt(p, target);
p                6128 kernel/sched/fair.c static unsigned long cpu_util_without(int cpu, struct task_struct *p)
p                6134 kernel/sched/fair.c 	if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
p                6141 kernel/sched/fair.c 	lsub_positive(&util, task_util(p));
p                6190 kernel/sched/fair.c 		if (unlikely(task_on_rq_queued(p) || current == p))
p                6191 kernel/sched/fair.c 			lsub_positive(&estimated, _task_util_est(p));
p                6211 kernel/sched/fair.c static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
p                6226 kernel/sched/fair.c 	sync_entity_load_avg(&p->se);
p                6228 kernel/sched/fair.c 	return !task_fits_capacity(p, min_cap);
p                6235 kernel/sched/fair.c static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu)
p                6246 kernel/sched/fair.c 	if (task_cpu(p) == cpu && dst_cpu != cpu)
p                6247 kernel/sched/fair.c 		sub_positive(&util, task_util(p));
p                6248 kernel/sched/fair.c 	else if (task_cpu(p) != cpu && dst_cpu == cpu)
p                6249 kernel/sched/fair.c 		util += task_util(p);
p                6261 kernel/sched/fair.c 			util_est += _task_util_est(p);
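
cpu_util_next() above estimates what a CPU's utilization would look like if the waking task ended up on dst_cpu: the task's utilization is removed from the CPU it currently counts against and added to the candidate CPU. A compact sketch of that adjustment with toy numbers (names are invented):

    #include <stdio.h>

    static unsigned long cpu_util_next_sketch(int cpu, int task_cpu, int dst_cpu,
                                              unsigned long cpu_util,
                                              unsigned long task_util)
    {
            unsigned long util = cpu_util;

            if (task_cpu == cpu && dst_cpu != cpu)        /* task would leave */
                    util = util > task_util ? util - task_util : 0;
            else if (task_cpu != cpu && dst_cpu == cpu)   /* task would land here */
                    util += task_util;

            return util;
    }

    int main(void)
    {
            /* task (util 100) currently on CPU 1, being considered for CPU 2 */
            printf("CPU1 would drop to %lu\n",
                   cpu_util_next_sketch(1, 1, 2, 350, 100));
            printf("CPU2 would rise to %lu\n",
                   cpu_util_next_sketch(2, 1, 2, 200, 100));
            return 0;
    }

compute_energy() then feeds these hypothetical utilizations into the performance-domain energy model, which is how find_energy_efficient_cpu() compares candidate placements.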
p                6277 kernel/sched/fair.c compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
p                6294 kernel/sched/fair.c 		unsigned long cpu_util, util_cfs = cpu_util_next(cpu, p, dst_cpu);
p                6295 kernel/sched/fair.c 		struct task_struct *tsk = cpu == dst_cpu ? p : NULL;
p                6360 kernel/sched/fair.c static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
p                6384 kernel/sched/fair.c 	sync_entity_load_avg(&p->se);
p                6385 kernel/sched/fair.c 	if (!task_util_est(p))
p                6394 kernel/sched/fair.c 		base_energy_pd = compute_energy(p, -1, pd);
p                6398 kernel/sched/fair.c 			if (!cpumask_test_cpu(cpu, p->cpus_ptr))
p                6402 kernel/sched/fair.c 			util = cpu_util_next(cpu, p, cpu);
p                6409 kernel/sched/fair.c 				prev_delta = compute_energy(p, prev_cpu, pd);
p                6427 kernel/sched/fair.c 			cur_delta = compute_energy(p, max_spare_cap_cpu, pd);
p                6469 kernel/sched/fair.c select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
p                6478 kernel/sched/fair.c 		record_wakee(p);
p                6481 kernel/sched/fair.c 			new_cpu = find_energy_efficient_cpu(p, prev_cpu);
p                6487 kernel/sched/fair.c 		want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) &&
p                6488 kernel/sched/fair.c 			      cpumask_test_cpu(cpu, p->cpus_ptr);
p                6503 kernel/sched/fair.c 				new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
p                6517 kernel/sched/fair.c 		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
p                6521 kernel/sched/fair.c 		new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
p                6538 kernel/sched/fair.c static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
p                6546 kernel/sched/fair.c 	if (p->state == TASK_WAKING) {
p                6547 kernel/sched/fair.c 		struct sched_entity *se = &p->se;
p                6566 kernel/sched/fair.c 	if (p->on_rq == TASK_ON_RQ_MIGRATING) {
p                6571 kernel/sched/fair.c 		lockdep_assert_held(&task_rq(p)->lock);
p                6572 kernel/sched/fair.c 		detach_entity_cfs_rq(&p->se);
p                6583 kernel/sched/fair.c 		remove_entity_load_avg(&p->se);
p                6587 kernel/sched/fair.c 	p->se.avg.last_update_time = 0;
p                6590 kernel/sched/fair.c 	p->se.exec_start = 0;
p                6592 kernel/sched/fair.c 	update_scan_period(p, new_cpu);
p                6595 kernel/sched/fair.c static void task_dead_fair(struct task_struct *p)
p                6597 kernel/sched/fair.c 	remove_entity_load_avg(&p->se);
p                6692 kernel/sched/fair.c static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
p                6695 kernel/sched/fair.c 	struct sched_entity *se = &curr->se, *pse = &p->se;
p                6732 kernel/sched/fair.c 	    likely(!task_has_idle_policy(p)))
p                6739 kernel/sched/fair.c 	if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
p                6780 kernel/sched/fair.c 	struct task_struct *p;
p                6834 kernel/sched/fair.c 	p = task_of(se);
p                6841 kernel/sched/fair.c 	if (prev != p) {
p                6874 kernel/sched/fair.c 	p = task_of(se);
p                6883 kernel/sched/fair.c 	list_move(&p->se.group_node, &rq->cfs_tasks);
p                6887 kernel/sched/fair.c 		hrtick_start_fair(rq, p);
p                6889 kernel/sched/fair.c 	update_misfit_status(p, rq);
p                6891 kernel/sched/fair.c 	return p;
p                6969 kernel/sched/fair.c static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
p                6971 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
p                7152 kernel/sched/fair.c static int task_hot(struct task_struct *p, struct lb_env *env)
p                7158 kernel/sched/fair.c 	if (p->sched_class != &fair_sched_class)
p                7161 kernel/sched/fair.c 	if (unlikely(task_has_idle_policy(p)))
p                7168 kernel/sched/fair.c 			(&p->se == cfs_rq_of(&p->se)->next ||
p                7169 kernel/sched/fair.c 			 &p->se == cfs_rq_of(&p->se)->last))
p                7177 kernel/sched/fair.c 	delta = rq_clock_task(env->src_rq) - p->se.exec_start;
p                7188 kernel/sched/fair.c static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
p                7190 kernel/sched/fair.c 	struct numa_group *numa_group = rcu_dereference(p->numa_group);
p                7197 kernel/sched/fair.c 	if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
p                7207 kernel/sched/fair.c 	if (src_nid == p->numa_preferred_nid) {
p                7215 kernel/sched/fair.c 	if (dst_nid == p->numa_preferred_nid)
p                7224 kernel/sched/fair.c 		src_weight = group_weight(p, src_nid, dist);
p                7225 kernel/sched/fair.c 		dst_weight = group_weight(p, dst_nid, dist);
p                7227 kernel/sched/fair.c 		src_weight = task_weight(p, src_nid, dist);
p                7228 kernel/sched/fair.c 		dst_weight = task_weight(p, dst_nid, dist);
p                7235 kernel/sched/fair.c static inline int migrate_degrades_locality(struct task_struct *p,
p                7246 kernel/sched/fair.c int can_migrate_task(struct task_struct *p, struct lb_env *env)
p                7259 kernel/sched/fair.c 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
p                7262 kernel/sched/fair.c 	if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
p                7265 kernel/sched/fair.c 		schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
p                7282 kernel/sched/fair.c 			if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
p                7295 kernel/sched/fair.c 	if (task_running(env->src_rq, p)) {
p                7296 kernel/sched/fair.c 		schedstat_inc(p->se.statistics.nr_failed_migrations_running);
p                7306 kernel/sched/fair.c 	tsk_cache_hot = migrate_degrades_locality(p, env);
p                7308 kernel/sched/fair.c 		tsk_cache_hot = task_hot(p, env);
p                7314 kernel/sched/fair.c 			schedstat_inc(p->se.statistics.nr_forced_migrations);
p                7319 kernel/sched/fair.c 	schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
p                7326 kernel/sched/fair.c static void detach_task(struct task_struct *p, struct lb_env *env)
p                7330 kernel/sched/fair.c 	deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
p                7331 kernel/sched/fair.c 	set_task_cpu(p, env->dst_cpu);
p                7342 kernel/sched/fair.c 	struct task_struct *p;
p                7346 kernel/sched/fair.c 	list_for_each_entry_reverse(p,
p                7348 kernel/sched/fair.c 		if (!can_migrate_task(p, env))
p                7351 kernel/sched/fair.c 		detach_task(p, env);
p                7360 kernel/sched/fair.c 		return p;
p                7376 kernel/sched/fair.c 	struct task_struct *p;
p                7393 kernel/sched/fair.c 		p = list_last_entry(tasks, struct task_struct, se.group_node);
p                7407 kernel/sched/fair.c 		if (!can_migrate_task(p, env))
p                7410 kernel/sched/fair.c 		load = task_h_load(p);
p                7418 kernel/sched/fair.c 		detach_task(p, env);
p                7419 kernel/sched/fair.c 		list_add(&p->se.group_node, &env->tasks);
p                7443 kernel/sched/fair.c 		list_move(&p->se.group_node, tasks);
p                7459 kernel/sched/fair.c static void attach_task(struct rq *rq, struct task_struct *p)
p                7463 kernel/sched/fair.c 	BUG_ON(task_rq(p) != rq);
p                7464 kernel/sched/fair.c 	activate_task(rq, p, ENQUEUE_NOCLOCK);
p                7465 kernel/sched/fair.c 	check_preempt_curr(rq, p, 0);
p                7472 kernel/sched/fair.c static void attach_one_task(struct rq *rq, struct task_struct *p)
p                7478 kernel/sched/fair.c 	attach_task(rq, p);
p                7489 kernel/sched/fair.c 	struct task_struct *p;
p                7496 kernel/sched/fair.c 		p = list_first_entry(tasks, struct task_struct, se.group_node);
p                7497 kernel/sched/fair.c 		list_del_init(&p->se.group_node);
p                7499 kernel/sched/fair.c 		attach_task(env->dst_rq, p);
p                7665 kernel/sched/fair.c static unsigned long task_h_load(struct task_struct *p)
p                7667 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
p                7670 kernel/sched/fair.c 	return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
p                7686 kernel/sched/fair.c static unsigned long task_h_load(struct task_struct *p)
p                7688 kernel/sched/fair.c 	return p->se.avg.load_avg;
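
With group scheduling enabled, task_h_load() above scales the task's own load by its hierarchy's effective weight: the task's load_avg multiplied by the cfs_rq's propagated h_load and divided by the cfs_rq's total load, with +1 guarding against a zero divisor (without cgroups it is simply the task's load_avg). A toy sketch of that proportion:

    #include <stdint.h>
    #include <stdio.h>

    /* The share of the group's effective weight that this task represents. */
    static uint64_t task_h_load_sketch(uint64_t task_load_avg,
                                       uint64_t cfs_rq_h_load,
                                       uint64_t cfs_rq_load_avg)
    {
            return task_load_avg * cfs_rq_h_load / (cfs_rq_load_avg + 1);
    }

    int main(void)
    {
            /* a task contributing 512 of its group's 1024 load, where the
             * group itself is worth 600 at the root: the task counts as ~299 */
            printf("%llu\n", (unsigned long long)
                   task_h_load_sketch(512, 600, 1024));
            return 0;
    }

This is the load figure the balancer uses in detach_tasks() above when deciding how much weight it has already moved.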
p                9144 kernel/sched/fair.c 	struct task_struct *p = NULL;
p                9200 kernel/sched/fair.c 		p = detach_one_task(&env);
p                9201 kernel/sched/fair.c 		if (p) {
p                9214 kernel/sched/fair.c 	if (p)
p                9215 kernel/sched/fair.c 		attach_one_task(target_rq, p);
p                10001 kernel/sched/fair.c static void task_fork_fair(struct task_struct *p)
p                10004 kernel/sched/fair.c 	struct sched_entity *se = &p->se, *curr;
p                10037 kernel/sched/fair.c prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
p                10039 kernel/sched/fair.c 	if (!task_on_rq_queued(p))
p                10047 kernel/sched/fair.c 	if (rq->curr == p) {
p                10048 kernel/sched/fair.c 		if (p->prio > oldprio)
p                10051 kernel/sched/fair.c 		check_preempt_curr(rq, p, 0);
p                10054 kernel/sched/fair.c static inline bool vruntime_normalized(struct task_struct *p)
p                10056 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
p                10063 kernel/sched/fair.c 	if (p->on_rq)
p                10076 kernel/sched/fair.c 	    (p->state == TASK_WAKING && p->sched_remote_wakeup))
p                10137 kernel/sched/fair.c static void detach_task_cfs_rq(struct task_struct *p)
p                10139 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
p                10142 kernel/sched/fair.c 	if (!vruntime_normalized(p)) {
p                10154 kernel/sched/fair.c static void attach_task_cfs_rq(struct task_struct *p)
p                10156 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
p                10161 kernel/sched/fair.c 	if (!vruntime_normalized(p))
p                10165 kernel/sched/fair.c static void switched_from_fair(struct rq *rq, struct task_struct *p)
p                10167 kernel/sched/fair.c 	detach_task_cfs_rq(p);
p                10170 kernel/sched/fair.c static void switched_to_fair(struct rq *rq, struct task_struct *p)
p                10172 kernel/sched/fair.c 	attach_task_cfs_rq(p);
p                10174 kernel/sched/fair.c 	if (task_on_rq_queued(p)) {
p                10180 kernel/sched/fair.c 		if (rq->curr == p)
p                10183 kernel/sched/fair.c 			check_preempt_curr(rq, p, 0);
p                10192 kernel/sched/fair.c static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
p                10194 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
p                10197 kernel/sched/fair.c 	if (task_on_rq_queued(p)) {
p                10228 kernel/sched/fair.c static void task_set_group_fair(struct task_struct *p)
p                10230 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
p                10232 kernel/sched/fair.c 	set_task_rq(p, task_cpu(p));
p                10236 kernel/sched/fair.c static void task_move_group_fair(struct task_struct *p)
p                10238 kernel/sched/fair.c 	detach_task_cfs_rq(p);
p                10239 kernel/sched/fair.c 	set_task_rq(p, task_cpu(p));
p                10243 kernel/sched/fair.c 	p->se.avg.last_update_time = 0;
p                10245 kernel/sched/fair.c 	attach_task_cfs_rq(p);
p                10248 kernel/sched/fair.c static void task_change_group_fair(struct task_struct *p, int type)
p                10252 kernel/sched/fair.c 		task_set_group_fair(p);
p                10256 kernel/sched/fair.c 		task_move_group_fair(p);
p                10522 kernel/sched/fair.c void show_numa_stats(struct task_struct *p, struct seq_file *m)
p                10529 kernel/sched/fair.c 	ng = rcu_dereference(p->numa_group);
p                10531 kernel/sched/fair.c 		if (p->numa_faults) {
p                10532 kernel/sched/fair.c 			tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
p                10533 kernel/sched/fair.c 			tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
p                 364 kernel/sched/idle.c select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
p                 366 kernel/sched/idle.c 	return task_cpu(p); /* IDLE tasks are never migrated */
p                 379 kernel/sched/idle.c static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
p                 412 kernel/sched/idle.c dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
p                 432 kernel/sched/idle.c static void switched_to_idle(struct rq *rq, struct task_struct *p)
p                 438 kernel/sched/idle.c prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
p                  86 kernel/sched/membarrier.c 		struct task_struct *p;
p                 108 kernel/sched/membarrier.c 		p = rcu_dereference(cpu_rq(cpu)->curr);
p                 109 kernel/sched/membarrier.c 		if (p->flags & PF_KTHREAD)
p                 165 kernel/sched/membarrier.c 		struct task_struct *p;
p                 177 kernel/sched/membarrier.c 		p = rcu_dereference(cpu_rq(cpu)->curr);
p                 178 kernel/sched/membarrier.c 		if (p && p->mm == mm)
p                 241 kernel/sched/membarrier.c 		struct task_struct *p;
p                 243 kernel/sched/membarrier.c 		p = rcu_dereference(rq->curr);
p                 244 kernel/sched/membarrier.c 		if (p && p->mm == mm)
p                 261 kernel/sched/membarrier.c 	struct task_struct *p = current;
p                 262 kernel/sched/membarrier.c 	struct mm_struct *mm = p->mm;
p                 280 kernel/sched/membarrier.c 	struct task_struct *p = current;
p                 281 kernel/sched/membarrier.c 	struct mm_struct *mm = p->mm;
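
The membarrier.c hits all follow one filtering pattern: walk each CPU's current task under RCU and only target CPUs whose running task belongs to the calling process, skipping kernel threads (PF_KTHREAD). A simplified user-space sketch of that filtering step; the structures here stand in for rq->curr and are not the kernel's:

    #include <stdio.h>

    #define NR_CPUS 4
    #define PF_KTHREAD_SKETCH 0x1

    struct fake_task {
            int flags;
            const void *mm;     /* address-space identity */
    };

    int main(void)
    {
            int my_mm;                          /* stands in for current->mm */
            struct fake_task curr[NR_CPUS] = {
                    { 0, &my_mm },              /* CPU0 runs one of our threads */
                    { PF_KTHREAD_SKETCH, NULL },/* CPU1 runs a kernel thread */
                    { 0, (void *)0x1 },         /* CPU2 runs another process */
                    { 0, &my_mm },              /* CPU3 runs one of our threads */
            };

            for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                    if (curr[cpu].flags & PF_KTHREAD_SKETCH)
                            continue;
                    if (curr[cpu].mm != &my_mm)
                            continue;
                    printf("would IPI CPU %d\n", cpu);
            }
            return 0;
    }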
p                 239 kernel/sched/rt.c 	struct task_struct *p = rt_task_of(rt_se);
p                 241 kernel/sched/rt.c 	return task_rq(p);
p                 318 kernel/sched/rt.c 	struct task_struct *p;
p                 323 kernel/sched/rt.c 	p = rt_task_of(rt_se);
p                 327 kernel/sched/rt.c 	if (p->nr_cpus_allowed > 1)
p                 335 kernel/sched/rt.c 	struct task_struct *p;
p                 340 kernel/sched/rt.c 	p = rt_task_of(rt_se);
p                 344 kernel/sched/rt.c 	if (p->nr_cpus_allowed > 1)
p                 374 kernel/sched/rt.c static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
p                 376 kernel/sched/rt.c 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
p                 377 kernel/sched/rt.c 	plist_node_init(&p->pushable_tasks, p->prio);
p                 378 kernel/sched/rt.c 	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
p                 381 kernel/sched/rt.c 	if (p->prio < rq->rt.highest_prio.next)
p                 382 kernel/sched/rt.c 		rq->rt.highest_prio.next = p->prio;
p                 385 kernel/sched/rt.c static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
p                 387 kernel/sched/rt.c 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
p                 391 kernel/sched/rt.c 		p = plist_first_entry(&rq->rt.pushable_tasks,
p                 393 kernel/sched/rt.c 		rq->rt.highest_prio.next = p->prio;
p                 400 kernel/sched/rt.c static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
p                 404 kernel/sched/rt.c static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
p                 531 kernel/sched/rt.c 	struct task_struct *p;
p                 536 kernel/sched/rt.c 	p = rt_task_of(rt_se);
p                 537 kernel/sched/rt.c 	return p->prio != p->normal_prio;
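
enqueue_pushable_task()/dequeue_pushable_task() above keep RT tasks that could run elsewhere on a priority-ordered list (a kernel plist) and cache the priority of the list head in rt.highest_prio.next, so other CPUs can cheaply decide whether pulling is worthwhile. A rough user-space analogue using a sorted array; the names, the array, and the sentinel value are invented for illustration and no bounds checking is done:

    #include <stdio.h>

    #define MAX_PUSHABLE 8

    /* lower number = higher priority, as with internal RT priorities */
    static int pushable[MAX_PUSHABLE];
    static int nr_pushable;
    static int highest_prio_next = 100;   /* "nothing pushable" sentinel */

    static void enqueue_pushable(int prio)
    {
            int i = nr_pushable++;

            /* keep the array sorted so the head is always the best candidate */
            while (i > 0 && pushable[i - 1] > prio) {
                    pushable[i] = pushable[i - 1];
                    i--;
            }
            pushable[i] = prio;

            if (prio < highest_prio_next)
                    highest_prio_next = prio;
    }

    int main(void)
    {
            enqueue_pushable(40);
            enqueue_pushable(10);
            enqueue_pushable(25);
            printf("next pushable prio: %d (head %d)\n",
                   highest_prio_next, pushable[0]);
            return 0;
    }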
p                1329 kernel/sched/rt.c enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
p                1331 kernel/sched/rt.c 	struct sched_rt_entity *rt_se = &p->rt;
p                1338 kernel/sched/rt.c 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
p                1339 kernel/sched/rt.c 		enqueue_pushable_task(rq, p);
p                1342 kernel/sched/rt.c static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
p                1344 kernel/sched/rt.c 	struct sched_rt_entity *rt_se = &p->rt;
p                1349 kernel/sched/rt.c 	dequeue_pushable_task(rq, p);
p                1370 kernel/sched/rt.c static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
p                1372 kernel/sched/rt.c 	struct sched_rt_entity *rt_se = &p->rt;
p                1390 kernel/sched/rt.c select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
p                1428 kernel/sched/rt.c 	     curr->prio <= p->prio)) {
p                1429 kernel/sched/rt.c 		int target = find_lowest_rq(p);
p                1436 kernel/sched/rt.c 		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
p                1445 kernel/sched/rt.c static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
p                1459 kernel/sched/rt.c 	if (p->nr_cpus_allowed != 1
p                1460 kernel/sched/rt.c 	    && cpupri_find(&rq->rd->cpupri, p, NULL))
p                1468 kernel/sched/rt.c 	requeue_task_rt(rq, p, 1);
p                1472 kernel/sched/rt.c static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
p                1474 kernel/sched/rt.c 	if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
p                1493 kernel/sched/rt.c static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
p                1495 kernel/sched/rt.c 	if (p->prio < rq->curr->prio) {
p                1513 kernel/sched/rt.c 	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
p                1514 kernel/sched/rt.c 		check_preempt_equal_prio(rq, p);
p                1518 kernel/sched/rt.c static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
p                1520 kernel/sched/rt.c 	p->se.exec_start = rq_clock_task(rq);
p                1523 kernel/sched/rt.c 	dequeue_pushable_task(rq, p);
p                1573 kernel/sched/rt.c 	struct task_struct *p;
p                1580 kernel/sched/rt.c 	p = _pick_next_task_rt(rq);
p                1581 kernel/sched/rt.c 	set_next_task_rt(rq, p, true);
p                1582 kernel/sched/rt.c 	return p;
p                1585 kernel/sched/rt.c static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
p                1595 kernel/sched/rt.c 	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
p                1596 kernel/sched/rt.c 		enqueue_pushable_task(rq, p);
p                1604 kernel/sched/rt.c static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
p                1606 kernel/sched/rt.c 	if (!task_running(rq, p) &&
p                1607 kernel/sched/rt.c 	    cpumask_test_cpu(cpu, p->cpus_ptr))
p                1620 kernel/sched/rt.c 	struct task_struct *p;
p                1625 kernel/sched/rt.c 	plist_for_each_entry(p, head, pushable_tasks) {
p                1626 kernel/sched/rt.c 		if (pick_rt_task(rq, p, cpu))
p                1627 kernel/sched/rt.c 			return p;
p                1769 kernel/sched/rt.c 	struct task_struct *p;
p                1774 kernel/sched/rt.c 	p = plist_first_entry(&rq->rt.pushable_tasks,
p                1777 kernel/sched/rt.c 	BUG_ON(rq->cpu != task_cpu(p));
p                1778 kernel/sched/rt.c 	BUG_ON(task_current(rq, p));
p                1779 kernel/sched/rt.c 	BUG_ON(p->nr_cpus_allowed <= 1);
p                1781 kernel/sched/rt.c 	BUG_ON(!task_on_rq_queued(p));
p                1782 kernel/sched/rt.c 	BUG_ON(!rt_task(p));
p                1784 kernel/sched/rt.c 	return p;
p                2053 kernel/sched/rt.c 	struct task_struct *p;
p                2106 kernel/sched/rt.c 		p = pick_highest_pushable_task(src_rq, this_cpu);
p                2112 kernel/sched/rt.c 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
p                2113 kernel/sched/rt.c 			WARN_ON(p == src_rq->curr);
p                2114 kernel/sched/rt.c 			WARN_ON(!task_on_rq_queued(p));
p                2124 kernel/sched/rt.c 			if (p->prio < src_rq->curr->prio)
p                2129 kernel/sched/rt.c 			deactivate_task(src_rq, p, 0);
p                2130 kernel/sched/rt.c 			set_task_cpu(p, this_cpu);
p                2131 kernel/sched/rt.c 			activate_task(this_rq, p, 0);
p                2151 kernel/sched/rt.c static void task_woken_rt(struct rq *rq, struct task_struct *p)
p                2153 kernel/sched/rt.c 	if (!task_running(rq, p) &&
p                2155 kernel/sched/rt.c 	    p->nr_cpus_allowed > 1 &&
p                2158 kernel/sched/rt.c 	     rq->curr->prio <= p->prio))
p                2188 kernel/sched/rt.c static void switched_from_rt(struct rq *rq, struct task_struct *p)
p                2197 kernel/sched/rt.c 	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
p                2219 kernel/sched/rt.c static void switched_to_rt(struct rq *rq, struct task_struct *p)
p                2228 kernel/sched/rt.c 	if (task_on_rq_queued(p) && rq->curr != p) {
p                2230 kernel/sched/rt.c 		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
p                2233 kernel/sched/rt.c 		if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
p                2243 kernel/sched/rt.c prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
p                2245 kernel/sched/rt.c 	if (!task_on_rq_queued(p))
p                2248 kernel/sched/rt.c 	if (rq->curr == p) {
p                2254 kernel/sched/rt.c 		if (oldprio < p->prio)
p                2261 kernel/sched/rt.c 		if (p->prio > rq->rt.highest_prio.curr)
p                2265 kernel/sched/rt.c 		if (oldprio < p->prio)
p                2274 kernel/sched/rt.c 		if (p->prio < rq->curr->prio)
p                2280 kernel/sched/rt.c static void watchdog(struct rq *rq, struct task_struct *p)
p                2285 kernel/sched/rt.c 	soft = task_rlimit(p, RLIMIT_RTTIME);
p                2286 kernel/sched/rt.c 	hard = task_rlimit_max(p, RLIMIT_RTTIME);
p                2291 kernel/sched/rt.c 		if (p->rt.watchdog_stamp != jiffies) {
p                2292 kernel/sched/rt.c 			p->rt.timeout++;
p                2293 kernel/sched/rt.c 			p->rt.watchdog_stamp = jiffies;
p                2297 kernel/sched/rt.c 		if (p->rt.timeout > next) {
p                2298 kernel/sched/rt.c 			posix_cputimers_rt_watchdog(&p->posix_cputimers,
p                2299 kernel/sched/rt.c 						    p->se.sum_exec_runtime);
p                2304 kernel/sched/rt.c static inline void watchdog(struct rq *rq, struct task_struct *p) { }
p                2315 kernel/sched/rt.c static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
p                2317 kernel/sched/rt.c 	struct sched_rt_entity *rt_se = &p->rt;
p                2322 kernel/sched/rt.c 	watchdog(rq, p);
p                2328 kernel/sched/rt.c 	if (p->policy != SCHED_RR)
p                2331 kernel/sched/rt.c 	if (--p->rt.time_slice)
p                2334 kernel/sched/rt.c 	p->rt.time_slice = sched_rr_timeslice;
p                2342 kernel/sched/rt.c 			requeue_task_rt(rq, p, 0);
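
The task_tick_rt() lines show the SCHED_RR round-robin bookkeeping: every tick the running task's time_slice is decremented, and once it reaches zero the slice is refilled and the task is requeued behind other tasks of the same priority so they get their turn. A minimal sketch of that per-tick logic (the timeslice constant is a placeholder; the real value is a tunable):

    #include <stdio.h>

    #define RR_TIMESLICE_TICKS 100   /* placeholder for the sched_rr timeslice */

    struct rr_task {
            const char *name;
            int time_slice;
    };

    /* Returns 1 when the task should be moved to the tail of its prio queue. */
    static int rr_tick(struct rr_task *t)
    {
            if (--t->time_slice)
                    return 0;
            t->time_slice = RR_TIMESLICE_TICKS;
            return 1;
    }

    int main(void)
    {
            struct rr_task t = { "worker", 3 };

            for (int tick = 1; tick <= 4; tick++)
                    if (rr_tick(&t))
                            printf("tick %d: requeue %s, slice refilled to %d\n",
                                   tick, t.name, t.time_slice);
            return 0;
    }

SCHED_FIFO tasks skip all of this (the p->policy != SCHED_RR early return above), which is why they run until they block or are preempted by a higher priority.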
p                2405 kernel/sched/rt.c 	struct task_struct *g, *p;
p                2413 kernel/sched/rt.c 	for_each_process_thread(g, p) {
p                2414 kernel/sched/rt.c 		if (rt_task(p) && task_group(p) == tg)
p                 181 kernel/sched/sched.h static inline int task_has_idle_policy(struct task_struct *p)
p                 183 kernel/sched/sched.h 	return idle_policy(p->policy);
p                 186 kernel/sched/sched.h static inline int task_has_rt_policy(struct task_struct *p)
p                 188 kernel/sched/sched.h 	return rt_policy(p->policy);
p                 191 kernel/sched/sched.h static inline int task_has_dl_policy(struct task_struct *p)
p                 193 kernel/sched/sched.h 	return dl_policy(p->policy);
p                 248 kernel/sched/sched.h void __dl_clear_params(struct task_struct *p);
p                 314 kernel/sched/sched.h extern void dl_change_utilization(struct task_struct *p, u64 new_bw);
p                 318 kernel/sched/sched.h extern int  sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
p                 319 kernel/sched/sched.h extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
p                 320 kernel/sched/sched.h extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
p                 322 kernel/sched/sched.h extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
p                 323 kernel/sched/sched.h extern int  dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
p                1053 kernel/sched/sched.h #define task_rq(p)		cpu_rq(task_cpu(p))
p                1177 kernel/sched/sched.h struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
p                1180 kernel/sched/sched.h struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
p                1181 kernel/sched/sched.h 	__acquires(p->pi_lock)
p                1192 kernel/sched/sched.h task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
p                1194 kernel/sched/sched.h 	__releases(p->pi_lock)
p                1198 kernel/sched/sched.h 	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
p                1300 kernel/sched/sched.h extern void sched_setnuma(struct task_struct *p, int node);
p                1301 kernel/sched/sched.h extern int migrate_task_to(struct task_struct *p, int cpu);
p                1302 kernel/sched/sched.h extern int migrate_swap(struct task_struct *p, struct task_struct *t,
p                1304 kernel/sched/sched.h extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
p                1307 kernel/sched/sched.h init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
p                1331 kernel/sched/sched.h #define rcu_dereference_check_sched_domain(p) \
p                1332 kernel/sched/sched.h 	rcu_dereference_check((p), \
p                1496 kernel/sched/sched.h static inline struct task_group *task_group(struct task_struct *p)
p                1498 kernel/sched/sched.h 	return p->sched_task_group;
p                1502 kernel/sched/sched.h static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
p                1505 kernel/sched/sched.h 	struct task_group *tg = task_group(p);
p                1509 kernel/sched/sched.h 	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
p                1510 kernel/sched/sched.h 	p->se.cfs_rq = tg->cfs_rq[cpu];
p                1511 kernel/sched/sched.h 	p->se.parent = tg->se[cpu];
p                1515 kernel/sched/sched.h 	p->rt.rt_rq  = tg->rt_rq[cpu];
p                1516 kernel/sched/sched.h 	p->rt.parent = tg->rt_se[cpu];
p                1522 kernel/sched/sched.h static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
p                1523 kernel/sched/sched.h static inline struct task_group *task_group(struct task_struct *p)
p                1530 kernel/sched/sched.h static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
p                1532 kernel/sched/sched.h 	set_task_rq(p, cpu);
p                1541 kernel/sched/sched.h 	WRITE_ONCE(p->cpu, cpu);
p                1543 kernel/sched/sched.h 	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
p                1545 kernel/sched/sched.h 	p->wake_cpu = cpu;
p                1623 kernel/sched/sched.h static inline int task_current(struct rq *rq, struct task_struct *p)
p                1625 kernel/sched/sched.h 	return rq->curr == p;
p                1628 kernel/sched/sched.h static inline int task_running(struct rq *rq, struct task_struct *p)
p                1631 kernel/sched/sched.h 	return p->on_cpu;
p                1633 kernel/sched/sched.h 	return task_current(rq, p);
p                1637 kernel/sched/sched.h static inline int task_on_rq_queued(struct task_struct *p)
p                1639 kernel/sched/sched.h 	return p->on_rq == TASK_ON_RQ_QUEUED;
p                1642 kernel/sched/sched.h static inline int task_on_rq_migrating(struct task_struct *p)
p                1644 kernel/sched/sched.h 	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
p                1715 kernel/sched/sched.h 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
p                1716 kernel/sched/sched.h 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
p                1718 kernel/sched/sched.h 	bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt);
p                1720 kernel/sched/sched.h 	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
p                1736 kernel/sched/sched.h 	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
p                1737 kernel/sched/sched.h 	void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
p                1741 kernel/sched/sched.h 	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
p                1742 kernel/sched/sched.h 	void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
p                1746 kernel/sched/sched.h 	void (*set_cpus_allowed)(struct task_struct *p,
p                1753 kernel/sched/sched.h 	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
p                1754 kernel/sched/sched.h 	void (*task_fork)(struct task_struct *p);
p                1755 kernel/sched/sched.h 	void (*task_dead)(struct task_struct *p);
p                1776 kernel/sched/sched.h 	void (*task_change_group)(struct task_struct *p, int type);
p                1836 kernel/sched/sched.h extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
p                1875 kernel/sched/sched.h extern void reweight_task(struct task_struct *p, int prio);
p                1895 kernel/sched/sched.h extern void post_init_entity_util_avg(struct task_struct *p);
p                1951 kernel/sched/sched.h extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
p                1952 kernel/sched/sched.h extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
p                1954 kernel/sched/sched.h extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
p                2192 kernel/sched/sched.h show_numa_stats(struct task_struct *p, struct seq_file *m);
p                2318 kernel/sched/sched.h unsigned int uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
p                2322 kernel/sched/sched.h 			      struct task_struct *p)
p                2327 kernel/sched/sched.h 	if (p) {
p                2328 kernel/sched/sched.h 		min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));
p                2329 kernel/sched/sched.h 		max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX));
p                2349 kernel/sched/sched.h 					    struct task_struct *p)
p                2393 kernel/sched/sched.h 				 struct task_struct *p);
p                2424 kernel/sched/sched.h 				 struct task_struct *p)
p                  65 kernel/sched/stats.h static inline void psi_enqueue(struct task_struct *p, bool wakeup)
p                  72 kernel/sched/stats.h 	if (!wakeup || p->sched_psi_wake_requeue) {
p                  73 kernel/sched/stats.h 		if (p->flags & PF_MEMSTALL)
p                  75 kernel/sched/stats.h 		if (p->sched_psi_wake_requeue)
p                  76 kernel/sched/stats.h 			p->sched_psi_wake_requeue = 0;
p                  78 kernel/sched/stats.h 		if (p->in_iowait)
p                  82 kernel/sched/stats.h 	psi_task_change(p, clear, set);
p                  85 kernel/sched/stats.h static inline void psi_dequeue(struct task_struct *p, bool sleep)
p                  93 kernel/sched/stats.h 		if (p->flags & PF_MEMSTALL)
p                  96 kernel/sched/stats.h 		if (p->in_iowait)
p                 100 kernel/sched/stats.h 	psi_task_change(p, clear, set);
p                 103 kernel/sched/stats.h static inline void psi_ttwu_dequeue(struct task_struct *p)
p                 112 kernel/sched/stats.h 	if (unlikely(p->in_iowait || (p->flags & PF_MEMSTALL))) {
p                 117 kernel/sched/stats.h 		if (p->in_iowait)
p                 119 kernel/sched/stats.h 		if (p->flags & PF_MEMSTALL)
p                 122 kernel/sched/stats.h 		rq = __task_rq_lock(p, &rf);
p                 123 kernel/sched/stats.h 		psi_task_change(p, clear, 0);
p                 124 kernel/sched/stats.h 		p->sched_psi_wake_requeue = 1;
p                 138 kernel/sched/stats.h static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
p                 139 kernel/sched/stats.h static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
p                 140 kernel/sched/stats.h static inline void psi_ttwu_dequeue(struct task_struct *p) {}
p                  14 kernel/sched/stop_task.c select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
p                  16 kernel/sched/stop_task.c 	return task_cpu(p); /* stop tasks as never migrate */
p                  27 kernel/sched/stop_task.c check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
p                  50 kernel/sched/stop_task.c enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
p                  56 kernel/sched/stop_task.c dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
p                  97 kernel/sched/stop_task.c static void switched_to_stop(struct rq *rq, struct task_struct *p)
p                 103 kernel/sched/stop_task.c prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
p                 153 kernel/sched/wait_bit.c wait_queue_head_t *__var_waitqueue(void *p)
p                 155 kernel/sched/wait_bit.c 	return bit_wait_table + hash_ptr(p, WAIT_TABLE_BITS);
p                 152 kernel/signal.c #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
p                 899 kernel/signal.c static bool prepare_signal(int sig, struct task_struct *p, bool force)
p                 901 kernel/signal.c 	struct signal_struct *signal = p->signal;
p                 917 kernel/signal.c 		for_each_thread(p, t)
p                 926 kernel/signal.c 		for_each_thread(p, t) {
p                 961 kernel/signal.c 	return !sig_ignored(p, sig, force);
p                 972 kernel/signal.c static inline bool wants_signal(int sig, struct task_struct *p)
p                 974 kernel/signal.c 	if (sigismember(&p->blocked, sig))
p                 977 kernel/signal.c 	if (p->flags & PF_EXITING)
p                 983 kernel/signal.c 	if (task_is_stopped_or_traced(p))
p                 986 kernel/signal.c 	return task_curr(p) || !signal_pending(p);
p                 989 kernel/signal.c static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
p                 991 kernel/signal.c 	struct signal_struct *signal = p->signal;
p                1000 kernel/signal.c 	if (wants_signal(sig, p))
p                1001 kernel/signal.c 		t = p;
p                1002 kernel/signal.c 	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
p                1030 kernel/signal.c 	if (sig_fatal(p, sig) &&
p                1033 kernel/signal.c 	    (sig == SIGKILL || !p->ptrace)) {
p                1047 kernel/signal.c 			t = p;
p                1052 kernel/signal.c 			} while_each_thread(p, t);
p                1278 kernel/signal.c __group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
p                1280 kernel/signal.c 	return send_signal(sig, info, p, PIDTYPE_TGID);
p                1283 kernel/signal.c int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
p                1289 kernel/signal.c 	if (lock_task_sighand(p, &flags)) {
p                1290 kernel/signal.c 		ret = send_signal(sig, info, p, type);
p                1291 kernel/signal.c 		unlock_task_sighand(p, &flags);
p                1347 kernel/signal.c int zap_other_threads(struct task_struct *p)
p                1349 kernel/signal.c 	struct task_struct *t = p;
p                1352 kernel/signal.c 	p->signal->group_stop_count = 0;
p                1354 kernel/signal.c 	while_each_thread(p, t) {
p                1404 kernel/signal.c 			struct task_struct *p, enum pid_type type)
p                1409 kernel/signal.c 	ret = check_kill_permission(sig, info, p);
p                1413 kernel/signal.c 		ret = do_send_sig_info(sig, info, p, type);
p                1425 kernel/signal.c 	struct task_struct *p = NULL;
p                1430 kernel/signal.c 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
p                1431 kernel/signal.c 		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
p                1434 kernel/signal.c 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
p                1441 kernel/signal.c 	struct task_struct *p;
p                1445 kernel/signal.c 		p = pid_task(pid, PIDTYPE_PID);
p                1446 kernel/signal.c 		if (p)
p                1447 kernel/signal.c 			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
p                1449 kernel/signal.c 		if (likely(!p || error != -ESRCH))
p                1509 kernel/signal.c 	struct task_struct *p;
p                1523 kernel/signal.c 	p = pid_task(pid, PIDTYPE_PID);
p                1524 kernel/signal.c 	if (!p) {
p                1528 kernel/signal.c 	if (!kill_as_cred_perm(cred, p)) {
p                1532 kernel/signal.c 	ret = security_task_kill(p, &info, sig, cred);
p                1537 kernel/signal.c 		if (lock_task_sighand(p, &flags)) {
p                1538 kernel/signal.c 			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
p                1539 kernel/signal.c 			unlock_task_sighand(p, &flags);
p                1577 kernel/signal.c 		struct task_struct * p;
p                1579 kernel/signal.c 		for_each_process(p) {
p                1580 kernel/signal.c 			if (task_pid_vnr(p) > 1 &&
p                1581 kernel/signal.c 					!same_thread_group(p, current)) {
p                1582 kernel/signal.c 				int err = group_send_sig_info(sig, info, p,
p                1600 kernel/signal.c int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
p                1609 kernel/signal.c 	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
p                1617 kernel/signal.c send_sig(int sig, struct task_struct *p, int priv)
p                1619 kernel/signal.c 	return send_sig_info(sig, __si_special(priv), p);
p                1645 kernel/signal.c 	struct task_struct *p = current;
p                1649 kernel/signal.c 		spin_lock_irqsave(&p->sighand->siglock, flags);
p                1650 kernel/signal.c 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
p                1651 kernel/signal.c 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
p                3660 kernel/signal.c 	struct pid_namespace *p = ns_of_pid(pid);
p                3663 kernel/signal.c 		if (!p)
p                3665 kernel/signal.c 		if (p == active)
p                3667 kernel/signal.c 		p = p->parent;
p                3772 kernel/signal.c 	struct task_struct *p;
p                3776 kernel/signal.c 	p = find_task_by_vpid(pid);
p                3777 kernel/signal.c 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
p                3778 kernel/signal.c 		error = check_kill_permission(sig, info, p);
p                3784 kernel/signal.c 			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
p                3960 kernel/signal.c 	struct task_struct *p = current, *t;
p                3967 kernel/signal.c 	k = &p->sighand->action[sig-1];
p                3969 kernel/signal.c 	spin_lock_irq(&p->sighand->siglock);
p                3990 kernel/signal.c 		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
p                3993 kernel/signal.c 			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
p                3994 kernel/signal.c 			for_each_thread(p, t)
p                3999 kernel/signal.c 	spin_unlock_irq(&p->sighand->siglock);
p                 156 kernel/sys.c   static bool set_one_prio_perm(struct task_struct *p)
p                 158 kernel/sys.c   	const struct cred *cred = current_cred(), *pcred = __task_cred(p);
p                 172 kernel/sys.c   static int set_one_prio(struct task_struct *p, int niceval, int error)
p                 176 kernel/sys.c   	if (!set_one_prio_perm(p)) {
p                 180 kernel/sys.c   	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
p                 184 kernel/sys.c   	no_nice = security_task_setnice(p, niceval);
p                 191 kernel/sys.c   	set_user_nice(p, niceval);
p                 198 kernel/sys.c   	struct task_struct *g, *p;
p                 220 kernel/sys.c   			p = find_task_by_vpid(who);
p                 222 kernel/sys.c   			p = current;
p                 223 kernel/sys.c   		if (p)
p                 224 kernel/sys.c   			error = set_one_prio(p, niceval, error);
p                 231 kernel/sys.c   		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
p                 232 kernel/sys.c   			error = set_one_prio(p, niceval, error);
p                 233 kernel/sys.c   		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
p                 245 kernel/sys.c   		do_each_thread(g, p) {
p                 246 kernel/sys.c   			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
p                 247 kernel/sys.c   				error = set_one_prio(p, niceval, error);
p                 248 kernel/sys.c   		} while_each_thread(g, p);
p                 268 kernel/sys.c   	struct task_struct *g, *p;
p                 283 kernel/sys.c   			p = find_task_by_vpid(who);
p                 285 kernel/sys.c   			p = current;
p                 286 kernel/sys.c   		if (p) {
p                 287 kernel/sys.c   			niceval = nice_to_rlimit(task_nice(p));
p                 297 kernel/sys.c   		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
p                 298 kernel/sys.c   			niceval = nice_to_rlimit(task_nice(p));
p                 301 kernel/sys.c   		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
p                 313 kernel/sys.c   		do_each_thread(g, p) {
p                 314 kernel/sys.c   			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
p                 315 kernel/sys.c   				niceval = nice_to_rlimit(task_nice(p));
p                 319 kernel/sys.c   		} while_each_thread(g, p);
p                1008 kernel/sys.c   	struct task_struct *p;
p                1027 kernel/sys.c   	p = find_task_by_vpid(pid);
p                1028 kernel/sys.c   	if (!p)
p                1032 kernel/sys.c   	if (!thread_group_leader(p))
p                1035 kernel/sys.c   	if (same_thread_group(p->real_parent, group_leader)) {
p                1037 kernel/sys.c   		if (task_session(p) != task_session(group_leader))
p                1040 kernel/sys.c   		if (!(p->flags & PF_FORKNOEXEC))
p                1044 kernel/sys.c   		if (p != group_leader)
p                1049 kernel/sys.c   	if (p->signal->leader)
p                1052 kernel/sys.c   	pgrp = task_pid(p);
p                1062 kernel/sys.c   	err = security_task_setpgid(p, pgid);
p                1066 kernel/sys.c   	if (task_pgrp(p) != pgrp)
p                1067 kernel/sys.c   		change_pid(p, PIDTYPE_PGID, pgrp);
p                1079 kernel/sys.c   	struct task_struct *p;
p                1088 kernel/sys.c   		p = find_task_by_vpid(pid);
p                1089 kernel/sys.c   		if (!p)
p                1091 kernel/sys.c   		grp = task_pgrp(p);
p                1095 kernel/sys.c   		retval = security_task_getpgid(p);
p                1121 kernel/sys.c   	struct task_struct *p;
p                1130 kernel/sys.c   		p = find_task_by_vpid(pid);
p                1131 kernel/sys.c   		if (!p)
p                1133 kernel/sys.c   		sid = task_session(p);
p                1137 kernel/sys.c   		retval = security_task_getsid(p);
p                1705 kernel/sys.c   void getrusage(struct task_struct *p, int who, struct rusage *r)
p                1717 kernel/sys.c   		accumulate_thread_rusage(p, r);
p                1718 kernel/sys.c   		maxrss = p->signal->maxrss;
p                1722 kernel/sys.c   	if (!lock_task_sighand(p, &flags))
p                1728 kernel/sys.c   		utime = p->signal->cutime;
p                1729 kernel/sys.c   		stime = p->signal->cstime;
p                1730 kernel/sys.c   		r->ru_nvcsw = p->signal->cnvcsw;
p                1731 kernel/sys.c   		r->ru_nivcsw = p->signal->cnivcsw;
p                1732 kernel/sys.c   		r->ru_minflt = p->signal->cmin_flt;
p                1733 kernel/sys.c   		r->ru_majflt = p->signal->cmaj_flt;
p                1734 kernel/sys.c   		r->ru_inblock = p->signal->cinblock;
p                1735 kernel/sys.c   		r->ru_oublock = p->signal->coublock;
p                1736 kernel/sys.c   		maxrss = p->signal->cmaxrss;
p                1743 kernel/sys.c   		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
p                1746 kernel/sys.c   		r->ru_nvcsw += p->signal->nvcsw;
p                1747 kernel/sys.c   		r->ru_nivcsw += p->signal->nivcsw;
p                1748 kernel/sys.c   		r->ru_minflt += p->signal->min_flt;
p                1749 kernel/sys.c   		r->ru_majflt += p->signal->maj_flt;
p                1750 kernel/sys.c   		r->ru_inblock += p->signal->inblock;
p                1751 kernel/sys.c   		r->ru_oublock += p->signal->oublock;
p                1752 kernel/sys.c   		if (maxrss < p->signal->maxrss)
p                1753 kernel/sys.c   			maxrss = p->signal->maxrss;
p                1754 kernel/sys.c   		t = p;
p                1757 kernel/sys.c   		} while_each_thread(p, t);
p                1763 kernel/sys.c   	unlock_task_sighand(p, &flags);
p                1770 kernel/sys.c   		struct mm_struct *mm = get_task_mm(p);
p                2233 kernel/sys.c   static int propagate_has_child_subreaper(struct task_struct *p, void *data)
p                2243 kernel/sys.c   	if (p->signal->has_child_subreaper ||
p                2244 kernel/sys.c   	    is_child_reaper(task_pid(p)))
p                2247 kernel/sys.c   	p->signal->has_child_subreaper = 1;
p                2005 kernel/sysctl.c 	char __user *p;
p                2029 kernel/sysctl.c 		p = buffer;
p                2030 kernel/sysctl.c 		while ((p - buffer) < *lenp && len < maxlen - 1) {
p                2031 kernel/sysctl.c 			if (get_user(c, p++))
p                2204 kernel/sysctl.c 	char *p, tmp[TMPBUFLEN];
p                2216 kernel/sysctl.c 	p = tmp;
p                2217 kernel/sysctl.c 	if (*p == '-' && *size > 1) {
p                2219 kernel/sysctl.c 		p++;
p                2222 kernel/sysctl.c 	if (!isdigit(*p))
p                2225 kernel/sysctl.c 	if (strtoul_lenient(p, &p, 0, val))
p                2228 kernel/sysctl.c 	len = p - tmp;
p                2236 kernel/sysctl.c 	if (len < *size && perm_tr_len && !memchr(perm_tr, *p, perm_tr_len))
p                2240 kernel/sysctl.c 		*tr = *p;
p                2263 kernel/sysctl.c 	char tmp[TMPBUFLEN], *p = tmp;
p                2265 kernel/sysctl.c 	sprintf(p, "%s%lu", neg ? "-" : "", val);
p                2342 kernel/sysctl.c 	char *kbuf = NULL, *p;
p                2362 kernel/sysctl.c 		p = kbuf = memdup_user_nul(buffer, left);
p                2372 kernel/sysctl.c 			left -= proc_skip_spaces(&p);
p                2376 kernel/sysctl.c 			err = proc_get_long(&p, &left, &lval, &neg,
p                2403 kernel/sysctl.c 		left -= proc_skip_spaces(&p);
p                2438 kernel/sysctl.c 	char *kbuf = NULL, *p;
p                2448 kernel/sysctl.c 	p = kbuf = memdup_user_nul(buffer, left);
p                2452 kernel/sysctl.c 	left -= proc_skip_spaces(&p);
p                2458 kernel/sysctl.c 	err = proc_get_long(&p, &left, &lval, &neg,
p                2472 kernel/sysctl.c 		left -= proc_skip_spaces(&p);
p                2856 kernel/sysctl.c 	char *kbuf = NULL, *p;
p                2875 kernel/sysctl.c 		p = kbuf = memdup_user_nul(buffer, left);
p                2886 kernel/sysctl.c 			left -= proc_skip_spaces(&p);
p                2890 kernel/sysctl.c 			err = proc_get_long(&p, &left, &val, &neg,
p                2919 kernel/sysctl.c 		left -= proc_skip_spaces(&p);
p                3183 kernel/sysctl.c 		char *kbuf, *p;
p                3192 kernel/sysctl.c 		p = kbuf = memdup_user_nul(buffer, left);
p                3201 kernel/sysctl.c 		proc_skip_char(&p, &left, '\n');
p                3209 kernel/sysctl.c 			err = proc_get_long(&p, &left, &val_a, &neg, tr_a,
p                3230 kernel/sysctl.c 				p++;
p                3235 kernel/sysctl.c 				err = proc_get_long(&p, &left, &val_b,
p                3255 kernel/sysctl.c 					p++;
p                3262 kernel/sysctl.c 			proc_skip_char(&p, &left, '\n');
p                  26 kernel/test_kprobes.c static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
p                  36 kernel/test_kprobes.c static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
p                  87 kernel/test_kprobes.c static int kp_pre_handler2(struct kprobe *p, struct pt_regs *regs)
p                  93 kernel/test_kprobes.c static void kp_post_handler2(struct kprobe *p, struct pt_regs *regs,
p                 287 kernel/time/hrtimer.c # define switch_hrtimer_base(t, b, p)	(b)
p                  53 kernel/time/posix-cpu-timers.c 	struct task_struct *p;
p                  62 kernel/time/posix-cpu-timers.c 	p = find_task_by_vpid(pid);
p                  63 kernel/time/posix-cpu-timers.c 	if (!p)
p                  64 kernel/time/posix-cpu-timers.c 		return p;
p                  67 kernel/time/posix-cpu-timers.c 		return same_thread_group(p, current) ? p : NULL;
p                  79 kernel/time/posix-cpu-timers.c 		return (p == current || thread_group_leader(p)) ? p : NULL;
p                  85 kernel/time/posix-cpu-timers.c 	return has_group_leader_pid(p) ? p : NULL;
p                  93 kernel/time/posix-cpu-timers.c 	struct task_struct *p;
p                  99 kernel/time/posix-cpu-timers.c 	p = lookup_task(pid, thread, gettime);
p                 100 kernel/time/posix-cpu-timers.c 	if (p && getref)
p                 101 kernel/time/posix-cpu-timers.c 		get_task_struct(p);
p                 103 kernel/time/posix-cpu-timers.c 	return p;
p                 197 kernel/time/posix-cpu-timers.c static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
p                 202 kernel/time/posix-cpu-timers.c 		return task_sched_runtime(p);
p                 204 kernel/time/posix-cpu-timers.c 	task_cputime(p, &utime, &stime);
p                 224 kernel/time/posix-cpu-timers.c static void task_sample_cputime(struct task_struct *p, u64 *samples)
p                 228 kernel/time/posix-cpu-timers.c 	task_cputime(p, &utime, &stime);
p                 229 kernel/time/posix-cpu-timers.c 	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
p                 343 kernel/time/posix-cpu-timers.c static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
p                 346 kernel/time/posix-cpu-timers.c 	struct thread_group_cputimer *cputimer = &p->signal->cputimer;
p                 347 kernel/time/posix-cpu-timers.c 	struct posix_cputimers *pct = &p->signal->posix_cputimers;
p                 352 kernel/time/posix-cpu-timers.c 			thread_group_start_cputime(p, samples);
p                 354 kernel/time/posix-cpu-timers.c 			__thread_group_cputime(p, samples);
p                 389 kernel/time/posix-cpu-timers.c 	struct task_struct *p = get_task_for_clock(new_timer->it_clock);
p                 391 kernel/time/posix-cpu-timers.c 	if (!p)
p                 396 kernel/time/posix-cpu-timers.c 	new_timer->it.cpu.task = p;
p                 409 kernel/time/posix-cpu-timers.c 	struct task_struct *p = ctmr->task;
p                 414 kernel/time/posix-cpu-timers.c 	if (WARN_ON_ONCE(!p))
p                 421 kernel/time/posix-cpu-timers.c 	sighand = lock_task_sighand(p, &flags);
p                 434 kernel/time/posix-cpu-timers.c 		unlock_task_sighand(p, &flags);
p                 438 kernel/time/posix-cpu-timers.c 		put_task_struct(p);
p                 492 kernel/time/posix-cpu-timers.c 	struct task_struct *p = ctmr->task;
p                 496 kernel/time/posix-cpu-timers.c 		base = p->posix_cputimers.bases + clkidx;
p                 498 kernel/time/posix-cpu-timers.c 		base = p->signal->posix_cputimers.bases + clkidx;
p                 513 kernel/time/posix-cpu-timers.c 		tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
p                 515 kernel/time/posix-cpu-timers.c 		tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
p                 567 kernel/time/posix-cpu-timers.c 	struct task_struct *p = ctmr->task;
p                 572 kernel/time/posix-cpu-timers.c 	if (WARN_ON_ONCE(!p))
p                 585 kernel/time/posix-cpu-timers.c 	sighand = lock_task_sighand(p, &flags);
p                 615 kernel/time/posix-cpu-timers.c 		val = cpu_clock_sample(clkid, p);
p                 617 kernel/time/posix-cpu-timers.c 		val = cpu_clock_sample_group(clkid, p, true);
p                 650 kernel/time/posix-cpu-timers.c 		unlock_task_sighand(p, &flags);
p                 668 kernel/time/posix-cpu-timers.c 	unlock_task_sighand(p, &flags);
p                 707 kernel/time/posix-cpu-timers.c 	struct task_struct *p = ctmr->task;
p                 709 kernel/time/posix-cpu-timers.c 	if (WARN_ON_ONCE(!p))
p                 724 kernel/time/posix-cpu-timers.c 		now = cpu_clock_sample(clkid, p);
p                 734 kernel/time/posix-cpu-timers.c 		sighand = lock_task_sighand(p, &flags);
p                 744 kernel/time/posix-cpu-timers.c 			now = cpu_clock_sample_group(clkid, p, false);
p                 745 kernel/time/posix-cpu-timers.c 			unlock_task_sighand(p, &flags);
p                 980 kernel/time/posix-cpu-timers.c 	struct task_struct *p = ctmr->task;
p                 985 kernel/time/posix-cpu-timers.c 	if (WARN_ON_ONCE(!p))
p                 992 kernel/time/posix-cpu-timers.c 		now = cpu_clock_sample(clkid, p);
p                 994 kernel/time/posix-cpu-timers.c 		if (unlikely(p->exit_state))
p                 998 kernel/time/posix-cpu-timers.c 		sighand = lock_task_sighand(p, &flags);
p                1006 kernel/time/posix-cpu-timers.c 		sighand = lock_task_sighand(p, &flags);
p                1014 kernel/time/posix-cpu-timers.c 		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
p                1018 kernel/time/posix-cpu-timers.c 		now = cpu_clock_sample_group(clkid, p, true);
p                1028 kernel/time/posix-cpu-timers.c 	unlock_task_sighand(p, &flags);
p                1721 kernel/time/timer.c 	struct task_struct *p = current;
p                1724 kernel/time/timer.c 	account_process_tick(p, user_tick);
p                 361 kernel/torture.c 	struct shuffle_task *p;
p                 364 kernel/torture.c 	list_for_each_entry_safe(stp, p, &shuffle_task_list, st_l) {
p                1771 kernel/trace/blktrace.c 	char *p = buf;
p                1775 kernel/trace/blktrace.c 			p += sprintf(p, "%s%s",
p                1776 kernel/trace/blktrace.c 				    (p == buf) ? "" : ",", mask_maps[i].str);
p                1779 kernel/trace/blktrace.c 	*p++ = '\n';
p                1781 kernel/trace/blktrace.c 	return p - buf;
p                1796 kernel/trace/blktrace.c 	struct hd_struct *p = dev_to_part(dev);
p                1802 kernel/trace/blktrace.c 	bdev = bdget(part_devt(p));
p                1844 kernel/trace/blktrace.c 	struct hd_struct *p;
p                1865 kernel/trace/blktrace.c 	p = dev_to_part(dev);
p                1866 kernel/trace/blktrace.c 	bdev = bdget(part_devt(p));
p                  22 kernel/trace/bpf_trace.c #define bpf_event_rcu_dereference(p)					\
p                  23 kernel/trace/bpf_trace.c 	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
p                 281 kernel/trace/ftrace.c 	struct ftrace_ops **p;
p                 295 kernel/trace/ftrace.c 	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
p                 296 kernel/trace/ftrace.c 		if (*p == ops)
p                 299 kernel/trace/ftrace.c 	if (*p != ops)
p                 302 kernel/trace/ftrace.c 	*p = (*p)->next;
p                1233 kernel/trace/ftrace.c 	struct ftrace_mod_load *p, *n;
p                1240 kernel/trace/ftrace.c 	list_for_each_entry_safe(p, n, head, list)
p                1241 kernel/trace/ftrace.c 		free_ftrace_mod(p);
p                1952 kernel/trace/ftrace.c static void print_ip_ins(const char *fmt, const unsigned char *p)
p                1959 kernel/trace/ftrace.c 		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
p                2908 kernel/trace/ftrace.c 	struct dyn_ftrace *p;
p                2938 kernel/trace/ftrace.c 			p = &pg->records[i];
p                2939 kernel/trace/ftrace.c 			p->flags = rec_flags;
p                2946 kernel/trace/ftrace.c 			    !ftrace_code_disable(mod, p))
p                3149 kernel/trace/ftrace.c 	void *p = NULL;
p                3162 kernel/trace/ftrace.c 		p = t_probe_next(m, &l);
p                3163 kernel/trace/ftrace.c 		if (!p)
p                3166 kernel/trace/ftrace.c 	if (!p)
p                3224 kernel/trace/ftrace.c 	void *p = NULL;
p                3237 kernel/trace/ftrace.c 		p = t_mod_next(m, &l);
p                3238 kernel/trace/ftrace.c 		if (!p)
p                3241 kernel/trace/ftrace.c 	if (!p) {
p                3352 kernel/trace/ftrace.c 	void *p = NULL;
p                3393 kernel/trace/ftrace.c 		p = t_func_next(m, &l);
p                3394 kernel/trace/ftrace.c 		if (!p)
p                3398 kernel/trace/ftrace.c 	if (!p)
p                3404 kernel/trace/ftrace.c static void t_stop(struct seq_file *m, void *p)
p                4598 kernel/trace/ftrace.c 	struct ftrace_func_command *p;
p                4602 kernel/trace/ftrace.c 	list_for_each_entry(p, &ftrace_commands, list) {
p                4603 kernel/trace/ftrace.c 		if (strcmp(cmd->name, p->name) == 0) {
p                4621 kernel/trace/ftrace.c 	struct ftrace_func_command *p, *n;
p                4625 kernel/trace/ftrace.c 	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
p                4626 kernel/trace/ftrace.c 		if (strcmp(cmd->name, p->name) == 0) {
p                4628 kernel/trace/ftrace.c 			list_del_init(&p->list);
p                4644 kernel/trace/ftrace.c 	struct ftrace_func_command *p;
p                4663 kernel/trace/ftrace.c 	list_for_each_entry(p, &ftrace_commands, list) {
p                4664 kernel/trace/ftrace.c 		if (strcmp(p->name, command) == 0) {
p                4665 kernel/trace/ftrace.c 			ret = p->func(tr, hash, func, command, next, enable);
p                5185 kernel/trace/ftrace.c static void g_stop(struct seq_file *m, void *p)
p                5582 kernel/trace/ftrace.c 	unsigned long *p;
p                5623 kernel/trace/ftrace.c 	p = start;
p                5625 kernel/trace/ftrace.c 	while (p < end) {
p                5626 kernel/trace/ftrace.c 		addr = ftrace_call_adjust(*p++);
p                5692 kernel/trace/ftrace.c #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
p                6547 kernel/trace/ftrace.c static void fpid_stop(struct seq_file *m, void *p)
p                1001 kernel/trace/ring_buffer.c 	struct list_head *p = rb_list_head((*bpage)->list.next);
p                1003 kernel/trace/ring_buffer.c 	*bpage = list_entry(p, struct buffer_page, list);
p                1733 kernel/trace/trace.c 	struct trace_selftests *p, *n;
p                1747 kernel/trace/trace.c 	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
p                1752 kernel/trace/trace.c 		ret = run_tracer_selftest(p->type);
p                1756 kernel/trace/trace.c 			     p->type->name);
p                1759 kernel/trace/trace.c 				if (t == p->type) {
p                1766 kernel/trace/trace.c 		list_del(&p->list);
p                1767 kernel/trace/trace.c 		kfree(p);
p                2696 kernel/trace/trace.c 	struct trace_export **p;
p                2698 kernel/trace/trace.c 	for (p = list; *p != NULL; p = &(*p)->next)
p                2699 kernel/trace/trace.c 		if (*p == export)
p                2702 kernel/trace/trace.c 	if (*p != export)
p                2705 kernel/trace/trace.c 	rcu_assign_pointer(*p, (*p)->next);
p                3447 kernel/trace/trace.c 	void *p = NULL;
p                3482 kernel/trace/trace.c 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
p                3491 kernel/trace/trace.c 			p = iter;
p                3494 kernel/trace/trace.c 			p = s_next(m, p, &l);
p                3500 kernel/trace/trace.c 	return p;
p                3503 kernel/trace/trace.c static void s_stop(struct seq_file *m, void *p)
p                4350 kernel/trace/trace.c static void t_stop(struct seq_file *m, void *p)
p                7600 kernel/trace/trace.c 	unsigned long *p = filp->private_data;
p                7604 kernel/trace/trace.c 	r = scnprintf(buf, 63, "%ld", *p);
p                  41 kernel/trace/trace_branch.c 	const char *p;
p                  72 kernel/trace/trace_branch.c 	p = f->data.file + strlen(f->data.file);
p                  73 kernel/trace/trace_branch.c 	while (p >= f->data.file && *p != '/')
p                  74 kernel/trace/trace_branch.c 		p--;
p                  75 kernel/trace/trace_branch.c 	p++;
p                  78 kernel/trace/trace_branch.c 	strncpy(entry->file, p, TRACE_FILE_SIZE);
p                 247 kernel/trace/trace_branch.c static inline long get_incorrect_percent(struct ftrace_branch_data *p)
p                 251 kernel/trace/trace_branch.c 	if (p->correct) {
p                 252 kernel/trace/trace_branch.c 		percent = p->incorrect * 100;
p                 253 kernel/trace/trace_branch.c 		percent /= p->correct + p->incorrect;
p                 255 kernel/trace/trace_branch.c 		percent = p->incorrect ? 100 : -1;
p                 260 kernel/trace/trace_branch.c static const char *branch_stat_process_file(struct ftrace_branch_data *p)
p                 265 kernel/trace/trace_branch.c 	f = p->file + strlen(p->file);
p                 266 kernel/trace/trace_branch.c 	while (f >= p->file && *f != '/')
p                 272 kernel/trace/trace_branch.c 			     struct ftrace_branch_data *p, const char *f)
p                 279 kernel/trace/trace_branch.c 	percent = get_incorrect_percent(p);
p                 286 kernel/trace/trace_branch.c 	seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
p                 290 kernel/trace/trace_branch.c 				   struct ftrace_branch_data *p, const char *f)
p                 292 kernel/trace/trace_branch.c 	seq_printf(m, "%8lu %8lu ",  p->correct, p->incorrect);
p                 293 kernel/trace/trace_branch.c 	branch_stat_show(m, p, f);
p                 299 kernel/trace/trace_branch.c 	struct ftrace_likely_data *p = v;
p                 303 kernel/trace/trace_branch.c 	f = branch_stat_process_file(&p->data);
p                 305 kernel/trace/trace_branch.c 	if (!p->constant)
p                 306 kernel/trace/trace_branch.c 		return branch_stat_show_normal(m, &p->data, f);
p                 308 kernel/trace/trace_branch.c 	l = snprintf(NULL, 0, "/%lu", p->constant);
p                 312 kernel/trace/trace_branch.c 		   p->data.correct, p->constant, l, p->data.incorrect);
p                 313 kernel/trace/trace_branch.c 	branch_stat_show(m, &p->data, f);
p                 325 kernel/trace/trace_branch.c 	struct ftrace_likely_data *p = v;
p                 327 kernel/trace/trace_branch.c 	++p;
p                 329 kernel/trace/trace_branch.c 	if ((void *)p >= (void *)__stop_annotated_branch_profile)
p                 332 kernel/trace/trace_branch.c 	return p;
p                 415 kernel/trace/trace_branch.c 	struct ftrace_branch_data *p = v;
p                 417 kernel/trace/trace_branch.c 	++p;
p                 419 kernel/trace/trace_branch.c 	if ((void *)p >= (void *)__stop_branch_profile)
p                 422 kernel/trace/trace_branch.c 	return p;
p                 427 kernel/trace/trace_branch.c 	struct ftrace_branch_data *p = v;
p                 430 kernel/trace/trace_branch.c 	f = branch_stat_process_file(p);
p                 431 kernel/trace/trace_branch.c 	return branch_stat_show_normal(m, p, f);
p                  37 kernel/trace/trace_dynevent.c 	char *system = NULL, *event, *p;
p                  52 kernel/trace/trace_dynevent.c 	p = strchr(event, '/');
p                  53 kernel/trace/trace_dynevent.c 	if (p) {
p                  55 kernel/trace/trace_dynevent.c 		event = p + 1;
p                  56 kernel/trace/trace_dynevent.c 		*p = '\0';
p                 981 kernel/trace/trace_events.c static void t_stop(struct seq_file *m, void *p)
p                1018 kernel/trace/trace_events.c static void p_stop(struct seq_file *m, void *p)
p                1267 kernel/trace/trace_events.c 	void *p = (void *)FORMAT_HEADER;
p                1275 kernel/trace/trace_events.c 	while (l < *pos && p)
p                1276 kernel/trace/trace_events.c 		p = f_next(m, p, &l);
p                1278 kernel/trace/trace_events.c 	return p;
p                1281 kernel/trace/trace_events.c static void f_stop(struct seq_file *m, void *p)
p                2204 kernel/trace/trace_events.c 	struct trace_event_call *call, *p;
p                2211 kernel/trace/trace_events.c 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
p                2406 kernel/trace/trace_events.c 	struct trace_event_call *call, *p;
p                2409 kernel/trace/trace_events.c 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
p                4300 kernel/trace/trace_events_hist.c 		char *p;
p                4302 kernel/trace/trace_events_hist.c 		p = param = kstrdup(data->params[i], GFP_KERNEL);
p                4315 kernel/trace/trace_events_hist.c 				kfree(p);
p                4333 kernel/trace/trace_events_hist.c 			kfree(p);
p                4342 kernel/trace/trace_events_hist.c 				kfree(p);
p                4356 kernel/trace/trace_events_hist.c 			kfree(p);
p                4361 kernel/trace/trace_events_hist.c 		kfree(p);
p                6218 kernel/trace/trace_events_hist.c 	char *trigger, *p;
p                6238 kernel/trace/trace_events_hist.c 	p = trigger = param;
p                6240 kernel/trace/trace_events_hist.c 		p = strstr(p, "if");
p                6241 kernel/trace/trace_events_hist.c 		if (!p)
p                6243 kernel/trace/trace_events_hist.c 		if (p == param)
p                6245 kernel/trace/trace_events_hist.c 		if (*(p - 1) != ' ' && *(p - 1) != '\t') {
p                6246 kernel/trace/trace_events_hist.c 			p++;
p                6249 kernel/trace/trace_events_hist.c 		if (p >= param + strlen(param) - (sizeof("if") - 1) - 1)
p                6251 kernel/trace/trace_events_hist.c 		if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') {
p                6252 kernel/trace/trace_events_hist.c 			p++;
p                6256 kernel/trace/trace_events_hist.c 	} while (p);
p                6258 kernel/trace/trace_events_hist.c 	if (!p)
p                6261 kernel/trace/trace_events_hist.c 		*(p - 1) = '\0';
p                6262 kernel/trace/trace_events_hist.c 		param = strstrip(p);
p                 150 kernel/trace/trace_events_trigger.c 	struct event_command *p;
p                 156 kernel/trace/trace_events_trigger.c 		list_for_each_entry_reverse(p, &trigger_commands, list)
p                 157 kernel/trace/trace_events_trigger.c 			seq_printf(m, " %s", p->name);
p                 194 kernel/trace/trace_events_trigger.c 		struct event_command *p;
p                 198 kernel/trace/trace_events_trigger.c 		list_for_each_entry(p, &trigger_commands, list) {
p                 199 kernel/trace/trace_events_trigger.c 			if (p->unreg_all)
p                 200 kernel/trace/trace_events_trigger.c 				p->unreg_all(event_file);
p                 220 kernel/trace/trace_events_trigger.c 	struct event_command *p;
p                 227 kernel/trace/trace_events_trigger.c 	list_for_each_entry(p, &trigger_commands, list) {
p                 228 kernel/trace/trace_events_trigger.c 		if (strcmp(p->name, command) == 0) {
p                 229 kernel/trace/trace_events_trigger.c 			ret = p->func(p, file, buff, command, next);
p                 325 kernel/trace/trace_events_trigger.c 	struct event_command *p;
p                 329 kernel/trace/trace_events_trigger.c 	list_for_each_entry(p, &trigger_commands, list) {
p                 330 kernel/trace/trace_events_trigger.c 		if (strcmp(cmd->name, p->name) == 0) {
p                 348 kernel/trace/trace_events_trigger.c 	struct event_command *p, *n;
p                 352 kernel/trace/trace_events_trigger.c 	list_for_each_entry_safe(p, n, &trigger_commands, list) {
p                 353 kernel/trace/trace_events_trigger.c 		if (strcmp(cmd->name, p->name) == 0) {
p                 355 kernel/trace/trace_events_trigger.c 			list_del_init(&p->list);
p                 117 kernel/trace/trace_kprobe.c 	char *p;
p                 122 kernel/trace/trace_kprobe.c 	p = strchr(tk->symbol, ':');
p                 123 kernel/trace/trace_kprobe.c 	if (!p)
p                 125 kernel/trace/trace_kprobe.c 	*p = '\0';
p                 129 kernel/trace/trace_kprobe.c 	*p = ':';
p                 458 kernel/trace/trace_kprobe.c 	char symname[KSYM_NAME_LEN], *p;
p                 465 kernel/trace/trace_kprobe.c 		p = strchr(symname, '.');
p                 466 kernel/trace/trace_kprobe.c 		if (!p)
p                 468 kernel/trace/trace_kprobe.c 		*p = '\0';
p                1669 kernel/trace/trace_kprobe.c 	char *p, *cmd = kprobe_boot_events_buf;
p                1675 kernel/trace/trace_kprobe.c 		p = strchr(cmd, ';');
p                1676 kernel/trace/trace_kprobe.c 		if (p)
p                1677 kernel/trace/trace_kprobe.c 			*p++ = '\0';
p                1685 kernel/trace/trace_kprobe.c 		cmd = p;
p                  65 kernel/trace/trace_output.c trace_print_flags_seq(struct trace_seq *p, const char *delim,
p                  71 kernel/trace/trace_output.c 	const char *ret = trace_seq_buffer_ptr(p);
p                  83 kernel/trace/trace_output.c 			trace_seq_puts(p, delim);
p                  86 kernel/trace/trace_output.c 		trace_seq_puts(p, str);
p                  92 kernel/trace/trace_output.c 			trace_seq_puts(p, delim);
p                  93 kernel/trace/trace_output.c 		trace_seq_printf(p, "0x%lx", flags);
p                  96 kernel/trace/trace_output.c 	trace_seq_putc(p, 0);
p                 103 kernel/trace/trace_output.c trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
p                 107 kernel/trace/trace_output.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 114 kernel/trace/trace_output.c 		trace_seq_puts(p, symbol_array[i].name);
p                 118 kernel/trace/trace_output.c 	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
p                 119 kernel/trace/trace_output.c 		trace_seq_printf(p, "0x%lx", val);
p                 121 kernel/trace/trace_output.c 	trace_seq_putc(p, 0);
p                 129 kernel/trace/trace_output.c trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
p                 135 kernel/trace/trace_output.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 147 kernel/trace/trace_output.c 			trace_seq_puts(p, delim);
p                 150 kernel/trace/trace_output.c 		trace_seq_puts(p, str);
p                 156 kernel/trace/trace_output.c 			trace_seq_puts(p, delim);
p                 157 kernel/trace/trace_output.c 		trace_seq_printf(p, "0x%llx", flags);
p                 160 kernel/trace/trace_output.c 	trace_seq_putc(p, 0);
p                 167 kernel/trace/trace_output.c trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
p                 171 kernel/trace/trace_output.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 178 kernel/trace/trace_output.c 		trace_seq_puts(p, symbol_array[i].name);
p                 182 kernel/trace/trace_output.c 	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
p                 183 kernel/trace/trace_output.c 		trace_seq_printf(p, "0x%llx", val);
p                 185 kernel/trace/trace_output.c 	trace_seq_putc(p, 0);
p                 193 kernel/trace/trace_output.c trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
p                 196 kernel/trace/trace_output.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 198 kernel/trace/trace_output.c 	trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
p                 199 kernel/trace/trace_output.c 	trace_seq_putc(p, 0);
p                 217 kernel/trace/trace_output.c trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len,
p                 221 kernel/trace/trace_output.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 225 kernel/trace/trace_output.c 		trace_seq_printf(p, fmt, min(buf_len - i, 16), &buf[i]);
p                 226 kernel/trace/trace_output.c 	trace_seq_putc(p, 0);
p                 233 kernel/trace/trace_output.c trace_print_array_seq(struct trace_seq *p, const void *buf, int count,
p                 236 kernel/trace/trace_output.c 	const char *ret = trace_seq_buffer_ptr(p);
p                 241 kernel/trace/trace_output.c 	trace_seq_putc(p, '{');
p                 246 kernel/trace/trace_output.c 			trace_seq_printf(p, "%s0x%x", prefix,
p                 250 kernel/trace/trace_output.c 			trace_seq_printf(p, "%s0x%x", prefix,
p                 254 kernel/trace/trace_output.c 			trace_seq_printf(p, "%s0x%x", prefix,
p                 258 kernel/trace/trace_output.c 			trace_seq_printf(p, "%s0x%llx", prefix,
p                 262 kernel/trace/trace_output.c 			trace_seq_printf(p, "BAD SIZE:%zu 0x%x", el_size,
p                 270 kernel/trace/trace_output.c 	trace_seq_putc(p, '}');
p                 271 kernel/trace/trace_output.c 	trace_seq_putc(p, 0);
p                 282 kernel/trace/trace_output.c 	struct trace_seq *p = &iter->tmp_seq;
p                 293 kernel/trace/trace_output.c 	trace_seq_init(p);
p                1052 kernel/trace/trace_output.c 	unsigned long *p;
p                1060 kernel/trace/trace_output.c 	for (p = field->caller; p && p < end && *p != ULONG_MAX; p++) {
p                1066 kernel/trace/trace_output.c 		seq_print_ip_sym(s, *p, flags);
p                 137 kernel/trace/trace_printk.c 		struct trace_bprintk_fmt *p;
p                 140 kernel/trace/trace_printk.c 		list_for_each_entry(p, &trace_bprintk_fmt_list, list) {
p                 142 kernel/trace/trace_printk.c 				return &p->fmt;
p                 337 kernel/trace/trace_printk.c static void t_stop(struct seq_file *m, void *p)
p                 165 kernel/trace/trace_probe.c 	char *command, *p;
p                 192 kernel/trace/trace_probe.c 	p = command;
p                 195 kernel/trace/trace_probe.c 		strcpy(p, trace_probe_log.argv[i]);
p                 196 kernel/trace/trace_probe.c 		p[len] = ' ';
p                 197 kernel/trace/trace_probe.c 		p += len + 1;
p                 199 kernel/trace/trace_probe.c 	*(p - 1) = '\0';
p                 220 kernel/trace/trace_probe_tmpl.h 	void *p;
p                 233 kernel/trace/trace_probe_tmpl.h 		p = data + a->offset;
p                 235 kernel/trace/trace_probe_tmpl.h 			if (!a->type->print(s, p, field))
p                 238 kernel/trace/trace_probe_tmpl.h 			p += a->type->size;
p                 526 kernel/trace/trace_sched_wakeup.c probe_wakeup(void *ignore, struct task_struct *p)
p                 537 kernel/trace/trace_sched_wakeup.c 	tracing_record_cmdline(p);
p                 548 kernel/trace/trace_sched_wakeup.c 	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
p                 549 kernel/trace/trace_sched_wakeup.c 	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
p                 550 kernel/trace/trace_sched_wakeup.c 	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
p                 563 kernel/trace/trace_sched_wakeup.c 	    (!dl_task(p) && p->prio >= wakeup_prio))
p                 569 kernel/trace/trace_sched_wakeup.c 	wakeup_cpu = task_cpu(p);
p                 571 kernel/trace/trace_sched_wakeup.c 	wakeup_prio = p->prio;
p                 577 kernel/trace/trace_sched_wakeup.c 	if (dl_task(p))
p                 582 kernel/trace/trace_sched_wakeup.c 	wakeup_task = get_task_struct(p);
p                 588 kernel/trace/trace_sched_wakeup.c 	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
p                1088 kernel/trace/trace_selftest.c 	struct task_struct *p;
p                1098 kernel/trace/trace_selftest.c 	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
p                1099 kernel/trace/trace_selftest.c 	if (IS_ERR(p)) {
p                1117 kernel/trace/trace_selftest.c 	while (p->on_rq) {
p                1131 kernel/trace/trace_selftest.c 	wake_up_process(p);
p                1150 kernel/trace/trace_selftest.c 	kthread_stop(p);
p                 157 kernel/trace/trace_stack.c 	unsigned long this_size, flags; unsigned long *p, *top, *start;
p                 227 kernel/trace/trace_stack.c 		p = start;
p                 229 kernel/trace/trace_stack.c 		for (; p < top && i < stack_trace_nr_entries; p++) {
p                 234 kernel/trace/trace_stack.c 			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
p                 237 kernel/trace/trace_stack.c 					(top - p) * sizeof(unsigned long);
p                 240 kernel/trace/trace_stack.c 				start = p + 1;
p                 249 kernel/trace/trace_stack.c 					tracer_frame = (p - stack) *
p                 409 kernel/trace/trace_stack.c static void t_stop(struct seq_file *m, void *p)
p                 197 kernel/trace/trace_stat.c static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
p                 200 kernel/trace/trace_stat.c 	struct rb_node *node = p;
p                 204 kernel/trace/trace_stat.c 	if (p == SEQ_START_TOKEN)
p                 210 kernel/trace/trace_stat.c static void stat_seq_stop(struct seq_file *s, void *p)
p                  21 kernel/trace/trace_stat.h 	int			(*stat_show)(struct seq_file *s, void *p);
p                 852 kernel/trace/trace_uprobe.c 		struct page *p = alloc_pages_node(cpu_to_node(cpu),
p                 854 kernel/trace/trace_uprobe.c 		if (p == NULL) {
p                 858 kernel/trace/trace_uprobe.c 		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
p                  58 kernel/tracepoint.c 	struct tp_probes *p  = kmalloc(struct_size(p, probes, count),
p                  60 kernel/tracepoint.c 	return p == NULL ? NULL : p->probes;
p                 565 kernel/tracepoint.c 	struct task_struct *p, *t;
p                 569 kernel/tracepoint.c 		for_each_process_thread(p, t) {
p                 581 kernel/tracepoint.c 	struct task_struct *p, *t;
p                 586 kernel/tracepoint.c 		for_each_process_thread(p, t) {
p                  83 kernel/tsacct.c void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
p                  88 kernel/tsacct.c 	stats->coremem = p->acct_rss_mem1 * PAGE_SIZE;
p                  90 kernel/tsacct.c 	stats->virtmem = p->acct_vm_mem1 * PAGE_SIZE;
p                  92 kernel/tsacct.c 	mm = get_task_mm(p);
p                  99 kernel/tsacct.c 	stats->read_char	= p->ioac.rchar & KB_MASK;
p                 100 kernel/tsacct.c 	stats->write_char	= p->ioac.wchar & KB_MASK;
p                 101 kernel/tsacct.c 	stats->read_syscalls	= p->ioac.syscr & KB_MASK;
p                 102 kernel/tsacct.c 	stats->write_syscalls	= p->ioac.syscw & KB_MASK;
p                 104 kernel/tsacct.c 	stats->read_bytes	= p->ioac.read_bytes & KB_MASK;
p                 105 kernel/tsacct.c 	stats->write_bytes	= p->ioac.write_bytes & KB_MASK;
p                 106 kernel/tsacct.c 	stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes & KB_MASK;
p                1290 kernel/user_namespace.c 	struct user_namespace *owner, *p;
p                1293 kernel/user_namespace.c 	owner = p = ns->ops->owner(ns);
p                1295 kernel/user_namespace.c 		if (!p)
p                1297 kernel/user_namespace.c 		if (p == my_user_ns)
p                1299 kernel/user_namespace.c 		p = p->parent;
p                 109 lib/842/842_compress.c #define get_input_data(p, o, b)						\
p                 110 lib/842/842_compress.c 	be##b##_to_cpu(get_unaligned((__be##b *)((p)->in + (o))))
p                 112 lib/842/842_compress.c #define init_hashtable_nodes(p, b)	do {			\
p                 114 lib/842/842_compress.c 	hash_init((p)->htable##b);				\
p                 115 lib/842/842_compress.c 	for (_i = 0; _i < ARRAY_SIZE((p)->node##b); _i++) {	\
p                 116 lib/842/842_compress.c 		(p)->node##b[_i].index = _i;			\
p                 117 lib/842/842_compress.c 		(p)->node##b[_i].data = 0;			\
p                 118 lib/842/842_compress.c 		INIT_HLIST_NODE(&(p)->node##b[_i].node);	\
p                 122 lib/842/842_compress.c #define find_index(p, b, n)	({					\
p                 124 lib/842/842_compress.c 	p->index##b[n] = INDEX_NOT_FOUND;				\
p                 125 lib/842/842_compress.c 	hash_for_each_possible(p->htable##b, _n, node, p->data##b[n]) {	\
p                 126 lib/842/842_compress.c 		if (p->data##b[n] == _n->data) {			\
p                 127 lib/842/842_compress.c 			p->index##b[n] = _n->index;			\
p                 131 lib/842/842_compress.c 	p->index##b[n] >= 0;						\
p                 134 lib/842/842_compress.c #define check_index(p, b, n)			\
p                 135 lib/842/842_compress.c 	((p)->index##b[n] == INDEX_NOT_CHECKED	\
p                 136 lib/842/842_compress.c 	 ? find_index(p, b, n)			\
p                 137 lib/842/842_compress.c 	 : (p)->index##b[n] >= 0)
p                 139 lib/842/842_compress.c #define replace_hash(p, b, i, d)	do {				\
p                 140 lib/842/842_compress.c 	struct sw842_hlist_node##b *_n = &(p)->node##b[(i)+(d)];	\
p                 142 lib/842/842_compress.c 	_n->data = (p)->data##b[d];					\
p                 145 lib/842/842_compress.c 		 (unsigned int)((p)->in - (p)->instart),		\
p                 147 lib/842/842_compress.c 	hash_add((p)->htable##b, &_n->node, _n->data);			\
p                 152 lib/842/842_compress.c static int add_bits(struct sw842_param *p, u64 d, u8 n);
p                 154 lib/842/842_compress.c static int __split_add_bits(struct sw842_param *p, u64 d, u8 n, u8 s)
p                 161 lib/842/842_compress.c 	ret = add_bits(p, d >> s, n - s);
p                 164 lib/842/842_compress.c 	return add_bits(p, d & GENMASK_ULL(s - 1, 0), s);
p                 167 lib/842/842_compress.c static int add_bits(struct sw842_param *p, u64 d, u8 n)
p                 169 lib/842/842_compress.c 	int b = p->bit, bits = b + n, s = round_up(bits, 8) - bits;
p                 171 lib/842/842_compress.c 	u8 *out = p->out;
p                 182 lib/842/842_compress.c 		return __split_add_bits(p, d, n, 32);
p                 183 lib/842/842_compress.c 	else if (p->olen < 8 && bits > 32 && bits <= 56)
p                 184 lib/842/842_compress.c 		return __split_add_bits(p, d, n, 16);
p                 185 lib/842/842_compress.c 	else if (p->olen < 4 && bits > 16 && bits <= 24)
p                 186 lib/842/842_compress.c 		return __split_add_bits(p, d, n, 8);
p                 188 lib/842/842_compress.c 	if (DIV_ROUND_UP(bits, 8) > p->olen)
p                 211 lib/842/842_compress.c 	p->bit += n;
p                 213 lib/842/842_compress.c 	if (p->bit > 7) {
p                 214 lib/842/842_compress.c 		p->out += p->bit / 8;
p                 215 lib/842/842_compress.c 		p->olen -= p->bit / 8;
p                 216 lib/842/842_compress.c 		p->bit %= 8;
p                 222 lib/842/842_compress.c static int add_template(struct sw842_param *p, u8 c)
p                 233 lib/842/842_compress.c 	ret = add_bits(p, t[4], OP_BITS);
p                 245 lib/842/842_compress.c 				ret = add_bits(p, p->index8[0], I8_BITS);
p                 247 lib/842/842_compress.c 				ret = add_bits(p, p->data8[0], 64);
p                 253 lib/842/842_compress.c 				ret = add_bits(p, get_input_data(p, 2, 32), 32);
p                 257 lib/842/842_compress.c 				ret = add_bits(p, p->index4[b >> 2], I4_BITS);
p                 259 lib/842/842_compress.c 				ret = add_bits(p, p->data4[b >> 2], 32);
p                 267 lib/842/842_compress.c 				ret = add_bits(p, p->index2[b >> 1], I2_BITS);
p                 269 lib/842/842_compress.c 				ret = add_bits(p, p->data2[b >> 1], 16);
p                 305 lib/842/842_compress.c static int add_repeat_template(struct sw842_param *p, u8 r)
p                 313 lib/842/842_compress.c 	ret = add_bits(p, OP_REPEAT, OP_BITS);
p                 317 lib/842/842_compress.c 	ret = add_bits(p, r, REPEAT_BITS);
p                 327 lib/842/842_compress.c static int add_short_data_template(struct sw842_param *p, u8 b)
p                 334 lib/842/842_compress.c 	ret = add_bits(p, OP_SHORT_DATA, OP_BITS);
p                 338 lib/842/842_compress.c 	ret = add_bits(p, b, SHORT_DATA_BITS);
p                 343 lib/842/842_compress.c 		ret = add_bits(p, p->in[i], 8);
p                 354 lib/842/842_compress.c static int add_zeros_template(struct sw842_param *p)
p                 356 lib/842/842_compress.c 	int ret = add_bits(p, OP_ZEROS, OP_BITS);
p                 367 lib/842/842_compress.c static int add_end_template(struct sw842_param *p)
p                 369 lib/842/842_compress.c 	int ret = add_bits(p, OP_END, OP_BITS);
p                 380 lib/842/842_compress.c static bool check_template(struct sw842_param *p, u8 c)
p                 391 lib/842/842_compress.c 				match = check_index(p, 2, b >> 1);
p                 393 lib/842/842_compress.c 				match = check_index(p, 4, b >> 2);
p                 395 lib/842/842_compress.c 				match = check_index(p, 8, 0);
p                 408 lib/842/842_compress.c static void get_next_data(struct sw842_param *p)
p                 410 lib/842/842_compress.c 	p->data8[0] = get_input_data(p, 0, 64);
p                 411 lib/842/842_compress.c 	p->data4[0] = get_input_data(p, 0, 32);
p                 412 lib/842/842_compress.c 	p->data4[1] = get_input_data(p, 4, 32);
p                 413 lib/842/842_compress.c 	p->data2[0] = get_input_data(p, 0, 16);
p                 414 lib/842/842_compress.c 	p->data2[1] = get_input_data(p, 2, 16);
p                 415 lib/842/842_compress.c 	p->data2[2] = get_input_data(p, 4, 16);
p                 416 lib/842/842_compress.c 	p->data2[3] = get_input_data(p, 6, 16);
p                 423 lib/842/842_compress.c static void update_hashtables(struct sw842_param *p)
p                 425 lib/842/842_compress.c 	u64 pos = p->in - p->instart;
p                 430 lib/842/842_compress.c 	replace_hash(p, 8, n8, 0);
p                 431 lib/842/842_compress.c 	replace_hash(p, 4, n4, 0);
p                 432 lib/842/842_compress.c 	replace_hash(p, 4, n4, 1);
p                 433 lib/842/842_compress.c 	replace_hash(p, 2, n2, 0);
p                 434 lib/842/842_compress.c 	replace_hash(p, 2, n2, 1);
p                 435 lib/842/842_compress.c 	replace_hash(p, 2, n2, 2);
p                 436 lib/842/842_compress.c 	replace_hash(p, 2, n2, 3);
p                 442 lib/842/842_compress.c static int process_next(struct sw842_param *p)
p                 446 lib/842/842_compress.c 	p->index8[0] = INDEX_NOT_CHECKED;
p                 447 lib/842/842_compress.c 	p->index4[0] = INDEX_NOT_CHECKED;
p                 448 lib/842/842_compress.c 	p->index4[1] = INDEX_NOT_CHECKED;
p                 449 lib/842/842_compress.c 	p->index2[0] = INDEX_NOT_CHECKED;
p                 450 lib/842/842_compress.c 	p->index2[1] = INDEX_NOT_CHECKED;
p                 451 lib/842/842_compress.c 	p->index2[2] = INDEX_NOT_CHECKED;
p                 452 lib/842/842_compress.c 	p->index2[3] = INDEX_NOT_CHECKED;
p                 456 lib/842/842_compress.c 		if (check_template(p, i))
p                 460 lib/842/842_compress.c 	ret = add_template(p, i);
p                 480 lib/842/842_compress.c 	struct sw842_param *p = (struct sw842_param *)wmem;
p                 486 lib/842/842_compress.c 	BUILD_BUG_ON(sizeof(*p) > SW842_MEM_COMPRESS);
p                 488 lib/842/842_compress.c 	init_hashtable_nodes(p, 8);
p                 489 lib/842/842_compress.c 	init_hashtable_nodes(p, 4);
p                 490 lib/842/842_compress.c 	init_hashtable_nodes(p, 2);
p                 492 lib/842/842_compress.c 	p->in = (u8 *)in;
p                 493 lib/842/842_compress.c 	p->instart = p->in;
p                 494 lib/842/842_compress.c 	p->ilen = ilen;
p                 495 lib/842/842_compress.c 	p->out = out;
p                 496 lib/842/842_compress.c 	p->olen = *olen;
p                 497 lib/842/842_compress.c 	p->bit = 0;
p                 499 lib/842/842_compress.c 	total = p->olen;
p                 514 lib/842/842_compress.c 	last = ~get_unaligned((u64 *)p->in);
p                 516 lib/842/842_compress.c 	while (p->ilen > 7) {
p                 517 lib/842/842_compress.c 		next = get_unaligned((u64 *)p->in);
p                 522 lib/842/842_compress.c 		get_next_data(p);
p                 534 lib/842/842_compress.c 			ret = add_repeat_template(p, repeat_count);
p                 541 lib/842/842_compress.c 			ret = add_zeros_template(p);
p                 543 lib/842/842_compress.c 			ret = process_next(p);
p                 550 lib/842/842_compress.c 		update_hashtables(p);
p                 551 lib/842/842_compress.c 		p->in += 8;
p                 552 lib/842/842_compress.c 		p->ilen -= 8;
p                 556 lib/842/842_compress.c 		ret = add_repeat_template(p, repeat_count);
p                 562 lib/842/842_compress.c 	if (p->ilen > 0) {
p                 563 lib/842/842_compress.c 		ret = add_short_data_template(p, p->ilen);
p                 567 lib/842/842_compress.c 		p->in += p->ilen;
p                 568 lib/842/842_compress.c 		p->ilen = 0;
p                 571 lib/842/842_compress.c 	ret = add_end_template(p);
p                 583 lib/842/842_compress.c 	ret = add_bits(p, crc, CRC_BITS);
p                 587 lib/842/842_compress.c 	if (p->bit) {
p                 588 lib/842/842_compress.c 		p->out++;
p                 589 lib/842/842_compress.c 		p->olen--;
p                 590 lib/842/842_compress.c 		p->bit = 0;
p                 594 lib/842/842_compress.c 	pad = (8 - ((total - p->olen) % 8)) % 8;
p                 596 lib/842/842_compress.c 		if (pad > p->olen) /* we were so close! */
p                 598 lib/842/842_compress.c 		memset(p->out, 0, pad);
p                 599 lib/842/842_compress.c 		p->out += pad;
p                 600 lib/842/842_compress.c 		p->olen -= pad;
p                 603 lib/842/842_compress.c 	if (unlikely((total - p->olen) > UINT_MAX))
p                 606 lib/842/842_compress.c 	*olen = total - p->olen;
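The lib/842/842_compress.c lines above revolve around add_bits(), which packs variable-width fields MSB-first into the output stream while tracking the out pointer, the remaining length and the current bit offset. Below is a byte-at-a-time sketch of that bookkeeping, using a hypothetical struct bitwriter; it is only an illustration (the real add_bits() writes whole 16/32/64-bit words and falls back to __split_add_bits() near the end of the buffer).

#include <stddef.h>
#include <stdint.h>

struct bitwriter {
	uint8_t *out;		/* next output byte (possibly partially filled) */
	size_t olen;		/* bytes remaining, including the partial one */
	unsigned int bit;	/* bits already used in *out, 0..7 */
};

/* Append the low n bits of d, most-significant bit first. */
static int put_bits(struct bitwriter *w, uint64_t d, unsigned int n)
{
	while (n) {
		unsigned int room = 8 - w->bit;
		unsigned int take = n < room ? n : room;
		uint8_t chunk = (d >> (n - take)) & ((1u << take) - 1);

		if (!w->olen)
			return -1;		/* output exhausted */
		if (w->bit == 0)
			*w->out = 0;		/* start a fresh byte */
		*w->out |= chunk << (room - take);

		w->bit += take;
		n -= take;
		if (w->bit == 8) {		/* byte full, advance */
			w->out++;
			w->olen--;
			w->bit = 0;
		}
	}
	return 0;
}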
p                  65 lib/842/842_decompress.c static int next_bits(struct sw842_param *p, u64 *d, u8 n);
p                  67 lib/842/842_decompress.c static int __split_next_bits(struct sw842_param *p, u64 *d, u8 n, u8 s)
p                  77 lib/842/842_decompress.c 	ret = next_bits(p, &tmp, n - s);
p                  80 lib/842/842_decompress.c 	ret = next_bits(p, d, s);
p                  87 lib/842/842_decompress.c static int next_bits(struct sw842_param *p, u64 *d, u8 n)
p                  89 lib/842/842_decompress.c 	u8 *in = p->in, b = p->bit, bits = b + n;
p                 100 lib/842/842_decompress.c 		return __split_next_bits(p, d, n, 32);
p                 101 lib/842/842_decompress.c 	else if (p->ilen < 8 && bits > 32 && bits <= 56)
p                 102 lib/842/842_decompress.c 		return __split_next_bits(p, d, n, 16);
p                 103 lib/842/842_decompress.c 	else if (p->ilen < 4 && bits > 16 && bits <= 24)
p                 104 lib/842/842_decompress.c 		return __split_next_bits(p, d, n, 8);
p                 106 lib/842/842_decompress.c 	if (DIV_ROUND_UP(bits, 8) > p->ilen)
p                 120 lib/842/842_decompress.c 	p->bit += n;
p                 122 lib/842/842_decompress.c 	if (p->bit > 7) {
p                 123 lib/842/842_decompress.c 		p->in += p->bit / 8;
p                 124 lib/842/842_decompress.c 		p->ilen -= p->bit / 8;
p                 125 lib/842/842_decompress.c 		p->bit %= 8;
p                 131 lib/842/842_decompress.c static int do_data(struct sw842_param *p, u8 n)
p                 136 lib/842/842_decompress.c 	if (n > p->olen)
p                 139 lib/842/842_decompress.c 	ret = next_bits(p, &v, n * 8);
p                 145 lib/842/842_decompress.c 		put_unaligned(cpu_to_be16((u16)v), (__be16 *)p->out);
p                 148 lib/842/842_decompress.c 		put_unaligned(cpu_to_be32((u32)v), (__be32 *)p->out);
p                 151 lib/842/842_decompress.c 		put_unaligned(cpu_to_be64((u64)v), (__be64 *)p->out);
p                 157 lib/842/842_decompress.c 	p->out += n;
p                 158 lib/842/842_decompress.c 	p->olen -= n;
p                 163 lib/842/842_decompress.c static int __do_index(struct sw842_param *p, u8 size, u8 bits, u64 fsize)
p                 165 lib/842/842_decompress.c 	u64 index, offset, total = round_down(p->out - p->ostart, 8);
p                 168 lib/842/842_decompress.c 	ret = next_bits(p, &index, bits);
p                 203 lib/842/842_decompress.c 			 (unsigned long)beN_to_cpu(&p->ostart[offset], size));
p                 205 lib/842/842_decompress.c 	memcpy(p->out, &p->ostart[offset], size);
p                 206 lib/842/842_decompress.c 	p->out += size;
p                 207 lib/842/842_decompress.c 	p->olen -= size;
p                 212 lib/842/842_decompress.c static int do_index(struct sw842_param *p, u8 n)
p                 216 lib/842/842_decompress.c 		return __do_index(p, 2, I2_BITS, I2_FIFO_SIZE);
p                 218 lib/842/842_decompress.c 		return __do_index(p, 4, I4_BITS, I4_FIFO_SIZE);
p                 220 lib/842/842_decompress.c 		return __do_index(p, 8, I8_BITS, I8_FIFO_SIZE);
p                 226 lib/842/842_decompress.c static int do_op(struct sw842_param *p, u8 o)
p                 240 lib/842/842_decompress.c 			ret = do_data(p, op & OP_AMOUNT);
p                 243 lib/842/842_decompress.c 			ret = do_index(p, op & OP_AMOUNT);
p                 280 lib/842/842_decompress.c 	struct sw842_param p;
p                 285 lib/842/842_decompress.c 	p.in = (u8 *)in;
p                 286 lib/842/842_decompress.c 	p.bit = 0;
p                 287 lib/842/842_decompress.c 	p.ilen = ilen;
p                 288 lib/842/842_decompress.c 	p.out = out;
p                 289 lib/842/842_decompress.c 	p.ostart = out;
p                 290 lib/842/842_decompress.c 	p.olen = *olen;
p                 292 lib/842/842_decompress.c 	total = p.olen;
p                 297 lib/842/842_decompress.c 		ret = next_bits(&p, &op, OP_BITS);
p                 305 lib/842/842_decompress.c 			ret = next_bits(&p, &rep, REPEAT_BITS);
p                 309 lib/842/842_decompress.c 			if (p.out == out) /* no previous bytes */
p                 315 lib/842/842_decompress.c 			if (rep * 8 > p.olen)
p                 319 lib/842/842_decompress.c 				memcpy(p.out, p.out - 8, 8);
p                 320 lib/842/842_decompress.c 				p.out += 8;
p                 321 lib/842/842_decompress.c 				p.olen -= 8;
p                 329 lib/842/842_decompress.c 			if (8 > p.olen)
p                 332 lib/842/842_decompress.c 			memset(p.out, 0, 8);
p                 333 lib/842/842_decompress.c 			p.out += 8;
p                 334 lib/842/842_decompress.c 			p.olen -= 8;
p                 341 lib/842/842_decompress.c 			ret = next_bits(&p, &bytes, SHORT_DATA_BITS);
p                 349 lib/842/842_decompress.c 				ret = next_bits(&p, &tmp, 8);
p                 352 lib/842/842_decompress.c 				*p.out = (u8)tmp;
p                 353 lib/842/842_decompress.c 				p.out++;
p                 354 lib/842/842_decompress.c 				p.olen--;
p                 367 lib/842/842_decompress.c 			ret = do_op(&p, op);
p                 378 lib/842/842_decompress.c 	ret = next_bits(&p, &crc, CRC_BITS);
p                 385 lib/842/842_decompress.c 	if (crc != (u64)crc32_be(0, out, total - p.olen)) {
p                 390 lib/842/842_decompress.c 	if (unlikely((total - p.olen) > UINT_MAX))
p                 393 lib/842/842_decompress.c 	*olen = total - p.olen;
p                 929 lib/assoc_array.c 		edit->set_parent_slot[0].p = &side->parent_slot;
p                1235 lib/assoc_array.c 				struct assoc_array_node *p =
p                1237 lib/assoc_array.c 				edit->set[1].ptr = &p->slots[node->parent_slot];
p                1361 lib/assoc_array.c 		if (edit->set_parent_slot[i].p)
p                1362 lib/assoc_array.c 			*edit->set_parent_slot[i].p = edit->set_parent_slot[i].to;
p                1592 lib/assoc_array.c 				struct assoc_array_ptr *p = child->slots[i];
p                1593 lib/assoc_array.c 				if (!p)
p                1595 lib/assoc_array.c 				BUG_ON(assoc_array_ptr_is_meta(p));
p                1599 lib/assoc_array.c 				new_n->slots[next_slot++] = p;
p                 125 lib/bch.c      	const uint32_t *p;
p                 129 lib/bch.c      		p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(*data++)) & 0xff);
p                 132 lib/bch.c      			ecc[i] = ((ecc[i] << 8)|(ecc[i+1] >> 24))^(*p++);
p                 134 lib/bch.c      		ecc[l] = (ecc[l] << 8)^(*p);
p                 447 lib/bch.c      	int rem, c, r, p, k, param[BCH_MAX_M];
p                 455 lib/bch.c      		p = c-k;
p                 457 lib/bch.c      		for (r = p; r < m; r++) {
p                 459 lib/bch.c      				if (r != p) {
p                 461 lib/bch.c      					rows[r] = rows[p];
p                 462 lib/bch.c      					rows[p] = tmp;
p                 470 lib/bch.c      			tmp = rows[p];
p                 483 lib/bch.c      		p = k;
p                 489 lib/bch.c      			rows[r] = (p && (r == param[p-1])) ?
p                 490 lib/bch.c      				p--, 1u << (m-r) : rows[r-p];
p                 498 lib/bch.c      	for (p = 0; p < nsol; p++) {
p                 501 lib/bch.c      			rows[param[c]] = (rows[param[c]] & ~1)|((p >> c) & 1);
p                 509 lib/bch.c      		sol[p] = tmp >> 1;
p                 725 lib/bch.c      	int la, p, m;
p                 741 lib/bch.c      			p = j-d;
p                 742 lib/bch.c      			for (i = 0; i < d; i++, p++) {
p                 745 lib/bch.c      					c[p] ^= bch->a_pow_tab[mod_s(bch,
p                 925 lib/bch.c      			struct gf_poly *p, unsigned int *roots)
p                 932 lib/bch.c      	gf_poly_logrep(bch, p, bch->cache);
p                 933 lib/bch.c      	bch->cache[p->deg] = 0;
p                 934 lib/bch.c      	syn0 = gf_div(bch, p->c[0], p->c[p->deg]);
p                 938 lib/bch.c      		for (j = 1, syn = syn0; j <= p->deg; j++) {
p                 945 lib/bch.c      			if (count == p->deg)
p                 949 lib/bch.c      	return (count == p->deg) ? count : 0;
p                 272 lib/bitmap.c   	unsigned long *p = map + BIT_WORD(start);
p                 278 lib/bitmap.c   		*p |= mask_to_set;
p                 282 lib/bitmap.c   		p++;
p                 286 lib/bitmap.c   		*p |= mask_to_set;
p                 293 lib/bitmap.c   	unsigned long *p = map + BIT_WORD(start);
p                 299 lib/bitmap.c   		*p &= ~mask_to_clear;
p                 303 lib/bitmap.c   		p++;
p                 307 lib/bitmap.c   		*p &= ~mask_to_clear;
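The lib/bitmap.c lines above are the word-at-a-time masking loop shared by __bitmap_set() and __bitmap_clear(): a first-word mask, full ~0UL words in the middle, then a last-word mask. A self-contained sketch of the set side follows; the mask macros here are reconstructed from memory and should be read as assumptions, not the kernel's exact definitions.

#include <limits.h>

#define BITS_PER_LONG		(sizeof(unsigned long) * CHAR_BIT)
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define FIRST_WORD_MASK(start)	(~0UL << ((start) % BITS_PER_LONG))
#define LAST_WORD_MASK(nbits)	(~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))

static void bitmap_set_sketch(unsigned long *map, unsigned int start, int len)
{
	unsigned long *p = map + BIT_WORD(start);
	const unsigned int size = start + len;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = FIRST_WORD_MASK(start);

	while (len - bits_to_set >= 0) {	/* whole words */
		*p |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (len) {				/* trailing partial word */
		mask_to_set &= LAST_WORD_MASK(size);
		*p |= mask_to_set;
	}
}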
p                 118 lib/crc32.c    		u8 *p = (u8 *)(b + 1) - 1;
p                 121 lib/crc32.c    			DO_CRC(*++p); /* use pre increment for speed */
p                 124 lib/crc32.c    			DO_CRC(*++p); /* use pre increment for speed */
p                 146 lib/crc32.c    static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
p                 153 lib/crc32.c    		crc ^= *p++;
p                 159 lib/crc32.c    		crc ^= *p++;
p                 167 lib/crc32.c    		crc ^= *p++;
p                 174 lib/crc32.c    		crc ^= *p++;
p                 179 lib/crc32.c    	crc = crc32_body(crc, p, len, tab);
p                 186 lib/crc32.c    u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
p                 188 lib/crc32.c    	return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE);
p                 190 lib/crc32.c    u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
p                 192 lib/crc32.c    	return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE);
p                 195 lib/crc32.c    u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
p                 197 lib/crc32.c    	return crc32_le_generic(crc, p, len,
p                 200 lib/crc32.c    u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
p                 202 lib/crc32.c    	return crc32_le_generic(crc, p, len,
p                 294 lib/crc32.c    static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
p                 301 lib/crc32.c    		crc ^= *p++ << 24;
p                 309 lib/crc32.c    		crc ^= *p++ << 24;
p                 317 lib/crc32.c    		crc ^= *p++ << 24;
p                 323 lib/crc32.c    		crc ^= *p++ << 24;
p                 328 lib/crc32.c    	crc = crc32_body(crc, p, len, tab);
p                 335 lib/crc32.c    u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
p                 337 lib/crc32.c    	return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
p                 340 lib/crc32.c    u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
p                 342 lib/crc32.c    	return crc32_be_generic(crc, p, len,
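crc32_le_generic() above is the table-driven (or, depending on configuration, bit-serial) little-endian CRC over CRC32_POLY_LE. For reference, the bit-serial form it is equivalent to is sketched below; 0xedb88320 is the reflected CRC-32 polynomial, and typical callers seed with ~0 and post-invert the result themselves, since the kernel API leaves that to the caller.

#include <stddef.h>
#include <stdint.h>

static uint32_t crc32_le_ref(uint32_t crc, const unsigned char *p, size_t len)
{
	int i;

	while (len--) {
		crc ^= *p++;			/* fold in one byte */
		for (i = 0; i < 8; i++)		/* then 8 polynomial steps */
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}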
p                  43 lib/crc64.c    u64 __pure crc64_be(u64 crc, const void *p, size_t len)
p                  47 lib/crc64.c    	const unsigned char *_p = p;
p                 148 lib/decompress_unlzma.c static inline uint32_t INIT rc_is_bit_0_helper(struct rc *rc, uint16_t *p)
p                 151 lib/decompress_unlzma.c 	rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
p                 154 lib/decompress_unlzma.c static inline int INIT rc_is_bit_0(struct rc *rc, uint16_t *p)
p                 156 lib/decompress_unlzma.c 	uint32_t t = rc_is_bit_0_helper(rc, p);
p                 161 lib/decompress_unlzma.c static inline void INIT rc_update_bit_0(struct rc *rc, uint16_t *p)
p                 164 lib/decompress_unlzma.c 	*p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
p                 166 lib/decompress_unlzma.c static inline void INIT rc_update_bit_1(struct rc *rc, uint16_t *p)
p                 170 lib/decompress_unlzma.c 	*p -= *p >> RC_MOVE_BITS;
p                 174 lib/decompress_unlzma.c static int INIT rc_get_bit(struct rc *rc, uint16_t *p, int *symbol)
p                 176 lib/decompress_unlzma.c 	if (rc_is_bit_0(rc, p)) {
p                 177 lib/decompress_unlzma.c 		rc_update_bit_0(rc, p);
p                 181 lib/decompress_unlzma.c 		rc_update_bit_1(rc, p);
p                 201 lib/decompress_unlzma.c rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol)
p                 207 lib/decompress_unlzma.c 		rc_get_bit(rc, p + *symbol, symbol);
p                 348 lib/decompress_unlzma.c 				     struct cstate *cst, uint16_t *p,
p                 353 lib/decompress_unlzma.c 	prob = (p + LZMA_LITERAL +
p                 392 lib/decompress_unlzma.c 					    struct cstate *cst, uint16_t *p,
p                 400 lib/decompress_unlzma.c 	prob = p + LZMA_IS_REP + cst->state;
p                 407 lib/decompress_unlzma.c 		prob = p + LZMA_LEN_CODER;
p                 410 lib/decompress_unlzma.c 		prob = p + LZMA_IS_REP_G0 + cst->state;
p                 413 lib/decompress_unlzma.c 			prob = (p + LZMA_IS_REP_0_LONG
p                 430 lib/decompress_unlzma.c 			prob = p + LZMA_IS_REP_G1 + cst->state;
p                 436 lib/decompress_unlzma.c 				prob = p + LZMA_IS_REP_G2 + cst->state;
p                 451 lib/decompress_unlzma.c 		prob = p + LZMA_REP_LEN_CODER;
p                 489 lib/decompress_unlzma.c 			p + LZMA_POS_SLOT +
p                 503 lib/decompress_unlzma.c 				prob = p + LZMA_SPEC_POS +
p                 510 lib/decompress_unlzma.c 				prob = p + LZMA_ALIGN;
p                 549 lib/decompress_unlzma.c 	uint16_t *p;
p                 622 lib/decompress_unlzma.c 	p = (uint16_t *) large_malloc(num_probs * sizeof(*p));
p                 623 lib/decompress_unlzma.c 	if (p == NULL)
p                 627 lib/decompress_unlzma.c 		p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1;
p                 633 lib/decompress_unlzma.c 		uint16_t *prob = p + LZMA_IS_MATCH +
p                 636 lib/decompress_unlzma.c 			if (process_bit0(&wr, &rc, &cst, p, pos_state, prob,
p                 642 lib/decompress_unlzma.c 			if (process_bit1(&wr, &rc, &cst, p, pos_state, prob)) {
p                 658 lib/decompress_unlzma.c 	large_free(p);
p                  79 lib/digsig.c   	uint8_t *p, *datap;
p                 154 lib/digsig.c   	p = mpi_get_buffer(res, &l, NULL);
p                 155 lib/digsig.c   	if (!p) {
p                 163 lib/digsig.c   	memcpy(out1 + head, p, l);
p                 165 lib/digsig.c   	kfree(p);
p                  94 lib/dynamic_debug.c 	char *p = buf;
p                 100 lib/dynamic_debug.c 			*p++ = opt_array[i].opt_char;
p                 101 lib/dynamic_debug.c 	if (p == buf)
p                 102 lib/dynamic_debug.c 		*p++ = '_';
p                 103 lib/dynamic_debug.c 	*p = '\0';
p                 793 lib/dynamic_debug.c static void *ddebug_proc_next(struct seq_file *m, void *p, loff_t *pos)
p                 799 lib/dynamic_debug.c 		 m, p, (unsigned long long)*pos);
p                 801 lib/dynamic_debug.c 	if (p == SEQ_START_TOKEN)
p                 815 lib/dynamic_debug.c static int ddebug_proc_show(struct seq_file *m, void *p)
p                 818 lib/dynamic_debug.c 	struct _ddebug *dp = p;
p                 821 lib/dynamic_debug.c 	vpr_info("called m=%p p=%p\n", m, p);
p                 823 lib/dynamic_debug.c 	if (p == SEQ_START_TOKEN) {
p                 843 lib/dynamic_debug.c static void ddebug_proc_stop(struct seq_file *m, void *p)
p                 845 lib/dynamic_debug.c 	vpr_info("called m=%p p=%p\n", m, p);
p                  64 lib/earlycpio.c 	const char *p, *dptr, *nptr;
p                  70 lib/earlycpio.c 	p = data;
p                  73 lib/earlycpio.c 		if (!*p) {
p                  75 lib/earlycpio.c 			p += 4;
p                  86 lib/earlycpio.c 				c = *p++;
p                 111 lib/earlycpio.c 		dptr = PTR_ALIGN(p + ch[C_NAMESIZE], 4);
p                 114 lib/earlycpio.c 		if (nptr > p + len || dptr < p || nptr < dptr)
p                 119 lib/earlycpio.c 		    !memcmp(p, path, mypathsize)) {
p                 127 lib/earlycpio.c 				p, MAX_CPIO_FILE_NAME);
p                 129 lib/earlycpio.c 			strlcpy(cd.name, p + mypathsize, MAX_CPIO_FILE_NAME);
p                 135 lib/earlycpio.c 		len -= (nptr - p);
p                 136 lib/earlycpio.c 		p = nptr;
p                  38 lib/flex_proportions.c int fprop_global_init(struct fprop_global *p, gfp_t gfp)
p                  42 lib/flex_proportions.c 	p->period = 0;
p                  44 lib/flex_proportions.c 	err = percpu_counter_init(&p->events, 1, gfp);
p                  47 lib/flex_proportions.c 	seqcount_init(&p->sequence);
p                  51 lib/flex_proportions.c void fprop_global_destroy(struct fprop_global *p)
p                  53 lib/flex_proportions.c 	percpu_counter_destroy(&p->events);
p                  64 lib/flex_proportions.c bool fprop_new_period(struct fprop_global *p, int periods)
p                  70 lib/flex_proportions.c 	events = percpu_counter_sum(&p->events);
p                  78 lib/flex_proportions.c 	write_seqcount_begin(&p->sequence);
p                  82 lib/flex_proportions.c 	percpu_counter_add(&p->events, -events);
p                  83 lib/flex_proportions.c 	p->period += periods;
p                  84 lib/flex_proportions.c 	write_seqcount_end(&p->sequence);
p                 106 lib/flex_proportions.c static void fprop_reflect_period_single(struct fprop_global *p,
p                 109 lib/flex_proportions.c 	unsigned int period = p->period;
p                 131 lib/flex_proportions.c void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
p                 133 lib/flex_proportions.c 	fprop_reflect_period_single(p, pl);
p                 135 lib/flex_proportions.c 	percpu_counter_add(&p->events, 1);
p                 139 lib/flex_proportions.c void fprop_fraction_single(struct fprop_global *p,
p                 147 lib/flex_proportions.c 		seq = read_seqcount_begin(&p->sequence);
p                 148 lib/flex_proportions.c 		fprop_reflect_period_single(p, pl);
p                 150 lib/flex_proportions.c 		den = percpu_counter_read_positive(&p->events);
p                 151 lib/flex_proportions.c 	} while (read_seqcount_retry(&p->sequence, seq));
p                 189 lib/flex_proportions.c static void fprop_reflect_period_percpu(struct fprop_global *p,
p                 192 lib/flex_proportions.c 	unsigned int period = p->period;
p                 220 lib/flex_proportions.c void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
p                 222 lib/flex_proportions.c 	fprop_reflect_period_percpu(p, pl);
p                 224 lib/flex_proportions.c 	percpu_counter_add(&p->events, 1);
p                 227 lib/flex_proportions.c void fprop_fraction_percpu(struct fprop_global *p,
p                 235 lib/flex_proportions.c 		seq = read_seqcount_begin(&p->sequence);
p                 236 lib/flex_proportions.c 		fprop_reflect_period_percpu(p, pl);
p                 238 lib/flex_proportions.c 		den = percpu_counter_read_positive(&p->events);
p                 239 lib/flex_proportions.c 	} while (read_seqcount_retry(&p->sequence, seq));
p                 259 lib/flex_proportions.c void __fprop_inc_percpu_max(struct fprop_global *p,
p                 265 lib/flex_proportions.c 		fprop_fraction_percpu(p, pl, &numerator, &denominator);
p                 270 lib/flex_proportions.c 		fprop_reflect_period_percpu(p, pl);
p                 272 lib/flex_proportions.c 	percpu_counter_add(&p->events, 1);
p                  86 lib/genalloc.c 	unsigned long *p = map + BIT_WORD(start);
p                  92 lib/genalloc.c 		if (set_bits_ll(p, mask_to_set))
p                  97 lib/genalloc.c 		p++;
p                 101 lib/genalloc.c 		if (set_bits_ll(p, mask_to_set))
p                 121 lib/genalloc.c 	unsigned long *p = map + BIT_WORD(start);
p                 127 lib/genalloc.c 		if (clear_bits_ll(p, mask_to_clear))
p                 132 lib/genalloc.c 		p++;
p                 136 lib/genalloc.c 		if (clear_bits_ll(p, mask_to_clear))
p                 779 lib/genalloc.c 	struct gen_pool **p = res;
p                 782 lib/genalloc.c 	if (!data && !(*p)->name)
p                 785 lib/genalloc.c 	if (!data || !(*p)->name)
p                 788 lib/genalloc.c 	return !strcmp((*p)->name, data);
p                 800 lib/genalloc.c 	struct gen_pool **p;
p                 802 lib/genalloc.c 	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
p                 804 lib/genalloc.c 	if (!p)
p                 806 lib/genalloc.c 	return *p;
p                 138 lib/generic-radix-tree.c 		struct genradix_node **p =
p                 142 lib/generic-radix-tree.c 		n = READ_ONCE(*p);
p                 150 lib/generic-radix-tree.c 			if (!(n = cmpxchg_release(p, NULL, new_node)))
p                 133 lib/globtest.c 	char const *p = glob_tests;
p                 143 lib/globtest.c 	while (*p) {
p                 144 lib/globtest.c 		bool expected = *p++ & 1;
p                 145 lib/globtest.c 		char const *pat = p;
p                 147 lib/globtest.c 		p += strlen(p) + 1;
p                 148 lib/globtest.c 		successes += test(pat, p, expected);
p                 149 lib/globtest.c 		p += strlen(p) + 1;
p                 196 lib/idr.c      		int (*fn)(int id, void *p, void *data), void *data)
p                 247 lib/inflate.c         void *p;
p                 256 lib/inflate.c         p = (void *)malloc_ptr;
p                 263 lib/inflate.c         return p;
p                 345 lib/inflate.c    register unsigned *p;         /* pointer into c[], b[], or v[] */
p                 375 lib/inflate.c    p = b;  i = n;
p                 377 lib/inflate.c      Tracecv(*p, (stderr, (n-i >= ' ' && n-i <= '~' ? "%c %d\n" : "0x%x %d\n"), 
p                 378 lib/inflate.c  	    n-i, *p));
p                 379 lib/inflate.c      c[*p]++;                    /* assume all entries <= BMAX */
p                 380 lib/inflate.c      p++;                      /* Can't combine with above line (Solaris bug) */
p                 426 lib/inflate.c    p = c + 1;  xp = x + 2;
p                 428 lib/inflate.c      *xp++ = (j += *p++);
p                 434 lib/inflate.c    p = b;  i = 0;
p                 436 lib/inflate.c      if ((j = *p++) != 0)
p                 445 lib/inflate.c    p = v;                        /* grab values in bit order */
p                 519 lib/inflate.c        if (p >= v + n)
p                 521 lib/inflate.c        else if (*p < s)
p                 523 lib/inflate.c          r.e = (uch)(*p < 256 ? 16 : 15);    /* 256 is end-of-block code */
p                 524 lib/inflate.c          r.v.n = (ush)(*p);             /* simple code is just the value */
p                 525 lib/inflate.c  	p++;                           /* one compiler does not like *p++ */
p                 529 lib/inflate.c          r.e = (uch)e[*p - s];   /* non-simple--look up in lists */
p                 530 lib/inflate.c          r.v.n = d[*p++ - s];
p                 574 lib/inflate.c    register struct huft *p, *q;
p                 578 lib/inflate.c    p = t;
p                 579 lib/inflate.c    while (p != (struct huft *)NULL)
p                 581 lib/inflate.c      q = (--p)->v.t;
p                 582 lib/inflate.c      free((char*)p);
p                 583 lib/inflate.c      p = q;
p                1157 lib/inflate.c    static const int p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26};
p                1161 lib/inflate.c    for (i = 0; i < sizeof(p)/sizeof(int); i++)
p                1162 lib/inflate.c      e |= 1L << (31 - p[i]);
p                 331 lib/iov_iter.c 		struct pipe_buffer *p;
p                 337 lib/iov_iter.c 		p = &pipe->bufs[idx];
p                 338 lib/iov_iter.c 		if (unlikely(p->offset + p->len != i->iov_offset))
p                 589 lib/iov_iter.c 		char *p = kmap_atomic(pipe->bufs[idx].page);
p                 590 lib/iov_iter.c 		sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
p                 591 lib/iov_iter.c 		kunmap_atomic(p);
p                 966 lib/iov_iter.c 	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
p                 977 lib/iov_iter.c 		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
p                 978 lib/iov_iter.c 		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
p                 980 lib/iov_iter.c 		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
p                1325 lib/iov_iter.c 	struct page **p;
p                1344 lib/iov_iter.c 	p = get_pages_array(npages);
p                1345 lib/iov_iter.c 	if (!p)
p                1347 lib/iov_iter.c 	n = __pipe_get_pages(i, maxsize, p, idx, start);
p                1349 lib/iov_iter.c 		*pages = p;
p                1351 lib/iov_iter.c 		kvfree(p);
p                1359 lib/iov_iter.c 	struct page **p;
p                1377 lib/iov_iter.c 		p = get_pages_array(n);
p                1378 lib/iov_iter.c 		if (!p)
p                1381 lib/iov_iter.c 				iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0, p);
p                1383 lib/iov_iter.c 			kvfree(p);
p                1386 lib/iov_iter.c 		*pages = p;
p                1391 lib/iov_iter.c 		*pages = p = get_pages_array(1);
p                1392 lib/iov_iter.c 		if (!p)
p                1394 lib/iov_iter.c 		get_page(*p = v.bv_page);
p                1426 lib/iov_iter.c 		char *p = kmap_atomic(v.bv_page);
p                1428 lib/iov_iter.c 				      p + v.bv_offset, v.bv_len,
p                1430 lib/iov_iter.c 		kunmap_atomic(p);
p                1468 lib/iov_iter.c 		char *p = kmap_atomic(v.bv_page);
p                1470 lib/iov_iter.c 				      p + v.bv_offset, v.bv_len,
p                1472 lib/iov_iter.c 		kunmap_atomic(p);
p                1514 lib/iov_iter.c 		char *p = kmap_atomic(v.bv_page);
p                1515 lib/iov_iter.c 		sum = csum_and_memcpy(p + v.bv_offset,
p                1518 lib/iov_iter.c 		kunmap_atomic(p);
p                1575 lib/iov_iter.c 		unsigned long p = (unsigned long)v.iov_base;
p                1576 lib/iov_iter.c 		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
p                1577 lib/iov_iter.c 			- p / PAGE_SIZE;
p                1585 lib/iov_iter.c 		unsigned long p = (unsigned long)v.iov_base;
p                1586 lib/iov_iter.c 		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
p                1587 lib/iov_iter.c 			- p / PAGE_SIZE;
p                1644 lib/iov_iter.c 	struct iovec *p;
p                1646 lib/iov_iter.c 				  *iov, &p);
p                1648 lib/iov_iter.c 		if (p != *iov)
p                1649 lib/iov_iter.c 			kfree(p);
p                1653 lib/iov_iter.c 	iov_iter_init(i, type, p, nr_segs, n);
p                1654 lib/iov_iter.c 	*iov = p == *iov ? NULL : p;
p                1668 lib/iov_iter.c 	struct iovec *p;
p                1670 lib/iov_iter.c 				  *iov, &p);
p                1672 lib/iov_iter.c 		if (p != *iov)
p                1673 lib/iov_iter.c 			kfree(p);
p                1677 lib/iov_iter.c 	iov_iter_init(i, type, p, nr_segs, n);
p                1678 lib/iov_iter.c 	*iov = p == *iov ? NULL : p;
p                  19 lib/is_single_threaded.c 	struct task_struct *p, *t;
p                  30 lib/is_single_threaded.c 	for_each_process(p) {
p                  31 lib/is_single_threaded.c 		if (unlikely(p->flags & PF_KTHREAD))
p                  33 lib/is_single_threaded.c 		if (unlikely(p == task->group_leader))
p                  36 lib/is_single_threaded.c 		for_each_thread(p, t) {
p                  18 lib/kasprintf.c 	char *p;
p                  25 lib/kasprintf.c 	p = kmalloc_track_caller(first+1, gfp);
p                  26 lib/kasprintf.c 	if (!p)
p                  29 lib/kasprintf.c 	second = vsnprintf(p, first+1, fmt, ap);
p                  33 lib/kasprintf.c 	return p;
p                  56 lib/kasprintf.c 	char *p;
p                  59 lib/kasprintf.c 	p = kvasprintf(gfp, fmt, ap);
p                  62 lib/kasprintf.c 	return p;
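The lib/kasprintf.c lines above are the classic two-pass vsnprintf pattern: measure, allocate first+1 bytes, format again into the buffer. A userspace sketch of the same idea (malloc/va_copy instead of kmalloc_track_caller, and returning NULL rather than warning if the two passes disagree) might look like:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static char *vasprintf_sketch(const char *fmt, va_list ap)
{
	va_list aq;
	int first, second;
	char *p;

	va_copy(aq, ap);			/* first pass only measures */
	first = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);
	if (first < 0)
		return NULL;

	p = malloc(first + 1);
	if (!p)
		return NULL;

	second = vsnprintf(p, first + 1, fmt, ap);
	if (second < 0 || second != first) {	/* should not happen */
		free(p);
		return NULL;
	}
	return p;
}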
p                  48 lib/kstrtox.c  unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
p                  81 lib/kstrtox.c  	*p = res;
p                 134 lib/lru_cache.c 		void *p = kmem_cache_alloc(cache, GFP_KERNEL);
p                 135 lib/lru_cache.c 		if (!p)
p                 137 lib/lru_cache.c 		memset(p, 0, lc->element_size);
p                 138 lib/lru_cache.c 		e = p + e_off;
p                 150 lib/lru_cache.c 		void *p = element[i];
p                 151 lib/lru_cache.c 		kmem_cache_free(cache, p - e_off);
p                 162 lib/lru_cache.c 	void *p = lc->lc_element[i];
p                 163 lib/lru_cache.c 	WARN_ON(!p);
p                 164 lib/lru_cache.c 	if (p) {
p                 165 lib/lru_cache.c 		p -= lc->element_off;
p                 166 lib/lru_cache.c 		kmem_cache_free(lc->lc_cache, p);
p                 213 lib/lru_cache.c 		void *p = e;
p                 214 lib/lru_cache.c 		p -= lc->element_off;
p                 215 lib/lru_cache.c 		memset(p, 0, lc->element_size);
p                  80 lib/lz4/lz4_compress.c 	const void *p,
p                  85 lib/lz4/lz4_compress.c 		return LZ4_hash5(LZ4_read_ARCH(p), tableType);
p                  88 lib/lz4/lz4_compress.c 	return LZ4_hash4(LZ4_read32(p), tableType);
p                  92 lib/lz4/lz4_compress.c 	const BYTE *p,
p                 103 lib/lz4/lz4_compress.c 		hashTable[h] = p;
p                 110 lib/lz4/lz4_compress.c 		hashTable[h] = (U32)(p - srcBase);
p                 117 lib/lz4/lz4_compress.c 		hashTable[h] = (U16)(p - srcBase);
p                 124 lib/lz4/lz4_compress.c 	const BYTE *p,
p                 129 lib/lz4/lz4_compress.c 	U32 const h = LZ4_hashPosition(p, tableType);
p                 131 lib/lz4/lz4_compress.c 	LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
p                 161 lib/lz4/lz4_compress.c 	const BYTE *p,
p                 166 lib/lz4/lz4_compress.c 	U32 const h = LZ4_hashPosition(p, tableType);
p                 779 lib/lz4/lz4_compress.c 	const BYTE *p = (const BYTE *)dictionary;
p                 780 lib/lz4/lz4_compress.c 	const BYTE * const dictEnd = p + dictSize;
p                 795 lib/lz4/lz4_compress.c 	if ((dictEnd - p) > 64 * KB)
p                 796 lib/lz4/lz4_compress.c 		p = dictEnd - 64 * KB;
p                 798 lib/lz4/lz4_compress.c 	base = p - dict->currentOffset;
p                 799 lib/lz4/lz4_compress.c 	dict->dictionary = p;
p                 800 lib/lz4/lz4_compress.c 	dict->dictSize = (U32)(dictEnd - p);
p                 803 lib/lz4/lz4_compress.c 	while (p <= dictEnd - HASH_UNIT) {
p                 804 lib/lz4/lz4_compress.c 		LZ4_putPosition(p, dict->hashTable, byU32, base);
p                 805 lib/lz4/lz4_compress.c 		p += 3;
p                  51 lib/lz4/lz4hc_compress.c #define DELTANEXTU16(p)	chainTable[(U16)(p)] /* faster */
p                  91 lib/math/prime_numbers.c 				     unsigned long *p,
p                 102 lib/math/prime_numbers.c 		__clear_bit(m, p);
p                 111 lib/math/prime_numbers.c 	const struct primes *p;
p                 134 lib/math/prime_numbers.c 	p = rcu_dereference_protected(primes, lockdep_is_held(&lock));
p                 135 lib/math/prime_numbers.c 	if (x < p->last) {
p                 145 lib/math/prime_numbers.c 	bitmap_copy(new->primes, p->primes, p->sz);
p                 147 lib/math/prime_numbers.c 		new->last = clear_multiples(y, new->primes, p->sz, sz);
p                 153 lib/math/prime_numbers.c 	if (p != &small_primes)
p                 154 lib/math/prime_numbers.c 		kfree_rcu((struct primes *)p, rcu);
p                 163 lib/math/prime_numbers.c 	const struct primes *p;
p                 166 lib/math/prime_numbers.c 	p = rcu_dereference_protected(primes, lockdep_is_held(&lock));
p                 167 lib/math/prime_numbers.c 	if (p != &small_primes) {
p                 169 lib/math/prime_numbers.c 		kfree_rcu((struct primes *)p, rcu);
p                 190 lib/math/prime_numbers.c 	const struct primes *p;
p                 193 lib/math/prime_numbers.c 	p = rcu_dereference(primes);
p                 194 lib/math/prime_numbers.c 	while (x >= p->last) {
p                 201 lib/math/prime_numbers.c 		p = rcu_dereference(primes);
p                 203 lib/math/prime_numbers.c 	x = find_next_bit(p->primes, p->last, x + 1);
p                 223 lib/math/prime_numbers.c 	const struct primes *p;
p                 227 lib/math/prime_numbers.c 	p = rcu_dereference(primes);
p                 228 lib/math/prime_numbers.c 	while (x >= p->sz) {
p                 235 lib/math/prime_numbers.c 		p = rcu_dereference(primes);
p                 237 lib/math/prime_numbers.c 	result = test_bit(x, p->primes);
p                 246 lib/math/prime_numbers.c 	const struct primes *p;
p                 252 lib/math/prime_numbers.c 	p = rcu_dereference(primes);
p                 255 lib/math/prime_numbers.c 		bitmap_print_to_pagebuf(true, buf, p->primes, p->sz);
p                 257 lib/math/prime_numbers.c 		p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf);
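lib/math/prime_numbers.c above keeps a prime bitmap that is grown lazily under RCU, with clear_multiples() sieving only the newly appended range. For contrast, a plain one-shot Sieve of Eratosthenes, of which the kernel code is a lazily extended variant (this is only an illustration, not the kernel code):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static bool *sieve_up_to(unsigned long n)
{
	bool *is_prime = malloc(n + 1);
	unsigned long i, m;

	if (!is_prime)
		return NULL;
	memset(is_prime, 1, n + 1);		/* assume everything is prime */
	is_prime[0] = false;
	if (n >= 1)
		is_prime[1] = false;

	for (i = 2; i * i <= n; i++)
		if (is_prime[i])
			for (m = i * i; m <= n; m += i)	/* clear multiples */
				is_prime[m] = false;

	return is_prime;
}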
p                  12 lib/memcat_p.c 	void **p = a, **new;
p                  16 lib/memcat_p.c 	for (nr = 0, p = a; *p; nr++, p++)
p                  18 lib/memcat_p.c 	for (p = b; *p; nr++, p++)
p                  28 lib/memcat_p.c 	for (nr--; nr >= 0; nr--, p = p == b ? &a[nr] : p - 1)
p                  29 lib/memcat_p.c 		new[nr] = *p;
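The lib/memcat_p.c lines above merge two NULL-terminated arrays of pointers into one freshly allocated, NULL-terminated array, counting first and then copying backwards. A forward-copying userspace sketch of the same contract (calloc instead of kmalloc_array) is:

#include <stdlib.h>

static void **memcat_p_sketch(void **a, void **b)
{
	void **new, **p;
	size_t nr = 0, i = 0;

	for (p = a; *p; p++)			/* count both arrays */
		nr++;
	for (p = b; *p; p++)
		nr++;

	new = calloc(nr + 1, sizeof(void *));	/* +1 for the NULL sentinel */
	if (!new)
		return NULL;

	for (p = a; *p; p++)
		new[i++] = *p;
	for (p = b; *p; p++)
		new[i++] = *p;
	/* new[nr] is already NULL thanks to calloc */

	return new;
}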
p                 146 lib/mpi/mpicoder.c 	uint8_t *p;
p                 170 lib/mpi/mpicoder.c 	p = buf;
p                 183 lib/mpi/mpicoder.c 		memcpy(p, (u8 *)&alimb + lzeros, BYTES_PER_MPI_LIMB - lzeros);
p                 184 lib/mpi/mpicoder.c 		p += BYTES_PER_MPI_LIMB - lzeros;
p                 250 lib/mpi/mpicoder.c 	u8 *p, *p2;
p                 300 lib/mpi/mpicoder.c 		p = (u8 *)&alimb;
p                 303 lib/mpi/mpicoder.c 			*p2++ = *p++;
p                  88 lib/mpi/mpiutil.c 	void *p;
p                  94 lib/mpi/mpiutil.c 		p = kmalloc_array(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
p                  95 lib/mpi/mpiutil.c 		if (!p)
p                  97 lib/mpi/mpiutil.c 		memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
p                  99 lib/mpi/mpiutil.c 		a->d = p;
p                 440 lib/nlattr.c   nla_policy_len(const struct nla_policy *p, int n)
p                 444 lib/nlattr.c   	for (i = 0; i < n; i++, p++) {
p                 445 lib/nlattr.c   		if (p->len)
p                 446 lib/nlattr.c   			len += nla_total_size(p->len);
p                 447 lib/nlattr.c   		else if (nla_attr_len[p->type])
p                 448 lib/nlattr.c   			len += nla_total_size(nla_attr_len[p->type]);
p                 449 lib/nlattr.c   		else if (nla_attr_minlen[p->type])
p                 450 lib/nlattr.c   			len += nla_total_size(nla_attr_minlen[p->type]);
p                  28 lib/notifier-error-inject.c 				unsigned long val, void *p)
p                 918 lib/objagg.c   	ptr += ht->p.key_offset;
p                 920 lib/objagg.c   				    memcmp(ptr, arg->key, ht->p.key_len);
p                  24 lib/parser.c   static int match_one(char *s, const char *p, substring_t args[])
p                  29 lib/parser.c   	if (!p)
p                  34 lib/parser.c   		meta = strchr(p, '%');
p                  36 lib/parser.c   			return strcmp(p, s) == 0;
p                  38 lib/parser.c   		if (strncmp(p, s, meta-p))
p                  41 lib/parser.c   		s += meta - p;
p                  42 lib/parser.c   		p = meta + 1;
p                  44 lib/parser.c   		if (isdigit(*p))
p                  45 lib/parser.c   			len = simple_strtoul(p, (char **) &p, 10);
p                  46 lib/parser.c   		else if (*p == '%') {
p                  49 lib/parser.c   			p++;
p                  57 lib/parser.c   		switch (*p++) {
p                 107 lib/parser.c   	const struct match_token *p;
p                 109 lib/parser.c   	for (p = table; !match_one(s, p->pattern, args) ; p++)
p                 112 lib/parser.c   	return p->token;
p                 252 lib/parser.c   	const char *p = pattern;
p                 256 lib/parser.c   		switch (*p) {
p                 259 lib/parser.c   			p++;
p                 264 lib/parser.c   			if (!*++p)
p                 266 lib/parser.c   			pattern = p;
p                 269 lib/parser.c   			if (*s == *p) {
p                 271 lib/parser.c   				p++;
p                 277 lib/parser.c   				p = pattern;
p                 283 lib/parser.c   	if (*p == '*')
p                 284 lib/parser.c   		++p;
p                 285 lib/parser.c   	return !*p;
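The tail of lib/parser.c above is a greedy wildcard matcher: on a mismatch it backs up to the pattern segment following the last '*' and retries one character further into the string, and a trailing '*' matches the empty remainder. A self-contained sketch of that algorithm, using explicit retry pointers instead of the pattern/str rewinding in the original (and collapsing any run of trailing stars):

#include <stdbool.h>

static bool glob_match_sketch(const char *pattern, const char *str)
{
	const char *p = pattern, *s = str;
	const char *retry_p = NULL, *retry_s = NULL;

	while (*s) {
		if (*p == '*') {
			retry_p = ++p;		/* segment after the star */
			retry_s = s;		/* star matches "" for now */
			if (!*p)
				return true;	/* trailing star matches rest */
		} else if (*p == '?' || *p == *s) {	/* '?' matches any char */
			p++;
			s++;
		} else if (retry_p) {
			p = retry_p;		/* let the star eat one more */
			s = ++retry_s;
		} else {
			return false;
		}
	}
	while (*p == '*')			/* stars may match "" */
		p++;
	return !*p;
}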
p                  32 lib/plist.c    static void plist_check_prev_next(struct list_head *t, struct list_head *p,
p                  35 lib/plist.c    	WARN(n->prev != p || p->next != n,
p                  40 lib/plist.c    			p, p->next, p->prev,
p                  39 lib/raid6/avx2.c 	u8 *p, *q;
p                  43 lib/raid6/avx2.c 	p = dptr[z0+1];		/* XOR parity */
p                  74 lib/raid6/avx2.c 		asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
p                  88 lib/raid6/avx2.c 	u8 *p, *q;
p                  92 lib/raid6/avx2.c 	p = dptr[disks-2];	/* XOR parity */
p                 101 lib/raid6/avx2.c 		asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
p                 125 lib/raid6/avx2.c 		asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d]));
p                 146 lib/raid6/avx2.c 	u8 *p, *q;
p                 150 lib/raid6/avx2.c 	p = dptr[z0+1];		/* XOR parity */
p                 184 lib/raid6/avx2.c 		asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
p                 185 lib/raid6/avx2.c 		asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
p                 198 lib/raid6/avx2.c 	u8 *p, *q;
p                 202 lib/raid6/avx2.c 	p = dptr[disks-2];	/* XOR parity */
p                 212 lib/raid6/avx2.c 		asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
p                 213 lib/raid6/avx2.c 		asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
p                 254 lib/raid6/avx2.c 		asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d]));
p                 255 lib/raid6/avx2.c 		asm volatile("vmovdqa %%ymm3,%0" : "=m" (p[d+32]));
p                 278 lib/raid6/avx2.c 	u8 *p, *q;
p                 282 lib/raid6/avx2.c 	p = dptr[z0+1];		/* XOR parity */
p                 333 lib/raid6/avx2.c 		asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
p                 335 lib/raid6/avx2.c 		asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
p                 337 lib/raid6/avx2.c 		asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64]));
p                 339 lib/raid6/avx2.c 		asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96]));
p                 359 lib/raid6/avx2.c 	u8 *p, *q;
p                 363 lib/raid6/avx2.c 	p = dptr[disks-2];	/* XOR parity */
p                 375 lib/raid6/avx2.c 		asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
p                 376 lib/raid6/avx2.c 		asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
p                 377 lib/raid6/avx2.c 		asm volatile("vmovdqa %0,%%ymm10" : : "m" (p[d+64]));
p                 378 lib/raid6/avx2.c 		asm volatile("vmovdqa %0,%%ymm11" : : "m" (p[d+96]));
p                 448 lib/raid6/avx2.c 		asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
p                 449 lib/raid6/avx2.c 		asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
p                 450 lib/raid6/avx2.c 		asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64]));
p                 451 lib/raid6/avx2.c 		asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96]));
p                  47 lib/raid6/avx512.c 	u8 *p, *q;
p                  51 lib/raid6/avx512.c 	p = dptr[z0+1];         /* XOR parity */
p                  94 lib/raid6/avx512.c 			     : "m" (p[d]), "m" (q[d]));
p                 105 lib/raid6/avx512.c 	u8 *p, *q;
p                 109 lib/raid6/avx512.c 	p = dptr[disks-2];	/* XOR parity */
p                 122 lib/raid6/avx512.c 			     : "m" (dptr[z0][d]),  "m" (p[d]));
p                 153 lib/raid6/avx512.c 			     : "m" (q[d]), "m" (p[d]));
p                 174 lib/raid6/avx512.c 	u8 *p, *q;
p                 178 lib/raid6/avx512.c 	p = dptr[z0+1];         /* XOR parity */
p                 225 lib/raid6/avx512.c 			     : "m" (p[d]), "m" (p[d+64]), "m" (q[d]),
p                 237 lib/raid6/avx512.c 	u8 *p, *q;
p                 241 lib/raid6/avx512.c 	p = dptr[disks-2];	/* XOR parity */
p                 258 lib/raid6/avx512.c 			       "m" (p[d]), "m" (p[d+64]));
p                 309 lib/raid6/avx512.c 			     : "m" (q[d]), "m" (q[d+64]), "m" (p[d]),
p                 310 lib/raid6/avx512.c 			       "m" (p[d+64]));
p                 333 lib/raid6/avx512.c 	u8 *p, *q;
p                 337 lib/raid6/avx512.c 	p = dptr[z0+1];         /* XOR parity */
p                 414 lib/raid6/avx512.c 			     : "m" (p[d]), "m" (p[d+64]), "m" (p[d+128]),
p                 415 lib/raid6/avx512.c 			       "m" (p[d+192]), "m" (q[d]), "m" (q[d+64]),
p                 427 lib/raid6/avx512.c 	u8 *p, *q;
p                 431 lib/raid6/avx512.c 	p = dptr[disks-2];	/* XOR parity */
p                 455 lib/raid6/avx512.c 			       "m" (p[d]), "m" (p[d+64]), "m" (p[d+128]),
p                 456 lib/raid6/avx512.c 			       "m" (p[d+192]));
p                 548 lib/raid6/avx512.c 			     : "m" (p[d]),  "m" (p[d+64]), "m" (p[d+128]),
p                 549 lib/raid6/avx512.c 			       "m" (p[d+192]), "m" (q[d]),  "m" (q[d+64]),
p                  38 lib/raid6/mmx.c 	u8 *p, *q;
p                  42 lib/raid6/mmx.c 	p = dptr[z0+1];		/* XOR parity */
p                  63 lib/raid6/mmx.c 		asm volatile("movq %%mm2,%0" : "=m" (p[d]));
p                  86 lib/raid6/mmx.c 	u8 *p, *q;
p                  90 lib/raid6/mmx.c 	p = dptr[z0+1];		/* XOR parity */
p                 122 lib/raid6/mmx.c 		asm volatile("movq %%mm2,%0" : "=m" (p[d]));
p                 123 lib/raid6/mmx.c 		asm volatile("movq %%mm3,%0" : "=m" (p[d+8]));
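The raid6 mmx/sse/avx files above all compute the same P/Q syndrome, just 8, 16, 32 or 64 bytes per step: P is the XOR of all data blocks, and Q accumulates g^z * D_z by repeatedly multiplying the running value by {02} (shift left, XOR in the 0x1d reduction when the high bit falls out). A scalar, one-byte-at-a-time sketch of that recurrence:

#include <stddef.h>
#include <stdint.h>

static void gen_syndrome_sketch(int disks, size_t bytes, uint8_t **ptrs)
{
	uint8_t *p = ptrs[disks - 2];	/* P parity block */
	uint8_t *q = ptrs[disks - 1];	/* Q parity block */
	int z0 = disks - 3;		/* highest data disk */
	size_t d;
	int z;

	for (d = 0; d < bytes; d++) {
		uint8_t wp = ptrs[z0][d];
		uint8_t wq = wp;

		for (z = z0 - 1; z >= 0; z--) {
			wp ^= ptrs[z][d];
			/* wq = {02} * wq in GF(2^8), then add the next block */
			wq = (uint8_t)((wq << 1) ^ ((wq & 0x80) ? 0x1d : 0));
			wq ^= ptrs[z][d];
		}
		p[d] = wp;
		q[d] = wq;
	}
}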
p                  23 lib/raid6/recov.c 	u8 *p, *q, *dp, *dq;
p                  28 lib/raid6/recov.c 	p = (u8 *)ptrs[disks-2];
p                  46 lib/raid6/recov.c 	ptrs[disks-2] = p;
p                  55 lib/raid6/recov.c 		px    = *p ^ *dp;
p                  59 lib/raid6/recov.c 		p++; q++;
p                  67 lib/raid6/recov.c 	u8 *p, *q, *dq;
p                  70 lib/raid6/recov.c 	p = (u8 *)ptrs[disks-2];
p                  90 lib/raid6/recov.c 		*p++ ^= *dq = qmul[*q ^ *dq];
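The pbmul/qmul tables used by the recov*.c variants above and below are lookup tables for multiplication in GF(2^8) with the RAID-6 reduction polynomial 0x11d, selecting constant multipliers derived from the failed-disk positions. A bit-serial reference multiply for that field, from which such tables can be generated:

#include <stdint.h>

static uint8_t gf256_mul(uint8_t a, uint8_t b)
{
	uint8_t v = 0;

	while (b) {
		if (b & 1)
			v ^= a;			/* add a * (current bit of b) */
		/* a *= x, reducing mod x^8 + x^4 + x^3 + x^2 + 1 */
		a = (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1d : 0));
		b >>= 1;
	}
	return v;
}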
p                  21 lib/raid6/recov_avx2.c 	u8 *p, *q, *dp, *dq;
p                  26 lib/raid6/recov_avx2.c 	p = (u8 *)ptrs[disks-2];
p                  44 lib/raid6/recov_avx2.c 	ptrs[disks-2] = p;
p                  61 lib/raid6/recov_avx2.c 		asm volatile("vmovdqa %0, %%ymm0" : : "m" (p[0]));
p                  62 lib/raid6/recov_avx2.c 		asm volatile("vmovdqa %0, %%ymm8" : : "m" (p[32]));
p                 131 lib/raid6/recov_avx2.c 		p += 64;
p                 137 lib/raid6/recov_avx2.c 		asm volatile("vmovdqa %0, %%ymm0" : : "m" (*p));
p                 178 lib/raid6/recov_avx2.c 		p += 32;
p                 191 lib/raid6/recov_avx2.c 	u8 *p, *q, *dq;
p                 195 lib/raid6/recov_avx2.c 	p = (u8 *)ptrs[disks-2];
p                 250 lib/raid6/recov_avx2.c 		asm volatile("vmovdqa %0, %%ymm2" : : "m" (p[0]));
p                 251 lib/raid6/recov_avx2.c 		asm volatile("vmovdqa %0, %%ymm12" : : "m" (p[32]));
p                 262 lib/raid6/recov_avx2.c 		asm volatile("vmovdqa %%ymm2, %0" : "=m" (p[0]));
p                 263 lib/raid6/recov_avx2.c 		asm volatile("vmovdqa %%ymm12,%0" : "=m" (p[32]));
p                 266 lib/raid6/recov_avx2.c 		p += 64;
p                 287 lib/raid6/recov_avx2.c 		asm volatile("vmovdqa %0, %%ymm2" : : "m" (p[0]));
p                 293 lib/raid6/recov_avx2.c 		asm volatile("vmovdqa %%ymm2, %0" : "=m" (p[0]));
p                 296 lib/raid6/recov_avx2.c 		p += 32;
p                  27 lib/raid6/recov_avx512.c 	u8 *p, *q, *dp, *dq;
p                  32 lib/raid6/recov_avx512.c 	p = (u8 *)ptrs[disks-2];
p                  53 lib/raid6/recov_avx512.c 	ptrs[disks-2] = p;
p                  77 lib/raid6/recov_avx512.c 			     : "m" (q[0]), "m" (q[64]), "m" (p[0]),
p                  78 lib/raid6/recov_avx512.c 			       "m" (p[64]), "m" (dq[0]), "m" (dq[64]),
p                 156 lib/raid6/recov_avx512.c 		p += 128;
p                 166 lib/raid6/recov_avx512.c 			     : "m" (*q), "m" (*p), "m"(*dq), "m" (*dp));
p                 217 lib/raid6/recov_avx512.c 		p += 64;
p                 230 lib/raid6/recov_avx512.c 	u8 *p, *q, *dq;
p                 234 lib/raid6/recov_avx512.c 	p = (u8 *)ptrs[disks-2];
p                 304 lib/raid6/recov_avx512.c 			     : "m" (p[0]), "m" (p[64]));
p                 316 lib/raid6/recov_avx512.c 			     : "m" (dq[0]), "m" (dq[64]), "m" (p[0]),
p                 317 lib/raid6/recov_avx512.c 			       "m" (p[64]));
p                 320 lib/raid6/recov_avx512.c 		p += 128;
p                 350 lib/raid6/recov_avx512.c 			     : "m" (p[0]));
p                 357 lib/raid6/recov_avx512.c 			     : "m" (dq[0]), "m" (p[0]));
p                 360 lib/raid6/recov_avx512.c 		p += 64;
p                  22 lib/raid6/recov_neon.c void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
p                  26 lib/raid6/recov_neon.c void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
p                  32 lib/raid6/recov_neon.c 	u8 *p, *q, *dp, *dq;
p                  36 lib/raid6/recov_neon.c 	p = (u8 *)ptrs[disks - 2];
p                  56 lib/raid6/recov_neon.c 	ptrs[disks - 2] = p;
p                  65 lib/raid6/recov_neon.c 	__raid6_2data_recov_neon(bytes, p, q, dp, dq, pbmul, qmul);
p                  72 lib/raid6/recov_neon.c 	u8 *p, *q, *dq;
p                  75 lib/raid6/recov_neon.c 	p = (u8 *)ptrs[disks - 2];
p                  96 lib/raid6/recov_neon.c 	__raid6_datap_recov_neon(bytes, p, q, dq, qmul);
p                  27 lib/raid6/recov_neon_inner.c void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
p                  52 lib/raid6/recov_neon_inner.c 		px = veorq_u8(vld1q_u8(p), vld1q_u8(dp));
p                  70 lib/raid6/recov_neon_inner.c 		p += 16;
p                  77 lib/raid6/recov_neon_inner.c void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
p                 100 lib/raid6/recov_neon_inner.c 		vy = veorq_u8(vx, vld1q_u8(p));
p                 103 lib/raid6/recov_neon_inner.c 		vst1q_u8(p, vy);
p                 106 lib/raid6/recov_neon_inner.c 		p += 16;
p                  26 lib/raid6/recov_s390xc.c 	u8 *p, *q, *dp, *dq;
p                  31 lib/raid6/recov_s390xc.c 	p = (u8 *)ptrs[disks-2];
p                  49 lib/raid6/recov_s390xc.c 	ptrs[disks-2] = p;
p                  58 lib/raid6/recov_s390xc.c 		xor_block(dp, p);
p                  63 lib/raid6/recov_s390xc.c 		p += 256;
p                  75 lib/raid6/recov_s390xc.c 	u8 *p, *q, *dq;
p                  79 lib/raid6/recov_s390xc.c 	p = (u8 *)ptrs[disks-2];
p                 102 lib/raid6/recov_s390xc.c 		xor_block(p, dq);
p                 103 lib/raid6/recov_s390xc.c 		p += 256;
p                  21 lib/raid6/recov_ssse3.c 	u8 *p, *q, *dp, *dq;
p                  28 lib/raid6/recov_ssse3.c 	p = (u8 *)ptrs[disks-2];
p                  46 lib/raid6/recov_ssse3.c 	ptrs[disks-2] = p;
p                  71 lib/raid6/recov_ssse3.c 		asm volatile("movdqa %0,%%xmm0" : : "m" (p[0]));
p                  72 lib/raid6/recov_ssse3.c 		asm volatile("movdqa %0,%%xmm8" : : "m" (p[16]));
p                 135 lib/raid6/recov_ssse3.c 		p += 32;
p                 141 lib/raid6/recov_ssse3.c 		asm volatile("movdqa %0,%%xmm0" : : "m" (*p));
p                 182 lib/raid6/recov_ssse3.c 		p += 16;
p                 196 lib/raid6/recov_ssse3.c 	u8 *p, *q, *dq;
p                 202 lib/raid6/recov_ssse3.c 	p = (u8 *)ptrs[disks-2];
p                 259 lib/raid6/recov_ssse3.c 		asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
p                 261 lib/raid6/recov_ssse3.c 		asm volatile("movdqa %0, %%xmm12" : : "m" (p[16]));
p                 276 lib/raid6/recov_ssse3.c 		asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));
p                 277 lib/raid6/recov_ssse3.c 		asm volatile("movdqa %%xmm12, %0" : "=m" (p[16]));
p                 280 lib/raid6/recov_ssse3.c 		p += 32;
p                 293 lib/raid6/recov_ssse3.c 		asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
p                 308 lib/raid6/recov_ssse3.c 		asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));
p                 311 lib/raid6/recov_ssse3.c 		p += 16;
p                  43 lib/raid6/sse1.c 	u8 *p, *q;
p                  47 lib/raid6/sse1.c 	p = dptr[z0+1];		/* XOR parity */
p                  80 lib/raid6/sse1.c 		asm volatile("movntq %%mm2,%0" : "=m" (p[d]));
p                 102 lib/raid6/sse1.c 	u8 *p, *q;
p                 106 lib/raid6/sse1.c 	p = dptr[z0+1];		/* XOR parity */
p                 141 lib/raid6/sse1.c 		asm volatile("movntq %%mm2,%0" : "=m" (p[d]));
p                 142 lib/raid6/sse1.c 		asm volatile("movntq %%mm3,%0" : "=m" (p[d+8]));
p                  39 lib/raid6/sse2.c 	u8 *p, *q;
p                  43 lib/raid6/sse2.c 	p = dptr[z0+1];		/* XOR parity */
p                  76 lib/raid6/sse2.c 		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
p                  91 lib/raid6/sse2.c 	u8 *p, *q;
p                  95 lib/raid6/sse2.c 	p = dptr[disks-2];	/* XOR parity */
p                 104 lib/raid6/sse2.c 		asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
p                 128 lib/raid6/sse2.c 		asm volatile("movdqa %%xmm2,%0" : "=m" (p[d]));
p                 149 lib/raid6/sse2.c 	u8 *p, *q;
p                 153 lib/raid6/sse2.c 	p = dptr[z0+1];		/* XOR parity */
p                 188 lib/raid6/sse2.c 		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
p                 189 lib/raid6/sse2.c 		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
p                 202 lib/raid6/sse2.c 	u8 *p, *q;
p                 206 lib/raid6/sse2.c 	p = dptr[disks-2];	/* XOR parity */
p                 216 lib/raid6/sse2.c 		asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
p                 217 lib/raid6/sse2.c 		asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
p                 257 lib/raid6/sse2.c 		asm volatile("movdqa %%xmm2,%0" : "=m" (p[d]));
p                 258 lib/raid6/sse2.c 		asm volatile("movdqa %%xmm3,%0" : "=m" (p[d+16]));
p                 281 lib/raid6/sse2.c 	u8 *p, *q;
p                 285 lib/raid6/sse2.c 	p = dptr[z0+1];		/* XOR parity */
p                 342 lib/raid6/sse2.c 		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
p                 344 lib/raid6/sse2.c 		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
p                 346 lib/raid6/sse2.c 		asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
p                 348 lib/raid6/sse2.c 		asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
p                 368 lib/raid6/sse2.c 	u8 *p, *q;
p                 372 lib/raid6/sse2.c 	p = dptr[disks-2];	/* XOR parity */
p                 384 lib/raid6/sse2.c 		asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
p                 385 lib/raid6/sse2.c 		asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
p                 386 lib/raid6/sse2.c 		asm volatile("movdqa %0,%%xmm10" : : "m" (p[d+32]));
p                 387 lib/raid6/sse2.c 		asm volatile("movdqa %0,%%xmm11" : : "m" (p[d+48]));
p                 454 lib/raid6/sse2.c 		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
p                 455 lib/raid6/sse2.c 		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
p                 456 lib/raid6/sse2.c 		asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
p                 457 lib/raid6/sse2.c 		asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
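
The lib/raid6 entries above all track p as the XOR-parity buffer that the SSE/AVX/NEON kernels fill alongside the Q syndrome: P is the plain XOR of the data blocks, Q is built by repeated GF(2^8) multiply-by-2. A minimal byte-at-a-time sketch of that syndrome generation, with a hypothetical gf_mul2() helper standing in for the unrolled vector code (this is the scalar idea, not the kernel's actual .uc template):

#include <linux/types.h>

/* multiply by x in GF(2^8) with the RAID-6 polynomial 0x11d */
static inline u8 gf_mul2(u8 v)
{
	return (v << 1) ^ ((v & 0x80) ? 0x1d : 0);
}

/* disks-2 data blocks, then P, then Q, each 'bytes' long */
static void gen_syndrome_sketch(int disks, size_t bytes, u8 **dptr)
{
	u8 *p = dptr[disks - 2];	/* XOR parity, as in the entries above */
	u8 *q = dptr[disks - 1];	/* Reed-Solomon syndrome */
	int z0 = disks - 3;		/* highest data disk */
	size_t d;
	int z;

	for (d = 0; d < bytes; d++) {
		u8 wp = dptr[z0][d];
		u8 wq = dptr[z0][d];

		for (z = z0 - 1; z >= 0; z--) {
			u8 wd = dptr[z][d];

			wp ^= wd;		/* P: plain parity */
			wq = gf_mul2(wq) ^ wd;	/* Q: Horner evaluation over GF(2^8) */
		}
		p[d] = wp;
		q[d] = wq;
	}
}

The recov_* files listed above invert this relation for one or two lost blocks; the vector versions simply do the same arithmetic 16, 32, 64 or 128 bytes at a time.
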
p                  41 lib/rhashtable.c 	return rht_head_hashfn(ht, tbl, he, ht->p);
p                 386 lib/rhashtable.c 	if (size < ht->p.min_size)
p                 387 lib/rhashtable.c 		size = ht->p.min_size;
p                 412 lib/rhashtable.c 	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
p                 498 lib/rhashtable.c 		    (ht->p.obj_cmpfn ?
p                 499 lib/rhashtable.c 		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
p                 593 lib/rhashtable.c 		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
p                 661 lib/rhashtable.c 	iter->p = NULL;
p                 729 lib/rhashtable.c 	if (iter->p && !rhlist) {
p                 734 lib/rhashtable.c 		struct rhash_head *p;
p                 736 lib/rhashtable.c 		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
p                 738 lib/rhashtable.c 			if (p == iter->p) {
p                 743 lib/rhashtable.c 		iter->p = NULL;
p                 744 lib/rhashtable.c 	} else if (iter->p && rhlist) {
p                 748 lib/rhashtable.c 		struct rhash_head *p;
p                 751 lib/rhashtable.c 		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
p                 752 lib/rhashtable.c 			for (list = container_of(p, struct rhlist_head, rhead);
p                 757 lib/rhashtable.c 					iter->p = p;
p                 763 lib/rhashtable.c 		iter->p = NULL;
p                 785 lib/rhashtable.c 	struct rhash_head *p = iter->p;
p                 794 lib/rhashtable.c 		rht_for_each_rcu(p, tbl, iter->slot) {
p                 796 lib/rhashtable.c 				list = container_of(p, struct rhlist_head,
p                 813 lib/rhashtable.c 		if (!rht_is_a_nulls(p)) {
p                 815 lib/rhashtable.c 			iter->p = p;
p                 817 lib/rhashtable.c 			return rht_obj(ht, rhlist ? &list->rhead : p);
p                 823 lib/rhashtable.c 	iter->p = NULL;
p                 856 lib/rhashtable.c 	struct rhash_head *p = iter->p;
p                 859 lib/rhashtable.c 	if (p) {
p                 861 lib/rhashtable.c 			p = rcu_dereference(p->next);
p                 862 lib/rhashtable.c 			list = container_of(p, struct rhlist_head, rhead);
p                 864 lib/rhashtable.c 		if (!rht_is_a_nulls(p)) {
p                 866 lib/rhashtable.c 			iter->p = p;
p                 868 lib/rhashtable.c 			return rht_obj(ht, rhlist ? &list->rhead : p);
p                 895 lib/rhashtable.c 	struct rhash_head *p = iter->p;
p                 897 lib/rhashtable.c 	if (p)
p                 898 lib/rhashtable.c 		return rht_obj(ht, ht->rhlist ? &list->rhead : p);
p                1021 lib/rhashtable.c 	memcpy(&ht->p, params, sizeof(*params));
p                1024 lib/rhashtable.c 		ht->p.min_size = roundup_pow_of_two(params->min_size);
p                1030 lib/rhashtable.c 		ht->p.max_size = rounddown_pow_of_two(params->max_size);
p                1031 lib/rhashtable.c 		if (ht->p.max_size < ht->max_elems / 2)
p                1032 lib/rhashtable.c 			ht->max_elems = ht->p.max_size * 2;
p                1035 lib/rhashtable.c 	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
p                1037 lib/rhashtable.c 	size = rounded_hashtable_size(&ht->p);
p                1039 lib/rhashtable.c 	ht->key_len = ht->p.key_len;
p                1041 lib/rhashtable.c 		ht->p.hashfn = jhash;
p                1045 lib/rhashtable.c 			ht->p.hashfn = rhashtable_jhash2;
p                1056 lib/rhashtable.c 		size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
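
In the lib/rhashtable.c entries, ht->p is the table's private copy of the caller-supplied rhashtable_params, which rhashtable_init() clamps (min_size, max_size) and defaults to jhash/rhashtable_jhash2 when no hashfn is given. A minimal usage sketch; struct demo_obj and its u32 key are invented for illustration:

#include <linux/rhashtable.h>
#include <linux/slab.h>

struct demo_obj {
	u32 id;
	struct rhash_head node;		/* linkage owned by the table */
};

static const struct rhashtable_params demo_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct demo_obj, id),
	.head_offset	= offsetof(struct demo_obj, node),
	.automatic_shrinking = true,	/* enables the shrink path seen above */
};

static int demo(void)
{
	struct rhashtable ht;
	struct demo_obj *obj;
	u32 key = 42;
	int err;

	err = rhashtable_init(&ht, &demo_params);	/* params copied into ht->p */
	if (err)
		return err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		err = -ENOMEM;
		goto out;
	}
	obj->id = key;

	err = rhashtable_insert_fast(&ht, &obj->node, demo_params);
	if (err) {
		kfree(obj);
		goto out;
	}

	/* lookup_fast takes rcu_read_lock() internally; result is RCU-protected */
	if (rhashtable_lookup_fast(&ht, &key, demo_params) != obj)
		err = -ENOENT;

	rhashtable_remove_fast(&ht, &obj->node, demo_params);
	kfree(obj);
out:
	rhashtable_destroy(&ht);
	return err;
}

The walker entries (iter->p) are the bookkeeping that lets rhashtable_walk_next() resume at the right element even while the table is being resized.
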
p                 274 lib/seq_buf.c  		char *p = d_path(path, buf, size);
p                 275 lib/seq_buf.c  		if (!IS_ERR(p)) {
p                 276 lib/seq_buf.c  			char *end = mangle_path(buf, p, esc);
p                 559 lib/string.c   	const char *p;
p                 563 lib/string.c   	for (p = s; *p != '\0'; ++p) {
p                 565 lib/string.c   			if (*p == *a)
p                 586 lib/string.c   	const char *p;
p                 590 lib/string.c   	for (p = s; *p != '\0'; ++p) {
p                 592 lib/string.c   			if (*p == *r)
p                 924 lib/string.c   	unsigned char *p = addr;
p                 927 lib/string.c   		if (*p == c)
p                 928 lib/string.c   			return (void *)p;
p                 929 lib/string.c   		p++;
p                 932 lib/string.c     	return (void *)p;
p                 999 lib/string.c   	const unsigned char *p = s;
p                1001 lib/string.c           	if ((unsigned char)c == *p++) {
p                1002 lib/string.c   			return (void *)(p - 1);
p                 135 lib/string_helpers.c 	char *p = *dst, *q = *src;
p                 139 lib/string_helpers.c 		*p = '\n';
p                 142 lib/string_helpers.c 		*p = '\r';
p                 145 lib/string_helpers.c 		*p = '\t';
p                 148 lib/string_helpers.c 		*p = '\v';
p                 151 lib/string_helpers.c 		*p = '\f';
p                 163 lib/string_helpers.c 	char *p = *dst, *q = *src;
p                 174 lib/string_helpers.c 	*p = num;
p                 182 lib/string_helpers.c 	char *p = *dst, *q = *src;
p                 198 lib/string_helpers.c 	*p = num;
p                 206 lib/string_helpers.c 	char *p = *dst, *q = *src;
p                 210 lib/string_helpers.c 		*p = '\"';
p                 213 lib/string_helpers.c 		*p = '\\';
p                 216 lib/string_helpers.c 		*p = '\a';
p                 219 lib/string_helpers.c 		*p = '\e';
p                 500 lib/string_helpers.c 	char *p = dst;
p                 501 lib/string_helpers.c 	char *end = p + osz;
p                 522 lib/string_helpers.c 			if (flags & ESCAPE_SPACE && escape_space(c, &p, end))
p                 525 lib/string_helpers.c 			if (flags & ESCAPE_SPECIAL && escape_special(c, &p, end))
p                 528 lib/string_helpers.c 			if (flags & ESCAPE_NULL && escape_null(c, &p, end))
p                 532 lib/string_helpers.c 			if (flags & ESCAPE_OCTAL && escape_octal(c, &p, end))
p                 535 lib/string_helpers.c 			if (flags & ESCAPE_HEX && escape_hex(c, &p, end))
p                 539 lib/string_helpers.c 		escape_passthrough(c, &p, end);
p                 542 lib/string_helpers.c 	return p - dst;
p                 549 lib/string_helpers.c 	char *p = dst;
p                 550 lib/string_helpers.c 	char *end = p + osz;
p                 556 lib/string_helpers.c 			escape_hex(c, &p, end);
p                 558 lib/string_helpers.c 			escape_passthrough(c, &p, end);
p                 561 lib/string_helpers.c 	return p - dst;
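
Throughout lib/string_helpers.c, p is the output cursor that the escape_*/unescape_* helpers advance toward end, and the return value is p - dst, the number of bytes that would have been written. A short caller-side sketch; the buffer-sizing trick mirrors the string_escape_mem(in, p, NULL, 0, ...) probe used by the selftest below, and escape_for_log() is a made-up name:

#include <linux/string_helpers.h>
#include <linux/slab.h>

static char *escape_for_log(const void *buf, size_t len)
{
	char *out;
	int need;

	/* dst == NULL, osz == 0: only ask how many bytes the escaped form needs */
	need = string_escape_mem(buf, len, NULL, 0, ESCAPE_ANY_NP, NULL);

	out = kmalloc(need + 1, GFP_KERNEL);
	if (!out)
		return NULL;

	string_escape_mem(buf, len, out, need, ESCAPE_ANY_NP, NULL);
	out[need] = '\0';
	return out;
}
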
p                  15 lib/test-string_helpers.c 					 char *in, size_t p,
p                  25 lib/test-string_helpers.c 		       in, p, true);
p                  70 lib/test-string_helpers.c 	int i, p = 0, q_test = 0;
p                  80 lib/test-string_helpers.c 		memcpy(&in[p], s, len);
p                  81 lib/test-string_helpers.c 		p += len;
p                  91 lib/test-string_helpers.c 	in[p++] = '\0';
p                  95 lib/test-string_helpers.c 		memcpy(out_real, in, p);
p                 106 lib/test-string_helpers.c 	test_string_check_buf(name, flags, in, p - 1, out_real, q_real,
p                 264 lib/test-string_helpers.c test_string_escape_overflow(const char *in, int p, unsigned int flags, const char *esc,
p                 269 lib/test-string_helpers.c 	q_real = string_escape_mem(in, p, NULL, 0, flags, esc);
p                 283 lib/test-string_helpers.c 	int p = 0, q_test = 0;
p                 295 lib/test-string_helpers.c 			in[p++] = '\0';
p                 307 lib/test-string_helpers.c 		memcpy(&in[p], s2->in, len);
p                 308 lib/test-string_helpers.c 		p += len;
p                 316 lib/test-string_helpers.c 	q_real = string_escape_mem(in, p, out_real, out_size, flags, esc);
p                 318 lib/test-string_helpers.c 	test_string_check_buf(name, flags, in, p, out_real, q_real, out_test,
p                 321 lib/test-string_helpers.c 	test_string_escape_overflow(in, p, flags, esc, q_test, name);
p                  71 lib/test_hexdump.c 	char *p;
p                  97 lib/test_hexdump.c 	p = test;
p                 102 lib/test_hexdump.c 		memcpy(p, q, amount);
p                 103 lib/test_hexdump.c 		p += amount;
p                 105 lib/test_hexdump.c 		*p++ = ' ';
p                 108 lib/test_hexdump.c 		p--;
p                 113 lib/test_hexdump.c 			*p++ = ' ';
p                 114 lib/test_hexdump.c 		} while (p < test + rs * 2 + rs / gs + 1);
p                 116 lib/test_hexdump.c 		strncpy(p, data_a, l);
p                 117 lib/test_hexdump.c 		p += l;
p                 120 lib/test_hexdump.c 	*p = '\0';
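
lib/test_hexdump.c uses p to assemble, by hand, the line that hex_dump_to_buffer() is expected to emit for a given rowsize/groupsize. For reference, the API under test in its usual form; buffer sizes and the "blob: " prefix here are illustrative only:

#include <linux/printk.h>
#include <linux/kernel.h>

static void dump_sketch(const void *buf, size_t len)
{
	char line[100];

	/* one row: 16 bytes, 1-byte groups, trailing ASCII column */
	hex_dump_to_buffer(buf, min_t(size_t, len, 16), 16, 1,
			   line, sizeof(line), true);
	pr_info("%s\n", line);

	/* or let the kernel format every row itself */
	print_hex_dump(KERN_INFO, "blob: ", DUMP_PREFIX_OFFSET,
		       16, 1, buf, len, true);
}
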
p                 382 lib/test_kasan.c 	char *p;
p                 392 lib/test_kasan.c 	p = kmem_cache_alloc(cache, GFP_KERNEL);
p                 393 lib/test_kasan.c 	if (!p) {
p                 399 lib/test_kasan.c 	*p = p[size];
p                 400 lib/test_kasan.c 	kmem_cache_free(cache, p);
p                 407 lib/test_kasan.c 	char *p;
p                 423 lib/test_kasan.c 		p = kmem_cache_alloc(cache, GFP_KERNEL);
p                 424 lib/test_kasan.c 		if (!p)
p                 427 lib/test_kasan.c 		kmem_cache_free(cache, p);
p                 440 lib/test_kasan.c 	char *p = &global_array[ARRAY_SIZE(global_array) + i];
p                 443 lib/test_kasan.c 	*(volatile char *)p;
p                 450 lib/test_kasan.c 	char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
p                 453 lib/test_kasan.c 	*(volatile char *)p;
p                 524 lib/test_kasan.c 	char *p = alloca_array - 1;
p                 527 lib/test_kasan.c 	*(volatile char *)p;
p                 534 lib/test_kasan.c 	char *p = alloca_array + i;
p                 537 lib/test_kasan.c 	*(volatile char *)p;
p                 542 lib/test_kasan.c 	char *p;
p                 552 lib/test_kasan.c 	p = kmem_cache_alloc(cache, GFP_KERNEL);
p                 553 lib/test_kasan.c 	if (!p) {
p                 559 lib/test_kasan.c 	kmem_cache_free(cache, p);
p                 560 lib/test_kasan.c 	kmem_cache_free(cache, p);
p                 566 lib/test_kasan.c 	char *p;
p                 577 lib/test_kasan.c 	p = kmem_cache_alloc(cache, GFP_KERNEL);
p                 578 lib/test_kasan.c 	if (!p) {
p                 585 lib/test_kasan.c 	kmem_cache_free(cache, p + 1);
p                 591 lib/test_kasan.c 	kmem_cache_free(cache, p);
p                  24 lib/test_memcat_p.c 	struct test_struct **in0, **in1, **out, **p;
p                  61 lib/test_memcat_p.c 	for (i = 0, p = out; *p && (i < INPUT_MAX * 2 - 1); p++, i++) {
p                  62 lib/test_memcat_p.c 		total += (*p)->num;
p                  64 lib/test_memcat_p.c 		if ((*p)->magic != MAGIC) {
p                  66 lib/test_memcat_p.c 			       (*p)->magic);
p                  32 lib/test_meminit.c 	unsigned char *p = (unsigned char *)ptr;
p                  35 lib/test_meminit.c 		if (p[i])
p                  43 lib/test_meminit.c 	unsigned int *p = (unsigned int *)((char *)ptr + skip);
p                  49 lib/test_meminit.c 	while (size >= sizeof(*p)) {
p                  50 lib/test_meminit.c 		p[i] = GARBAGE_INT;
p                  52 lib/test_meminit.c 		size -= sizeof(*p);
p                  55 lib/test_meminit.c 		memset(&p[i], GARBAGE_BYTE, size);
p                 238 lib/test_overflow.c static int __init do_test_ ## t(const struct test_ ## t *p)		\
p                 242 lib/test_overflow.c 	check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of);	\
p                 243 lib/test_overflow.c 	check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of);	\
p                 244 lib/test_overflow.c 	check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of);	\
p                 245 lib/test_overflow.c 	check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of);	\
p                 246 lib/test_overflow.c 	check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of);	\
p                  96 lib/test_printf.c 	char *p;
p                 119 lib/test_printf.c 	p = kvasprintf(GFP_KERNEL, fmt, ap);
p                 120 lib/test_printf.c 	if (p) {
p                 122 lib/test_printf.c 		if (memcmp(p, expect, elen+1)) {
p                 124 lib/test_printf.c 				fmt, p, expect);
p                 127 lib/test_printf.c 		kfree(p);
p                 259 lib/test_printf.c plain_hash_to_buffer(const void *p, char *buf, size_t len)
p                 263 lib/test_printf.c 	nchars = snprintf(buf, len, "%p", p);
p                 317 lib/test_printf.c test_hashed(const char *fmt, const void *p)
p                 326 lib/test_printf.c 	ret = plain_hash_to_buffer(p, buf, PLAIN_BUF_SIZE);
p                 330 lib/test_printf.c 	test(buf, fmt, p);
p                 498 lib/test_rhashtable.c 		struct test_obj_rhl *p;
p                 513 lib/test_rhashtable.c 				p = rht_obj(ht, pos);
p                 515 lib/test_rhashtable.c 				sprintf(buff, "%s val %d (tid=%d)%s", buff, p->value.id, p->value.tid,
p                  10 lib/test_string.c 	u16 v, *p;
p                  12 lib/test_string.c 	p = kmalloc(256 * 2 * 2, GFP_KERNEL);
p                  13 lib/test_string.c 	if (!p)
p                  18 lib/test_string.c 			memset(p, 0xa1, 256 * 2 * sizeof(v));
p                  19 lib/test_string.c 			memset16(p + i, 0xb1b2, j);
p                  21 lib/test_string.c 				v = p[k];
p                  37 lib/test_string.c 	kfree(p);
p                  46 lib/test_string.c 	u32 v, *p;
p                  48 lib/test_string.c 	p = kmalloc(256 * 2 * 4, GFP_KERNEL);
p                  49 lib/test_string.c 	if (!p)
p                  54 lib/test_string.c 			memset(p, 0xa1, 256 * 2 * sizeof(v));
p                  55 lib/test_string.c 			memset32(p + i, 0xb1b2b3b4, j);
p                  57 lib/test_string.c 				v = p[k];
p                  73 lib/test_string.c 	kfree(p);
p                  82 lib/test_string.c 	u64 v, *p;
p                  84 lib/test_string.c 	p = kmalloc(256 * 2 * 8, GFP_KERNEL);
p                  85 lib/test_string.c 	if (!p)
p                  90 lib/test_string.c 			memset(p, 0xa1, 256 * 2 * sizeof(v));
p                  91 lib/test_string.c 			memset64(p + i, 0xb1b2b3b4b5b6b7b8ULL, j);
p                  93 lib/test_string.c 				v = p[k];
p                 109 lib/test_string.c 	kfree(p);
p                 161 lib/test_vmalloc.c 	void *p;
p                 168 lib/test_vmalloc.c 		p = vmalloc(n * PAGE_SIZE);
p                 170 lib/test_vmalloc.c 		if (!p)
p                 173 lib/test_vmalloc.c 		*((__u8 *)p) = 1;
p                 174 lib/test_vmalloc.c 		vfree(p);
p                  29 lib/timerqueue.c 	struct rb_node **p = &head->rb_root.rb_root.rb_node;
p                  37 lib/timerqueue.c 	while (*p) {
p                  38 lib/timerqueue.c 		parent = *p;
p                  41 lib/timerqueue.c 			p = &(*p)->rb_left;
p                  43 lib/timerqueue.c 			p = &(*p)->rb_right;
p                  47 lib/timerqueue.c 	rb_link_node(&node->node, parent, p);
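
timerqueue_add() above is one of several places in this listing (mm/backing-dev.c, mm/mempolicy.c, mm/memcontrol.c, mm/nommu.c follow the same shape) where p walks a struct rb_node ** down the tree until it finds the empty child slot, then hands it to rb_link_node(). The canonical pattern, sketched for a hypothetical struct item keyed by a u64:

#include <linux/rbtree.h>
#include <linux/types.h>

struct item {
	u64 key;
	struct rb_node node;
};

static void item_insert(struct rb_root *root, struct item *new)
{
	struct rb_node **p = &root->rb_node;	/* link slot being considered */
	struct rb_node *parent = NULL;

	while (*p) {
		struct item *cur = rb_entry(*p, struct item, node);

		parent = *p;
		if (new->key < cur->key)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	/* p now points at the empty child slot beneath parent */
	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);	/* rebalance */
}

timerqueue itself uses the cached variant (rb_root_cached) so it can also remember the leftmost, i.e. earliest-expiring, node.
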
p                 835 lib/vsprintf.c 	const struct dentry *p;
p                 848 lib/vsprintf.c 	for (i = 0; i < depth; i++, d = p) {
p                 854 lib/vsprintf.c 		p = READ_ONCE(d->d_parent);
p                 856 lib/vsprintf.c 		if (p == d) {
p                1013 lib/vsprintf.c 	char *p = sym, *pend = sym + sizeof(sym);
p                1020 lib/vsprintf.c 	*p++ = '[';
p                1022 lib/vsprintf.c 		p = string_nocheck(p, pend, "io  ", str_spec);
p                1025 lib/vsprintf.c 		p = string_nocheck(p, pend, "mem ", str_spec);
p                1028 lib/vsprintf.c 		p = string_nocheck(p, pend, "irq ", str_spec);
p                1031 lib/vsprintf.c 		p = string_nocheck(p, pend, "dma ", str_spec);
p                1034 lib/vsprintf.c 		p = string_nocheck(p, pend, "bus ", str_spec);
p                1037 lib/vsprintf.c 		p = string_nocheck(p, pend, "??? ", str_spec);
p                1042 lib/vsprintf.c 		p = string_nocheck(p, pend, "size ", str_spec);
p                1043 lib/vsprintf.c 		p = number(p, pend, resource_size(res), *specp);
p                1045 lib/vsprintf.c 		p = number(p, pend, res->start, *specp);
p                1047 lib/vsprintf.c 			*p++ = '-';
p                1048 lib/vsprintf.c 			p = number(p, pend, res->end, *specp);
p                1053 lib/vsprintf.c 			p = string_nocheck(p, pend, " 64bit", str_spec);
p                1055 lib/vsprintf.c 			p = string_nocheck(p, pend, " pref", str_spec);
p                1057 lib/vsprintf.c 			p = string_nocheck(p, pend, " window", str_spec);
p                1059 lib/vsprintf.c 			p = string_nocheck(p, pend, " disabled", str_spec);
p                1061 lib/vsprintf.c 		p = string_nocheck(p, pend, " flags ", str_spec);
p                1062 lib/vsprintf.c 		p = number(p, pend, res->flags, default_flag_spec);
p                1064 lib/vsprintf.c 	*p++ = ']';
p                1065 lib/vsprintf.c 	*p = '\0';
p                1210 lib/vsprintf.c 	char *p = mac_addr;
p                1234 lib/vsprintf.c 			p = hex_byte_pack(p, addr[5 - i]);
p                1236 lib/vsprintf.c 			p = hex_byte_pack(p, addr[i]);
p                1239 lib/vsprintf.c 			*p++ = separator;
p                1241 lib/vsprintf.c 	*p = '\0';
p                1247 lib/vsprintf.c char *ip4_string(char *p, const u8 *addr, const char *fmt)
p                1280 lib/vsprintf.c 				*p++ = '0';
p                1282 lib/vsprintf.c 				*p++ = '0';
p                1286 lib/vsprintf.c 			*p++ = temp[digits];
p                1288 lib/vsprintf.c 			*p++ = '.';
p                1291 lib/vsprintf.c 	*p = '\0';
p                1293 lib/vsprintf.c 	return p;
p                1297 lib/vsprintf.c char *ip6_compressed_string(char *p, const char *addr)
p                1341 lib/vsprintf.c 				*p++ = ':';
p                1342 lib/vsprintf.c 			*p++ = ':';
p                1348 lib/vsprintf.c 			*p++ = ':';
p                1357 lib/vsprintf.c 				p = hex_byte_pack(p, hi);
p                1359 lib/vsprintf.c 				*p++ = hex_asc_lo(hi);
p                1360 lib/vsprintf.c 			p = hex_byte_pack(p, lo);
p                1363 lib/vsprintf.c 			p = hex_byte_pack(p, lo);
p                1365 lib/vsprintf.c 			*p++ = hex_asc_lo(lo);
p                1371 lib/vsprintf.c 			*p++ = ':';
p                1372 lib/vsprintf.c 		p = ip4_string(p, &in6.s6_addr[12], "I4");
p                1374 lib/vsprintf.c 	*p = '\0';
p                1376 lib/vsprintf.c 	return p;
p                1380 lib/vsprintf.c char *ip6_string(char *p, const char *addr, const char *fmt)
p                1385 lib/vsprintf.c 		p = hex_byte_pack(p, *addr++);
p                1386 lib/vsprintf.c 		p = hex_byte_pack(p, *addr++);
p                1388 lib/vsprintf.c 			*p++ = ':';
p                1390 lib/vsprintf.c 	*p = '\0';
p                1392 lib/vsprintf.c 	return p;
p                1428 lib/vsprintf.c 	char *p = ip6_addr, *pend = ip6_addr + sizeof(ip6_addr);
p                1452 lib/vsprintf.c 		*p = '[';
p                1457 lib/vsprintf.c 		p = ip6_compressed_string(ip6_addr + off, addr);
p                1459 lib/vsprintf.c 		p = ip6_string(ip6_addr + off, addr, fmt6);
p                1462 lib/vsprintf.c 		*p++ = ']';
p                1465 lib/vsprintf.c 		*p++ = ':';
p                1466 lib/vsprintf.c 		p = number(p, pend, ntohs(sa->sin6_port), spec);
p                1469 lib/vsprintf.c 		*p++ = '/';
p                1470 lib/vsprintf.c 		p = number(p, pend, ntohl(sa->sin6_flowinfo &
p                1474 lib/vsprintf.c 		*p++ = '%';
p                1475 lib/vsprintf.c 		p = number(p, pend, sa->sin6_scope_id, spec);
p                1477 lib/vsprintf.c 	*p = '\0';
p                1487 lib/vsprintf.c 	char *p, ip4_addr[sizeof("255.255.255.255") + sizeof(":12345")];
p                1507 lib/vsprintf.c 	p = ip4_string(ip4_addr, addr, fmt4);
p                1509 lib/vsprintf.c 		*p++ = ':';
p                1510 lib/vsprintf.c 		p = number(p, pend, ntohs(sa->sin_port), spec);
p                1512 lib/vsprintf.c 	*p = '\0';
p                1631 lib/vsprintf.c 	char *p = uuid;
p                1652 lib/vsprintf.c 			p = hex_byte_pack_upper(p, addr[index[i]]);
p                1654 lib/vsprintf.c 			p = hex_byte_pack(p, addr[index[i]]);
p                1660 lib/vsprintf.c 			*p++ = '-';
p                1665 lib/vsprintf.c 	*p = 0;
p                1916 lib/vsprintf.c 	const char *p;
p                1955 lib/vsprintf.c 			p = kbasename(of_node_full_name(dn));
p                1957 lib/vsprintf.c 			str_spec.precision = strchrnul(p, '@') - p;
p                1958 lib/vsprintf.c 			buf = string(buf, end, p, str_spec);
p                1965 lib/vsprintf.c 			p = kbasename(of_node_full_name(dn));
p                1966 lib/vsprintf.c 			if (!p[1])
p                1967 lib/vsprintf.c 				p = "/";
p                1968 lib/vsprintf.c 			buf = string(buf, end, p, str_spec);
p                1979 lib/vsprintf.c 			ret = of_property_read_string(dn, "compatible", &p);
p                1981 lib/vsprintf.c 				buf = string(buf, end, p, str_spec);
p                1985 lib/vsprintf.c 			of_property_for_each_string(dn, "compatible", prop, p) {
p                1989 lib/vsprintf.c 				buf = string(buf, end, p, str_spec);
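
The lib/vsprintf.c entries above are the backends of several %p extensions: mac_address_string (%pM), ip4_string/ip6_compressed_string (%pI4, %pI6c and the socket-address forms), uuid_string (%pU), resource_string (%pR) and the device-tree node printing (%pOF*); in each, p is the write cursor into a stack buffer that is later copied out through string_nocheck()/number(). A short reminder of how the specifiers are consumed; the argument names are only for illustration:

#include <linux/printk.h>
#include <linux/types.h>
#include <linux/in6.h>
#include <linux/ioport.h>

static void print_examples(const u8 mac[6], __be32 ip4,
			   const struct in6_addr *ip6,
			   const u8 uuid[16], const struct resource *res)
{
	pr_info("mac  %pM\n", mac);	/* 00:11:22:33:44:55 */
	pr_info("ipv4 %pI4\n", &ip4);	/* dotted quad, network byte order */
	pr_info("ipv6 %pI6c\n", ip6);	/* RFC 5952 compressed form */
	pr_info("uuid %pUb\n", uuid);	/* lowercase, big-endian field order */
	pr_info("res  %pR\n", res);	/* "[mem 0x... flags ...]" style */
}
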
p                 104 lib/xxhash.c   	const uint8_t *p = (const uint8_t *)input;
p                 105 lib/xxhash.c   	const uint8_t *b_end = p + len;
p                 116 lib/xxhash.c   			v1 = xxh32_round(v1, get_unaligned_le32(p));
p                 117 lib/xxhash.c   			p += 4;
p                 118 lib/xxhash.c   			v2 = xxh32_round(v2, get_unaligned_le32(p));
p                 119 lib/xxhash.c   			p += 4;
p                 120 lib/xxhash.c   			v3 = xxh32_round(v3, get_unaligned_le32(p));
p                 121 lib/xxhash.c   			p += 4;
p                 122 lib/xxhash.c   			v4 = xxh32_round(v4, get_unaligned_le32(p));
p                 123 lib/xxhash.c   			p += 4;
p                 124 lib/xxhash.c   		} while (p <= limit);
p                 134 lib/xxhash.c   	while (p + 4 <= b_end) {
p                 135 lib/xxhash.c   		h32 += get_unaligned_le32(p) * PRIME32_3;
p                 137 lib/xxhash.c   		p += 4;
p                 140 lib/xxhash.c   	while (p < b_end) {
p                 141 lib/xxhash.c   		h32 += (*p) * PRIME32_5;
p                 143 lib/xxhash.c   		p++;
p                 174 lib/xxhash.c   	const uint8_t *p = (const uint8_t *)input;
p                 175 lib/xxhash.c   	const uint8_t *const b_end = p + len;
p                 186 lib/xxhash.c   			v1 = xxh64_round(v1, get_unaligned_le64(p));
p                 187 lib/xxhash.c   			p += 8;
p                 188 lib/xxhash.c   			v2 = xxh64_round(v2, get_unaligned_le64(p));
p                 189 lib/xxhash.c   			p += 8;
p                 190 lib/xxhash.c   			v3 = xxh64_round(v3, get_unaligned_le64(p));
p                 191 lib/xxhash.c   			p += 8;
p                 192 lib/xxhash.c   			v4 = xxh64_round(v4, get_unaligned_le64(p));
p                 193 lib/xxhash.c   			p += 8;
p                 194 lib/xxhash.c   		} while (p <= limit);
p                 209 lib/xxhash.c   	while (p + 8 <= b_end) {
p                 210 lib/xxhash.c   		const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));
p                 214 lib/xxhash.c   		p += 8;
p                 217 lib/xxhash.c   	if (p + 4 <= b_end) {
p                 218 lib/xxhash.c   		h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1;
p                 220 lib/xxhash.c   		p += 4;
p                 223 lib/xxhash.c   	while (p < b_end) {
p                 224 lib/xxhash.c   		h64 ^= (*p) * PRIME64_5;
p                 226 lib/xxhash.c   		p++;
p                 272 lib/xxhash.c   	const uint8_t *p = (const uint8_t *)input;
p                 273 lib/xxhash.c   	const uint8_t *const b_end = p + len;
p                 302 lib/xxhash.c   		p += 16-state->memsize;
p                 306 lib/xxhash.c   	if (p <= b_end - 16) {
p                 314 lib/xxhash.c   			v1 = xxh32_round(v1, get_unaligned_le32(p));
p                 315 lib/xxhash.c   			p += 4;
p                 316 lib/xxhash.c   			v2 = xxh32_round(v2, get_unaligned_le32(p));
p                 317 lib/xxhash.c   			p += 4;
p                 318 lib/xxhash.c   			v3 = xxh32_round(v3, get_unaligned_le32(p));
p                 319 lib/xxhash.c   			p += 4;
p                 320 lib/xxhash.c   			v4 = xxh32_round(v4, get_unaligned_le32(p));
p                 321 lib/xxhash.c   			p += 4;
p                 322 lib/xxhash.c   		} while (p <= limit);
p                 330 lib/xxhash.c   	if (p < b_end) {
p                 331 lib/xxhash.c   		memcpy(state->mem32, p, (size_t)(b_end-p));
p                 332 lib/xxhash.c   		state->memsize = (uint32_t)(b_end-p);
p                 341 lib/xxhash.c   	const uint8_t *p = (const uint8_t *)state->mem32;
p                 355 lib/xxhash.c   	while (p + 4 <= b_end) {
p                 356 lib/xxhash.c   		h32 += get_unaligned_le32(p) * PRIME32_3;
p                 358 lib/xxhash.c   		p += 4;
p                 361 lib/xxhash.c   	while (p < b_end) {
p                 362 lib/xxhash.c   		h32 += (*p) * PRIME32_5;
p                 364 lib/xxhash.c   		p++;
p                 379 lib/xxhash.c   	const uint8_t *p = (const uint8_t *)input;
p                 380 lib/xxhash.c   	const uint8_t *const b_end = p + len;
p                 407 lib/xxhash.c   		p += 32 - state->memsize;
p                 411 lib/xxhash.c   	if (p + 32 <= b_end) {
p                 419 lib/xxhash.c   			v1 = xxh64_round(v1, get_unaligned_le64(p));
p                 420 lib/xxhash.c   			p += 8;
p                 421 lib/xxhash.c   			v2 = xxh64_round(v2, get_unaligned_le64(p));
p                 422 lib/xxhash.c   			p += 8;
p                 423 lib/xxhash.c   			v3 = xxh64_round(v3, get_unaligned_le64(p));
p                 424 lib/xxhash.c   			p += 8;
p                 425 lib/xxhash.c   			v4 = xxh64_round(v4, get_unaligned_le64(p));
p                 426 lib/xxhash.c   			p += 8;
p                 427 lib/xxhash.c   		} while (p <= limit);
p                 435 lib/xxhash.c   	if (p < b_end) {
p                 436 lib/xxhash.c   		memcpy(state->mem64, p, (size_t)(b_end-p));
p                 437 lib/xxhash.c   		state->memsize = (uint32_t)(b_end - p);
p                 446 lib/xxhash.c   	const uint8_t *p = (const uint8_t *)state->mem64;
p                 469 lib/xxhash.c   	while (p + 8 <= b_end) {
p                 470 lib/xxhash.c   		const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));
p                 474 lib/xxhash.c   		p += 8;
p                 477 lib/xxhash.c   	if (p + 4 <= b_end) {
p                 478 lib/xxhash.c   		h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1;
p                 480 lib/xxhash.c   		p += 4;
p                 483 lib/xxhash.c   	while (p < b_end) {
p                 484 lib/xxhash.c   		h64 ^= (*p) * PRIME64_5;
p                 486 lib/xxhash.c   		p++;
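
In lib/xxhash.c, p is the read cursor that the one-shot xxh32()/xxh64() routines and the streaming xxh*_update() helpers advance 4 or 8 bytes at a time, spilling any unconsumed tail into state->mem32/mem64 for the next call. Typical in-kernel use, sketched with an arbitrary zero seed:

#include <linux/xxhash.h>

static u64 hash_blob(const void *buf, size_t len)
{
	/* one-shot: fine when the whole buffer is contiguous */
	return xxh64(buf, len, 0 /* seed */);
}

static u64 hash_two_parts(const void *a, size_t alen,
			  const void *b, size_t blen)
{
	struct xxh64_state state;

	/* streaming: same digest as hashing the concatenation in one go */
	xxh64_reset(&state, 0);
	xxh64_update(&state, a, alen);
	xxh64_update(&state, b, blen);
	return xxh64_digest(&state);
}
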
p                  43 lib/xz/xz_private.h #	define get_le32(p) le32_to_cpup((const uint32_t *)(p))
p                 738 lib/zlib_deflate/deflate.c     register Pos *p;
p                 773 lib/zlib_deflate/deflate.c             p = &s->head[n];
p                 775 lib/zlib_deflate/deflate.c                 m = *--p;
p                 776 lib/zlib_deflate/deflate.c                 *p = (Pos)(m >= wsize ? m-wsize : NIL);
p                 780 lib/zlib_deflate/deflate.c             p = &s->prev[n];
p                 782 lib/zlib_deflate/deflate.c                 m = *--p;
p                 783 lib/zlib_deflate/deflate.c                 *p = (Pos)(m >= wsize ? m-wsize : NIL);
p                  31 lib/zlib_inflate/inffast.c get_unaligned16(const unsigned short *p)
p                  34 lib/zlib_inflate/inffast.c 	unsigned char *b = (unsigned char *)p;
p                 962 lib/zstd/compress.c static size_t ZSTD_hash5Ptr(const void *p, U32 h) { return ZSTD_hash5(ZSTD_readLE64(p), h); }
p                 966 lib/zstd/compress.c static size_t ZSTD_hash6Ptr(const void *p, U32 h) { return ZSTD_hash6(ZSTD_readLE64(p), h); }
p                 970 lib/zstd/compress.c static size_t ZSTD_hash7Ptr(const void *p, U32 h) { return ZSTD_hash7(ZSTD_readLE64(p), h); }
p                 974 lib/zstd/compress.c static size_t ZSTD_hash8Ptr(const void *p, U32 h) { return ZSTD_hash8(ZSTD_readLE64(p), h); }
p                 976 lib/zstd/compress.c static size_t ZSTD_hashPtr(const void *p, U32 hBits, U32 mls)
p                 981 lib/zstd/compress.c 	case 4: return ZSTD_hash4Ptr(p, hBits);
p                 982 lib/zstd/compress.c 	case 5: return ZSTD_hash5Ptr(p, hBits);
p                 983 lib/zstd/compress.c 	case 6: return ZSTD_hash6Ptr(p, hBits);
p                 984 lib/zstd/compress.c 	case 7: return ZSTD_hash7Ptr(p, hBits);
p                 985 lib/zstd/compress.c 	case 8: return ZSTD_hash8Ptr(p, hBits);
p                 176 lib/zstd/huf_decompress.c FORCE_INLINE size_t HUF_decodeStreamX2(BYTE *p, BIT_DStream_t *const bitDPtr, BYTE *const pEnd, const HUF_DEltX2 *const dt, const U32 dtLog)
p                 178 lib/zstd/huf_decompress.c 	BYTE *const pStart = p;
p                 181 lib/zstd/huf_decompress.c 	while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd - 4)) {
p                 182 lib/zstd/huf_decompress.c 		HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
p                 183 lib/zstd/huf_decompress.c 		HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
p                 184 lib/zstd/huf_decompress.c 		HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
p                 185 lib/zstd/huf_decompress.c 		HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
p                 189 lib/zstd/huf_decompress.c 	while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
p                 190 lib/zstd/huf_decompress.c 		HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
p                 193 lib/zstd/huf_decompress.c 	while (p < pEnd)
p                 194 lib/zstd/huf_decompress.c 		HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
p                 630 lib/zstd/huf_decompress.c FORCE_INLINE size_t HUF_decodeStreamX4(BYTE *p, BIT_DStream_t *bitDPtr, BYTE *const pEnd, const HUF_DEltX4 *const dt, const U32 dtLog)
p                 632 lib/zstd/huf_decompress.c 	BYTE *const pStart = p;
p                 635 lib/zstd/huf_decompress.c 	while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd - (sizeof(bitDPtr->bitContainer) - 1))) {
p                 636 lib/zstd/huf_decompress.c 		HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
p                 637 lib/zstd/huf_decompress.c 		HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
p                 638 lib/zstd/huf_decompress.c 		HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
p                 639 lib/zstd/huf_decompress.c 		HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
p                 643 lib/zstd/huf_decompress.c 	while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd - 2))
p                 644 lib/zstd/huf_decompress.c 		HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
p                 646 lib/zstd/huf_decompress.c 	while (p <= pEnd - 2)
p                 647 lib/zstd/huf_decompress.c 		HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
p                 649 lib/zstd/huf_decompress.c 	if (p < pEnd)
p                 650 lib/zstd/huf_decompress.c 		p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
p                 652 lib/zstd/huf_decompress.c 	return p - pStart;
p                 236 lib/zstd/zstd_internal.h #define ZSTD_PTR_ALIGN(p) PTR_ALIGN(p, sizeof(size_t))
p                 887 mm/backing-dev.c 	struct rb_node **p = &bdi_tree.rb_node;
p                 893 mm/backing-dev.c 	while (*p) {
p                 894 mm/backing-dev.c 		parent = *p;
p                 898 mm/backing-dev.c 			p = &(*p)->rb_left;
p                 900 mm/backing-dev.c 			p = &(*p)->rb_right;
p                 907 mm/backing-dev.c 	return p;
p                 920 mm/backing-dev.c 	struct rb_node **p;
p                 923 mm/backing-dev.c 	p = bdi_lookup_rb_node(id, NULL);
p                 924 mm/backing-dev.c 	if (*p) {
p                 925 mm/backing-dev.c 		bdi = rb_entry(*p, struct backing_dev_info, rb_node);
p                 936 mm/backing-dev.c 	struct rb_node *parent, **p;
p                 956 mm/backing-dev.c 	p = bdi_lookup_rb_node(bdi->id, &parent);
p                 957 mm/backing-dev.c 	rb_link_node(&bdi->rb_node, parent, p);
p                  20 mm/cma_debug.c 	struct page *p;
p                  26 mm/cma_debug.c 	unsigned long *p = data;
p                  28 mm/cma_debug.c 	*val = *p;
p                 102 mm/cma_debug.c 			cma_release(cma, mem->p, mem->n);
p                 106 mm/cma_debug.c 			cma_release(cma, mem->p, count);
p                 107 mm/cma_debug.c 			mem->p += count;
p                 134 mm/cma_debug.c 	struct page *p;
p                 140 mm/cma_debug.c 	p = cma_alloc(cma, count, 0, false);
p                 141 mm/cma_debug.c 	if (!p) {
p                 146 mm/cma_debug.c 	mem->p = p;
p                2628 mm/compaction.c static int kcompactd(void *p)
p                2630 mm/compaction.c 	pg_data_t *pgdat = (pg_data_t*)p;
p                 257 mm/early_ioremap.c 	char *p;
p                 264 mm/early_ioremap.c 		p = early_memremap(src & PAGE_MASK, clen + slop);
p                 265 mm/early_ioremap.c 		memcpy(dest, p + slop, clen);
p                 266 mm/early_ioremap.c 		early_memunmap(p, clen + slop);
p                1055 mm/hugetlb.c   	struct page *p = page + 1;
p                1058 mm/hugetlb.c   	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
p                1059 mm/hugetlb.c   		clear_compound_head(p);
p                1060 mm/hugetlb.c   		set_page_refcounted(p);
p                1385 mm/hugetlb.c   	struct page *p = page + 1;
p                1391 mm/hugetlb.c   	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
p                1404 mm/hugetlb.c   		__ClearPageReserved(p);
p                1405 mm/hugetlb.c   		set_page_count(p, 0);
p                1406 mm/hugetlb.c   		set_compound_head(p, page);
p                  17 mm/hwpoison-inject.c 	struct page *p;
p                  27 mm/hwpoison-inject.c 	p = pfn_to_page(pfn);
p                  28 mm/hwpoison-inject.c 	hpage = compound_head(p);
p                  32 mm/hwpoison-inject.c 	if (!get_hwpoison_page(p))
p                  42 mm/hwpoison-inject.c 	if (!PageLRU(hpage) && !PageHuge(p))
p                  58 mm/hwpoison-inject.c 	put_hwpoison_page(p);
p                 486 mm/internal.h  extern int hwpoison_filter(struct page *p);
p                  90 mm/kasan/common.c bool __kasan_check_read(const volatile void *p, unsigned int size)
p                  92 mm/kasan/common.c 	return check_memory_region((unsigned long)p, size, false, _RET_IP_);
p                  96 mm/kasan/common.c bool __kasan_check_write(const volatile void *p, unsigned int size)
p                  98 mm/kasan/common.c 	return check_memory_region((unsigned long)p, size, true, _RET_IP_);
p                  39 mm/kasan/generic_report.c 	void *p = addr;
p                  41 mm/kasan/generic_report.c 	while (p < addr + size && !(*(u8 *)kasan_mem_to_shadow(p)))
p                  42 mm/kasan/generic_report.c 		p += KASAN_SHADOW_SCALE_SIZE;
p                  43 mm/kasan/generic_report.c 	return p;
p                 129 mm/kasan/init.c 			pte_t *p;
p                 132 mm/kasan/init.c 				p = pte_alloc_one_kernel(&init_mm);
p                 134 mm/kasan/init.c 				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
p                 135 mm/kasan/init.c 			if (!p)
p                 138 mm/kasan/init.c 			pmd_populate_kernel(&init_mm, pmd, p);
p                 166 mm/kasan/init.c 			pmd_t *p;
p                 169 mm/kasan/init.c 				p = pmd_alloc(&init_mm, pud, addr);
p                 170 mm/kasan/init.c 				if (!p)
p                 207 mm/kasan/init.c 			pud_t *p;
p                 210 mm/kasan/init.c 				p = pud_alloc(&init_mm, p4d, addr);
p                 211 mm/kasan/init.c 				if (!p)
p                 280 mm/kasan/init.c 			p4d_t *p;
p                 283 mm/kasan/init.c 				p = p4d_alloc(&init_mm, pgd, addr);
p                 284 mm/kasan/init.c 				if (!p)
p                  69 mm/kasan/tags_report.c 	void *p = reset_tag(addr);
p                  70 mm/kasan/tags_report.c 	void *end = p + size;
p                  72 mm/kasan/tags_report.c 	while (p < end && tag == *(u8 *)kasan_mem_to_shadow(p))
p                  73 mm/kasan/tags_report.c 		p += KASAN_SHADOW_SCALE_SIZE;
p                  74 mm/kasan/tags_report.c 	return p;
p                1470 mm/kmemleak.c  		struct task_struct *p, *g;
p                1473 mm/kmemleak.c  		do_each_thread(g, p) {
p                1474 mm/kmemleak.c  			void *stack = try_get_task_stack(p);
p                1477 mm/kmemleak.c  				put_task_stack(p);
p                1479 mm/kmemleak.c  		} while_each_thread(g, p);
p                1884 mm/memblock.c  static int __init early_memblock(char *p)
p                1886 mm/memblock.c  	if (p && strstr(p, "debug"))
p                 527 mm/memcontrol.c 	struct rb_node **p = &mctz->rb_root.rb_node;
p                 538 mm/memcontrol.c 	while (*p) {
p                 539 mm/memcontrol.c 		parent = *p;
p                 543 mm/memcontrol.c 			p = &(*p)->rb_left;
p                 552 mm/memcontrol.c 			p = &(*p)->rb_right;
p                 558 mm/memcontrol.c 	rb_link_node(&mz->tree_node, parent, p);
p                 769 mm/memcontrol.c void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
p                 771 mm/memcontrol.c 	struct page *page = virt_to_head_page(p);
p                 789 mm/memcontrol.c void mod_memcg_obj_state(void *p, int idx, int val)
p                 794 mm/memcontrol.c 	memcg = mem_cgroup_from_obj(p);
p                 933 mm/memcontrol.c struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
p                 940 mm/memcontrol.c 	if (unlikely(!p))
p                 943 mm/memcontrol.c 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
p                1512 mm/memcontrol.c void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
p                1521 mm/memcontrol.c 	if (p) {
p                1523 mm/memcontrol.c 		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
p                2801 mm/memcontrol.c struct mem_cgroup *mem_cgroup_from_obj(void *p)
p                2808 mm/memcontrol.c 	page = virt_to_head_page(p);
p                5805 mm/memcontrol.c 	struct task_struct *leader, *p;
p                5820 mm/memcontrol.c 	p = NULL;
p                5822 mm/memcontrol.c 		WARN_ON_ONCE(p);
p                5823 mm/memcontrol.c 		p = leader;
p                5826 mm/memcontrol.c 	if (!p)
p                5838 mm/memcontrol.c 	from = mem_cgroup_from_task(p);
p                5842 mm/memcontrol.c 	mm = get_task_mm(p);
p                5846 mm/memcontrol.c 	if (mm->owner == p) {
p                  81 mm/memory-failure.c static int hwpoison_filter_dev(struct page *p)
p                  93 mm/memory-failure.c 	if (PageSlab(p))
p                  96 mm/memory-failure.c 	mapping = page_mapping(p);
p                 111 mm/memory-failure.c static int hwpoison_filter_flags(struct page *p)
p                 116 mm/memory-failure.c 	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
p                 136 mm/memory-failure.c static int hwpoison_filter_task(struct page *p)
p                 141 mm/memory-failure.c 	if (page_cgroup_ino(p) != hwpoison_filter_memcg)
p                 147 mm/memory-failure.c static int hwpoison_filter_task(struct page *p) { return 0; }
p                 150 mm/memory-failure.c int hwpoison_filter(struct page *p)
p                 155 mm/memory-failure.c 	if (hwpoison_filter_dev(p))
p                 158 mm/memory-failure.c 	if (hwpoison_filter_flags(p))
p                 161 mm/memory-failure.c 	if (hwpoison_filter_task(p))
p                 167 mm/memory-failure.c int hwpoison_filter(struct page *p)
p                 241 mm/memory-failure.c void shake_page(struct page *p, int access)
p                 243 mm/memory-failure.c 	if (PageHuge(p))
p                 246 mm/memory-failure.c 	if (!PageSlab(p)) {
p                 248 mm/memory-failure.c 		if (PageLRU(p))
p                 250 mm/memory-failure.c 		drain_all_pages(page_zone(p));
p                 251 mm/memory-failure.c 		if (PageLRU(p) || is_free_buddy_page(p))
p                 260 mm/memory-failure.c 		drop_slab_node(page_to_nid(p));
p                 308 mm/memory-failure.c static void add_to_kill(struct task_struct *tsk, struct page *p,
p                 325 mm/memory-failure.c 	tk->addr = page_address_in_vma(p, vma);
p                 326 mm/memory-failure.c 	if (is_zone_device_page(p))
p                 327 mm/memory-failure.c 		tk->size_shift = dev_pagemap_mapping_shift(p, vma);
p                 329 mm/memory-failure.c 		tk->size_shift = compound_order(compound_head(p)) + PAGE_SHIFT;
p                 343 mm/memory-failure.c 			page_to_pfn(p), tsk->comm);
p                 568 mm/memory-failure.c static int delete_from_lru_cache(struct page *p)
p                 570 mm/memory-failure.c 	if (!isolate_lru_page(p)) {
p                 575 mm/memory-failure.c 		ClearPageActive(p);
p                 576 mm/memory-failure.c 		ClearPageUnevictable(p);
p                 582 mm/memory-failure.c 		mem_cgroup_uncharge(p);
p                 587 mm/memory-failure.c 		put_page(p);
p                 593 mm/memory-failure.c static int truncate_error_page(struct page *p, unsigned long pfn,
p                 599 mm/memory-failure.c 		int err = mapping->a_ops->error_remove_page(mapping, p);
p                 604 mm/memory-failure.c 		} else if (page_has_private(p) &&
p                 605 mm/memory-failure.c 			   !try_to_release_page(p, GFP_NOIO)) {
p                 616 mm/memory-failure.c 		if (invalidate_inode_page(p))
p                 631 mm/memory-failure.c static int me_kernel(struct page *p, unsigned long pfn)
p                 639 mm/memory-failure.c static int me_unknown(struct page *p, unsigned long pfn)
p                 648 mm/memory-failure.c static int me_pagecache_clean(struct page *p, unsigned long pfn)
p                 652 mm/memory-failure.c 	delete_from_lru_cache(p);
p                 658 mm/memory-failure.c 	if (PageAnon(p))
p                 668 mm/memory-failure.c 	mapping = page_mapping(p);
p                 681 mm/memory-failure.c 	return truncate_error_page(p, pfn, mapping);
p                 689 mm/memory-failure.c static int me_pagecache_dirty(struct page *p, unsigned long pfn)
p                 691 mm/memory-failure.c 	struct address_space *mapping = page_mapping(p);
p                 693 mm/memory-failure.c 	SetPageError(p);
p                 733 mm/memory-failure.c 	return me_pagecache_clean(p, pfn);
p                 755 mm/memory-failure.c static int me_swapcache_dirty(struct page *p, unsigned long pfn)
p                 757 mm/memory-failure.c 	ClearPageDirty(p);
p                 759 mm/memory-failure.c 	ClearPageUptodate(p);
p                 761 mm/memory-failure.c 	if (!delete_from_lru_cache(p))
p                 767 mm/memory-failure.c static int me_swapcache_clean(struct page *p, unsigned long pfn)
p                 769 mm/memory-failure.c 	delete_from_swap_cache(p);
p                 771 mm/memory-failure.c 	if (!delete_from_lru_cache(p))
p                 783 mm/memory-failure.c static int me_huge_page(struct page *p, unsigned long pfn)
p                 786 mm/memory-failure.c 	struct page *hpage = compound_head(p);
p                 804 mm/memory-failure.c 		dissolve_free_huge_page(p);
p                 839 mm/memory-failure.c 	int (*action)(struct page *p, unsigned long pfn);
p                 897 mm/memory-failure.c static int page_action(struct page_state *ps, struct page *p,
p                 903 mm/memory-failure.c 	result = ps->action(p, pfn);
p                 905 mm/memory-failure.c 	count = page_count(p) - 1;
p                 965 mm/memory-failure.c static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
p                 980 mm/memory-failure.c 	if (PageReserved(p) || PageSlab(p))
p                 982 mm/memory-failure.c 	if (!(PageLRU(hpage) || PageHuge(p)))
p                 992 mm/memory-failure.c 	if (PageKsm(p)) {
p                 997 mm/memory-failure.c 	if (PageSwapCache(p)) {
p                1061 mm/memory-failure.c static int identify_page_state(unsigned long pfn, struct page *p,
p                1072 mm/memory-failure.c 		if ((p->flags & ps->mask) == ps->res)
p                1075 mm/memory-failure.c 	page_flags |= (p->flags & (1UL << PG_dirty));
p                1081 mm/memory-failure.c 	return page_action(ps, p, pfn);
p                1086 mm/memory-failure.c 	struct page *p = pfn_to_page(pfn);
p                1087 mm/memory-failure.c 	struct page *head = compound_head(p);
p                1099 mm/memory-failure.c 	if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
p                1105 mm/memory-failure.c 			if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
p                1106 mm/memory-failure.c 			    || (p != head && TestSetPageHWPoison(head))) {
p                1113 mm/memory-failure.c 		dissolve_free_huge_page(p);
p                1144 mm/memory-failure.c 	if (!hwpoison_user_mappings(p, pfn, flags, &head)) {
p                1150 mm/memory-failure.c 	res = identify_page_state(pfn, p, page_flags);
p                1250 mm/memory-failure.c 	struct page *p;
p                1260 mm/memory-failure.c 	p = pfn_to_online_page(pfn);
p                1261 mm/memory-failure.c 	if (!p) {
p                1273 mm/memory-failure.c 	if (PageHuge(p))
p                1275 mm/memory-failure.c 	if (TestSetPageHWPoison(p)) {
p                1281 mm/memory-failure.c 	orig_head = hpage = compound_head(p);
p                1295 mm/memory-failure.c 	if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
p                1296 mm/memory-failure.c 		if (is_free_buddy_page(p)) {
p                1306 mm/memory-failure.c 		lock_page(p);
p                1307 mm/memory-failure.c 		if (!PageAnon(p) || unlikely(split_huge_page(p))) {
p                1308 mm/memory-failure.c 			unlock_page(p);
p                1309 mm/memory-failure.c 			if (!PageAnon(p))
p                1315 mm/memory-failure.c 			if (TestClearPageHWPoison(p))
p                1317 mm/memory-failure.c 			put_hwpoison_page(p);
p                1320 mm/memory-failure.c 		unlock_page(p);
p                1321 mm/memory-failure.c 		VM_BUG_ON_PAGE(!page_count(p), p);
p                1322 mm/memory-failure.c 		hpage = compound_head(p);
p                1333 mm/memory-failure.c 	shake_page(p, 0);
p                1335 mm/memory-failure.c 	if (!PageLRU(p) && is_free_buddy_page(p)) {
p                1343 mm/memory-failure.c 	lock_page(p);
p                1349 mm/memory-failure.c 	if (PageCompound(p) && compound_head(p) != orig_head) {
p                1362 mm/memory-failure.c 	if (PageHuge(p))
p                1365 mm/memory-failure.c 		page_flags = p->flags;
p                1370 mm/memory-failure.c 	if (!PageHWPoison(p)) {
p                1373 mm/memory-failure.c 		unlock_page(p);
p                1374 mm/memory-failure.c 		put_hwpoison_page(p);
p                1377 mm/memory-failure.c 	if (hwpoison_filter(p)) {
p                1378 mm/memory-failure.c 		if (TestClearPageHWPoison(p))
p                1380 mm/memory-failure.c 		unlock_page(p);
p                1381 mm/memory-failure.c 		put_hwpoison_page(p);
p                1385 mm/memory-failure.c 	if (!PageTransTail(p) && !PageLRU(p))
p                1392 mm/memory-failure.c 	wait_on_page_writeback(p);
p                1401 mm/memory-failure.c 	if (!hwpoison_user_mappings(p, pfn, flags, &hpage)) {
p                1410 mm/memory-failure.c 	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
p                1417 mm/memory-failure.c 	res = identify_page_state(pfn, p, page_flags);
p                1419 mm/memory-failure.c 	unlock_page(p);
p                1536 mm/memory-failure.c 	struct page *p;
p                1544 mm/memory-failure.c 	p = pfn_to_page(pfn);
p                1545 mm/memory-failure.c 	page = compound_head(p);
p                1547 mm/memory-failure.c 	if (!PageHWPoison(p)) {
p                1582 mm/memory-failure.c 	if (!get_hwpoison_page(p)) {
p                1583 mm/memory-failure.c 		if (TestClearPageHWPoison(p))
p                1606 mm/memory-failure.c 	if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
p                1613 mm/memory-failure.c static struct page *new_page(struct page *p, unsigned long private)
p                1615 mm/memory-failure.c 	int nid = page_to_nid(p);
p                1617 mm/memory-failure.c 	return new_page_nodemask(p, nid, &node_states[N_MEMORY]);
p                1626 mm/memory-failure.c static int __get_any_page(struct page *p, unsigned long pfn, int flags)
p                1637 mm/memory-failure.c 	if (!get_hwpoison_page(p)) {
p                1638 mm/memory-failure.c 		if (PageHuge(p)) {
p                1641 mm/memory-failure.c 		} else if (is_free_buddy_page(p)) {
p                1646 mm/memory-failure.c 				__func__, pfn, p->flags);
p                4436 mm/memory.c    			char *p;
p                4438 mm/memory.c    			p = file_path(f, buf, PAGE_SIZE);
p                4439 mm/memory.c    			if (IS_ERR(p))
p                4440 mm/memory.c    				p = "?";
p                4441 mm/memory.c    			printk("%s%s[%lx+%lx]", prefix, kbasename(p),
p                4529 mm/memory.c    	struct page *p = page;
p                4533 mm/memory.c    	     i++, p = mem_map_next(p, page, i)) {
p                4535 mm/memory.c    		clear_user_highpage(p, addr + i * PAGE_SIZE);
p                 917 mm/memory_hotplug.c 			struct per_cpu_nodestat *p;
p                 919 mm/memory_hotplug.c 			p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
p                 920 mm/memory_hotplug.c 			memset(p, 0, sizeof(*p));
p                1409 mm/memory_hotplug.c static int __init cmdline_parse_movable_node(char *p)
p                 130 mm/mempolicy.c struct mempolicy *get_task_policy(struct task_struct *p)
p                 132 mm/mempolicy.c 	struct mempolicy *pol = p->mempolicy;
p                 287 mm/mempolicy.c void __mpol_put(struct mempolicy *p)
p                 289 mm/mempolicy.c 	if (!atomic_dec_and_test(&p->refcnt))
p                 291 mm/mempolicy.c 	kmem_cache_free(policy_cache, p);
p                 832 mm/mempolicy.c static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
p                 835 mm/mempolicy.c 	if (p == &default_policy)
p                 838 mm/mempolicy.c 	switch (p->mode) {
p                 842 mm/mempolicy.c 		*nodes = p->v.nodes;
p                 845 mm/mempolicy.c 		if (!(p->flags & MPOL_F_LOCAL))
p                 846 mm/mempolicy.c 			node_set(p->v.preferred_node, *nodes);
p                 856 mm/mempolicy.c 	struct page *p;
p                 860 mm/mempolicy.c 	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
p                 862 mm/mempolicy.c 		err = page_to_nid(p);
p                 863 mm/mempolicy.c 		put_page(p);
p                2293 mm/mempolicy.c 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
p                2295 mm/mempolicy.c 		if (start >= p->end)
p                2297 mm/mempolicy.c 		else if (end <= p->start)
p                2323 mm/mempolicy.c 	struct rb_node **p = &sp->root.rb_node;
p                2327 mm/mempolicy.c 	while (*p) {
p                2328 mm/mempolicy.c 		parent = *p;
p                2331 mm/mempolicy.c 			p = &(*p)->rb_left;
p                2333 mm/mempolicy.c 			p = &(*p)->rb_right;
p                2337 mm/mempolicy.c 	rb_link_node(&new->nd, parent, p);
p                2644 mm/mempolicy.c void mpol_free_shared_policy(struct shared_policy *p)
p                2649 mm/mempolicy.c 	if (!p->root.rb_node)
p                2651 mm/mempolicy.c 	write_lock(&p->lock);
p                2652 mm/mempolicy.c 	next = rb_first(&p->root);
p                2656 mm/mempolicy.c 		sp_delete(p, n);
p                2658 mm/mempolicy.c 	write_unlock(&p->lock);
p                2930 mm/mempolicy.c 	char *p = buffer;
p                2955 mm/mempolicy.c 		snprintf(p, maxlen, "unknown");
p                2959 mm/mempolicy.c 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
p                2962 mm/mempolicy.c 		p += snprintf(p, buffer + maxlen - p, "=");
p                2968 mm/mempolicy.c 			p += snprintf(p, buffer + maxlen - p, "static");
p                2970 mm/mempolicy.c 			p += snprintf(p, buffer + maxlen - p, "relative");
p                2974 mm/mempolicy.c 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
p                  37 mm/memtest.c   	u64 *p, *start, *end;
p                  48 mm/memtest.c   	for (p = start; p < end; p++)
p                  49 mm/memtest.c   		*p = pattern;
p                  51 mm/memtest.c   	for (p = start; p < end; p++, start_phys_aligned += incr) {
p                  52 mm/memtest.c   		if (*p == pattern)
p                1608 mm/migrate.c   		const void __user *p;
p                1613 mm/migrate.c   		if (get_user(p, pages + i))
p                1617 mm/migrate.c   		addr = (unsigned long)untagged_addr(p);
p                1868 mm/migrate.c   		compat_uptr_t p;
p                1870 mm/migrate.c   		if (get_user(p, pages32 + i) ||
p                1871 mm/migrate.c   			put_user(compat_ptr(p), pages + i))
p                2507 mm/mmap.c      static int __init cmdline_parse_stack_guard_gap(char *p)
p                2512 mm/mmap.c      	val = simple_strtoul(p, &endptr, 10);
p                 434 mm/mremap.c    	unsigned long old_len, unsigned long new_len, unsigned long *p)
p                 492 mm/mremap.c    		*p = charged;
p                 459 mm/nommu.c     	struct rb_node *p, *lastp;
p                 469 mm/nommu.c     	while ((p = rb_next(lastp))) {
p                 470 mm/nommu.c     		region = rb_entry(p, struct vm_region, vm_rb);
p                 477 mm/nommu.c     		lastp = p;
p                 492 mm/nommu.c     	struct rb_node **p, *parent;
p                 497 mm/nommu.c     	p = &nommu_region_tree.rb_node;
p                 498 mm/nommu.c     	while (*p) {
p                 499 mm/nommu.c     		parent = *p;
p                 502 mm/nommu.c     			p = &(*p)->rb_left;
p                 504 mm/nommu.c     			p = &(*p)->rb_right;
p                 511 mm/nommu.c     	rb_link_node(&region->vm_rb, parent, p);
p                 590 mm/nommu.c     	struct rb_node **p, *parent, *rb_prev;
p                 610 mm/nommu.c     	p = &mm->mm_rb.rb_node;
p                 611 mm/nommu.c     	while (*p) {
p                 612 mm/nommu.c     		parent = *p;
p                 618 mm/nommu.c     			p = &(*p)->rb_left;
p                 621 mm/nommu.c     			p = &(*p)->rb_right;
p                 623 mm/nommu.c     			p = &(*p)->rb_left;
p                 626 mm/nommu.c     			p = &(*p)->rb_right;
p                 628 mm/nommu.c     			p = &(*p)->rb_left;
p                 631 mm/nommu.c     			p = &(*p)->rb_right;
p                 636 mm/nommu.c     	rb_link_node(&vma->vm_rb, parent, p);
p                 132 mm/oom_kill.c  struct task_struct *find_lock_task_mm(struct task_struct *p)
p                 138 mm/oom_kill.c  	for_each_thread(p, t) {
p                 161 mm/oom_kill.c  static bool oom_unkillable_task(struct task_struct *p)
p                 163 mm/oom_kill.c  	if (is_global_init(p))
p                 165 mm/oom_kill.c  	if (p->flags & PF_KTHREAD)
p                 198 mm/oom_kill.c  unsigned long oom_badness(struct task_struct *p, unsigned long totalpages)
p                 203 mm/oom_kill.c  	if (oom_unkillable_task(p))
p                 206 mm/oom_kill.c  	p = find_lock_task_mm(p);
p                 207 mm/oom_kill.c  	if (!p)
p                 215 mm/oom_kill.c  	adj = (long)p->signal->oom_score_adj;
p                 217 mm/oom_kill.c  			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
p                 218 mm/oom_kill.c  			in_vfork(p)) {
p                 219 mm/oom_kill.c  		task_unlock(p);
p                 227 mm/oom_kill.c  	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
p                 228 mm/oom_kill.c  		mm_pgtables_bytes(p->mm) / PAGE_SIZE;
p                 229 mm/oom_kill.c  	task_unlock(p);
p                 370 mm/oom_kill.c  		struct task_struct *p;
p                 373 mm/oom_kill.c  		for_each_process(p)
p                 374 mm/oom_kill.c  			if (oom_evaluate_task(p, oc))
p                 380 mm/oom_kill.c  static int dump_task(struct task_struct *p, void *arg)
p                 385 mm/oom_kill.c  	if (oom_unkillable_task(p))
p                 389 mm/oom_kill.c  	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc))
p                 392 mm/oom_kill.c  	task = find_lock_task_mm(p);
p                 431 mm/oom_kill.c  		struct task_struct *p;
p                 434 mm/oom_kill.c  		for_each_process(p)
p                 435 mm/oom_kill.c  			dump_task(p, oc);
p                 452 mm/oom_kill.c  static void dump_header(struct oom_control *oc, struct task_struct *p)
p                 470 mm/oom_kill.c  	if (p)
p                 471 mm/oom_kill.c  		dump_oom_summary(oc, p);
p                 490 mm/oom_kill.c  bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
p                 494 mm/oom_kill.c  	for_each_thread(p, t) {
p                 812 mm/oom_kill.c  	struct task_struct *p;
p                 842 mm/oom_kill.c  	for_each_process(p) {
p                 843 mm/oom_kill.c  		if (!process_shares_mm(p, mm))
p                 845 mm/oom_kill.c  		if (same_thread_group(task, p))
p                 847 mm/oom_kill.c  		ret = __task_will_free_mem(p);
p                 858 mm/oom_kill.c  	struct task_struct *p;
p                 862 mm/oom_kill.c  	p = find_lock_task_mm(victim);
p                 863 mm/oom_kill.c  	if (!p) {
p                 866 mm/oom_kill.c  	} else if (victim != p) {
p                 867 mm/oom_kill.c  		get_task_struct(p);
p                 869 mm/oom_kill.c  		victim = p;
p                 906 mm/oom_kill.c  	for_each_process(p) {
p                 907 mm/oom_kill.c  		if (!process_shares_mm(p, mm))
p                 909 mm/oom_kill.c  		if (same_thread_group(p, victim))
p                 911 mm/oom_kill.c  		if (is_global_init(p)) {
p                 916 mm/oom_kill.c  					task_pid_nr(p), p->comm);
p                 923 mm/oom_kill.c  		if (unlikely(p->flags & PF_KTHREAD))
p                 925 mm/oom_kill.c  		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
p                1870 mm/page-writeback.c 	int *p;
p                1891 mm/page-writeback.c 	p =  this_cpu_ptr(&bdp_ratelimits);
p                1893 mm/page-writeback.c 		*p = 0;
p                1894 mm/page-writeback.c 	else if (unlikely(*p >= ratelimit_pages)) {
p                1895 mm/page-writeback.c 		*p = 0;
p                1903 mm/page-writeback.c 	p = this_cpu_ptr(&dirty_throttle_leaks);
p                1904 mm/page-writeback.c 	if (*p > 0 && current->nr_dirtied < ratelimit) {
p                1906 mm/page-writeback.c 		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
p                1907 mm/page-writeback.c 		*p -= nr_pages_dirtied;
p                 435 mm/page_alloc.c #define kasan_free_nondeferred_pages(p, o)	kasan_free_pages(p, o)
p                 686 mm/page_alloc.c 		struct page *p = page + i;
p                 687 mm/page_alloc.c 		set_page_count(p, 0);
p                 688 mm/page_alloc.c 		p->mapping = TAIL_MAPPING;
p                 689 mm/page_alloc.c 		set_compound_head(p, page);
p                1433 mm/page_alloc.c 	struct page *p = page;
p                1436 mm/page_alloc.c 	prefetchw(p);
p                1437 mm/page_alloc.c 	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
p                1438 mm/page_alloc.c 		prefetchw(p + 1);
p                1439 mm/page_alloc.c 		__ClearPageReserved(p);
p                1440 mm/page_alloc.c 		set_page_count(p, 0);
p                1442 mm/page_alloc.c 	__ClearPageReserved(p);
p                1443 mm/page_alloc.c 	set_page_count(p, 0);
p                1977 mm/page_alloc.c 	struct page *p = page;
p                1980 mm/page_alloc.c 		__ClearPageReserved(p);
p                1981 mm/page_alloc.c 		set_page_count(p, 0);
p                1982 mm/page_alloc.c 	} while (++p, --i);
p                1988 mm/page_alloc.c 		p = page;
p                1990 mm/page_alloc.c 			set_page_refcounted(p);
p                1991 mm/page_alloc.c 			__free_pages(p, MAX_ORDER - 1);
p                1992 mm/page_alloc.c 			p += MAX_ORDER_NR_PAGES;
p                2132 mm/page_alloc.c 		struct page *p = page + i;
p                2134 mm/page_alloc.c 		if (unlikely(check_new_page(p)))
p                5002 mm/page_alloc.c 	struct page *p;
p                5007 mm/page_alloc.c 	p = alloc_pages_node(nid, gfp_mask, order);
p                5008 mm/page_alloc.c 	if (!p)
p                5010 mm/page_alloc.c 	return make_alloc_exact((unsigned long)page_address(p), order, size);
p                5227 mm/page_alloc.c 	char *p = tmp;
p                5232 mm/page_alloc.c 			*p++ = types[i];
p                5235 mm/page_alloc.c 	*p = '\0';
p                5738 mm/page_alloc.c static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
p                6111 mm/page_alloc.c static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
p                6113 mm/page_alloc.c 	pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
p                6116 mm/page_alloc.c static void pageset_init(struct per_cpu_pageset *p)
p                6121 mm/page_alloc.c 	memset(p, 0, sizeof(*p));
p                6123 mm/page_alloc.c 	pcp = &p->pcp;
p                6128 mm/page_alloc.c static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
p                6130 mm/page_alloc.c 	pageset_init(p);
p                6131 mm/page_alloc.c 	pageset_set_batch(p, batch);
p                6138 mm/page_alloc.c static void pageset_set_high(struct per_cpu_pageset *p,
p                6145 mm/page_alloc.c 	pageset_update(&p->pcp, high, batch);
p                7424 mm/page_alloc.c static int __init cmdline_parse_core(char *p, unsigned long *core,
p                7430 mm/page_alloc.c 	if (!p)
p                7434 mm/page_alloc.c 	coremem = simple_strtoull(p, &endptr, 0);
p                7441 mm/page_alloc.c 		coremem = memparse(p, &p);
p                7455 mm/page_alloc.c static int __init cmdline_parse_kernelcore(char *p)
p                7458 mm/page_alloc.c 	if (parse_option_str(p, "mirror")) {
p                7463 mm/page_alloc.c 	return cmdline_parse_core(p, &required_kernelcore,
p                7471 mm/page_alloc.c static int __init cmdline_parse_movablecore(char *p)
p                7473 mm/page_alloc.c 	return cmdline_parse_core(p, &required_movablecore,
p                  57 mm/percpu-stats.c 	int *alloc_sizes, *p;
p                 111 mm/percpu-stats.c 		for (i = 0, p = alloc_sizes; *p < 0 && i < as_len; i++, p++) {
p                 112 mm/percpu-stats.c 			sum_frag -= *p;
p                 113 mm/percpu-stats.c 			max_frag = max(max_frag, -1 * (*p));
p                2184 mm/slab.c      	struct list_head *p;
p                2192 mm/slab.c      		p = n->slabs_free.prev;
p                2193 mm/slab.c      		if (p == &n->slabs_free) {
p                2198 mm/slab.c      		page = list_entry(p, struct page, slab_list);
p                3494 mm/slab.c      				  size_t size, void **p, unsigned long caller)
p                3499 mm/slab.c      		p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
p                3503 mm/slab.c      			  void **p)
p                3519 mm/slab.c      		p[i] = objp;
p                3523 mm/slab.c      	cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
p                3528 mm/slab.c      			memset(p[i], 0, s->object_size);
p                3530 mm/slab.c      	slab_post_alloc_hook(s, flags, size, p);
p                3535 mm/slab.c      	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
p                3536 mm/slab.c      	slab_post_alloc_hook(s, flags, i, p);
p                3537 mm/slab.c      	__kmem_cache_free_bulk(s, i, p);
p                3700 mm/slab.c      void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
p                3707 mm/slab.c      		void *objp = p[i];
p                 298 mm/slab.h      				      struct kmem_cache *p)
p                 300 mm/slab.h      	return p == s || p == s->memcg_params.root_cache;
p                 426 mm/slab.h      				      struct kmem_cache *p)
p                 428 mm/slab.h      	return s == p;
p                 578 mm/slab.h      					size_t size, void **p)
p                 584 mm/slab.h      		p[i] = kasan_slab_alloc(s, p[i], flags);
p                 586 mm/slab.h      		kmemleak_alloc_recursive(p[i], s->object_size, 1,
p                 644 mm/slab.h      void *slab_next(struct seq_file *m, void *p, loff_t *pos);
p                 645 mm/slab.h      void slab_stop(struct seq_file *m, void *p);
p                 647 mm/slab.h      void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
p                 648 mm/slab.h      void memcg_slab_stop(struct seq_file *m, void *p);
p                 649 mm/slab.h      int memcg_slab_show(struct seq_file *m, void *p);
p                 104 mm/slab_common.c void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
p                 110 mm/slab_common.c 			kmem_cache_free(s, p[i]);
p                 112 mm/slab_common.c 			kfree(p[i]);
p                 117 mm/slab_common.c 								void **p)
p                 122 mm/slab_common.c 		void *x = p[i] = kmem_cache_alloc(s, flags);
p                 124 mm/slab_common.c 			__kmem_cache_free_bulk(s, i, p);
p                1421 mm/slab_common.c void *slab_next(struct seq_file *m, void *p, loff_t *pos)
p                1423 mm/slab_common.c 	return seq_list_next(p, &slab_root_caches, pos);
p                1426 mm/slab_common.c void slab_stop(struct seq_file *m, void *p)
p                1473 mm/slab_common.c static int slab_show(struct seq_file *m, void *p)
p                1475 mm/slab_common.c 	struct kmem_cache *s = list_entry(p, struct kmem_cache, root_caches_node);
p                1477 mm/slab_common.c 	if (p == slab_root_caches.next)
p                1526 mm/slab_common.c void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos)
p                1530 mm/slab_common.c 	return seq_list_next(p, &memcg->kmem_caches, pos);
p                1533 mm/slab_common.c void memcg_slab_stop(struct seq_file *m, void *p)
p                1538 mm/slab_common.c int memcg_slab_show(struct seq_file *m, void *p)
p                1540 mm/slab_common.c 	struct kmem_cache *s = list_entry(p, struct kmem_cache,
p                1544 mm/slab_common.c 	if (p == memcg->kmem_caches.next)
p                1651 mm/slab_common.c static __always_inline void *__do_krealloc(const void *p, size_t new_size,
p                1657 mm/slab_common.c 	if (p)
p                1658 mm/slab_common.c 		ks = ksize(p);
p                1661 mm/slab_common.c 		p = kasan_krealloc((void *)p, new_size, flags);
p                1662 mm/slab_common.c 		return (void *)p;
p                1666 mm/slab_common.c 	if (ret && p)
p                1667 mm/slab_common.c 		memcpy(ret, p, ks);
p                1684 mm/slab_common.c void *__krealloc(const void *p, size_t new_size, gfp_t flags)
p                1689 mm/slab_common.c 	return __do_krealloc(p, new_size, flags);
p                1707 mm/slab_common.c void *krealloc(const void *p, size_t new_size, gfp_t flags)
p                1712 mm/slab_common.c 		kfree(p);
p                1716 mm/slab_common.c 	ret = __do_krealloc(p, new_size, flags);
p                1717 mm/slab_common.c 	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
p                1718 mm/slab_common.c 		kfree(p);
p                1735 mm/slab_common.c void kzfree(const void *p)
p                1738 mm/slab_common.c 	void *mem = (void *)p;
p                 674 mm/slob.c      void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
p                 676 mm/slob.c      	__kmem_cache_free_bulk(s, size, p);
p                 681 mm/slob.c      								void **p)
p                 683 mm/slob.c      	return __kmem_cache_alloc_bulk(s, flags, size, p);
p                 128 mm/slub.c      void *fixup_red_left(struct kmem_cache *s, void *p)
p                 131 mm/slub.c      		p += s->red_left_pad;
p                 133 mm/slub.c      	return p;
p                 223 mm/slub.c      static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
p                 291 mm/slub.c      	void *p;
p                 297 mm/slub.c      	probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
p                 298 mm/slub.c      	return freelist_ptr(s, p, freepointer_addr);
p                 319 mm/slub.c      static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
p                 321 mm/slub.c      	return (kasan_reset_tag(p) - addr) / s->size;
p                 452 mm/slub.c      	void *p;
p                 455 mm/slub.c      	for (p = page->freelist; p; p = get_freepointer(s, p))
p                 456 mm/slub.c      		set_bit(slab_index(p, s, addr), map);
p                 467 mm/slub.c      static inline void *restore_red_left(struct kmem_cache *s, void *p)
p                 470 mm/slub.c      		p -= s->red_left_pad;
p                 472 mm/slub.c      	return p;
p                 539 mm/slub.c      	struct track *p;
p                 542 mm/slub.c      		p = object + s->offset + sizeof(void *);
p                 544 mm/slub.c      		p = object + s->inuse;
p                 546 mm/slub.c      	return p + alloc;
p                 552 mm/slub.c      	struct track *p = get_track(s, object, alloc);
p                 559 mm/slub.c      		nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3);
p                 563 mm/slub.c      			p->addrs[nr_entries] = 0;
p                 565 mm/slub.c      		p->addr = addr;
p                 566 mm/slub.c      		p->cpu = smp_processor_id();
p                 567 mm/slub.c      		p->pid = current->pid;
p                 568 mm/slub.c      		p->when = jiffies;
p                 570 mm/slub.c      		memset(p, 0, sizeof(struct track));
p                 647 mm/slub.c      static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
p                 652 mm/slub.c      	print_tracking(s, p);
p                 657 mm/slub.c      	       p, p - addr, get_freepointer(s, p));
p                 660 mm/slub.c      		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
p                 662 mm/slub.c      	else if (p > addr + 16)
p                 663 mm/slub.c      		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
p                 665 mm/slub.c      	print_section(KERN_ERR, "Object ", p,
p                 668 mm/slub.c      		print_section(KERN_ERR, "Redzone ", p + s->object_size,
p                 683 mm/slub.c      		print_section(KERN_ERR, "Padding ", p + off,
p                 712 mm/slub.c      	u8 *p = object;
p                 715 mm/slub.c      		memset(p - s->red_left_pad, val, s->red_left_pad);
p                 718 mm/slub.c      		memset(p, POISON_FREE, s->object_size - 1);
p                 719 mm/slub.c      		p[s->object_size - 1] = POISON_END;
p                 723 mm/slub.c      		memset(p + s->object_size, val, s->inuse - s->object_size);
p                 797 mm/slub.c      static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
p                 814 mm/slub.c      	return check_bytes_and_report(s, page, p, "Object padding",
p                 815 mm/slub.c      			p + off, POISON_INUSE, size_from_object(s) - off);
p                 857 mm/slub.c      	u8 *p = object;
p                 870 mm/slub.c      			check_bytes_and_report(s, page, p, "Alignment padding",
p                 878 mm/slub.c      			(!check_bytes_and_report(s, page, p, "Poison", p,
p                 880 mm/slub.c      			 !check_bytes_and_report(s, page, p, "Poison",
p                 881 mm/slub.c      				p + s->object_size - 1, POISON_END, 1)))
p                 886 mm/slub.c      		check_pad_bytes(s, page, p);
p                 897 mm/slub.c      	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
p                 898 mm/slub.c      		object_err(s, page, p, "Freepointer corrupt");
p                 904 mm/slub.c      		set_freepointer(s, p, NULL);
p                1621 mm/slub.c      	void *start, *p, *next;
p                1673 mm/slub.c      		for (idx = 0, p = start; idx < page->objects - 1; idx++) {
p                1674 mm/slub.c      			next = p + s->size;
p                1676 mm/slub.c      			set_freepointer(s, p, next);
p                1677 mm/slub.c      			p = next;
p                1679 mm/slub.c      		set_freepointer(s, p, NULL);
p                1716 mm/slub.c      		void *p;
p                1719 mm/slub.c      		for_each_object(p, s, page_address(page),
p                1721 mm/slub.c      			check_object(s, page, p, SLUB_RED_INACTIVE);
p                2643 mm/slub.c      	void *p;
p                2656 mm/slub.c      	p = ___slab_alloc(s, gfpflags, node, addr, c);
p                2658 mm/slub.c      	return p;
p                3057 mm/slub.c      			    void **p, struct detached_freelist *df)
p                3068 mm/slub.c      		object = p[--size];
p                3082 mm/slub.c      			p[size] = NULL; /* mark object processed */
p                3096 mm/slub.c      	p[size] = NULL; /* mark object processed */
p                3100 mm/slub.c      		object = p[--size];
p                3110 mm/slub.c      			p[size] = NULL; /* mark object processed */
p                3127 mm/slub.c      void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
p                3135 mm/slub.c      		size = build_detached_freelist(s, size, p, &df);
p                3146 mm/slub.c      			  void **p)
p                3180 mm/slub.c      			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
p                3182 mm/slub.c      			if (unlikely(!p[i]))
p                3186 mm/slub.c      			maybe_wipe_obj_freeptr(s, p[i]);
p                3191 mm/slub.c      		p[i] = object;
p                3192 mm/slub.c      		maybe_wipe_obj_freeptr(s, p[i]);
p                3202 mm/slub.c      			memset(p[j], 0, s->object_size);
p                3206 mm/slub.c      	slab_post_alloc_hook(s, flags, size, p);
p                3210 mm/slub.c      	slab_post_alloc_hook(s, flags, i, p);
p                3211 mm/slub.c      	__kmem_cache_free_bulk(s, i, p);
p                3695 mm/slub.c      	void *p;
p                3703 mm/slub.c      	for_each_object(p, s, addr, page->objects) {
p                3705 mm/slub.c      		if (!test_bit(slab_index(p, s, addr), map)) {
p                3706 mm/slub.c      			pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
p                3707 mm/slub.c      			print_tracking(s, p);
p                4226 mm/slub.c      		struct page *p;
p                4228 mm/slub.c      		list_for_each_entry(p, &n->partial, slab_list)
p                4229 mm/slub.c      			p->slab_cache = s;
p                4232 mm/slub.c      		list_for_each_entry(p, &n->full, slab_list)
p                4233 mm/slub.c      			p->slab_cache = s;
p                4408 mm/slub.c      	void *p;
p                4419 mm/slub.c      	for_each_object(p, s, addr, page->objects) {
p                4420 mm/slub.c      		if (test_bit(slab_index(p, s, addr), map))
p                4421 mm/slub.c      			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
p                4425 mm/slub.c      	for_each_object(p, s, addr, page->objects)
p                4426 mm/slub.c      		if (!test_bit(slab_index(p, s, addr), map))
p                4427 mm/slub.c      			if (!check_object(s, page, p, SLUB_RED_ACTIVE))
p                4620 mm/slub.c      	void *p;
p                4625 mm/slub.c      	for_each_object(p, s, addr, page->objects)
p                4626 mm/slub.c      		if (!test_bit(slab_index(p, s, addr), map))
p                4627 mm/slub.c      			add_location(t, s, get_track(s, p, alloc));
p                4718 mm/slub.c      	u8 *p;
p                4727 mm/slub.c      	p = kzalloc(16, GFP_KERNEL);
p                4728 mm/slub.c      	p[16] = 0x12;
p                4730 mm/slub.c      	       p + 16);
p                4735 mm/slub.c      	p = kzalloc(32, GFP_KERNEL);
p                4736 mm/slub.c      	p[32 + sizeof(void *)] = 0x34;
p                4738 mm/slub.c      	       p);
p                4742 mm/slub.c      	p = kzalloc(64, GFP_KERNEL);
p                4743 mm/slub.c      	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
p                4744 mm/slub.c      	*p = 0x56;
p                4746 mm/slub.c      	       p);
p                4751 mm/slub.c      	p = kzalloc(128, GFP_KERNEL);
p                4752 mm/slub.c      	kfree(p);
p                4753 mm/slub.c      	*p = 0x78;
p                4754 mm/slub.c      	pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
p                4757 mm/slub.c      	p = kzalloc(256, GFP_KERNEL);
p                4758 mm/slub.c      	kfree(p);
p                4759 mm/slub.c      	p[50] = 0x9a;
p                4760 mm/slub.c      	pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
p                4763 mm/slub.c      	p = kzalloc(512, GFP_KERNEL);
p                4764 mm/slub.c      	kfree(p);
p                4765 mm/slub.c      	p[512] = 0xab;
p                4766 mm/slub.c      	pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
p                5691 mm/slub.c      	char *p = name;
p                5695 mm/slub.c      	*p++ = ':';
p                5704 mm/slub.c      		*p++ = 'd';
p                5706 mm/slub.c      		*p++ = 'D';
p                5708 mm/slub.c      		*p++ = 'a';
p                5710 mm/slub.c      		*p++ = 'F';
p                5712 mm/slub.c      		*p++ = 'A';
p                5713 mm/slub.c      	if (p != name + 1)
p                5714 mm/slub.c      		*p++ = '-';
p                5715 mm/slub.c      	p += sprintf(p, "%07u", s->size);
p                5717 mm/slub.c      	BUG_ON(p > name + ID_STR_LENGTH - 1);
p                 148 mm/sparse-vmemmap.c 		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
p                 149 mm/sparse-vmemmap.c 		if (!p)
p                 151 mm/sparse-vmemmap.c 		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
p                 159 mm/sparse-vmemmap.c 	void *p = vmemmap_alloc_block(size, node);
p                 161 mm/sparse-vmemmap.c 	if (!p)
p                 163 mm/sparse-vmemmap.c 	memset(p, 0, size);
p                 165 mm/sparse-vmemmap.c 	return p;
p                 172 mm/sparse-vmemmap.c 		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
p                 173 mm/sparse-vmemmap.c 		if (!p)
p                 175 mm/sparse-vmemmap.c 		pmd_populate_kernel(&init_mm, pmd, p);
p                 184 mm/sparse-vmemmap.c 		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
p                 185 mm/sparse-vmemmap.c 		if (!p)
p                 187 mm/sparse-vmemmap.c 		pud_populate(&init_mm, pud, p);
p                 196 mm/sparse-vmemmap.c 		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
p                 197 mm/sparse-vmemmap.c 		if (!p)
p                 199 mm/sparse-vmemmap.c 		p4d_populate(&init_mm, p4d, p);
p                 208 mm/sparse-vmemmap.c 		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
p                 209 mm/sparse-vmemmap.c 		if (!p)
p                 211 mm/sparse-vmemmap.c 		pgd_populate(&init_mm, pgd, p);
p                 535 mm/swapfile.c  static void inc_cluster_info_page(struct swap_info_struct *p,
p                 543 mm/swapfile.c  		alloc_cluster(p, idx);
p                 555 mm/swapfile.c  static void dec_cluster_info_page(struct swap_info_struct *p,
p                 568 mm/swapfile.c  		free_cluster(p, idx);
p                 658 mm/swapfile.c  static void __del_from_avail_list(struct swap_info_struct *p)
p                 663 mm/swapfile.c  		plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
p                 666 mm/swapfile.c  static void del_from_avail_list(struct swap_info_struct *p)
p                 669 mm/swapfile.c  	__del_from_avail_list(p);
p                 690 mm/swapfile.c  static void add_to_avail_list(struct swap_info_struct *p)
p                 696 mm/swapfile.c  		WARN_ON(!plist_node_empty(&p->avail_lists[nid]));
p                 697 mm/swapfile.c  		plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]);
p                1106 mm/swapfile.c  	struct swap_info_struct *p;
p                1111 mm/swapfile.c  	p = swp_swap_info(entry);
p                1112 mm/swapfile.c  	if (!p)
p                1114 mm/swapfile.c  	if (!(p->flags & SWP_USED))
p                1117 mm/swapfile.c  	if (offset >= p->max)
p                1119 mm/swapfile.c  	return p;
p                1135 mm/swapfile.c  	struct swap_info_struct *p;
p                1137 mm/swapfile.c  	p = __swap_info_get(entry);
p                1138 mm/swapfile.c  	if (!p)
p                1140 mm/swapfile.c  	if (!p->swap_map[swp_offset(entry)])
p                1142 mm/swapfile.c  	return p;
p                1153 mm/swapfile.c  	struct swap_info_struct *p;
p                1155 mm/swapfile.c  	p = _swap_info_get(entry);
p                1156 mm/swapfile.c  	if (p)
p                1157 mm/swapfile.c  		spin_lock(&p->lock);
p                1158 mm/swapfile.c  	return p;
p                1164 mm/swapfile.c  	struct swap_info_struct *p;
p                1166 mm/swapfile.c  	p = _swap_info_get(entry);
p                1168 mm/swapfile.c  	if (p != q) {
p                1171 mm/swapfile.c  		if (p != NULL)
p                1172 mm/swapfile.c  			spin_lock(&p->lock);
p                1174 mm/swapfile.c  	return p;
p                1177 mm/swapfile.c  static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
p                1184 mm/swapfile.c  	count = p->swap_map[offset];
p                1200 mm/swapfile.c  			if (swap_count_continued(p, offset, count))
p                1209 mm/swapfile.c  	p->swap_map[offset] = usage ? : SWAP_HAS_CACHE;
p                1277 mm/swapfile.c  static unsigned char __swap_entry_free(struct swap_info_struct *p,
p                1283 mm/swapfile.c  	ci = lock_cluster_or_swap_info(p, offset);
p                1284 mm/swapfile.c  	usage = __swap_entry_free_locked(p, offset, usage);
p                1285 mm/swapfile.c  	unlock_cluster_or_swap_info(p, ci);
p                1292 mm/swapfile.c  static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
p                1298 mm/swapfile.c  	ci = lock_cluster(p, offset);
p                1299 mm/swapfile.c  	count = p->swap_map[offset];
p                1301 mm/swapfile.c  	p->swap_map[offset] = 0;
p                1302 mm/swapfile.c  	dec_cluster_info_page(p, p->cluster_info, offset);
p                1306 mm/swapfile.c  	swap_range_free(p, offset, 1);
p                1315 mm/swapfile.c  	struct swap_info_struct *p;
p                1317 mm/swapfile.c  	p = _swap_info_get(entry);
p                1318 mm/swapfile.c  	if (p)
p                1319 mm/swapfile.c  		__swap_entry_free(p, entry, 1);
p                1398 mm/swapfile.c  	struct swap_info_struct *p, *prev;
p                1405 mm/swapfile.c  	p = NULL;
p                1415 mm/swapfile.c  		p = swap_info_get_cont(entries[i], prev);
p                1416 mm/swapfile.c  		if (p)
p                1417 mm/swapfile.c  			swap_entry_free(p, entries[i]);
p                1418 mm/swapfile.c  		prev = p;
p                1420 mm/swapfile.c  	if (p)
p                1421 mm/swapfile.c  		spin_unlock(&p->lock);
p                1432 mm/swapfile.c  	struct swap_info_struct *p;
p                1438 mm/swapfile.c  	p = _swap_info_get(entry);
p                1439 mm/swapfile.c  	if (p) {
p                1441 mm/swapfile.c  		ci = lock_cluster_or_swap_info(p, offset);
p                1442 mm/swapfile.c  		count = swap_count(p->swap_map[offset]);
p                1443 mm/swapfile.c  		unlock_cluster_or_swap_info(p, ci);
p                1499 mm/swapfile.c  	struct swap_info_struct *p;
p                1505 mm/swapfile.c  	p = _swap_info_get(entry);
p                1506 mm/swapfile.c  	if (!p)
p                1511 mm/swapfile.c  	ci = lock_cluster_or_swap_info(p, offset);
p                1513 mm/swapfile.c  	count = swap_count(p->swap_map[offset]);
p                1520 mm/swapfile.c  	page = vmalloc_to_page(p->swap_map + offset);
p                1534 mm/swapfile.c  	unlock_cluster_or_swap_info(p, ci);
p                1674 mm/swapfile.c  			struct swap_info_struct *p;
p                1677 mm/swapfile.c  			p = swap_info_get(entry);
p                1678 mm/swapfile.c  			if (p->flags & SWP_STABLE_WRITES) {
p                1679 mm/swapfile.c  				spin_unlock(&p->lock);
p                1682 mm/swapfile.c  			spin_unlock(&p->lock);
p                1734 mm/swapfile.c  	struct swap_info_struct *p;
p                1740 mm/swapfile.c  	p = _swap_info_get(entry);
p                1741 mm/swapfile.c  	if (p) {
p                1742 mm/swapfile.c  		count = __swap_entry_free(p, entry, 1);
p                1744 mm/swapfile.c  		    !swap_page_trans_huge_swapped(p, entry))
p                1745 mm/swapfile.c  			__try_to_reclaim_swap(p, swp_offset(entry),
p                1748 mm/swapfile.c  	return p != NULL;
p                2128 mm/swapfile.c  	struct list_head *p;
p                2150 mm/swapfile.c  	p = &init_mm.mmlist;
p                2153 mm/swapfile.c  	       (p = p->next) != &init_mm.mmlist) {
p                2155 mm/swapfile.c  		mm = list_entry(p, struct mm_struct, mmlist);
p                2239 mm/swapfile.c  	struct list_head *p, *next;
p                2246 mm/swapfile.c  	list_for_each_safe(p, next, &init_mm.mmlist)
p                2247 mm/swapfile.c  		list_del_init(p);
p                2409 mm/swapfile.c  static int swap_node(struct swap_info_struct *p)
p                2413 mm/swapfile.c  	if (p->bdev)
p                2414 mm/swapfile.c  		bdev = p->bdev;
p                2416 mm/swapfile.c  		bdev = p->swap_file->f_inode->i_sb->s_bdev;
p                2421 mm/swapfile.c  static void setup_swap_info(struct swap_info_struct *p, int prio,
p                2428 mm/swapfile.c  		p->prio = prio;
p                2430 mm/swapfile.c  		p->prio = --least_priority;
p                2435 mm/swapfile.c  	p->list.prio = -p->prio;
p                2437 mm/swapfile.c  		if (p->prio >= 0)
p                2438 mm/swapfile.c  			p->avail_lists[i].prio = -p->prio;
p                2440 mm/swapfile.c  			if (swap_node(p) == i)
p                2441 mm/swapfile.c  				p->avail_lists[i].prio = 1;
p                2443 mm/swapfile.c  				p->avail_lists[i].prio = -p->prio;
p                2446 mm/swapfile.c  	p->swap_map = swap_map;
p                2447 mm/swapfile.c  	p->cluster_info = cluster_info;
p                2450 mm/swapfile.c  static void _enable_swap_info(struct swap_info_struct *p)
p                2452 mm/swapfile.c  	p->flags |= SWP_WRITEOK | SWP_VALID;
p                2453 mm/swapfile.c  	atomic_long_add(p->pages, &nr_swap_pages);
p                2454 mm/swapfile.c  	total_swap_pages += p->pages;
p                2467 mm/swapfile.c  	plist_add(&p->list, &swap_active_head);
p                2468 mm/swapfile.c  	add_to_avail_list(p);
p                2471 mm/swapfile.c  static void enable_swap_info(struct swap_info_struct *p, int prio,
p                2476 mm/swapfile.c  	frontswap_init(p->type, frontswap_map);
p                2478 mm/swapfile.c  	spin_lock(&p->lock);
p                2479 mm/swapfile.c  	setup_swap_info(p, prio, swap_map, cluster_info);
p                2480 mm/swapfile.c  	spin_unlock(&p->lock);
p                2488 mm/swapfile.c  	spin_lock(&p->lock);
p                2489 mm/swapfile.c  	_enable_swap_info(p);
p                2490 mm/swapfile.c  	spin_unlock(&p->lock);
p                2494 mm/swapfile.c  static void reinsert_swap_info(struct swap_info_struct *p)
p                2497 mm/swapfile.c  	spin_lock(&p->lock);
p                2498 mm/swapfile.c  	setup_swap_info(p, p->prio, p->swap_map, p->cluster_info);
p                2499 mm/swapfile.c  	_enable_swap_info(p);
p                2500 mm/swapfile.c  	spin_unlock(&p->lock);
p                2517 mm/swapfile.c  	struct swap_info_struct *p = NULL;
p                2544 mm/swapfile.c  	plist_for_each_entry(p, &swap_active_head, list) {
p                2545 mm/swapfile.c  		if (p->flags & SWP_WRITEOK) {
p                2546 mm/swapfile.c  			if (p->swap_file->f_mapping == mapping) {
p                2557 mm/swapfile.c  	if (!security_vm_enough_memory_mm(current->mm, p->pages))
p                2558 mm/swapfile.c  		vm_unacct_memory(p->pages);
p                2564 mm/swapfile.c  	del_from_avail_list(p);
p                2565 mm/swapfile.c  	spin_lock(&p->lock);
p                2566 mm/swapfile.c  	if (p->prio < 0) {
p                2567 mm/swapfile.c  		struct swap_info_struct *si = p;
p                2580 mm/swapfile.c  	plist_del(&p->list, &swap_active_head);
p                2581 mm/swapfile.c  	atomic_long_sub(p->pages, &nr_swap_pages);
p                2582 mm/swapfile.c  	total_swap_pages -= p->pages;
p                2583 mm/swapfile.c  	p->flags &= ~SWP_WRITEOK;
p                2584 mm/swapfile.c  	spin_unlock(&p->lock);
p                2590 mm/swapfile.c  	err = try_to_unuse(p->type, false, 0); /* force unuse all pages */
p                2595 mm/swapfile.c  		reinsert_swap_info(p);
p                2603 mm/swapfile.c  	spin_lock(&p->lock);
p                2604 mm/swapfile.c  	p->flags &= ~SWP_VALID;		/* mark swap device as invalid */
p                2605 mm/swapfile.c  	spin_unlock(&p->lock);
p                2613 mm/swapfile.c  	flush_work(&p->discard_work);
p                2615 mm/swapfile.c  	destroy_swap_extents(p);
p                2616 mm/swapfile.c  	if (p->flags & SWP_CONTINUED)
p                2617 mm/swapfile.c  		free_swap_count_continuations(p);
p                2619 mm/swapfile.c  	if (!p->bdev || !blk_queue_nonrot(bdev_get_queue(p->bdev)))
p                2624 mm/swapfile.c  	spin_lock(&p->lock);
p                2628 mm/swapfile.c  	p->highest_bit = 0;		/* cuts scans short */
p                2629 mm/swapfile.c  	while (p->flags >= SWP_SCANNING) {
p                2630 mm/swapfile.c  		spin_unlock(&p->lock);
p                2634 mm/swapfile.c  		spin_lock(&p->lock);
p                2637 mm/swapfile.c  	swap_file = p->swap_file;
p                2638 mm/swapfile.c  	old_block_size = p->old_block_size;
p                2639 mm/swapfile.c  	p->swap_file = NULL;
p                2640 mm/swapfile.c  	p->max = 0;
p                2641 mm/swapfile.c  	swap_map = p->swap_map;
p                2642 mm/swapfile.c  	p->swap_map = NULL;
p                2643 mm/swapfile.c  	cluster_info = p->cluster_info;
p                2644 mm/swapfile.c  	p->cluster_info = NULL;
p                2645 mm/swapfile.c  	frontswap_map = frontswap_map_get(p);
p                2646 mm/swapfile.c  	spin_unlock(&p->lock);
p                2648 mm/swapfile.c  	frontswap_invalidate_area(p->type);
p                2649 mm/swapfile.c  	frontswap_map_set(p, NULL);
p                2651 mm/swapfile.c  	free_percpu(p->percpu_cluster);
p                2652 mm/swapfile.c  	p->percpu_cluster = NULL;
p                2657 mm/swapfile.c  	swap_cgroup_swapoff(p->type);
p                2658 mm/swapfile.c  	exit_swap_address_space(p->type);
p                2679 mm/swapfile.c  	p->flags = 0;
p                2826 mm/swapfile.c  	struct swap_info_struct *p;
p                2830 mm/swapfile.c  	p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
p                2831 mm/swapfile.c  	if (!p)
p                2841 mm/swapfile.c  		kvfree(p);
p                2845 mm/swapfile.c  		p->type = type;
p                2846 mm/swapfile.c  		WRITE_ONCE(swap_info[type], p);
p                2855 mm/swapfile.c  		kvfree(p);
p                2856 mm/swapfile.c  		p = swap_info[type];
p                2862 mm/swapfile.c  	p->swap_extent_root = RB_ROOT;
p                2863 mm/swapfile.c  	plist_node_init(&p->list, 0);
p                2865 mm/swapfile.c  		plist_node_init(&p->avail_lists[i], 0);
p                2866 mm/swapfile.c  	p->flags = SWP_USED;
p                2868 mm/swapfile.c  	spin_lock_init(&p->lock);
p                2869 mm/swapfile.c  	spin_lock_init(&p->cont_lock);
p                2871 mm/swapfile.c  	return p;
p                2874 mm/swapfile.c  static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
p                2879 mm/swapfile.c  		p->bdev = bdgrab(I_BDEV(inode));
p                2880 mm/swapfile.c  		error = blkdev_get(p->bdev,
p                2881 mm/swapfile.c  				   FMODE_READ | FMODE_WRITE | FMODE_EXCL, p);
p                2883 mm/swapfile.c  			p->bdev = NULL;
p                2886 mm/swapfile.c  		p->old_block_size = block_size(p->bdev);
p                2887 mm/swapfile.c  		error = set_blocksize(p->bdev, PAGE_SIZE);
p                2890 mm/swapfile.c  		p->flags |= SWP_BLKDEV;
p                2892 mm/swapfile.c  		p->bdev = inode->i_sb->s_bdev;
p                2927 mm/swapfile.c  static unsigned long read_swap_header(struct swap_info_struct *p,
p                2958 mm/swapfile.c  	p->lowest_bit  = 1;
p                2959 mm/swapfile.c  	p->cluster_next = 1;
p                2960 mm/swapfile.c  	p->cluster_nr = 0;
p                2979 mm/swapfile.c  	p->highest_bit = maxpages - 1;
p                3003 mm/swapfile.c  static int setup_swap_map_and_extents(struct swap_info_struct *p,
p                3014 mm/swapfile.c  	unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
p                3019 mm/swapfile.c  	cluster_list_init(&p->free_clusters);
p                3020 mm/swapfile.c  	cluster_list_init(&p->discard_clusters);
p                3033 mm/swapfile.c  			inc_cluster_info_page(p, cluster_info, page_nr);
p                3039 mm/swapfile.c  		inc_cluster_info_page(p, cluster_info, i);
p                3047 mm/swapfile.c  		inc_cluster_info_page(p, cluster_info, 0);
p                3048 mm/swapfile.c  		p->max = maxpages;
p                3049 mm/swapfile.c  		p->pages = nr_good_pages;
p                3050 mm/swapfile.c  		nr_extents = setup_swap_extents(p, span);
p                3053 mm/swapfile.c  		nr_good_pages = p->pages;
p                3077 mm/swapfile.c  			cluster_list_add_tail(&p->free_clusters, cluster_info,
p                3100 mm/swapfile.c  	struct swap_info_struct *p;
p                3126 mm/swapfile.c  	p = alloc_swap_info();
p                3127 mm/swapfile.c  	if (IS_ERR(p))
p                3128 mm/swapfile.c  		return PTR_ERR(p);
p                3130 mm/swapfile.c  	INIT_WORK(&p->discard_work, swap_discard_work);
p                3145 mm/swapfile.c  	p->swap_file = swap_file;
p                3149 mm/swapfile.c  	error = claim_swapfile(p, inode);
p                3173 mm/swapfile.c  	maxpages = read_swap_header(p, swap_header, inode);
p                3187 mm/swapfile.c  		p->flags |= SWP_STABLE_WRITES;
p                3190 mm/swapfile.c  		p->flags |= SWP_SYNCHRONOUS_IO;
p                3192 mm/swapfile.c  	if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
p                3196 mm/swapfile.c  		p->flags |= SWP_SOLIDSTATE;
p                3201 mm/swapfile.c  		p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
p                3214 mm/swapfile.c  		p->percpu_cluster = alloc_percpu(struct percpu_cluster);
p                3215 mm/swapfile.c  		if (!p->percpu_cluster) {
p                3221 mm/swapfile.c  			cluster = per_cpu_ptr(p->percpu_cluster, cpu);
p                3229 mm/swapfile.c  	error = swap_cgroup_swapon(p->type, maxpages);
p                3233 mm/swapfile.c  	nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
p                3245 mm/swapfile.c  	if (p->bdev &&(swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
p                3252 mm/swapfile.c  		p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
p                3262 mm/swapfile.c  			p->flags &= ~SWP_PAGE_DISCARD;
p                3264 mm/swapfile.c  			p->flags &= ~SWP_AREA_DISCARD;
p                3267 mm/swapfile.c  		if (p->flags & SWP_AREA_DISCARD) {
p                3268 mm/swapfile.c  			int err = discard_swap(p);
p                3271 mm/swapfile.c  					p, err);
p                3275 mm/swapfile.c  	error = init_swap_address_space(p->type, maxpages);
p                3295 mm/swapfile.c  	enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
p                3298 mm/swapfile.c  		p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
p                3300 mm/swapfile.c  		(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
p                3301 mm/swapfile.c  		(p->flags & SWP_DISCARDABLE) ? "D" : "",
p                3302 mm/swapfile.c  		(p->flags & SWP_AREA_DISCARD) ? "s" : "",
p                3303 mm/swapfile.c  		(p->flags & SWP_PAGE_DISCARD) ? "c" : "",
p                3315 mm/swapfile.c  	free_percpu(p->percpu_cluster);
p                3316 mm/swapfile.c  	p->percpu_cluster = NULL;
p                3317 mm/swapfile.c  	if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
p                3318 mm/swapfile.c  		set_blocksize(p->bdev, p->old_block_size);
p                3319 mm/swapfile.c  		blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
p                3322 mm/swapfile.c  	destroy_swap_extents(p);
p                3323 mm/swapfile.c  	swap_cgroup_swapoff(p->type);
p                3325 mm/swapfile.c  	p->swap_file = NULL;
p                3326 mm/swapfile.c  	p->flags = 0;
p                3379 mm/swapfile.c  	struct swap_info_struct *p;
p                3386 mm/swapfile.c  	p = get_swap_device(entry);
p                3387 mm/swapfile.c  	if (!p)
p                3391 mm/swapfile.c  	ci = lock_cluster_or_swap_info(p, offset);
p                3393 mm/swapfile.c  	count = p->swap_map[offset];
p                3424 mm/swapfile.c  		else if (swap_count_continued(p, offset, count))
p                3431 mm/swapfile.c  	p->swap_map[offset] = count | has_cache;
p                3434 mm/swapfile.c  	unlock_cluster_or_swap_info(p, ci);
p                3436 mm/swapfile.c  	if (p)
p                3437 mm/swapfile.c  		put_swap_device(p);
p                 125 mm/util.c      	void *p;
p                 127 mm/util.c      	p = kmalloc_track_caller(len, gfp);
p                 128 mm/util.c      	if (p)
p                 129 mm/util.c      		memcpy(p, src, len);
p                 130 mm/util.c      	return p;
p                 170 mm/util.c      	void *p;
p                 172 mm/util.c      	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
p                 173 mm/util.c      	if (!p)
p                 176 mm/util.c      	if (copy_from_user(p, src, len)) {
p                 177 mm/util.c      		kfree(p);
p                 181 mm/util.c      	return p;
p                 196 mm/util.c      	void *p;
p                 198 mm/util.c      	p = kvmalloc(len, GFP_USER);
p                 199 mm/util.c      	if (!p)
p                 202 mm/util.c      	if (copy_from_user(p, src, len)) {
p                 203 mm/util.c      		kvfree(p);
p                 207 mm/util.c      	return p;
p                 220 mm/util.c      	char *p;
p                 231 mm/util.c      	p = memdup_user(s, length);
p                 233 mm/util.c      	if (IS_ERR(p))
p                 234 mm/util.c      		return p;
p                 236 mm/util.c      	p[length - 1] = '\0';
p                 238 mm/util.c      	return p;
p                 252 mm/util.c      	char *p;
p                 259 mm/util.c      	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
p                 260 mm/util.c      	if (!p)
p                 263 mm/util.c      	if (copy_from_user(p, src, len)) {
p                 264 mm/util.c      		kfree(p);
p                 267 mm/util.c      	p[len] = '\0';
p                 269 mm/util.c      	return p;
p                  55 mm/vmalloc.c   	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
p                  58 mm/vmalloc.c   	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
p                1821 mm/vmalloc.c   	struct vm_struct *tmp, **p;
p                1824 mm/vmalloc.c   	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
p                1831 mm/vmalloc.c   	vm->next = *p;
p                1832 mm/vmalloc.c   	*p = vm;
p                1914 mm/vmalloc.c   		struct vfree_deferred *p;
p                1919 mm/vmalloc.c   		p = &per_cpu(vfree_deferred, i);
p                1920 mm/vmalloc.c   		init_llist_head(&p->list);
p                1921 mm/vmalloc.c   		INIT_WORK(&p->wq, free_work);
p                2277 mm/vmalloc.c   	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
p                2279 mm/vmalloc.c   	if (llist_add((struct llist_node *)addr, &p->list))
p                2280 mm/vmalloc.c   		schedule_work(&p->wq);
p                2748 mm/vmalloc.c   	struct page *p;
p                2758 mm/vmalloc.c   		p = vmalloc_to_page(addr);
p                2766 mm/vmalloc.c   		if (p) {
p                2771 mm/vmalloc.c   			void *map = kmap_atomic(p);
p                2787 mm/vmalloc.c   	struct page *p;
p                2797 mm/vmalloc.c   		p = vmalloc_to_page(addr);
p                2805 mm/vmalloc.c   		if (p) {
p                2810 mm/vmalloc.c   			void *map = kmap_atomic(p);
p                3079 mm/vmalloc.c   	pte_t ***p = data;
p                3081 mm/vmalloc.c   	if (p) {
p                3082 mm/vmalloc.c   		*(*p) = pte;
p                3083 mm/vmalloc.c   		(*p)++;
p                3439 mm/vmalloc.c   static void *s_next(struct seq_file *m, void *p, loff_t *pos)
p                3441 mm/vmalloc.c   	return seq_list_next(p, &vmap_area_list, pos);
p                3444 mm/vmalloc.c   static void s_stop(struct seq_file *m, void *p)
p                3490 mm/vmalloc.c   static int s_show(struct seq_file *m, void *p)
p                3495 mm/vmalloc.c   	va = list_entry(p, struct vmap_area, list);
p                3894 mm/vmscan.c    static int kswapd(void *p)
p                3898 mm/vmscan.c    	pg_data_t *pgdat = (pg_data_t*)p;
p                4213 mm/vmscan.c    	struct task_struct *p = current;
p                4237 mm/vmscan.c    	p->flags |= PF_SWAPWRITE;
p                4238 mm/vmscan.c    	set_task_reclaim_state(p, &sc.reclaim_state);
p                4250 mm/vmscan.c    	set_task_reclaim_state(p, NULL);
p                 320 mm/vmstat.c    	s8 __percpu *p = pcp->vm_stat_diff + item;
p                 324 mm/vmstat.c    	x = delta + __this_cpu_read(*p);
p                 332 mm/vmstat.c    	__this_cpu_write(*p, x);
p                 340 mm/vmstat.c    	s8 __percpu *p = pcp->vm_node_stat_diff + item;
p                 344 mm/vmstat.c    	x = delta + __this_cpu_read(*p);
p                 352 mm/vmstat.c    	__this_cpu_write(*p, x);
p                 382 mm/vmstat.c    	s8 __percpu *p = pcp->vm_stat_diff + item;
p                 385 mm/vmstat.c    	v = __this_cpu_inc_return(*p);
p                 391 mm/vmstat.c    		__this_cpu_write(*p, -overstep);
p                 398 mm/vmstat.c    	s8 __percpu *p = pcp->vm_node_stat_diff + item;
p                 401 mm/vmstat.c    	v = __this_cpu_inc_return(*p);
p                 407 mm/vmstat.c    		__this_cpu_write(*p, -overstep);
p                 426 mm/vmstat.c    	s8 __percpu *p = pcp->vm_stat_diff + item;
p                 429 mm/vmstat.c    	v = __this_cpu_dec_return(*p);
p                 435 mm/vmstat.c    		__this_cpu_write(*p, overstep);
p                 442 mm/vmstat.c    	s8 __percpu *p = pcp->vm_node_stat_diff + item;
p                 445 mm/vmstat.c    	v = __this_cpu_dec_return(*p);
p                 451 mm/vmstat.c    		__this_cpu_write(*p, overstep);
p                 484 mm/vmstat.c    	s8 __percpu *p = pcp->vm_stat_diff + item;
p                 502 mm/vmstat.c    		o = this_cpu_read(*p);
p                 512 mm/vmstat.c    	} while (this_cpu_cmpxchg(*p, o, n) != o);
p                 541 mm/vmstat.c    	s8 __percpu *p = pcp->vm_node_stat_diff + item;
p                 559 mm/vmstat.c    		o = this_cpu_read(*p);
p                 569 mm/vmstat.c    	} while (this_cpu_cmpxchg(*p, o, n) != o);
p                 758 mm/vmstat.c    		struct per_cpu_pageset __percpu *p = zone->pageset;
p                 763 mm/vmstat.c    			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
p                 770 mm/vmstat.c    				__this_cpu_write(p->expire, 3);
p                 778 mm/vmstat.c    			v = this_cpu_xchg(p->vm_numa_stat_diff[i], 0);
p                 783 mm/vmstat.c    				__this_cpu_write(p->expire, 3);
p                 796 mm/vmstat.c    			if (!__this_cpu_read(p->expire) ||
p                 797 mm/vmstat.c    			       !__this_cpu_read(p->pcp.count))
p                 804 mm/vmstat.c    				__this_cpu_write(p->expire, 0);
p                 808 mm/vmstat.c    			if (__this_cpu_dec_return(p->expire))
p                 811 mm/vmstat.c    			if (__this_cpu_read(p->pcp.count)) {
p                 812 mm/vmstat.c    				drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
p                 820 mm/vmstat.c    		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
p                 825 mm/vmstat.c    			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
p                 859 mm/vmstat.c    		struct per_cpu_pageset *p;
p                 861 mm/vmstat.c    		p = per_cpu_ptr(zone->pageset, cpu);
p                 864 mm/vmstat.c    			if (p->vm_stat_diff[i]) {
p                 867 mm/vmstat.c    				v = p->vm_stat_diff[i];
p                 868 mm/vmstat.c    				p->vm_stat_diff[i] = 0;
p                 875 mm/vmstat.c    			if (p->vm_numa_stat_diff[i]) {
p                 878 mm/vmstat.c    				v = p->vm_numa_stat_diff[i];
p                 879 mm/vmstat.c    				p->vm_numa_stat_diff[i] = 0;
p                 887 mm/vmstat.c    		struct per_cpu_nodestat *p;
p                 889 mm/vmstat.c    		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
p                 892 mm/vmstat.c    			if (p->vm_node_stat_diff[i]) {
p                 895 mm/vmstat.c    				v = p->vm_node_stat_diff[i];
p                 896 mm/vmstat.c    				p->vm_node_stat_diff[i] = 0;
p                 943 mm/vmstat.c    	u16 __percpu *p = pcp->vm_numa_stat_diff + item;
p                 946 mm/vmstat.c    	v = __this_cpu_inc_return(*p);
p                 950 mm/vmstat.c    		__this_cpu_write(*p, 0);
p                1836 mm/vmstat.c    		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
p                1838 mm/vmstat.c    		BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
p                1840 mm/vmstat.c    		BUILD_BUG_ON(sizeof(p->vm_numa_stat_diff[0]) != 2);
p                1846 mm/vmstat.c    		if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS *
p                1847 mm/vmstat.c    			       sizeof(p->vm_stat_diff[0])))
p                1850 mm/vmstat.c    		if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS *
p                1851 mm/vmstat.c    			       sizeof(p->vm_numa_stat_diff[0])))
p                 199 mm/zswap.c     #define zswap_pool_debug(msg, p)				\
p                 200 mm/zswap.c     	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
p                 201 mm/zswap.c     		 zpool_get_type((p)->zpool))
p                 170 net/802/garp.c 	struct rb_node *parent = NULL, **p = &app->gid.rb_node;
p                 174 net/802/garp.c 	while (*p) {
p                 175 net/802/garp.c 		parent = *p;
p                 179 net/802/garp.c 			p = &parent->rb_left;
p                 181 net/802/garp.c 			p = &parent->rb_right;
p                 195 net/802/garp.c 	rb_link_node(&attr->node, parent, p);
p                 119 net/802/hippi.c int hippi_mac_addr(struct net_device *dev, void *p)
p                 121 net/802/hippi.c 	struct sockaddr *addr = p;
p                 129 net/802/hippi.c int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
p                 132 net/802/hippi.c 	NEIGH_VAR_INIT(p, MCAST_PROBES, 0);
p                 138 net/802/hippi.c 	if (p->tbl->family != AF_INET6)
p                 139 net/802/hippi.c 		NEIGH_VAR_INIT(p, UCAST_PROBES, 0);
p                 259 net/802/mrp.c  	struct rb_node *parent = NULL, **p = &app->mad.rb_node;
p                 263 net/802/mrp.c  	while (*p) {
p                 264 net/802/mrp.c  		parent = *p;
p                 268 net/802/mrp.c  			p = &parent->rb_left;
p                 270 net/802/mrp.c  			p = &parent->rb_right;
p                 284 net/802/mrp.c  	rb_link_node(&attr->node, parent, p);
p                  31 net/802/psnap.c 	struct datalink_proto *proto = NULL, *p;
p                  33 net/802/psnap.c 	list_for_each_entry_rcu(p, &snap_list, node) {
p                  34 net/802/psnap.c 		if (!memcmp(p->type, desc, 5)) {
p                  35 net/802/psnap.c 			proto = p;
p                 464 net/8021q/vlan_core.c 	struct sk_buff *p;
p                 486 net/8021q/vlan_core.c 	list_for_each_entry(p, head, list) {
p                 489 net/8021q/vlan_core.c 		if (!NAPI_GRO_CB(p)->same_flow)
p                 492 net/8021q/vlan_core.c 		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
p                 494 net/8021q/vlan_core.c 			NAPI_GRO_CB(p)->same_flow = 0;
p                 331 net/8021q/vlan_dev.c static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
p                 334 net/8021q/vlan_dev.c 	struct sockaddr *addr = p;
p                 666 net/8021q/vlan_dev.c 	struct vlan_pcpu_stats *p;
p                 674 net/8021q/vlan_dev.c 		p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
p                 676 net/8021q/vlan_dev.c 			start = u64_stats_fetch_begin_irq(&p->syncp);
p                 677 net/8021q/vlan_dev.c 			rxpackets	= p->rx_packets;
p                 678 net/8021q/vlan_dev.c 			rxbytes		= p->rx_bytes;
p                 679 net/8021q/vlan_dev.c 			rxmulticast	= p->rx_multicast;
p                 680 net/8021q/vlan_dev.c 			txpackets	= p->tx_packets;
p                 681 net/8021q/vlan_dev.c 			txbytes		= p->tx_bytes;
p                 682 net/8021q/vlan_dev.c 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
p                 690 net/8021q/vlan_dev.c 		rx_errors	+= p->rx_errors;
p                 691 net/8021q/vlan_dev.c 		tx_dropped	+= p->tx_dropped;
p                 135 net/9p/client.c 	char *p;
p                 155 net/9p/client.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 157 net/9p/client.c 		if (!*p)
p                 159 net/9p/client.c 		token = match_token(p, tokens, args);
p                 544 net/9p/trans_fd.c p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
p                 546 net/9p/trans_fd.c 	struct p9_conn *m = container_of(p, struct p9_conn, pt);
p                 741 net/9p/trans_fd.c 	char *p;
p                 762 net/9p/trans_fd.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 765 net/9p/trans_fd.c 		if (!*p)
p                 767 net/9p/trans_fd.c 		token = match_token(p, tokens, args);
p                 824 net/9p/trans_fd.c 	struct p9_trans_fd *p;
p                 827 net/9p/trans_fd.c 	p = kzalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
p                 828 net/9p/trans_fd.c 	if (!p)
p                 836 net/9p/trans_fd.c 		kfree(p);
p                 841 net/9p/trans_fd.c 	p->wr = p->rd = file;
p                 842 net/9p/trans_fd.c 	client->trans = p;
p                 845 net/9p/trans_fd.c 	p->rd->f_flags |= O_NONBLOCK;
p                 176 net/9p/trans_rdma.c 	char *p;
p                 198 net/9p/trans_rdma.c 	while ((p = strsep(&options, ",")) != NULL) {
p                 201 net/9p/trans_rdma.c 		if (!*p)
p                 203 net/9p/trans_rdma.c 		token = match_token(p, tokens, args);
p                 340 net/9p/trans_virtio.c 		void *p;
p                 346 net/9p/trans_virtio.c 				p = data->kvec->iov_base + data->iov_offset;
p                 354 net/9p/trans_virtio.c 		nr_pages = DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) -
p                 355 net/9p/trans_virtio.c 			   (unsigned long)p / PAGE_SIZE;
p                 363 net/9p/trans_virtio.c 		p -= (*offs = offset_in_page(p));
p                 365 net/9p/trans_virtio.c 			if (is_vmalloc_addr(p))
p                 366 net/9p/trans_virtio.c 				(*pages)[index] = vmalloc_to_page(p);
p                 368 net/9p/trans_virtio.c 				(*pages)[index] = kmap_to_page(p);
p                 369 net/9p/trans_virtio.c 			p += PAGE_SIZE;
p                  51 net/atm/addr.c 	struct atm_dev_addr *this, *p;
p                  59 net/atm/addr.c 	list_for_each_entry_safe(this, p, head, entry) {
p                 350 net/atm/br2684.c static int br2684_mac_addr(struct net_device *dev, void *p)
p                 352 net/atm/br2684.c 	int err = eth_mac_addr(dev, p);
p                 827 net/atm/br2684.c 	struct proc_dir_entry *p;
p                 828 net/atm/br2684.c 	p = proc_create_seq("br2684", 0, atm_proc_root, &br2684_seq_ops);
p                 829 net/atm/br2684.c 	if (p == NULL)
p                 880 net/atm/clip.c 		struct proc_dir_entry *p;
p                 882 net/atm/clip.c 		p = proc_create_net("arp", 0444, atm_proc_root, &arp_seq_ops,
p                 884 net/atm/clip.c 		if (!p) {
p                 340 net/atm/common.c 	static short p;        /* poor man's per-device cache */
p                 352 net/atm/common.c 		p = *vpi;
p                 353 net/atm/common.c 	else if (p >= 1 << vcc->dev->ci_range.vpi_bits)
p                 354 net/atm/common.c 		p = 0;
p                 359 net/atm/common.c 	old_p = p;
p                 362 net/atm/common.c 		if (!check_ci(vcc, p, c)) {
p                 363 net/atm/common.c 			*vpi = p;
p                 374 net/atm/common.c 			p++;
p                 375 net/atm/common.c 			if (p >= 1 << vcc->dev->ci_range.vpi_bits)
p                 376 net/atm/common.c 				p = 0;
p                 378 net/atm/common.c 	} while (old_p != p || old_c != c);
p                 857 net/atm/lec.c  	int p;
p                 859 net/atm/lec.c  	for (p = state->arp_table; p < LEC_ARP_TABLE_SIZE; p++) {
p                 860 net/atm/lec.c  		v = lec_tbl_walk(state, &priv->lec_arp_tables[p], l);
p                 864 net/atm/lec.c  	state->arp_table = p;
p                1038 net/atm/lec.c  	struct proc_dir_entry *p;
p                1040 net/atm/lec.c  	p = proc_create_seq_private("lec", 0444, atm_proc_root, &lec_seq_ops,
p                1042 net/atm/lec.c  	if (!p) {
p                 122 net/atm/mpoa_proc.c 	struct mpoa_client *p = v;
p                 124 net/atm/mpoa_proc.c 	return v == SEQ_START_TOKEN ? mpcs : p->next;
p                 172 net/atm/mpoa_proc.c 		unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr;
p                 176 net/atm/mpoa_proc.c 			seq_printf(m, "%02x", p[i]);
p                 212 net/atm/mpoa_proc.c 	char *page, *p;
p                 225 net/atm/mpoa_proc.c 	for (p = page, len = 0; len < nbytes; p++, len++) {
p                 226 net/atm/mpoa_proc.c 		if (get_user(*p, buff++)) {
p                 230 net/atm/mpoa_proc.c 		if (*p == '\0' || *p == '\n')
p                 234 net/atm/mpoa_proc.c 	*p = '\0';
p                 291 net/atm/mpoa_proc.c 	struct proc_dir_entry *p;
p                 293 net/atm/mpoa_proc.c 	p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_file_operations);
p                 294 net/atm/mpoa_proc.c 	if (!p) {
p                  55 net/atm/resources.c 	struct list_head *p;
p                  57 net/atm/resources.c 	list_for_each(p, &atm_devs) {
p                  58 net/atm/resources.c 		dev = list_entry(p, struct atm_dev, dev_list);
p                 201 net/atm/resources.c 	struct list_head *p;
p                 226 net/atm/resources.c 		list_for_each(p, &atm_devs)
p                 238 net/atm/resources.c 		list_for_each(p, &atm_devs) {
p                 239 net/atm/resources.c 			dev = list_entry(p, struct atm_dev, dev_list);
p                 127 net/ax25/ax25_ds_subr.c 	unsigned char *p;
p                 136 net/ax25/ax25_ds_subr.c 	p = skb_put(skb, 2);
p                 138 net/ax25/ax25_ds_subr.c 	*p++ = cmd;
p                 139 net/ax25/ax25_ds_subr.c 	*p++ = param;
p                 119 net/ax25/ax25_out.c 	unsigned char *p;
p                 162 net/ax25/ax25_out.c 				p = skb_push(skbn, 2);
p                 164 net/ax25/ax25_out.c 				*p++ = AX25_P_SEGMENT;
p                 166 net/ax25/ax25_out.c 				*p = fragno--;
p                 168 net/ax25/ax25_out.c 					*p |= AX25_SEG_FIRST;
p                 176 net/ax25/ax25_out.c 				p = skb_push(skbn, 1);
p                 177 net/ax25/ax25_out.c 				*p = AX25_P_TEXT;
p                 632 net/batman-adv/gateway_client.c 	u8 *p;
p                 709 net/batman-adv/gateway_client.c 		p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET;
p                 710 net/batman-adv/gateway_client.c 		if (*p != BATADV_DHCP_HTYPE_ETHERNET)
p                 714 net/batman-adv/gateway_client.c 		p = skb->data + *header_len + BATADV_DHCP_HLEN_OFFSET;
p                 715 net/batman-adv/gateway_client.c 		if (*p != ETH_ALEN)
p                  65 net/batman-adv/log.c 	char *p;
p                  75 net/batman-adv/log.c 	for (p = debug_log_buf; *p != 0; p++)
p                  76 net/batman-adv/log.c 		batadv_emit_log_char(debug_log, *p);
p                 128 net/batman-adv/soft-interface.c static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
p                 132 net/batman-adv/soft-interface.c 	struct sockaddr *addr = p;
p                 147 net/bluetooth/bnep/sock.c 		unsigned __user *p = argp;
p                 151 net/bluetooth/bnep/sock.c 		if (get_user(cl.cnum, p) || get_user(uci, p + 1))
p                 161 net/bluetooth/bnep/sock.c 		if (!err && put_user(cl.cnum, p))
p                 150 net/bluetooth/cmtp/sock.c 		u32 __user *p = argp;
p                 154 net/bluetooth/cmtp/sock.c 		if (get_user(cl.cnum, p) || get_user(uci, p + 1))
p                 164 net/bluetooth/cmtp/sock.c 		if (!err && put_user(cl.cnum, p))
p                 127 net/bluetooth/ecdh_helper.c 	struct ecdh p = {0};
p                 129 net/bluetooth/ecdh_helper.c 	p.curve_id = ECC_CURVE_NIST_P256;
p                 136 net/bluetooth/ecdh_helper.c 		p.key = tmp;
p                 137 net/bluetooth/ecdh_helper.c 		p.key_size = 32;
p                 140 net/bluetooth/ecdh_helper.c 	buf_len = crypto_ecdh_key_len(&p);
p                 147 net/bluetooth/ecdh_helper.c 	err = crypto_ecdh_encode_key(buf, buf_len, &p);
p                 755 net/bluetooth/hci_conn.c 				struct hci_cp_le_ext_conn_param *p)
p                 759 net/bluetooth/hci_conn.c 	memset(p, 0, sizeof(*p));
p                 764 net/bluetooth/hci_conn.c 	p->scan_interval = cpu_to_le16(hdev->le_scan_interval);
p                 765 net/bluetooth/hci_conn.c 	p->scan_window = p->scan_interval;
p                 766 net/bluetooth/hci_conn.c 	p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
p                 767 net/bluetooth/hci_conn.c 	p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
p                 768 net/bluetooth/hci_conn.c 	p->conn_latency = cpu_to_le16(conn->le_conn_latency);
p                 769 net/bluetooth/hci_conn.c 	p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
p                 770 net/bluetooth/hci_conn.c 	p->min_ce_len = cpu_to_le16(0x0000);
p                 771 net/bluetooth/hci_conn.c 	p->max_ce_len = cpu_to_le16(0x0000);
p                 802 net/bluetooth/hci_conn.c 		struct hci_cp_le_ext_conn_param *p;
p                 803 net/bluetooth/hci_conn.c 		u8 data[sizeof(*cp) + sizeof(*p) * 3];
p                 807 net/bluetooth/hci_conn.c 		p = (void *) cp->data;
p                 819 net/bluetooth/hci_conn.c 			set_ext_conn_params(conn, p);
p                 821 net/bluetooth/hci_conn.c 			p++;
p                 822 net/bluetooth/hci_conn.c 			plen += sizeof(*p);
p                 827 net/bluetooth/hci_conn.c 			set_ext_conn_params(conn, p);
p                 829 net/bluetooth/hci_conn.c 			p++;
p                 830 net/bluetooth/hci_conn.c 			plen += sizeof(*p);
p                 835 net/bluetooth/hci_conn.c 			set_ext_conn_params(conn, p);
p                 837 net/bluetooth/hci_conn.c 			plen += sizeof(*p);
p                 587 net/bluetooth/hci_core.c 	u8 p;
p                 778 net/bluetooth/hci_core.c 	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
p                 781 net/bluetooth/hci_core.c 		cp.page = p;
p                1073 net/bluetooth/hci_core.c 	struct inquiry_entry *p, *n;
p                1075 net/bluetooth/hci_core.c 	list_for_each_entry_safe(p, n, &cache->all, all) {
p                1076 net/bluetooth/hci_core.c 		list_del(&p->all);
p                1077 net/bluetooth/hci_core.c 		kfree(p);
p                1140 net/bluetooth/hci_core.c 	struct inquiry_entry *p;
p                1144 net/bluetooth/hci_core.c 	list_for_each_entry(p, &cache->resolve, list) {
p                1145 net/bluetooth/hci_core.c 		if (p->name_state != NAME_PENDING &&
p                1146 net/bluetooth/hci_core.c 		    abs(p->data.rssi) >= abs(ie->data.rssi))
p                1148 net/bluetooth/hci_core.c 		pos = &p->list;
p                1648 net/bluetooth/hci_core.c 	struct hci_conn_params *p;
p                1650 net/bluetooth/hci_core.c 	list_for_each_entry(p, &hdev->le_conn_params, list) {
p                1651 net/bluetooth/hci_core.c 		if (p->conn) {
p                1652 net/bluetooth/hci_core.c 			hci_conn_drop(p->conn);
p                1653 net/bluetooth/hci_core.c 			hci_conn_put(p->conn);
p                1654 net/bluetooth/hci_core.c 			p->conn = NULL;
p                1656 net/bluetooth/hci_core.c 		list_del_init(&p->action);
p                  92 net/bluetooth/hci_debugfs.c 	u8 p;
p                  95 net/bluetooth/hci_debugfs.c 	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++)
p                  96 net/bluetooth/hci_debugfs.c 		seq_printf(f, "%2u: %8ph\n", p, hdev->features[p]);
p                 123 net/bluetooth/hci_debugfs.c 	struct hci_conn_params *p;
p                 129 net/bluetooth/hci_debugfs.c 	list_for_each_entry(p, &hdev->le_conn_params, list) {
p                 130 net/bluetooth/hci_debugfs.c 		seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
p                 131 net/bluetooth/hci_debugfs.c 			   p->auto_connect);
p                 140 net/bluetooth/hci_debugfs.c static int blacklist_show(struct seq_file *f, void *p)
p                 155 net/bluetooth/hci_debugfs.c static int uuids_show(struct seq_file *f, void *p)
p                 337 net/bluetooth/hci_debugfs.c static int inquiry_cache_show(struct seq_file *f, void *p)
p                 593 net/bluetooth/hci_debugfs.c static int identity_show(struct seq_file *f, void *p)
p                 644 net/bluetooth/hci_debugfs.c static int random_address_show(struct seq_file *f, void *p)
p                 657 net/bluetooth/hci_debugfs.c static int static_address_show(struct seq_file *f, void *p)
p                 154 net/bluetooth/hidp/sock.c 		u32 __user *p = argp;
p                 157 net/bluetooth/hidp/sock.c 		if (get_user(cl.cnum, p) || get_user(uci, p + 1))
p                 167 net/bluetooth/hidp/sock.c 		if (!err && put_user(cl.cnum, p))
p                 205 net/bluetooth/l2cap_core.c 		u16 p, start, end, incr;
p                 218 net/bluetooth/l2cap_core.c 		for (p = start; p <= end; p += incr)
p                 219 net/bluetooth/l2cap_core.c 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
p                 221 net/bluetooth/l2cap_core.c 				chan->psm   = cpu_to_le16(p);
p                 222 net/bluetooth/l2cap_core.c 				chan->sport = cpu_to_le16(p);
p                7690 net/bluetooth/l2cap_core.c static int l2cap_debugfs_show(struct seq_file *f, void *p)
p                2891 net/bluetooth/mgmt.c 		struct hci_conn_params *p;
p                2902 net/bluetooth/mgmt.c 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
p                2904 net/bluetooth/mgmt.c 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
p                2905 net/bluetooth/mgmt.c 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
p                5777 net/bluetooth/mgmt.c 		struct hci_conn_params *p, *tmp;
p                5796 net/bluetooth/mgmt.c 		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
p                5797 net/bluetooth/mgmt.c 			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
p                5799 net/bluetooth/mgmt.c 			device_removed(sk, hdev, &p->addr, p->addr_type);
p                5800 net/bluetooth/mgmt.c 			if (p->explicit_connect) {
p                5801 net/bluetooth/mgmt.c 				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
p                5804 net/bluetooth/mgmt.c 			list_del(&p->action);
p                5805 net/bluetooth/mgmt.c 			list_del(&p->list);
p                5806 net/bluetooth/mgmt.c 			kfree(p);
p                6989 net/bluetooth/mgmt.c 	struct hci_conn_params *p;
p                6991 net/bluetooth/mgmt.c 	list_for_each_entry(p, &hdev->le_conn_params, list) {
p                6995 net/bluetooth/mgmt.c 		list_del_init(&p->action);
p                6997 net/bluetooth/mgmt.c 		switch (p->auto_connect) {
p                7000 net/bluetooth/mgmt.c 			list_add(&p->action, &hdev->pend_le_conns);
p                7003 net/bluetooth/mgmt.c 			list_add(&p->action, &hdev->pend_le_reports);
p                1006 net/bluetooth/rfcomm/sock.c static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
p                1160 net/bluetooth/sco.c static int sco_debugfs_show(struct seq_file *f, void *p)
p                  33 net/bridge/br.c 	struct net_bridge_port *p;
p                  52 net/bridge/br.c 	p = br_port_get_rtnl(dev);
p                  53 net/bridge/br.c 	if (!p)
p                  56 net/bridge/br.c 	br = p->br;
p                  76 net/bridge/br.c 		br_fdb_changeaddr(p, dev->dev_addr);
p                  86 net/bridge/br.c 		br_port_carrier_check(p, &notified);
p                  96 net/bridge/br.c 			br_stp_disable_port(p);
p                 105 net/bridge/br.c 			br_stp_enable_port(p);
p                 116 net/bridge/br.c 		err = br_sysfs_renameif(p);
p                 132 net/bridge/br.c 		br_vlan_port_event(p, event);
p                 137 net/bridge/br.c 		br_ifinfo_notify(RTM_NEWLINK, NULL, p);
p                 151 net/bridge/br.c 	struct net_bridge_port *p;
p                 156 net/bridge/br.c 	p = br_port_get_rtnl_rcu(dev);
p                 157 net/bridge/br.c 	if (!p)
p                 160 net/bridge/br.c 	br = p->br;
p                 165 net/bridge/br.c 		err = br_fdb_external_learn_add(br, p, fdb_info->addr,
p                 171 net/bridge/br.c 		br_fdb_offloaded_set(br, p, fdb_info->addr,
p                 176 net/bridge/br.c 		err = br_fdb_external_learn_del(br, p, fdb_info->addr,
p                 183 net/bridge/br.c 		br_fdb_offloaded_set(br, p, fdb_info->addr,
p                  29 net/bridge/br_arp_nd_proxy.c 	struct net_bridge_port *p;
p                  32 net/bridge/br_arp_nd_proxy.c 	list_for_each_entry(p, &br->port_list, list) {
p                  33 net/bridge/br_arp_nd_proxy.c 		if (p->flags & BR_NEIGH_SUPPRESS) {
p                  43 net/bridge/br_arp_nd_proxy.c static void br_arp_send(struct net_bridge *br, struct net_bridge_port *p,
p                  68 net/bridge/br_arp_nd_proxy.c 	if (p)
p                  69 net/bridge/br_arp_nd_proxy.c 		vg = nbp_vlan_group_rcu(p);
p                  79 net/bridge/br_arp_nd_proxy.c 	if (p) {
p                 121 net/bridge/br_arp_nd_proxy.c 			      u16 vid, struct net_bridge_port *p)
p                 156 net/bridge/br_arp_nd_proxy.c 		if (p && (p->flags & BR_NEIGH_SUPPRESS))
p                 197 net/bridge/br_arp_nd_proxy.c 			if ((p && (p->flags & BR_PROXYARP)) ||
p                 201 net/bridge/br_arp_nd_proxy.c 					br_arp_send(br, p, skb->dev, sip, tip,
p                 204 net/bridge/br_arp_nd_proxy.c 					br_arp_send(br, p, skb->dev, sip, tip,
p                 242 net/bridge/br_arp_nd_proxy.c static void br_nd_send(struct net_bridge *br, struct net_bridge_port *p,
p                 338 net/bridge/br_arp_nd_proxy.c 	if (p)
p                 339 net/bridge/br_arp_nd_proxy.c 		vg = nbp_vlan_group_rcu(p);
p                 352 net/bridge/br_arp_nd_proxy.c 	if (p) {
p                 388 net/bridge/br_arp_nd_proxy.c 		       u16 vid, struct net_bridge_port *p, struct nd_msg *msg)
p                 398 net/bridge/br_arp_nd_proxy.c 	if (p && (p->flags & BR_NEIGH_SUPPRESS))
p                 454 net/bridge/br_arp_nd_proxy.c 					br_nd_send(br, p, skb, n,
p                 458 net/bridge/br_arp_nd_proxy.c 					br_nd_send(br, p, skb, n, 0, 0, msg);
p                 240 net/bridge/br_device.c static int br_set_mac_address(struct net_device *dev, void *p)
p                 243 net/bridge/br_device.c 	struct sockaddr *addr = p;
p                 288 net/bridge/br_device.c 	struct net_bridge_port *p;
p                 290 net/bridge/br_device.c 	list_for_each_entry(p, &br->port_list, list)
p                 291 net/bridge/br_device.c 		br_netpoll_disable(p);
p                 294 net/bridge/br_device.c static int __br_netpoll_enable(struct net_bridge_port *p)
p                 299 net/bridge/br_device.c 	np = kzalloc(sizeof(*p->np), GFP_KERNEL);
p                 303 net/bridge/br_device.c 	err = __netpoll_setup(np, p->dev);
p                 309 net/bridge/br_device.c 	p->np = np;
p                 313 net/bridge/br_device.c int br_netpoll_enable(struct net_bridge_port *p)
p                 315 net/bridge/br_device.c 	if (!p->br->dev->npinfo)
p                 318 net/bridge/br_device.c 	return __br_netpoll_enable(p);
p                 324 net/bridge/br_device.c 	struct net_bridge_port *p;
p                 327 net/bridge/br_device.c 	list_for_each_entry(p, &br->port_list, list) {
p                 328 net/bridge/br_device.c 		if (!p->dev)
p                 330 net/bridge/br_device.c 		err = __br_netpoll_enable(p);
p                 343 net/bridge/br_device.c void br_netpoll_disable(struct net_bridge_port *p)
p                 345 net/bridge/br_device.c 	struct netpoll *np = p->np;
p                 350 net/bridge/br_device.c 	p->np = NULL;
p                 158 net/bridge/br_fdb.c 	struct net_bridge_port *p;
p                 162 net/bridge/br_fdb.c 	list_for_each_entry(p, &br->port_list, list) {
p                 163 net/bridge/br_fdb.c 		if (!br_promisc_port(p)) {
p                 164 net/bridge/br_fdb.c 			err = dev_uc_add(p->dev, addr);
p                 172 net/bridge/br_fdb.c 	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
p                 173 net/bridge/br_fdb.c 		if (!br_promisc_port(p))
p                 174 net/bridge/br_fdb.c 			dev_uc_del(p->dev, addr);
p                 185 net/bridge/br_fdb.c 	struct net_bridge_port *p;
p                 189 net/bridge/br_fdb.c 	list_for_each_entry(p, &br->port_list, list) {
p                 190 net/bridge/br_fdb.c 		if (!br_promisc_port(p))
p                 191 net/bridge/br_fdb.c 			dev_uc_del(p->dev, addr);
p                 212 net/bridge/br_fdb.c 			     const struct net_bridge_port *p,
p                 224 net/bridge/br_fdb.c 		if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
p                 235 net/bridge/br_fdb.c 	if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
p                 246 net/bridge/br_fdb.c 			      const struct net_bridge_port *p,
p                 253 net/bridge/br_fdb.c 	if (f && f->is_local && !f->added_by_user && f->dst == p)
p                 254 net/bridge/br_fdb.c 		fdb_delete_local(br, p, f);
p                 258 net/bridge/br_fdb.c void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
p                 262 net/bridge/br_fdb.c 	struct net_bridge *br = p->br;
p                 266 net/bridge/br_fdb.c 	vg = nbp_vlan_group(p);
p                 268 net/bridge/br_fdb.c 		if (f->dst == p && f->is_local && !f->added_by_user) {
p                 270 net/bridge/br_fdb.c 			fdb_delete_local(br, p, f);
p                 283 net/bridge/br_fdb.c 	fdb_insert(br, p, newaddr, 0);
p                 293 net/bridge/br_fdb.c 		fdb_insert(br, p, newaddr, v->vid);
p                 387 net/bridge/br_fdb.c 			   const struct net_bridge_port *p,
p                 396 net/bridge/br_fdb.c 		if (f->dst != p)
p                 404 net/bridge/br_fdb.c 			fdb_delete_local(br, p, f);
p                 882 net/bridge/br_fdb.c 			struct net_bridge_port *p, const unsigned char *addr,
p                 888 net/bridge/br_fdb.c 		if (!p) {
p                 895 net/bridge/br_fdb.c 		br_fdb_update(br, p, addr, vid, true);
p                 899 net/bridge/br_fdb.c 		err = br_fdb_external_learn_add(br, p, addr, vid, true);
p                 902 net/bridge/br_fdb.c 		err = fdb_add_entry(br, p, addr, ndm->ndm_state,
p                 917 net/bridge/br_fdb.c 	struct net_bridge_port *p = NULL;
p                 938 net/bridge/br_fdb.c 		p = br_port_get_rtnl(dev);
p                 939 net/bridge/br_fdb.c 		if (!p) {
p                 944 net/bridge/br_fdb.c 		br = p->br;
p                 945 net/bridge/br_fdb.c 		vg = nbp_vlan_group(p);
p                 956 net/bridge/br_fdb.c 		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid);
p                 958 net/bridge/br_fdb.c 		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0);
p                 969 net/bridge/br_fdb.c 			err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid);
p                 980 net/bridge/br_fdb.c 				       const struct net_bridge_port *p,
p                 986 net/bridge/br_fdb.c 	if (!fdb || fdb->dst != p)
p                 995 net/bridge/br_fdb.c 			   const struct net_bridge_port *p,
p                1001 net/bridge/br_fdb.c 	err = fdb_delete_by_addr_and_port(br, p, addr, vid);
p                1013 net/bridge/br_fdb.c 	struct net_bridge_port *p = NULL;
p                1022 net/bridge/br_fdb.c 		p = br_port_get_rtnl(dev);
p                1023 net/bridge/br_fdb.c 		if (!p) {
p                1028 net/bridge/br_fdb.c 		vg = nbp_vlan_group(p);
p                1029 net/bridge/br_fdb.c 		br = p->br;
p                1039 net/bridge/br_fdb.c 		err = __br_fdb_delete(br, p, addr, vid);
p                1042 net/bridge/br_fdb.c 		err &= __br_fdb_delete(br, p, addr, 0);
p                1049 net/bridge/br_fdb.c 			err &= __br_fdb_delete(br, p, addr, v->vid);
p                1056 net/bridge/br_fdb.c int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
p                1069 net/bridge/br_fdb.c 		err = dev_uc_add(p->dev, f->key.addr.addr);
p                1085 net/bridge/br_fdb.c 		dev_uc_del(p->dev, tmp->key.addr.addr);
p                1091 net/bridge/br_fdb.c void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
p                1103 net/bridge/br_fdb.c 		dev_uc_del(p->dev, f->key.addr.addr);
p                1108 net/bridge/br_fdb.c int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
p                1116 net/bridge/br_fdb.c 	trace_br_fdb_external_learn_add(br, p, addr, vid);
p                1122 net/bridge/br_fdb.c 		fdb = fdb_create(br, p, addr, vid, 0, 0);
p                1134 net/bridge/br_fdb.c 		if (fdb->dst != p) {
p                1135 net/bridge/br_fdb.c 			fdb->dst = p;
p                1161 net/bridge/br_fdb.c int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
p                1181 net/bridge/br_fdb.c void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
p                1198 net/bridge/br_fdb.c 	struct net_bridge_port *p;
p                1202 net/bridge/br_fdb.c 	p = br_port_get_rtnl(dev);
p                1203 net/bridge/br_fdb.c 	if (!p)
p                1206 net/bridge/br_fdb.c 	spin_lock_bh(&p->br->hash_lock);
p                1207 net/bridge/br_fdb.c 	hlist_for_each_entry(f, &p->br->fdb_list, fdb_node) {
p                1208 net/bridge/br_fdb.c 		if (f->dst == p && f->key.vlan_id == vid)
p                1211 net/bridge/br_fdb.c 	spin_unlock_bh(&p->br->hash_lock);
p                  21 net/bridge/br_forward.c static inline int should_deliver(const struct net_bridge_port *p,
p                  26 net/bridge/br_forward.c 	vg = nbp_vlan_group_rcu(p);
p                  27 net/bridge/br_forward.c 	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
p                  28 net/bridge/br_forward.c 		br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING &&
p                  29 net/bridge/br_forward.c 		nbp_switchdev_allowed_egress(p, skb) &&
p                  30 net/bridge/br_forward.c 		!br_skb_isolated(p, skb);
p                 169 net/bridge/br_forward.c 	struct net_bridge_port *prev, struct net_bridge_port *p,
p                 175 net/bridge/br_forward.c 	if (!should_deliver(p, skb))
p                 185 net/bridge/br_forward.c 	br_multicast_count(p->br, p, skb, igmp_type, BR_MCAST_DIR_TX);
p                 187 net/bridge/br_forward.c 	return p;
p                 195 net/bridge/br_forward.c 	struct net_bridge_port *p;
p                 197 net/bridge/br_forward.c 	list_for_each_entry_rcu(p, &br->port_list, list) {
p                 203 net/bridge/br_forward.c 			if (!(p->flags & BR_FLOOD))
p                 207 net/bridge/br_forward.c 			if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
p                 211 net/bridge/br_forward.c 			if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev)
p                 217 net/bridge/br_forward.c 		if (p->flags & BR_PROXYARP)
p                 219 net/bridge/br_forward.c 		if ((p->flags & (BR_PROXYARP_WIFI | BR_NEIGH_SUPPRESS)) &&
p                 223 net/bridge/br_forward.c 		prev = maybe_deliver(prev, p, skb, local_orig);
p                 243 net/bridge/br_forward.c static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
p                 249 net/bridge/br_forward.c 	if (!should_deliver(p, skb))
p                 253 net/bridge/br_forward.c 	if (skb->dev == p->dev && ether_addr_equal(src, addr))
p                 265 net/bridge/br_forward.c 	__br_forward(p, skb, local_orig);
p                 276 net/bridge/br_forward.c 	struct net_bridge_port_group *p;
p                 280 net/bridge/br_forward.c 	p = mdst ? rcu_dereference(mdst->ports) : NULL;
p                 281 net/bridge/br_forward.c 	while (p || rp) {
p                 284 net/bridge/br_forward.c 		lport = p ? p->port : NULL;
p                 291 net/bridge/br_forward.c 				maybe_deliver_addr(lport, skb, p->eth_addr,
p                 304 net/bridge/br_forward.c 			p = rcu_dereference(p->next);
p                  64 net/bridge/br_if.c void br_port_carrier_check(struct net_bridge_port *p, bool *notified)
p                  66 net/bridge/br_if.c 	struct net_device *dev = p->dev;
p                  67 net/bridge/br_if.c 	struct net_bridge *br = p->br;
p                  69 net/bridge/br_if.c 	if (!(p->flags & BR_ADMIN_COST) &&
p                  71 net/bridge/br_if.c 		p->path_cost = port_cost(dev);
p                  79 net/bridge/br_if.c 		if (p->state == BR_STATE_DISABLED) {
p                  80 net/bridge/br_if.c 			br_stp_enable_port(p);
p                  84 net/bridge/br_if.c 		if (p->state != BR_STATE_DISABLED) {
p                  85 net/bridge/br_if.c 			br_stp_disable_port(p);
p                  92 net/bridge/br_if.c static void br_port_set_promisc(struct net_bridge_port *p)
p                  96 net/bridge/br_if.c 	if (br_promisc_port(p))
p                  99 net/bridge/br_if.c 	err = dev_set_promiscuity(p->dev, 1);
p                 103 net/bridge/br_if.c 	br_fdb_unsync_static(p->br, p);
p                 104 net/bridge/br_if.c 	p->flags |= BR_PROMISC;
p                 107 net/bridge/br_if.c static void br_port_clear_promisc(struct net_bridge_port *p)
p                 116 net/bridge/br_if.c 	if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
p                 122 net/bridge/br_if.c 	err = br_fdb_sync_static(p->br, p);
p                 126 net/bridge/br_if.c 	dev_set_promiscuity(p->dev, -1);
p                 127 net/bridge/br_if.c 	p->flags &= ~BR_PROMISC;
p                 137 net/bridge/br_if.c 	struct net_bridge_port *p;
p                 146 net/bridge/br_if.c 	list_for_each_entry(p, &br->port_list, list) {
p                 148 net/bridge/br_if.c 			br_port_set_promisc(p);
p                 161 net/bridge/br_if.c 			    (br->auto_cnt == 1 && br_auto_port(p)))
p                 162 net/bridge/br_if.c 				br_port_clear_promisc(p);
p                 164 net/bridge/br_if.c 				br_port_set_promisc(p);
p                 169 net/bridge/br_if.c int nbp_backup_change(struct net_bridge_port *p,
p                 172 net/bridge/br_if.c 	struct net_bridge_port *old_backup = rtnl_dereference(p->backup_port);
p                 182 net/bridge/br_if.c 		if (backup_p->br != p->br)
p                 186 net/bridge/br_if.c 	if (p == backup_p)
p                 198 net/bridge/br_if.c 	rcu_assign_pointer(p->backup_port, backup_p);
p                 203 net/bridge/br_if.c static void nbp_backup_clear(struct net_bridge_port *p)
p                 205 net/bridge/br_if.c 	nbp_backup_change(p, NULL);
p                 206 net/bridge/br_if.c 	if (p->backup_redirected_cnt) {
p                 209 net/bridge/br_if.c 		list_for_each_entry(cur_p, &p->br->port_list, list) {
p                 213 net/bridge/br_if.c 			if (backup_p == p)
p                 218 net/bridge/br_if.c 	WARN_ON(rcu_access_pointer(p->backup_port) || p->backup_redirected_cnt);
p                 223 net/bridge/br_if.c 	struct net_bridge_port *p;
p                 226 net/bridge/br_if.c 	list_for_each_entry(p, &br->port_list, list) {
p                 227 net/bridge/br_if.c 		if (br_auto_port(p))
p                 236 net/bridge/br_if.c static void nbp_delete_promisc(struct net_bridge_port *p)
p                 242 net/bridge/br_if.c 	dev_set_allmulti(p->dev, -1);
p                 243 net/bridge/br_if.c 	if (br_promisc_port(p))
p                 244 net/bridge/br_if.c 		dev_set_promiscuity(p->dev, -1);
p                 246 net/bridge/br_if.c 		br_fdb_unsync_static(p->br, p);
p                 251 net/bridge/br_if.c 	struct net_bridge_port *p
p                 253 net/bridge/br_if.c 	kfree(p);
p                 258 net/bridge/br_if.c 	struct net_bridge_port *p = kobj_to_brport(kobj);
p                 260 net/bridge/br_if.c 	net_ns_get_ownership(dev_net(p->dev), uid, gid);
p                 271 net/bridge/br_if.c static void destroy_nbp(struct net_bridge_port *p)
p                 273 net/bridge/br_if.c 	struct net_device *dev = p->dev;
p                 275 net/bridge/br_if.c 	p->br = NULL;
p                 276 net/bridge/br_if.c 	p->dev = NULL;
p                 279 net/bridge/br_if.c 	kobject_put(&p->kobj);
p                 284 net/bridge/br_if.c 	struct net_bridge_port *p =
p                 286 net/bridge/br_if.c 	destroy_nbp(p);
p                 292 net/bridge/br_if.c 	struct net_bridge_port *p;
p                 294 net/bridge/br_if.c 	list_for_each_entry(p, &br->port_list, list) {
p                 295 net/bridge/br_if.c 		unsigned dev_headroom = netdev_get_fwd_headroom(p->dev);
p                 306 net/bridge/br_if.c 	struct net_bridge_port *p;
p                 308 net/bridge/br_if.c 	list_for_each_entry(p, &br->port_list, list)
p                 309 net/bridge/br_if.c 		netdev_set_rx_headroom(p->dev, new_hr);
p                 323 net/bridge/br_if.c static void del_nbp(struct net_bridge_port *p)
p                 325 net/bridge/br_if.c 	struct net_bridge *br = p->br;
p                 326 net/bridge/br_if.c 	struct net_device *dev = p->dev;
p                 328 net/bridge/br_if.c 	sysfs_remove_link(br->ifobj, p->dev->name);
p                 330 net/bridge/br_if.c 	nbp_delete_promisc(p);
p                 333 net/bridge/br_if.c 	br_stp_disable_port(p);
p                 336 net/bridge/br_if.c 	br_ifinfo_notify(RTM_DELLINK, NULL, p);
p                 338 net/bridge/br_if.c 	list_del_rcu(&p->list);
p                 343 net/bridge/br_if.c 	nbp_vlan_flush(p);
p                 344 net/bridge/br_if.c 	br_fdb_delete_by_port(br, p, 0, 1);
p                 346 net/bridge/br_if.c 	nbp_backup_clear(p);
p                 356 net/bridge/br_if.c 	br_multicast_del_port(p);
p                 358 net/bridge/br_if.c 	kobject_uevent(&p->kobj, KOBJ_REMOVE);
p                 359 net/bridge/br_if.c 	kobject_del(&p->kobj);
p                 361 net/bridge/br_if.c 	br_netpoll_disable(p);
p                 363 net/bridge/br_if.c 	call_rcu(&p->rcu, destroy_nbp_rcu);
p                 370 net/bridge/br_if.c 	struct net_bridge_port *p, *n;
p                 372 net/bridge/br_if.c 	list_for_each_entry_safe(p, n, &br->port_list, list) {
p                 373 net/bridge/br_if.c 		del_nbp(p);
p                 390 net/bridge/br_if.c 	struct net_bridge_port *p;
p                 398 net/bridge/br_if.c 	list_for_each_entry(p, &br->port_list, list) {
p                 399 net/bridge/br_if.c 		set_bit(p->port_no, inuse);
p                 411 net/bridge/br_if.c 	struct net_bridge_port *p;
p                 418 net/bridge/br_if.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 419 net/bridge/br_if.c 	if (p == NULL)
p                 422 net/bridge/br_if.c 	p->br = br;
p                 424 net/bridge/br_if.c 	p->dev = dev;
p                 425 net/bridge/br_if.c 	p->path_cost = port_cost(dev);
p                 426 net/bridge/br_if.c 	p->priority = 0x8000 >> BR_PORT_BITS;
p                 427 net/bridge/br_if.c 	p->port_no = index;
p                 428 net/bridge/br_if.c 	p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
p                 429 net/bridge/br_if.c 	br_init_port(p);
p                 430 net/bridge/br_if.c 	br_set_state(p, BR_STATE_DISABLED);
p                 431 net/bridge/br_if.c 	br_stp_port_timer_init(p);
p                 432 net/bridge/br_if.c 	err = br_multicast_add_port(p);
p                 435 net/bridge/br_if.c 		kfree(p);
p                 436 net/bridge/br_if.c 		p = ERR_PTR(err);
p                 439 net/bridge/br_if.c 	return p;
p                 492 net/bridge/br_if.c 	const struct net_bridge_port *p;
p                 495 net/bridge/br_if.c 	list_for_each_entry(p, &br->port_list, list)
p                 496 net/bridge/br_if.c 		if (!ret_mtu || ret_mtu > p->dev->mtu)
p                 497 net/bridge/br_if.c 			ret_mtu = p->dev->mtu;
p                 521 net/bridge/br_if.c 	const struct net_bridge_port *p;
p                 523 net/bridge/br_if.c 	list_for_each_entry(p, &br->port_list, list) {
p                 524 net/bridge/br_if.c 		gso_max_size = min(gso_max_size, p->dev->gso_max_size);
p                 525 net/bridge/br_if.c 		gso_max_segs = min(gso_max_segs, p->dev->gso_max_segs);
p                 537 net/bridge/br_if.c 	struct net_bridge_port *p;
p                 546 net/bridge/br_if.c 	list_for_each_entry(p, &br->port_list, list) {
p                 548 net/bridge/br_if.c 						     p->dev->features, mask);
p                 559 net/bridge/br_if.c 	struct net_bridge_port *p;
p                 594 net/bridge/br_if.c 	p = new_nbp(br, dev);
p                 595 net/bridge/br_if.c 	if (IS_ERR(p))
p                 596 net/bridge/br_if.c 		return PTR_ERR(p);
p                 602 net/bridge/br_if.c 		kfree(p);	/* kobject not yet init'd, manually free */
p                 606 net/bridge/br_if.c 	err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
p                 611 net/bridge/br_if.c 	err = br_sysfs_addif(p);
p                 615 net/bridge/br_if.c 	err = br_netpoll_enable(p);
p                 619 net/bridge/br_if.c 	err = netdev_rx_handler_register(dev, br_handle_frame, p);
p                 629 net/bridge/br_if.c 	err = nbp_switchdev_mark_set(p);
p                 635 net/bridge/br_if.c 	list_add_rcu(&p->list, &br->port_list);
p                 648 net/bridge/br_if.c 	if (br_fdb_insert(br, p, dev->dev_addr, 0))
p                 660 net/bridge/br_if.c 	err = nbp_vlan_init(p, extack);
p                 671 net/bridge/br_if.c 		br_stp_enable_port(p);
p                 674 net/bridge/br_if.c 	br_ifinfo_notify(RTM_NEWLINK, NULL, p);
p                 682 net/bridge/br_if.c 	kobject_uevent(&p->kobj, KOBJ_ADD);
p                 687 net/bridge/br_if.c 	list_del_rcu(&p->list);
p                 688 net/bridge/br_if.c 	br_fdb_delete_by_port(br, p, 0, 1);
p                 696 net/bridge/br_if.c 	br_netpoll_disable(p);
p                 698 net/bridge/br_if.c 	sysfs_remove_link(br->ifobj, p->dev->name);
p                 700 net/bridge/br_if.c 	kobject_put(&p->kobj);
p                 710 net/bridge/br_if.c 	struct net_bridge_port *p;
p                 713 net/bridge/br_if.c 	p = br_port_get_rtnl(dev);
p                 714 net/bridge/br_if.c 	if (!p || p->br != br)
p                 721 net/bridge/br_if.c 	del_nbp(p);
p                 738 net/bridge/br_if.c void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
p                 740 net/bridge/br_if.c 	struct net_bridge *br = p->br;
p                 751 net/bridge/br_if.c 	struct net_bridge_port *p;
p                 753 net/bridge/br_if.c 	p = br_port_get_rtnl_rcu(dev);
p                 754 net/bridge/br_if.c 	if (!p)
p                 757 net/bridge/br_if.c 	return p->flags & flag;
p                  72 net/bridge/br_input.c 	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
p                  80 net/bridge/br_input.c 	if (!p || p->state == BR_STATE_DISABLED)
p                  83 net/bridge/br_input.c 	if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid))
p                  86 net/bridge/br_input.c 	nbp_switchdev_frame_mark(p, skb);
p                  89 net/bridge/br_input.c 	br = p->br;
p                  90 net/bridge/br_input.c 	if (p->flags & BR_LEARNING)
p                  91 net/bridge/br_input.c 		br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
p                 101 net/bridge/br_input.c 			if (br_multicast_rcv(br, p, skb, vid))
p                 106 net/bridge/br_input.c 	if (p->state == BR_STATE_LEARNING)
p                 110 net/bridge/br_input.c 	BR_INPUT_SKB_CB(skb)->src_port_isolated = !!(p->flags & BR_ISOLATED);
p                 115 net/bridge/br_input.c 		br_do_proxy_suppress_arp(skb, br, vid, p);
p                 126 net/bridge/br_input.c 				br_do_suppress_nd(skb, br, vid, p, msg);
p                 180 net/bridge/br_input.c 	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
p                 184 net/bridge/br_input.c 	if ((p->flags & BR_LEARNING) &&
p                 185 net/bridge/br_input.c 	    !br_opt_get(p->br, BROPT_NO_LL_LEARN) &&
p                 186 net/bridge/br_input.c 	    br_should_learn(p, skb, &vid))
p                 187 net/bridge/br_input.c 		br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
p                 258 net/bridge/br_input.c 	struct net_bridge_port *p;
p                 274 net/bridge/br_input.c 	p = br_port_get_rcu(skb->dev);
p                 275 net/bridge/br_input.c 	if (p->flags & BR_VLAN_TUNNEL) {
p                 276 net/bridge/br_input.c 		if (br_handle_ingress_vlan_tunnel(skb, p,
p                 277 net/bridge/br_input.c 						  nbp_vlan_group_rcu(p)))
p                 282 net/bridge/br_input.c 		u16 fwd_mask = p->br->group_fwd_mask_required;
p                 297 net/bridge/br_input.c 		fwd_mask |= p->group_fwd_mask;
p                 302 net/bridge/br_input.c 			if (p->br->stp_enabled == BR_NO_STP ||
p                 313 net/bridge/br_input.c 			fwd_mask |= p->br->group_fwd_mask;
p                 322 net/bridge/br_input.c 			fwd_mask |= p->br->group_fwd_mask;
p                 342 net/bridge/br_input.c 	switch (p->state) {
p                 345 net/bridge/br_input.c 		if (ether_addr_equal(p->br->dev->dev_addr, dest))
p                  40 net/bridge/br_ioctl.c 	struct net_bridge_port *p;
p                  42 net/bridge/br_ioctl.c 	list_for_each_entry(p, &br->port_list, list) {
p                  43 net/bridge/br_ioctl.c 		if (p->port_no < num)
p                  44 net/bridge/br_ioctl.c 			ifindices[p->port_no] = p->dev->ifindex;
p                 112 net/bridge/br_ioctl.c 	struct net_bridge_port *p = NULL;
p                 210 net/bridge/br_ioctl.c 		struct __port_info p;
p                 219 net/bridge/br_ioctl.c 		memset(&p, 0, sizeof(struct __port_info));
p                 220 net/bridge/br_ioctl.c 		memcpy(&p.designated_root, &pt->designated_root, 8);
p                 221 net/bridge/br_ioctl.c 		memcpy(&p.designated_bridge, &pt->designated_bridge, 8);
p                 222 net/bridge/br_ioctl.c 		p.port_id = pt->port_id;
p                 223 net/bridge/br_ioctl.c 		p.designated_port = pt->designated_port;
p                 224 net/bridge/br_ioctl.c 		p.path_cost = pt->path_cost;
p                 225 net/bridge/br_ioctl.c 		p.designated_cost = pt->designated_cost;
p                 226 net/bridge/br_ioctl.c 		p.state = pt->state;
p                 227 net/bridge/br_ioctl.c 		p.top_change_ack = pt->topology_change_ack;
p                 228 net/bridge/br_ioctl.c 		p.config_pending = pt->config_pending;
p                 229 net/bridge/br_ioctl.c 		p.message_age_timer_value = br_timer_value(&pt->message_age_timer);
p                 230 net/bridge/br_ioctl.c 		p.forward_delay_timer_value = br_timer_value(&pt->forward_delay_timer);
p                 231 net/bridge/br_ioctl.c 		p.hold_timer_value = br_timer_value(&pt->hold_timer);
p                 235 net/bridge/br_ioctl.c 		if (copy_to_user((void __user *)args[1], &p, sizeof(p)))
p                 263 net/bridge/br_ioctl.c 		if ((p = br_get_port(br, args[1])) == NULL)
p                 266 net/bridge/br_ioctl.c 			ret = br_stp_set_port_priority(p, args[2]);
p                 277 net/bridge/br_ioctl.c 		if ((p = br_get_port(br, args[1])) == NULL)
p                 280 net/bridge/br_ioctl.c 			ret = br_stp_set_path_cost(p, args[2]);
p                 291 net/bridge/br_ioctl.c 		if (p)
p                 292 net/bridge/br_ioctl.c 			br_ifinfo_notify(RTM_NEWLINK, NULL, p);
p                  23 net/bridge/br_mdb.c 	struct net_bridge_port *p;
p                  33 net/bridge/br_mdb.c 	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
p                  34 net/bridge/br_mdb.c 		if (!p)
p                  39 net/bridge/br_mdb.c 		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
p                  41 net/bridge/br_mdb.c 				br_timer_value(&p->multicast_router_timer)) ||
p                  43 net/bridge/br_mdb.c 			       p->multicast_router)) {
p                  82 net/bridge/br_mdb.c 			   struct net_bridge_port_group *p)
p                  91 net/bridge/br_mdb.c 	if (p) {
p                  92 net/bridge/br_mdb.c 		ifindex = p->port->dev->ifindex;
p                  93 net/bridge/br_mdb.c 		mtimer = &p->timer;
p                  94 net/bridge/br_mdb.c 		flags = p->flags;
p                 143 net/bridge/br_mdb.c 		struct net_bridge_port_group *p;
p                 163 net/bridge/br_mdb.c 		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
p                 164 net/bridge/br_mdb.c 		      pp = &p->next) {
p                 165 net/bridge/br_mdb.c 			if (!p->port)
p                 168 net/bridge/br_mdb.c 			err = __mdb_fill_info(skb, mp, p);
p                 318 net/bridge/br_mdb.c 	struct net_bridge_port_group *p;
p                 330 net/bridge/br_mdb.c 	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
p                 331 net/bridge/br_mdb.c 	     pp = &p->next) {
p                 332 net/bridge/br_mdb.c 		if (p->port != port)
p                 334 net/bridge/br_mdb.c 		p->flags |= MDB_PG_FLAGS_OFFLOAD;
p                 382 net/bridge/br_mdb.c static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
p                 407 net/bridge/br_mdb.c 	if (p && port_dev && type == RTM_NEWMDB) {
p                 410 net/bridge/br_mdb.c 			complete_info->port = p;
p                 417 net/bridge/br_mdb.c 	} else if (p && port_dev && type == RTM_DELMDB) {
p                 421 net/bridge/br_mdb.c 	if (!p)
p                 606 net/bridge/br_mdb.c 	struct net_bridge_port_group *p;
p                 633 net/bridge/br_mdb.c 	     (p = mlock_dereference(*pp, br)) != NULL;
p                 634 net/bridge/br_mdb.c 	     pp = &p->next) {
p                 635 net/bridge/br_mdb.c 		if (p->port == port)
p                 637 net/bridge/br_mdb.c 		if ((unsigned long)p->port < (unsigned long)port)
p                 641 net/bridge/br_mdb.c 	p = br_multicast_new_port_group(port, group, *pp, state, NULL);
p                 642 net/bridge/br_mdb.c 	if (unlikely(!p))
p                 644 net/bridge/br_mdb.c 	rcu_assign_pointer(*pp, p);
p                 646 net/bridge/br_mdb.c 		mod_timer(&p->timer, now + br->multicast_membership_interval);
p                 656 net/bridge/br_mdb.c 	struct net_bridge_port *p = NULL;
p                 667 net/bridge/br_mdb.c 		p = br_port_get_rtnl(dev);
p                 668 net/bridge/br_mdb.c 		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
p                 675 net/bridge/br_mdb.c 	ret = br_mdb_add_group(br, p, &ip, entry->state);
p                 685 net/bridge/br_mdb.c 	struct net_bridge_port *p = NULL;
p                 703 net/bridge/br_mdb.c 		p = br_port_get_rtnl(pdev);
p                 704 net/bridge/br_mdb.c 		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
p                 706 net/bridge/br_mdb.c 		vg = nbp_vlan_group(p);
p                 720 net/bridge/br_mdb.c 			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
p                 725 net/bridge/br_mdb.c 			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
p                 734 net/bridge/br_mdb.c 	struct net_bridge_port_group *p;
p                 759 net/bridge/br_mdb.c 	     (p = mlock_dereference(*pp, br)) != NULL;
p                 760 net/bridge/br_mdb.c 	     pp = &p->next) {
p                 761 net/bridge/br_mdb.c 		if (!p->port || p->port->dev->ifindex != entry->ifindex)
p                 764 net/bridge/br_mdb.c 		if (p->port->state == BR_STATE_DISABLED)
p                 767 net/bridge/br_mdb.c 		__mdb_entry_fill_flags(entry, p->flags);
p                 768 net/bridge/br_mdb.c 		rcu_assign_pointer(*pp, p->next);
p                 769 net/bridge/br_mdb.c 		hlist_del_init(&p->mglist);
p                 770 net/bridge/br_mdb.c 		del_timer(&p->timer);
p                 771 net/bridge/br_mdb.c 		kfree_rcu(p, rcu);
p                 790 net/bridge/br_mdb.c 	struct net_bridge_port *p = NULL;
p                 808 net/bridge/br_mdb.c 		p = br_port_get_rtnl(pdev);
p                 809 net/bridge/br_mdb.c 		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
p                 811 net/bridge/br_mdb.c 		vg = nbp_vlan_group(p);
p                 824 net/bridge/br_mdb.c 				__br_mdb_notify(dev, p, entry, RTM_DELMDB);
p                 829 net/bridge/br_mdb.c 			__br_mdb_notify(dev, p, entry, RTM_DELMDB);
p                  54 net/bridge/br_multicast.c static void __del_port_router(struct net_bridge_port *p);
p                 170 net/bridge/br_multicast.c 	struct net_bridge_port_group *p;
p                 178 net/bridge/br_multicast.c 	     (p = mlock_dereference(*pp, br)) != NULL;
p                 179 net/bridge/br_multicast.c 	     pp = &p->next) {
p                 180 net/bridge/br_multicast.c 		if (p != pg)
p                 183 net/bridge/br_multicast.c 		rcu_assign_pointer(*pp, p->next);
p                 184 net/bridge/br_multicast.c 		hlist_del_init(&p->mglist);
p                 185 net/bridge/br_multicast.c 		del_timer(&p->timer);
p                 186 net/bridge/br_multicast.c 		br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
p                 187 net/bridge/br_multicast.c 			      p->flags);
p                 188 net/bridge/br_multicast.c 		kfree_rcu(p, rcu);
p                 480 net/bridge/br_multicast.c 	struct net_bridge_port_group *p;
p                 482 net/bridge/br_multicast.c 	p = kzalloc(sizeof(*p), GFP_ATOMIC);
p                 483 net/bridge/br_multicast.c 	if (unlikely(!p))
p                 486 net/bridge/br_multicast.c 	p->addr = *group;
p                 487 net/bridge/br_multicast.c 	p->port = port;
p                 488 net/bridge/br_multicast.c 	p->flags = flags;
p                 489 net/bridge/br_multicast.c 	rcu_assign_pointer(p->next, next);
p                 490 net/bridge/br_multicast.c 	hlist_add_head(&p->mglist, &port->mglist);
p                 491 net/bridge/br_multicast.c 	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
p                 494 net/bridge/br_multicast.c 		memcpy(p->eth_addr, src, ETH_ALEN);
p                 496 net/bridge/br_multicast.c 		eth_broadcast_addr(p->eth_addr);
p                 498 net/bridge/br_multicast.c 	return p;
p                 501 net/bridge/br_multicast.c static bool br_port_group_equal(struct net_bridge_port_group *p,
p                 505 net/bridge/br_multicast.c 	if (p->port != port)
p                 511 net/bridge/br_multicast.c 	return ether_addr_equal(src, p->eth_addr);
p                 541 net/bridge/br_multicast.c 	struct net_bridge_port_group *p;
p                 562 net/bridge/br_multicast.c 	     (p = mlock_dereference(*pp, br)) != NULL;
p                 563 net/bridge/br_multicast.c 	     pp = &p->next) {
p                 564 net/bridge/br_multicast.c 		if (br_port_group_equal(p, port, src))
p                 566 net/bridge/br_multicast.c 		if ((unsigned long)p->port < (unsigned long)port)
p                 570 net/bridge/br_multicast.c 	p = br_multicast_new_port_group(port, group, *pp, 0, src);
p                 571 net/bridge/br_multicast.c 	if (unlikely(!p))
p                 573 net/bridge/br_multicast.c 	rcu_assign_pointer(*pp, p);
p                 577 net/bridge/br_multicast.c 	mod_timer(&p->timer, now + br->multicast_membership_interval);
p                 643 net/bridge/br_multicast.c static void br_mc_router_state_change(struct net_bridge *p,
p                 647 net/bridge/br_multicast.c 		.orig_dev = p->dev,
p                 653 net/bridge/br_multicast.c 	switchdev_port_attr_set(p->dev, &attr);
p                1136 net/bridge/br_multicast.c static void br_port_mc_router_state_change(struct net_bridge_port *p,
p                1140 net/bridge/br_multicast.c 		.orig_dev = p->dev,
p                1146 net/bridge/br_multicast.c 	switchdev_port_attr_set(p->dev, &attr);
p                1157 net/bridge/br_multicast.c 	struct net_bridge_port *p;
p                1163 net/bridge/br_multicast.c 	hlist_for_each_entry(p, &br->router_list, rlist) {
p                1164 net/bridge/br_multicast.c 		if ((unsigned long) port >= (unsigned long) p)
p                1166 net/bridge/br_multicast.c 		slot = &p->rlist;
p                1225 net/bridge/br_multicast.c 	struct net_bridge_port_group *p;
p                1279 net/bridge/br_multicast.c 	     (p = mlock_dereference(*pp, br)) != NULL;
p                1280 net/bridge/br_multicast.c 	     pp = &p->next) {
p                1281 net/bridge/br_multicast.c 		if (timer_pending(&p->timer) ?
p                1282 net/bridge/br_multicast.c 		    time_after(p->timer.expires, now + max_delay) :
p                1283 net/bridge/br_multicast.c 		    try_to_del_timer_sync(&p->timer) >= 0)
p                1284 net/bridge/br_multicast.c 			mod_timer(&p->timer, now + max_delay);
p                1301 net/bridge/br_multicast.c 	struct net_bridge_port_group *p;
p                1362 net/bridge/br_multicast.c 	     (p = mlock_dereference(*pp, br)) != NULL;
p                1363 net/bridge/br_multicast.c 	     pp = &p->next) {
p                1364 net/bridge/br_multicast.c 		if (timer_pending(&p->timer) ?
p                1365 net/bridge/br_multicast.c 		    time_after(p->timer.expires, now + max_delay) :
p                1366 net/bridge/br_multicast.c 		    try_to_del_timer_sync(&p->timer) >= 0)
p                1367 net/bridge/br_multicast.c 			mod_timer(&p->timer, now + max_delay);
p                1385 net/bridge/br_multicast.c 	struct net_bridge_port_group *p;
p                1402 net/bridge/br_multicast.c 		     (p = mlock_dereference(*pp, br)) != NULL;
p                1403 net/bridge/br_multicast.c 		     pp = &p->next) {
p                1404 net/bridge/br_multicast.c 			if (!br_port_group_equal(p, port, src))
p                1407 net/bridge/br_multicast.c 			if (p->flags & MDB_PG_FLAGS_PERMANENT)
p                1410 net/bridge/br_multicast.c 			rcu_assign_pointer(*pp, p->next);
p                1411 net/bridge/br_multicast.c 			hlist_del_init(&p->mglist);
p                1412 net/bridge/br_multicast.c 			del_timer(&p->timer);
p                1413 net/bridge/br_multicast.c 			kfree_rcu(p, rcu);
p                1415 net/bridge/br_multicast.c 				      p->flags | MDB_PG_FLAGS_FAST_LEAVE);
p                1435 net/bridge/br_multicast.c 		for (p = mlock_dereference(mp->ports, br);
p                1436 net/bridge/br_multicast.c 		     p != NULL;
p                1437 net/bridge/br_multicast.c 		     p = mlock_dereference(p->next, br)) {
p                1438 net/bridge/br_multicast.c 			if (!br_port_group_equal(p, port, src))
p                1441 net/bridge/br_multicast.c 			if (!hlist_unhashed(&p->mglist) &&
p                1442 net/bridge/br_multicast.c 			    (timer_pending(&p->timer) ?
p                1443 net/bridge/br_multicast.c 			     time_after(p->timer.expires, time) :
p                1444 net/bridge/br_multicast.c 			     try_to_del_timer_sync(&p->timer) >= 0)) {
p                1445 net/bridge/br_multicast.c 				mod_timer(&p->timer, time);
p                1467 net/bridge/br_multicast.c 	for (p = mlock_dereference(mp->ports, br);
p                1468 net/bridge/br_multicast.c 	     p != NULL;
p                1469 net/bridge/br_multicast.c 	     p = mlock_dereference(p->next, br)) {
p                1470 net/bridge/br_multicast.c 		if (p->port != port)
p                1473 net/bridge/br_multicast.c 		if (!hlist_unhashed(&p->mglist) &&
p                1474 net/bridge/br_multicast.c 		    (timer_pending(&p->timer) ?
p                1475 net/bridge/br_multicast.c 		     time_after(p->timer.expires, time) :
p                1476 net/bridge/br_multicast.c 		     try_to_del_timer_sync(&p->timer) >= 0)) {
p                1477 net/bridge/br_multicast.c 			mod_timer(&p->timer, time);
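
The leave-group excerpt above removes a matching, non-permanent entry by writing through the same pointer-to-pointer cursor it used to find it, then frees the entry only after an RCU grace period. A simplified sketch of that unlink, without the RCU deferral (hypothetical struct and field names):

#include <stdio.h>
#include <stdlib.h>

struct entry {
	int key;
	int permanent;		/* models MDB_PG_FLAGS_PERMANENT */
	struct entry *next;
};

/* Remove the first non-permanent entry with the given key.  The **pp cursor
 * always points at the link that references the current entry, so deletion
 * is a single pointer assignment; the kernel additionally uses
 * rcu_assign_pointer() and kfree_rcu() at that point. */
static int remove_entry(struct entry **head, int key)
{
	struct entry **pp, *p;

	for (pp = head; (p = *pp) != NULL; pp = &p->next) {
		if (p->key != key)
			continue;
		if (p->permanent)
			break;
		*pp = p->next;
		free(p);	/* deferred until after a grace period in the kernel */
		return 1;
	}
	return 0;
}

int main(void)
{
	struct entry *head = NULL, *e;

	e = malloc(sizeof(*e));		/* one permanent entry */
	*e = (struct entry){ .key = 5, .permanent = 1, .next = head };
	head = e;
	e = malloc(sizeof(*e));		/* one removable entry in front of it */
	*e = (struct entry){ .key = 5, .permanent = 0, .next = head };
	head = e;

	printf("removed: %d\n", remove_entry(&head, 5));	/* 1 */
	printf("removed: %d\n", remove_entry(&head, 5));	/* 0: only permanent left */
	return 0;
}
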
p                1535 net/bridge/br_multicast.c 				   const struct net_bridge_port *p,
p                1544 net/bridge/br_multicast.c 	if (p)
p                1545 net/bridge/br_multicast.c 		stats = p->mcast_stats;
p                1969 net/bridge/br_multicast.c static void __del_port_router(struct net_bridge_port *p)
p                1971 net/bridge/br_multicast.c 	if (hlist_unhashed(&p->rlist))
p                1973 net/bridge/br_multicast.c 	hlist_del_init_rcu(&p->rlist);
p                1974 net/bridge/br_multicast.c 	br_rtr_notify(p->br->dev, p, RTM_DELMDB);
p                1975 net/bridge/br_multicast.c 	br_port_mc_router_state_change(p, false);
p                1978 net/bridge/br_multicast.c 	if (p->multicast_router == MDB_RTR_TYPE_TEMP)
p                1979 net/bridge/br_multicast.c 		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
p                1982 net/bridge/br_multicast.c int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
p                1984 net/bridge/br_multicast.c 	struct net_bridge *br = p->br;
p                1989 net/bridge/br_multicast.c 	if (p->multicast_router == val) {
p                1991 net/bridge/br_multicast.c 		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
p                1992 net/bridge/br_multicast.c 			mod_timer(&p->multicast_router_timer,
p                1999 net/bridge/br_multicast.c 		p->multicast_router = MDB_RTR_TYPE_DISABLED;
p                2000 net/bridge/br_multicast.c 		__del_port_router(p);
p                2001 net/bridge/br_multicast.c 		del_timer(&p->multicast_router_timer);
p                2004 net/bridge/br_multicast.c 		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
p                2005 net/bridge/br_multicast.c 		__del_port_router(p);
p                2008 net/bridge/br_multicast.c 		p->multicast_router = MDB_RTR_TYPE_PERM;
p                2009 net/bridge/br_multicast.c 		del_timer(&p->multicast_router_timer);
p                2010 net/bridge/br_multicast.c 		br_multicast_add_router(br, p);
p                2013 net/bridge/br_multicast.c 		p->multicast_router = MDB_RTR_TYPE_TEMP;
p                2014 net/bridge/br_multicast.c 		br_multicast_mark_router(br, p);
p                2383 net/bridge/br_multicast.c void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
p                2392 net/bridge/br_multicast.c 	if (p)
p                2393 net/bridge/br_multicast.c 		stats = p->mcast_stats;
p                2423 net/bridge/br_multicast.c 			    const struct net_bridge_port *p,
p                2431 net/bridge/br_multicast.c 	if (p)
p                2432 net/bridge/br_multicast.c 		stats = p->mcast_stats;
p                 480 net/bridge/br_netfilter_hooks.c 	struct net_bridge_port *p;
p                 488 net/bridge/br_netfilter_hooks.c 	p = br_port_get_rcu(state->in);
p                 489 net/bridge/br_netfilter_hooks.c 	if (p == NULL)
p                 491 net/bridge/br_netfilter_hooks.c 	br = p->br;
p                 645 net/bridge/br_netfilter_hooks.c 	struct net_bridge_port *p;
p                 650 net/bridge/br_netfilter_hooks.c 	p = br_port_get_rcu(state->out);
p                 651 net/bridge/br_netfilter_hooks.c 	if (p == NULL)
p                 653 net/bridge/br_netfilter_hooks.c 	br = p->br;
p                  95 net/bridge/br_netlink.c 	struct net_bridge_port *p = NULL;
p                 102 net/bridge/br_netlink.c 		p = br_port_get_rcu(dev);
p                 103 net/bridge/br_netlink.c 		vg = nbp_vlan_group_rcu(p);
p                 111 net/bridge/br_netlink.c 	if (p && (p->flags & BR_VLAN_TUNNEL))
p                 173 net/bridge/br_netlink.c 			      const struct net_bridge_port *p)
p                 175 net/bridge/br_netlink.c 	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
p                 179 net/bridge/br_netlink.c 	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
p                 180 net/bridge/br_netlink.c 	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
p                 181 net/bridge/br_netlink.c 	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
p                 183 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
p                 185 net/bridge/br_netlink.c 		       !!(p->flags & BR_ROOT_BLOCK)) ||
p                 187 net/bridge/br_netlink.c 		       !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
p                 189 net/bridge/br_netlink.c 		       !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
p                 190 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
p                 192 net/bridge/br_netlink.c 		       !!(p->flags & BR_FLOOD)) ||
p                 194 net/bridge/br_netlink.c 		       !!(p->flags & BR_MCAST_FLOOD)) ||
p                 196 net/bridge/br_netlink.c 		       !!(p->flags & BR_BCAST_FLOOD)) ||
p                 197 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
p                 199 net/bridge/br_netlink.c 		       !!(p->flags & BR_PROXYARP_WIFI)) ||
p                 201 net/bridge/br_netlink.c 		    &p->designated_root) ||
p                 203 net/bridge/br_netlink.c 		    &p->designated_bridge) ||
p                 204 net/bridge/br_netlink.c 	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
p                 205 net/bridge/br_netlink.c 	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
p                 206 net/bridge/br_netlink.c 	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
p                 207 net/bridge/br_netlink.c 	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
p                 209 net/bridge/br_netlink.c 		       p->topology_change_ack) ||
p                 210 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
p                 211 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
p                 213 net/bridge/br_netlink.c 	    nla_put_u16(skb, IFLA_BRPORT_GROUP_FWD_MASK, p->group_fwd_mask) ||
p                 215 net/bridge/br_netlink.c 		       !!(p->flags & BR_NEIGH_SUPPRESS)) ||
p                 216 net/bridge/br_netlink.c 	    nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)))
p                 219 net/bridge/br_netlink.c 	timerval = br_timer_value(&p->message_age_timer);
p                 223 net/bridge/br_netlink.c 	timerval = br_timer_value(&p->forward_delay_timer);
p                 227 net/bridge/br_netlink.c 	timerval = br_timer_value(&p->hold_timer);
p                 234 net/bridge/br_netlink.c 		       p->multicast_router))
p                 240 net/bridge/br_netlink.c 	backup_p = rcu_dereference(p->backup_port);
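
The br_netlink.c excerpt above is mostly a chain of nla_put_u8/u16/u32() calls; each one appends a type-length-value attribute consisting of a 4-byte header (16-bit length covering header plus payload, 16-bit type) followed by the payload, with the next attribute starting on a 4-byte boundary. The sketch below encodes that layout by hand (host byte order, made-up type numbers) purely to illustrate the wire format, not the kernel's helpers:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define NLA_ALIGNTO	4
#define NLA_ALIGN(n)	(((n) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN	4	/* u16 nla_len + u16 nla_type */

/* Append one attribute to buf at *off; returns 0, or -1 if it won't fit. */
static int put_attr(uint8_t *buf, size_t size, size_t *off,
		    uint16_t type, const void *data, uint16_t len)
{
	size_t total = NLA_ALIGN(NLA_HDRLEN + len);
	uint16_t nla_len = NLA_HDRLEN + len;	/* padding not counted in nla_len */

	if (*off + total > size)
		return -1;
	memcpy(buf + *off, &nla_len, 2);
	memcpy(buf + *off + 2, &type, 2);
	memcpy(buf + *off + NLA_HDRLEN, data, len);
	memset(buf + *off + NLA_HDRLEN + len, 0, total - NLA_HDRLEN - len);
	*off += total;
	return 0;
}

int main(void)
{
	uint8_t buf[128];
	size_t off = 0;
	uint8_t state = 3;	/* e.g. an IFLA_BRPORT_STATE-style u8 */
	uint16_t prio = 32;	/* e.g. an IFLA_BRPORT_PRIORITY-style u16 */

	put_attr(buf, sizeof(buf), &off, 1, &state, sizeof(state));
	put_attr(buf, sizeof(buf), &off, 2, &prio, sizeof(prio));
	printf("%zu bytes of attributes\n", off);	/* 8 + 8 = 16 */
	return 0;
}
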
p                 523 net/bridge/br_netlink.c static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
p                 532 net/bridge/br_netlink.c 		if (p) {
p                 536 net/bridge/br_netlink.c 			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags,
p                 548 net/bridge/br_netlink.c 		if (p) {
p                 549 net/bridge/br_netlink.c 			if (!nbp_vlan_delete(p, vinfo->vid))
p                 553 net/bridge/br_netlink.c 			    !br_vlan_delete(p->br, vinfo->vid))
p                 565 net/bridge/br_netlink.c 				struct net_bridge_port *p, int cmd,
p                 599 net/bridge/br_netlink.c 			err = br_vlan_info(br, p, cmd, &tmp_vinfo, changed,
p                 609 net/bridge/br_netlink.c 	return br_vlan_info(br, p, cmd, vinfo_curr, changed, extack);
p                 613 net/bridge/br_netlink.c 		     struct net_bridge_port *p,
p                 629 net/bridge/br_netlink.c 			if (!p || !(p->flags & BR_VLAN_TUNNEL))
p                 634 net/bridge/br_netlink.c 			err = br_process_vlan_tunnel_info(br, p, cmd,
p                 645 net/bridge/br_netlink.c 			err = br_process_vlan_info(br, p, cmd, vinfo_curr,
p                 681 net/bridge/br_netlink.c static int br_set_port_state(struct net_bridge_port *p, u8 state)
p                 687 net/bridge/br_netlink.c 	if (p->br->stp_enabled == BR_KERNEL_STP)
p                 693 net/bridge/br_netlink.c 	if (!netif_running(p->dev) ||
p                 694 net/bridge/br_netlink.c 	    (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
p                 697 net/bridge/br_netlink.c 	br_set_state(p, state);
p                 698 net/bridge/br_netlink.c 	br_port_state_selection(p->br);
p                 703 net/bridge/br_netlink.c static int br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
p                 713 net/bridge/br_netlink.c 		flags = p->flags | mask;
p                 715 net/bridge/br_netlink.c 		flags = p->flags & ~mask;
p                 717 net/bridge/br_netlink.c 	err = br_switchdev_set_port_flag(p, flags, mask);
p                 721 net/bridge/br_netlink.c 	p->flags = flags;
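
br_set_port_flag() reads one netlink attribute as a boolean, computes the new flag word by setting or clearing a single mask, offers it to switchdev, and only then commits it to p->flags. The flag computation in isolation, as a small standalone sketch:

#include <stdio.h>

/* Return 'flags' with 'mask' set or cleared according to 'on'; this is the
 * same computation the excerpt above performs before the switchdev call. */
static unsigned long apply_flag(unsigned long flags, unsigned long mask, int on)
{
	return on ? (flags | mask) : (flags & ~mask);
}

int main(void)
{
	unsigned long flags = 0;

	flags = apply_flag(flags, 1UL << 0, 1);	/* e.g. learning on */
	flags = apply_flag(flags, 1UL << 3, 1);	/* e.g. flooding on */
	flags = apply_flag(flags, 1UL << 0, 0);	/* learning off again */
	printf("flags=%#lx\n", flags);		/* 0x8 */
	return 0;
}
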
p                 726 net/bridge/br_netlink.c static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
p                 728 net/bridge/br_netlink.c 	unsigned long old_flags = p->flags;
p                 732 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
p                 736 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
p                 740 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
p                 744 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
p                 748 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
p                 752 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
p                 756 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
p                 760 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
p                 764 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
p                 768 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
p                 772 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
p                 776 net/bridge/br_netlink.c 	br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false;
p                 777 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
p                 781 net/bridge/br_netlink.c 	if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
p                 782 net/bridge/br_netlink.c 		nbp_vlan_tunnel_info_flush(p);
p                 785 net/bridge/br_netlink.c 		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
p                 791 net/bridge/br_netlink.c 		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
p                 797 net/bridge/br_netlink.c 		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
p                 803 net/bridge/br_netlink.c 		br_fdb_delete_by_port(p->br, p, 0, 0);
p                 809 net/bridge/br_netlink.c 		err = br_multicast_set_port_router(p, mcast_router);
p                 820 net/bridge/br_netlink.c 		p->group_fwd_mask = fwd_mask;
p                 823 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS,
p                 828 net/bridge/br_netlink.c 	err = br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED);
p                 838 net/bridge/br_netlink.c 			backup_dev = __dev_get_by_index(dev_net(p->dev),
p                 844 net/bridge/br_netlink.c 		err = nbp_backup_change(p, backup_dev);
p                 849 net/bridge/br_netlink.c 	br_port_flags_change(p, old_flags ^ p->flags);
p                 859 net/bridge/br_netlink.c 	struct net_bridge_port *p;
p                 870 net/bridge/br_netlink.c 	p = br_port_get_rtnl(dev);
p                 874 net/bridge/br_netlink.c 	if (!p && !afspec)
p                 877 net/bridge/br_netlink.c 	if (p && protinfo) {
p                 886 net/bridge/br_netlink.c 			spin_lock_bh(&p->br->lock);
p                 887 net/bridge/br_netlink.c 			err = br_setport(p, tb);
p                 888 net/bridge/br_netlink.c 			spin_unlock_bh(&p->br->lock);
p                 894 net/bridge/br_netlink.c 			spin_lock_bh(&p->br->lock);
p                 895 net/bridge/br_netlink.c 			err = br_set_port_state(p, nla_get_u8(protinfo));
p                 896 net/bridge/br_netlink.c 			spin_unlock_bh(&p->br->lock);
p                 904 net/bridge/br_netlink.c 		err = br_afspec(br, p, afspec, RTM_SETLINK, &changed, extack);
p                 907 net/bridge/br_netlink.c 		br_ifinfo_notify(RTM_NEWLINK, br, p);
p                 916 net/bridge/br_netlink.c 	struct net_bridge_port *p;
p                 925 net/bridge/br_netlink.c 	p = br_port_get_rtnl(dev);
p                 927 net/bridge/br_netlink.c 	if (!p && !(dev->priv_flags & IFF_EBRIDGE))
p                 930 net/bridge/br_netlink.c 	err = br_afspec(br, p, afspec, RTM_DELLINK, &changed, NULL);
p                 935 net/bridge/br_netlink.c 		br_ifinfo_notify(RTM_NEWLINK, br, p);
p                1509 net/bridge/br_netlink.c 	struct net_bridge_port *p = NULL;
p                1521 net/bridge/br_netlink.c 		p = br_port_get_rtnl(dev);
p                1522 net/bridge/br_netlink.c 		if (!p)
p                1524 net/bridge/br_netlink.c 		br = p->br;
p                1525 net/bridge/br_netlink.c 		vg = nbp_vlan_group(p);
p                1547 net/bridge/br_netlink.c 	struct net_bridge_port *p = NULL;
p                1560 net/bridge/br_netlink.c 		p = br_port_get_rtnl(dev);
p                1561 net/bridge/br_netlink.c 		if (!p)
p                1563 net/bridge/br_netlink.c 		br = p->br;
p                1564 net/bridge/br_netlink.c 		vg = nbp_vlan_group(p);
p                1607 net/bridge/br_netlink.c 		br_multicast_get_stats(br, p, nla_data(nla));
p                 196 net/bridge/br_netlink_tunnel.c static int br_vlan_tunnel_info(struct net_bridge_port *p, int cmd,
p                 201 net/bridge/br_netlink_tunnel.c 	if (!p)
p                 206 net/bridge/br_netlink_tunnel.c 		err = nbp_vlan_tunnel_info_add(p, vid, tun_id);
p                 211 net/bridge/br_netlink_tunnel.c 		if (!nbp_vlan_tunnel_info_delete(p, vid))
p                 254 net/bridge/br_netlink_tunnel.c 				struct net_bridge_port *p, int cmd,
p                 275 net/bridge/br_netlink_tunnel.c 			err = br_vlan_tunnel_info(p, cmd, v, t, changed);
p                 285 net/bridge/br_netlink_tunnel.c 		err = br_vlan_tunnel_info(p, cmd, tinfo_curr->vid,
p                 285 net/bridge/br_private.h #define br_auto_port(p) ((p)->flags & BR_AUTO_MASK)
p                 286 net/bridge/br_private.h #define br_promisc_port(p) ((p)->flags & BR_PROMISC)
p                 519 net/bridge/br_private.h static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
p                 522 net/bridge/br_private.h 	struct netpoll *np = p->np;
p                 528 net/bridge/br_private.h int br_netpoll_enable(struct net_bridge_port *p);
p                 529 net/bridge/br_private.h void br_netpoll_disable(struct net_bridge_port *p);
p                 531 net/bridge/br_private.h static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
p                 536 net/bridge/br_private.h static inline int br_netpoll_enable(struct net_bridge_port *p)
p                 541 net/bridge/br_private.h static inline void br_netpoll_disable(struct net_bridge_port *p)
p                 553 net/bridge/br_private.h 			      const struct net_bridge_port *p,
p                 555 net/bridge/br_private.h void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr);
p                 559 net/bridge/br_private.h 			   const struct net_bridge_port *p, u16 vid, int do_all);
p                 581 net/bridge/br_private.h int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
p                 582 net/bridge/br_private.h void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
p                 583 net/bridge/br_private.h int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
p                 586 net/bridge/br_private.h int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
p                 589 net/bridge/br_private.h void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
p                 614 net/bridge/br_private.h void br_port_carrier_check(struct net_bridge_port *p, bool *notified);
p                 625 net/bridge/br_private.h int nbp_backup_change(struct net_bridge_port *p, struct net_device *backup_dev);
p                 674 net/bridge/br_private.h int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val);
p                 696 net/bridge/br_private.h void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
p                 701 net/bridge/br_private.h 			    const struct net_bridge_port *p,
p                 842 net/bridge/br_private.h 				      const struct net_bridge_port *p,
p                 870 net/bridge/br_private.h bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid);
p                 896 net/bridge/br_private.h int nbp_get_num_vlan_infos(struct net_bridge_port *p, u32 filter_mask);
p                 899 net/bridge/br_private.h void br_vlan_port_event(struct net_bridge_port *p, unsigned long event);
p                 910 net/bridge/br_private.h 					const struct net_bridge_port *p)
p                 912 net/bridge/br_private.h 	return rtnl_dereference(p->vlgrp);
p                 922 net/bridge/br_private.h 					const struct net_bridge_port *p)
p                 924 net/bridge/br_private.h 	return rcu_dereference(p->vlgrp);
p                 968 net/bridge/br_private.h static inline bool br_should_learn(struct net_bridge_port *p,
p                1051 net/bridge/br_private.h static inline int nbp_get_num_vlan_infos(struct net_bridge_port *p,
p                1064 net/bridge/br_private.h 					const struct net_bridge_port *p)
p                1076 net/bridge/br_private.h 					const struct net_bridge_port *p)
p                1086 net/bridge/br_private.h static inline void br_vlan_port_event(struct net_bridge_port *p,
p                1115 net/bridge/br_private.h void br_set_state(struct net_bridge_port *p, unsigned int state);
p                1117 net/bridge/br_private.h void br_init_port(struct net_bridge_port *p);
p                1118 net/bridge/br_private.h void br_become_designated_port(struct net_bridge_port *p);
p                1132 net/bridge/br_private.h void br_stp_enable_port(struct net_bridge_port *p);
p                1133 net/bridge/br_private.h void br_stp_disable_port(struct net_bridge_port *p);
p                1137 net/bridge/br_private.h int br_stp_set_port_priority(struct net_bridge_port *p, unsigned long newprio);
p                1138 net/bridge/br_private.h int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost);
p                1148 net/bridge/br_private.h void br_stp_port_timer_init(struct net_bridge_port *p);
p                1171 net/bridge/br_private.h int br_sysfs_addif(struct net_bridge_port *p);
p                1172 net/bridge/br_private.h int br_sysfs_renameif(struct net_bridge_port *p);
p                1180 net/bridge/br_private.h static inline int br_sysfs_addif(struct net_bridge_port *p) { return 0; }
p                1181 net/bridge/br_private.h static inline int br_sysfs_renameif(struct net_bridge_port *p) { return 0; }
p                1188 net/bridge/br_private.h int nbp_switchdev_mark_set(struct net_bridge_port *p);
p                1189 net/bridge/br_private.h void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
p                1191 net/bridge/br_private.h bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
p                1193 net/bridge/br_private.h int br_switchdev_set_port_flag(struct net_bridge_port *p,
p                1207 net/bridge/br_private.h static inline int nbp_switchdev_mark_set(struct net_bridge_port *p)
p                1212 net/bridge/br_private.h static inline void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
p                1217 net/bridge/br_private.h static inline bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
p                1223 net/bridge/br_private.h static inline int br_switchdev_set_port_flag(struct net_bridge_port *p,
p                1255 net/bridge/br_private.h 			      u16 vid, struct net_bridge_port *p);
p                1257 net/bridge/br_private.h 		       u16 vid, struct net_bridge_port *p, struct nd_msg *msg);
p                  42 net/bridge/br_private_stp.h static inline int br_is_designated_port(const struct net_bridge_port *p)
p                  44 net/bridge/br_private_stp.h 	return !memcmp(&p->designated_bridge, &p->br->bridge_id, 8) &&
p                  45 net/bridge/br_private_stp.h 		(p->designated_port == p->port_id);
p                  54 net/bridge/br_private_stp.h void br_received_config_bpdu(struct net_bridge_port *p,
p                  56 net/bridge/br_private_stp.h void br_received_tcn_bpdu(struct net_bridge_port *p);
p                  57 net/bridge/br_private_stp.h void br_transmit_config(struct net_bridge_port *p);
p                  22 net/bridge/br_private_tunnel.h 				struct net_bridge_port *p,
p                  41 net/bridge/br_private_tunnel.h 				  struct net_bridge_port *p,
p                  73 net/bridge/br_private_tunnel.h 						struct net_bridge_port *p,
p                  29 net/bridge/br_stp.c void br_set_state(struct net_bridge_port *p, unsigned int state)
p                  32 net/bridge/br_stp.c 		.orig_dev = p->dev,
p                  39 net/bridge/br_stp.c 	p->state = state;
p                  40 net/bridge/br_stp.c 	err = switchdev_port_attr_set(p->dev, &attr);
p                  42 net/bridge/br_stp.c 		br_warn(p->br, "error setting offload STP state on port %u(%s)\n",
p                  43 net/bridge/br_stp.c 				(unsigned int) p->port_no, p->dev->name);
p                  45 net/bridge/br_stp.c 		br_info(p->br, "port %u(%s) entered %s state\n",
p                  46 net/bridge/br_stp.c 				(unsigned int) p->port_no, p->dev->name,
p                  47 net/bridge/br_stp.c 				br_port_state_names[p->state]);
p                  53 net/bridge/br_stp.c 	struct net_bridge_port *p;
p                  55 net/bridge/br_stp.c 	list_for_each_entry_rcu(p, &br->port_list, list) {
p                  56 net/bridge/br_stp.c 		if (p->port_no == port_no)
p                  57 net/bridge/br_stp.c 			return p;
p                  64 net/bridge/br_stp.c static int br_should_become_root_port(const struct net_bridge_port *p,
p                  71 net/bridge/br_stp.c 	br = p->br;
p                  72 net/bridge/br_stp.c 	if (p->state == BR_STATE_DISABLED ||
p                  73 net/bridge/br_stp.c 	    br_is_designated_port(p))
p                  76 net/bridge/br_stp.c 	if (memcmp(&br->bridge_id, &p->designated_root, 8) <= 0)
p                  84 net/bridge/br_stp.c 	t = memcmp(&p->designated_root, &rp->designated_root, 8);
p                  90 net/bridge/br_stp.c 	if (p->designated_cost + p->path_cost <
p                  93 net/bridge/br_stp.c 	else if (p->designated_cost + p->path_cost >
p                  97 net/bridge/br_stp.c 	t = memcmp(&p->designated_bridge, &rp->designated_bridge, 8);
p                 103 net/bridge/br_stp.c 	if (p->designated_port < rp->designated_port)
p                 105 net/bridge/br_stp.c 	else if (p->designated_port > rp->designated_port)
p                 108 net/bridge/br_stp.c 	if (p->port_id < rp->port_id)
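
br_should_become_root_port() compares the candidate against the current root port field by field: designated root ID, then root path cost including the port's own path cost, then designated bridge ID, then designated port ID, and finally the port's own ID, deciding at the first field that differs. The sketch below reproduces that comparison order over a simplified priority vector (byte-array IDs, hypothetical struct); the disabled-port and already-designated short circuits from the original are omitted:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* simplified STP priority vector for one port */
struct prio_vec {
	uint8_t  designated_root[8];
	uint32_t designated_cost;
	uint32_t path_cost;
	uint8_t  designated_bridge[8];
	uint16_t designated_port;
	uint16_t port_id;
};

/* Return 1 if 'p' is a better root port than 'rp', 0 otherwise; comparisons
 * follow the order used in the excerpt above. */
static int better_root_port(const struct prio_vec *p, const struct prio_vec *rp)
{
	int t;

	t = memcmp(p->designated_root, rp->designated_root, 8);
	if (t < 0)
		return 1;
	if (t > 0)
		return 0;

	if (p->designated_cost + p->path_cost <
	    rp->designated_cost + rp->path_cost)
		return 1;
	if (p->designated_cost + p->path_cost >
	    rp->designated_cost + rp->path_cost)
		return 0;

	t = memcmp(p->designated_bridge, rp->designated_bridge, 8);
	if (t < 0)
		return 1;
	if (t > 0)
		return 0;

	if (p->designated_port < rp->designated_port)
		return 1;
	if (p->designated_port > rp->designated_port)
		return 0;

	return p->port_id < rp->port_id;
}

int main(void)
{
	struct prio_vec a = { .designated_cost = 10, .path_cost = 4, .port_id = 1 };
	struct prio_vec b = { .designated_cost = 10, .path_cost = 4, .port_id = 2 };

	printf("a better than b: %d\n", better_root_port(&a, &b));	/* 1 */
	return 0;
}
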
p                 115 net/bridge/br_stp.c 			       struct net_bridge_port *p)
p                 119 net/bridge/br_stp.c 		  (unsigned int) p->port_no, p->dev->name);
p                 121 net/bridge/br_stp.c 	br_set_state(p, BR_STATE_LISTENING);
p                 122 net/bridge/br_stp.c 	br_ifinfo_notify(RTM_NEWLINK, NULL, p);
p                 125 net/bridge/br_stp.c 		mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay);
p                 131 net/bridge/br_stp.c 	struct net_bridge_port *p;
p                 134 net/bridge/br_stp.c 	list_for_each_entry(p, &br->port_list, list) {
p                 135 net/bridge/br_stp.c 		if (!br_should_become_root_port(p, root_port))
p                 138 net/bridge/br_stp.c 		if (p->flags & BR_ROOT_BLOCK)
p                 139 net/bridge/br_stp.c 			br_root_port_block(br, p);
p                 141 net/bridge/br_stp.c 			root_port = p->port_no;
p                 150 net/bridge/br_stp.c 		p = br_get_port(br, root_port);
p                 151 net/bridge/br_stp.c 		br->designated_root = p->designated_root;
p                 152 net/bridge/br_stp.c 		br->root_path_cost = p->designated_cost + p->path_cost;
p                 172 net/bridge/br_stp.c void br_transmit_config(struct net_bridge_port *p)
p                 177 net/bridge/br_stp.c 	if (timer_pending(&p->hold_timer)) {
p                 178 net/bridge/br_stp.c 		p->config_pending = 1;
p                 182 net/bridge/br_stp.c 	br = p->br;
p                 185 net/bridge/br_stp.c 	bpdu.topology_change_ack = p->topology_change_ack;
p                 189 net/bridge/br_stp.c 	bpdu.port_id = p->port_id;
p                 203 net/bridge/br_stp.c 		br_send_config_bpdu(p, &bpdu);
p                 204 net/bridge/br_stp.c 		p->topology_change_ack = 0;
p                 205 net/bridge/br_stp.c 		p->config_pending = 0;
p                 206 net/bridge/br_stp.c 		if (p->br->stp_enabled == BR_KERNEL_STP)
p                 207 net/bridge/br_stp.c 			mod_timer(&p->hold_timer,
p                 213 net/bridge/br_stp.c static void br_record_config_information(struct net_bridge_port *p,
p                 216 net/bridge/br_stp.c 	p->designated_root = bpdu->root;
p                 217 net/bridge/br_stp.c 	p->designated_cost = bpdu->root_path_cost;
p                 218 net/bridge/br_stp.c 	p->designated_bridge = bpdu->bridge_id;
p                 219 net/bridge/br_stp.c 	p->designated_port = bpdu->port_id;
p                 220 net/bridge/br_stp.c 	p->designated_age = jiffies - bpdu->message_age;
p                 222 net/bridge/br_stp.c 	mod_timer(&p->message_age_timer, jiffies
p                 239 net/bridge/br_stp.c 	struct net_bridge_port *p;
p                 241 net/bridge/br_stp.c 	p = br_get_port(br, br->root_port);
p                 242 net/bridge/br_stp.c 	if (p)
p                 243 net/bridge/br_stp.c 		br_send_tcn_bpdu(p);
p                 250 net/bridge/br_stp.c static int br_should_become_designated_port(const struct net_bridge_port *p)
p                 255 net/bridge/br_stp.c 	br = p->br;
p                 256 net/bridge/br_stp.c 	if (br_is_designated_port(p))
p                 259 net/bridge/br_stp.c 	if (memcmp(&p->designated_root, &br->designated_root, 8))
p                 262 net/bridge/br_stp.c 	if (br->root_path_cost < p->designated_cost)
p                 264 net/bridge/br_stp.c 	else if (br->root_path_cost > p->designated_cost)
p                 267 net/bridge/br_stp.c 	t = memcmp(&br->bridge_id, &p->designated_bridge, 8);
p                 273 net/bridge/br_stp.c 	if (p->port_id < p->designated_port)
p                 282 net/bridge/br_stp.c 	struct net_bridge_port *p;
p                 284 net/bridge/br_stp.c 	list_for_each_entry(p, &br->port_list, list) {
p                 285 net/bridge/br_stp.c 		if (p->state != BR_STATE_DISABLED &&
p                 286 net/bridge/br_stp.c 		    br_should_become_designated_port(p))
p                 287 net/bridge/br_stp.c 			br_become_designated_port(p);
p                 293 net/bridge/br_stp.c static int br_supersedes_port_info(const struct net_bridge_port *p,
p                 298 net/bridge/br_stp.c 	t = memcmp(&bpdu->root, &p->designated_root, 8);
p                 304 net/bridge/br_stp.c 	if (bpdu->root_path_cost < p->designated_cost)
p                 306 net/bridge/br_stp.c 	else if (bpdu->root_path_cost > p->designated_cost)
p                 309 net/bridge/br_stp.c 	t = memcmp(&bpdu->bridge_id, &p->designated_bridge, 8);
p                 315 net/bridge/br_stp.c 	if (memcmp(&bpdu->bridge_id, &p->br->bridge_id, 8))
p                 318 net/bridge/br_stp.c 	if (bpdu->port_id <= p->designated_port)
p                 357 net/bridge/br_stp.c 	struct net_bridge_port *p;
p                 359 net/bridge/br_stp.c 	list_for_each_entry(p, &br->port_list, list) {
p                 360 net/bridge/br_stp.c 		if (p->state != BR_STATE_DISABLED &&
p                 361 net/bridge/br_stp.c 		    br_is_designated_port(p))
p                 362 net/bridge/br_stp.c 			br_transmit_config(p);
p                 367 net/bridge/br_stp.c static void br_reply(struct net_bridge_port *p)
p                 369 net/bridge/br_stp.c 	br_transmit_config(p);
p                 380 net/bridge/br_stp.c void br_become_designated_port(struct net_bridge_port *p)
p                 384 net/bridge/br_stp.c 	br = p->br;
p                 385 net/bridge/br_stp.c 	p->designated_root = br->designated_root;
p                 386 net/bridge/br_stp.c 	p->designated_cost = br->root_path_cost;
p                 387 net/bridge/br_stp.c 	p->designated_bridge = br->bridge_id;
p                 388 net/bridge/br_stp.c 	p->designated_port = p->port_id;
p                 393 net/bridge/br_stp.c static void br_make_blocking(struct net_bridge_port *p)
p                 395 net/bridge/br_stp.c 	if (p->state != BR_STATE_DISABLED &&
p                 396 net/bridge/br_stp.c 	    p->state != BR_STATE_BLOCKING) {
p                 397 net/bridge/br_stp.c 		if (p->state == BR_STATE_FORWARDING ||
p                 398 net/bridge/br_stp.c 		    p->state == BR_STATE_LEARNING)
p                 399 net/bridge/br_stp.c 			br_topology_change_detection(p->br);
p                 401 net/bridge/br_stp.c 		br_set_state(p, BR_STATE_BLOCKING);
p                 402 net/bridge/br_stp.c 		br_ifinfo_notify(RTM_NEWLINK, NULL, p);
p                 404 net/bridge/br_stp.c 		del_timer(&p->forward_delay_timer);
p                 409 net/bridge/br_stp.c static void br_make_forwarding(struct net_bridge_port *p)
p                 411 net/bridge/br_stp.c 	struct net_bridge *br = p->br;
p                 413 net/bridge/br_stp.c 	if (p->state != BR_STATE_BLOCKING)
p                 417 net/bridge/br_stp.c 		br_set_state(p, BR_STATE_FORWARDING);
p                 419 net/bridge/br_stp.c 		del_timer(&p->forward_delay_timer);
p                 421 net/bridge/br_stp.c 		br_set_state(p, BR_STATE_LISTENING);
p                 423 net/bridge/br_stp.c 		br_set_state(p, BR_STATE_LEARNING);
p                 425 net/bridge/br_stp.c 	br_ifinfo_notify(RTM_NEWLINK, NULL, p);
p                 428 net/bridge/br_stp.c 		mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay);
p                 434 net/bridge/br_stp.c 	struct net_bridge_port *p;
p                 437 net/bridge/br_stp.c 	list_for_each_entry(p, &br->port_list, list) {
p                 438 net/bridge/br_stp.c 		if (p->state == BR_STATE_DISABLED)
p                 443 net/bridge/br_stp.c 			if (p->port_no == br->root_port) {
p                 444 net/bridge/br_stp.c 				p->config_pending = 0;
p                 445 net/bridge/br_stp.c 				p->topology_change_ack = 0;
p                 446 net/bridge/br_stp.c 				br_make_forwarding(p);
p                 447 net/bridge/br_stp.c 			} else if (br_is_designated_port(p)) {
p                 448 net/bridge/br_stp.c 				del_timer(&p->message_age_timer);
p                 449 net/bridge/br_stp.c 				br_make_forwarding(p);
p                 451 net/bridge/br_stp.c 				p->config_pending = 0;
p                 452 net/bridge/br_stp.c 				p->topology_change_ack = 0;
p                 453 net/bridge/br_stp.c 				br_make_blocking(p);
p                 457 net/bridge/br_stp.c 		if (p->state != BR_STATE_BLOCKING)
p                 458 net/bridge/br_stp.c 			br_multicast_enable_port(p);
p                 463 net/bridge/br_stp.c 		if (p->state == BR_STATE_FORWARDING)
p                 474 net/bridge/br_stp.c static void br_topology_change_acknowledge(struct net_bridge_port *p)
p                 476 net/bridge/br_stp.c 	p->topology_change_ack = 1;
p                 477 net/bridge/br_stp.c 	br_transmit_config(p);
p                 481 net/bridge/br_stp.c void br_received_config_bpdu(struct net_bridge_port *p,
p                 487 net/bridge/br_stp.c 	br = p->br;
p                 490 net/bridge/br_stp.c 	if (br_supersedes_port_info(p, bpdu)) {
p                 491 net/bridge/br_stp.c 		br_record_config_information(p, bpdu);
p                 506 net/bridge/br_stp.c 		if (p->port_no == br->root_port) {
p                 512 net/bridge/br_stp.c 	} else if (br_is_designated_port(p)) {
p                 513 net/bridge/br_stp.c 		br_reply(p);
p                 518 net/bridge/br_stp.c void br_received_tcn_bpdu(struct net_bridge_port *p)
p                 520 net/bridge/br_stp.c 	if (br_is_designated_port(p)) {
p                 521 net/bridge/br_stp.c 		br_info(p->br, "port %u(%s) received tcn bpdu\n",
p                 522 net/bridge/br_stp.c 			(unsigned int) p->port_no, p->dev->name);
p                 524 net/bridge/br_stp.c 		br_topology_change_detection(p->br);
p                 525 net/bridge/br_stp.c 		br_topology_change_acknowledge(p);
p                  35 net/bridge/br_stp_bpdu.c static void br_send_bpdu(struct net_bridge_port *p,
p                  44 net/bridge/br_stp_bpdu.c 	skb->dev = p->dev;
p                  55 net/bridge/br_stp_bpdu.c 	llc_mac_hdr_init(skb, p->dev->dev_addr, p->br->group_addr);
p                  60 net/bridge/br_stp_bpdu.c 		dev_net(p->dev), NULL, skb, NULL, skb->dev,
p                  79 net/bridge/br_stp_bpdu.c void br_send_config_bpdu(struct net_bridge_port *p, struct br_config_bpdu *bpdu)
p                  83 net/bridge/br_stp_bpdu.c 	if (p->br->stp_enabled != BR_KERNEL_STP)
p                 120 net/bridge/br_stp_bpdu.c 	br_send_bpdu(p, buf, 35);
p                 124 net/bridge/br_stp_bpdu.c void br_send_tcn_bpdu(struct net_bridge_port *p)
p                 128 net/bridge/br_stp_bpdu.c 	if (p->br->stp_enabled != BR_KERNEL_STP)
p                 135 net/bridge/br_stp_bpdu.c 	br_send_bpdu(p, buf, 4);
p                 146 net/bridge/br_stp_bpdu.c 	struct net_bridge_port *p;
p                 158 net/bridge/br_stp_bpdu.c 	p = br_port_get_check_rcu(dev);
p                 159 net/bridge/br_stp_bpdu.c 	if (!p)
p                 162 net/bridge/br_stp_bpdu.c 	br = p->br;
p                 171 net/bridge/br_stp_bpdu.c 	if (p->state == BR_STATE_DISABLED)
p                 177 net/bridge/br_stp_bpdu.c 	if (p->flags & BR_BPDU_GUARD) {
p                 179 net/bridge/br_stp_bpdu.c 			  (unsigned int) p->port_no, p->dev->name);
p                 180 net/bridge/br_stp_bpdu.c 		br_stp_disable_port(p);
p                 226 net/bridge/br_stp_bpdu.c 				br_notice(p->br,
p                 229 net/bridge/br_stp_bpdu.c 					  p->port_no,
p                 235 net/bridge/br_stp_bpdu.c 		br_received_config_bpdu(p, &bpdu);
p                 237 net/bridge/br_stp_bpdu.c 		br_received_tcn_bpdu(p);
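
br_send_config_bpdu() hands br_send_bpdu() a 35-byte buffer and br_send_tcn_bpdu() a 4-byte one; those sizes correspond to the IEEE 802.1D configuration and topology-change-notification BPDU bodies. The encoder below lays out the standard fields by hand (big-endian, timer values in 1/256 s units) as an independent illustration of the format rather than the kernel's implementation:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define BPDU_TYPE_CONFIG	0x00
#define BPDU_TYPE_TCN		0x80

static void put_be16(uint8_t *p, uint16_t v) { p[0] = v >> 8; p[1] = v; }
static void put_be32(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

/* Encode an 802.1D configuration BPDU: 35 bytes, matching the length passed
 * to br_send_bpdu() in the excerpt above. */
static size_t encode_config_bpdu(uint8_t buf[35],
				 const uint8_t root_id[8], uint32_t root_cost,
				 const uint8_t bridge_id[8], uint16_t port_id,
				 uint16_t msg_age, uint16_t max_age,
				 uint16_t hello, uint16_t fwd_delay,
				 int topology_change, int tc_ack)
{
	put_be16(buf + 0, 0);			/* protocol identifier */
	buf[2] = 0;				/* protocol version */
	buf[3] = BPDU_TYPE_CONFIG;		/* BPDU type */
	buf[4] = (topology_change ? 0x01 : 0) | (tc_ack ? 0x80 : 0);
	memcpy(buf + 5, root_id, 8);
	put_be32(buf + 13, root_cost);
	memcpy(buf + 17, bridge_id, 8);
	put_be16(buf + 25, port_id);
	put_be16(buf + 27, msg_age);
	put_be16(buf + 29, max_age);
	put_be16(buf + 31, hello);
	put_be16(buf + 33, fwd_delay);
	return 35;
}

/* TCN BPDU: just the 4-byte header with type 0x80. */
static size_t encode_tcn_bpdu(uint8_t buf[4])
{
	put_be16(buf, 0);
	buf[2] = 0;
	buf[3] = BPDU_TYPE_TCN;
	return 4;
}

int main(void)
{
	uint8_t root[8] = {0x80, 0x00, 0, 0, 0, 0, 0, 1};
	uint8_t me[8]   = {0x80, 0x00, 0, 0, 0, 0, 0, 2};
	uint8_t cfg[35], tcn[4];

	printf("config=%zu tcn=%zu bytes\n",
	       encode_config_bpdu(cfg, root, 4, me, 0x8001,
				  0, 20 * 256, 2 * 256, 15 * 256, 0, 0),
	       encode_tcn_bpdu(tcn));
	return 0;
}
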
p                  33 net/bridge/br_stp_if.c void br_init_port(struct net_bridge_port *p)
p                  37 net/bridge/br_stp_if.c 	p->port_id = br_make_port_id(p->priority, p->port_no);
p                  38 net/bridge/br_stp_if.c 	br_become_designated_port(p);
p                  39 net/bridge/br_stp_if.c 	br_set_state(p, BR_STATE_BLOCKING);
p                  40 net/bridge/br_stp_if.c 	p->topology_change_ack = 0;
p                  41 net/bridge/br_stp_if.c 	p->config_pending = 0;
p                  43 net/bridge/br_stp_if.c 	err = __set_ageing_time(p->dev, p->br->ageing_time);
p                  45 net/bridge/br_stp_if.c 		netdev_err(p->dev, "failed to offload ageing time\n");
p                  51 net/bridge/br_stp_if.c 	struct net_bridge_port *p;
p                  60 net/bridge/br_stp_if.c 	list_for_each_entry(p, &br->port_list, list) {
p                  61 net/bridge/br_stp_if.c 		if (netif_running(p->dev) && netif_oper_up(p->dev))
p                  62 net/bridge/br_stp_if.c 			br_stp_enable_port(p);
p                  71 net/bridge/br_stp_if.c 	struct net_bridge_port *p;
p                  74 net/bridge/br_stp_if.c 	list_for_each_entry(p, &br->port_list, list) {
p                  75 net/bridge/br_stp_if.c 		if (p->state != BR_STATE_DISABLED)
p                  76 net/bridge/br_stp_if.c 			br_stp_disable_port(p);
p                  91 net/bridge/br_stp_if.c void br_stp_enable_port(struct net_bridge_port *p)
p                  93 net/bridge/br_stp_if.c 	br_init_port(p);
p                  94 net/bridge/br_stp_if.c 	br_port_state_selection(p->br);
p                  95 net/bridge/br_stp_if.c 	br_ifinfo_notify(RTM_NEWLINK, NULL, p);
p                  99 net/bridge/br_stp_if.c void br_stp_disable_port(struct net_bridge_port *p)
p                 101 net/bridge/br_stp_if.c 	struct net_bridge *br = p->br;
p                 105 net/bridge/br_stp_if.c 	br_become_designated_port(p);
p                 106 net/bridge/br_stp_if.c 	br_set_state(p, BR_STATE_DISABLED);
p                 107 net/bridge/br_stp_if.c 	p->topology_change_ack = 0;
p                 108 net/bridge/br_stp_if.c 	p->config_pending = 0;
p                 110 net/bridge/br_stp_if.c 	br_ifinfo_notify(RTM_NEWLINK, NULL, p);
p                 112 net/bridge/br_stp_if.c 	del_timer(&p->message_age_timer);
p                 113 net/bridge/br_stp_if.c 	del_timer(&p->forward_delay_timer);
p                 114 net/bridge/br_stp_if.c 	del_timer(&p->hold_timer);
p                 116 net/bridge/br_stp_if.c 	if (!rcu_access_pointer(p->backup_port))
p                 117 net/bridge/br_stp_if.c 		br_fdb_delete_by_port(br, p, 0, 0);
p                 118 net/bridge/br_stp_if.c 	br_multicast_disable_port(p);
p                 218 net/bridge/br_stp_if.c 	struct net_bridge_port *p;
p                 229 net/bridge/br_stp_if.c 	list_for_each_entry(p, &br->port_list, list) {
p                 230 net/bridge/br_stp_if.c 		if (ether_addr_equal(p->designated_bridge.addr, oldaddr))
p                 231 net/bridge/br_stp_if.c 			memcpy(p->designated_bridge.addr, addr, ETH_ALEN);
p                 233 net/bridge/br_stp_if.c 		if (ether_addr_equal(p->designated_root.addr, oldaddr))
p                 234 net/bridge/br_stp_if.c 			memcpy(p->designated_root.addr, addr, ETH_ALEN);
p                 252 net/bridge/br_stp_if.c 	struct net_bridge_port *p;
p                 258 net/bridge/br_stp_if.c 	list_for_each_entry(p, &br->port_list, list) {
p                 260 net/bridge/br_stp_if.c 		    memcmp(p->dev->dev_addr, addr, ETH_ALEN) < 0)
p                 261 net/bridge/br_stp_if.c 			addr = p->dev->dev_addr;
p                 275 net/bridge/br_stp_if.c 	struct net_bridge_port *p;
p                 281 net/bridge/br_stp_if.c 	list_for_each_entry(p, &br->port_list, list) {
p                 282 net/bridge/br_stp_if.c 		if (p->state != BR_STATE_DISABLED &&
p                 283 net/bridge/br_stp_if.c 		    br_is_designated_port(p)) {
p                 284 net/bridge/br_stp_if.c 			p->designated_bridge.prio[0] = (newprio >> 8) & 0xFF;
p                 285 net/bridge/br_stp_if.c 			p->designated_bridge.prio[1] = newprio & 0xFF;
p                 300 net/bridge/br_stp_if.c int br_stp_set_port_priority(struct net_bridge_port *p, unsigned long newprio)
p                 307 net/bridge/br_stp_if.c 	new_port_id = br_make_port_id(newprio, p->port_no);
p                 308 net/bridge/br_stp_if.c 	if (br_is_designated_port(p))
p                 309 net/bridge/br_stp_if.c 		p->designated_port = new_port_id;
p                 311 net/bridge/br_stp_if.c 	p->port_id = new_port_id;
p                 312 net/bridge/br_stp_if.c 	p->priority = newprio;
p                 313 net/bridge/br_stp_if.c 	if (!memcmp(&p->br->bridge_id, &p->designated_bridge, 8) &&
p                 314 net/bridge/br_stp_if.c 	    p->port_id < p->designated_port) {
p                 315 net/bridge/br_stp_if.c 		br_become_designated_port(p);
p                 316 net/bridge/br_stp_if.c 		br_port_state_selection(p->br);
p                 323 net/bridge/br_stp_if.c int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost)
p                 329 net/bridge/br_stp_if.c 	p->flags |= BR_ADMIN_COST;
p                 330 net/bridge/br_stp_if.c 	p->path_cost = path_cost;
p                 331 net/bridge/br_stp_if.c 	br_configuration_update(p->br);
p                 332 net/bridge/br_stp_if.c 	br_port_state_selection(p->br);
p                  19 net/bridge/br_stp_timer.c 	struct net_bridge_port *p;
p                  21 net/bridge/br_stp_timer.c 	list_for_each_entry(p, &br->port_list, list) {
p                  22 net/bridge/br_stp_timer.c 		if (p->state != BR_STATE_DISABLED &&
p                  23 net/bridge/br_stp_timer.c 		    !memcmp(&p->designated_bridge, &br->bridge_id, 8))
p                  48 net/bridge/br_stp_timer.c 	struct net_bridge_port *p = from_timer(p, t, message_age_timer);
p                  49 net/bridge/br_stp_timer.c 	struct net_bridge *br = p->br;
p                  50 net/bridge/br_stp_timer.c 	const bridge_id *id = &p->designated_bridge;
p                  53 net/bridge/br_stp_timer.c 	if (p->state == BR_STATE_DISABLED)
p                  57 net/bridge/br_stp_timer.c 		(unsigned int) p->port_no, p->dev->name,
p                  66 net/bridge/br_stp_timer.c 	if (p->state == BR_STATE_DISABLED)
p                  70 net/bridge/br_stp_timer.c 	br_become_designated_port(p);
p                  81 net/bridge/br_stp_timer.c 	struct net_bridge_port *p = from_timer(p, t, forward_delay_timer);
p                  82 net/bridge/br_stp_timer.c 	struct net_bridge *br = p->br;
p                  85 net/bridge/br_stp_timer.c 		 (unsigned int) p->port_no, p->dev->name);
p                  87 net/bridge/br_stp_timer.c 	if (p->state == BR_STATE_LISTENING) {
p                  88 net/bridge/br_stp_timer.c 		br_set_state(p, BR_STATE_LEARNING);
p                  89 net/bridge/br_stp_timer.c 		mod_timer(&p->forward_delay_timer,
p                  91 net/bridge/br_stp_timer.c 	} else if (p->state == BR_STATE_LEARNING) {
p                  92 net/bridge/br_stp_timer.c 		br_set_state(p, BR_STATE_FORWARDING);
p                  98 net/bridge/br_stp_timer.c 	br_ifinfo_notify(RTM_NEWLINK, NULL, p);
p                 130 net/bridge/br_stp_timer.c 	struct net_bridge_port *p = from_timer(p, t, hold_timer);
p                 132 net/bridge/br_stp_timer.c 	br_debug(p->br, "port %u(%s) hold timer expired\n",
p                 133 net/bridge/br_stp_timer.c 		 (unsigned int) p->port_no, p->dev->name);
p                 135 net/bridge/br_stp_timer.c 	spin_lock(&p->br->lock);
p                 136 net/bridge/br_stp_timer.c 	if (p->config_pending)
p                 137 net/bridge/br_stp_timer.c 		br_transmit_config(p);
p                 138 net/bridge/br_stp_timer.c 	spin_unlock(&p->br->lock);
p                 149 net/bridge/br_stp_timer.c void br_stp_port_timer_init(struct net_bridge_port *p)
p                 151 net/bridge/br_stp_timer.c 	timer_setup(&p->message_age_timer, br_message_age_timer_expired, 0);
p                 152 net/bridge/br_stp_timer.c 	timer_setup(&p->forward_delay_timer, br_forward_delay_timer_expired, 0);
p                 153 net/bridge/br_stp_timer.c 	timer_setup(&p->hold_timer, br_hold_timer_expired, 0);
p                  13 net/bridge/br_switchdev.c 	struct net_bridge_port *p;
p                  16 net/bridge/br_switchdev.c 	list_for_each_entry(p, &br->port_list, list) {
p                  17 net/bridge/br_switchdev.c 		if (netdev_port_same_parent_id(dev, p->dev))
p                  18 net/bridge/br_switchdev.c 			return p->offload_fwd_mark;
p                  24 net/bridge/br_switchdev.c int nbp_switchdev_mark_set(struct net_bridge_port *p)
p                  31 net/bridge/br_switchdev.c 	err = dev_get_port_parent_id(p->dev, &ppid, true);
p                  38 net/bridge/br_switchdev.c 	p->offload_fwd_mark = br_switchdev_mark_get(p->br, p->dev);
p                  43 net/bridge/br_switchdev.c void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
p                  46 net/bridge/br_switchdev.c 	if (skb->offload_fwd_mark && !WARN_ON_ONCE(!p->offload_fwd_mark))
p                  47 net/bridge/br_switchdev.c 		BR_INPUT_SKB_CB(skb)->offload_fwd_mark = p->offload_fwd_mark;
p                  50 net/bridge/br_switchdev.c bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
p                  54 net/bridge/br_switchdev.c 	       BR_INPUT_SKB_CB(skb)->offload_fwd_mark != p->offload_fwd_mark;
p                  61 net/bridge/br_switchdev.c int br_switchdev_set_port_flag(struct net_bridge_port *p,
p                  66 net/bridge/br_switchdev.c 		.orig_dev = p->dev,
p                  79 net/bridge/br_switchdev.c 	err = call_switchdev_notifiers(SWITCHDEV_PORT_ATTR_SET, p->dev,
p                  86 net/bridge/br_switchdev.c 		br_warn(p->br, "bridge flag offload is not supported %u(%s)\n",
p                  87 net/bridge/br_switchdev.c 			(unsigned int)p->port_no, p->dev->name);
p                  95 net/bridge/br_switchdev.c 	err = switchdev_port_attr_set(p->dev, &attr);
p                  97 net/bridge/br_switchdev.c 		br_warn(p->br, "error setting offload flag on port %u(%s)\n",
p                  98 net/bridge/br_switchdev.c 			(unsigned int)p->port_no, p->dev->name);
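
nbp_switchdev_allowed_egress() lets a frame that the switch ASIC already forwarded skip software flooding out of other ports behind the same ASIC: egress is denied only when the frame carries an offload mark equal to the egress port's offload_fwd_mark. A small model of that check (made-up struct names):

#include <stdbool.h>
#include <stdio.h>

struct sw_port {
	int offload_fwd_mark;	/* same value for ports on the same switch */
};

struct frame {
	bool offload_fwd_mark;	/* hardware already forwarded this frame */
	int ingress_mark;	/* mark copied from the ingress port */
};

/* Egress is allowed unless the hardware already handled forwarding for the
 * switch this port belongs to, mirroring the test in the excerpt above. */
static bool allowed_egress(const struct sw_port *p, const struct frame *f)
{
	return !f->offload_fwd_mark || f->ingress_mark != p->offload_fwd_mark;
}

int main(void)
{
	struct sw_port same = { .offload_fwd_mark = 7 };
	struct sw_port other = { .offload_fwd_mark = 9 };
	struct frame f = { .offload_fwd_mark = true, .ingress_mark = 7 };

	printf("same switch: %d, other switch: %d\n",
	       allowed_egress(&same, &f), allowed_egress(&other, &f));
	return 0;
}
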
p                  44 net/bridge/br_sysfs_if.c static ssize_t show_##_name(struct net_bridge_port *p, char *buf) \
p                  46 net/bridge/br_sysfs_if.c 	return sprintf(buf, "%d\n", !!(p->flags & _mask));	\
p                  48 net/bridge/br_sysfs_if.c static int store_##_name(struct net_bridge_port *p, unsigned long v) \
p                  50 net/bridge/br_sysfs_if.c 	return store_flag(p, v, _mask);				\
p                  55 net/bridge/br_sysfs_if.c static int store_flag(struct net_bridge_port *p, unsigned long v,
p                  60 net/bridge/br_sysfs_if.c 	flags = p->flags;
p                  67 net/bridge/br_sysfs_if.c 	if (flags != p->flags) {
p                  68 net/bridge/br_sysfs_if.c 		p->flags = flags;
p                  69 net/bridge/br_sysfs_if.c 		br_port_flags_change(p, mask);
p                  74 net/bridge/br_sysfs_if.c static ssize_t show_path_cost(struct net_bridge_port *p, char *buf)
p                  76 net/bridge/br_sysfs_if.c 	return sprintf(buf, "%d\n", p->path_cost);
p                  82 net/bridge/br_sysfs_if.c static ssize_t show_priority(struct net_bridge_port *p, char *buf)
p                  84 net/bridge/br_sysfs_if.c 	return sprintf(buf, "%d\n", p->priority);
p                  90 net/bridge/br_sysfs_if.c static ssize_t show_designated_root(struct net_bridge_port *p, char *buf)
p                  92 net/bridge/br_sysfs_if.c 	return br_show_bridge_id(buf, &p->designated_root);
p                  96 net/bridge/br_sysfs_if.c static ssize_t show_designated_bridge(struct net_bridge_port *p, char *buf)
p                  98 net/bridge/br_sysfs_if.c 	return br_show_bridge_id(buf, &p->designated_bridge);
p                 102 net/bridge/br_sysfs_if.c static ssize_t show_designated_port(struct net_bridge_port *p, char *buf)
p                 104 net/bridge/br_sysfs_if.c 	return sprintf(buf, "%d\n", p->designated_port);
p                 108 net/bridge/br_sysfs_if.c static ssize_t show_designated_cost(struct net_bridge_port *p, char *buf)
p                 110 net/bridge/br_sysfs_if.c 	return sprintf(buf, "%d\n", p->designated_cost);
p                 114 net/bridge/br_sysfs_if.c static ssize_t show_port_id(struct net_bridge_port *p, char *buf)
p                 116 net/bridge/br_sysfs_if.c 	return sprintf(buf, "0x%x\n", p->port_id);
p                 120 net/bridge/br_sysfs_if.c static ssize_t show_port_no(struct net_bridge_port *p, char *buf)
p                 122 net/bridge/br_sysfs_if.c 	return sprintf(buf, "0x%x\n", p->port_no);
p                 127 net/bridge/br_sysfs_if.c static ssize_t show_change_ack(struct net_bridge_port *p, char *buf)
p                 129 net/bridge/br_sysfs_if.c 	return sprintf(buf, "%d\n", p->topology_change_ack);
p                 133 net/bridge/br_sysfs_if.c static ssize_t show_config_pending(struct net_bridge_port *p, char *buf)
p                 135 net/bridge/br_sysfs_if.c 	return sprintf(buf, "%d\n", p->config_pending);
p                 139 net/bridge/br_sysfs_if.c static ssize_t show_port_state(struct net_bridge_port *p, char *buf)
p                 141 net/bridge/br_sysfs_if.c 	return sprintf(buf, "%d\n", p->state);
p                 145 net/bridge/br_sysfs_if.c static ssize_t show_message_age_timer(struct net_bridge_port *p,
p                 148 net/bridge/br_sysfs_if.c 	return sprintf(buf, "%ld\n", br_timer_value(&p->message_age_timer));
p                 152 net/bridge/br_sysfs_if.c static ssize_t show_forward_delay_timer(struct net_bridge_port *p,
p                 155 net/bridge/br_sysfs_if.c 	return sprintf(buf, "%ld\n", br_timer_value(&p->forward_delay_timer));
p                 159 net/bridge/br_sysfs_if.c static ssize_t show_hold_timer(struct net_bridge_port *p,
p                 162 net/bridge/br_sysfs_if.c 	return sprintf(buf, "%ld\n", br_timer_value(&p->hold_timer));
p                 166 net/bridge/br_sysfs_if.c static int store_flush(struct net_bridge_port *p, unsigned long v)
p                 168 net/bridge/br_sysfs_if.c 	br_fdb_delete_by_port(p->br, p, 0, 0); // Don't delete local entry
p                 173 net/bridge/br_sysfs_if.c static ssize_t show_group_fwd_mask(struct net_bridge_port *p, char *buf)
p                 175 net/bridge/br_sysfs_if.c 	return sprintf(buf, "%#x\n", p->group_fwd_mask);
p                 178 net/bridge/br_sysfs_if.c static int store_group_fwd_mask(struct net_bridge_port *p,
p                 183 net/bridge/br_sysfs_if.c 	p->group_fwd_mask = v;
p                 190 net/bridge/br_sysfs_if.c static ssize_t show_backup_port(struct net_bridge_port *p, char *buf)
p                 196 net/bridge/br_sysfs_if.c 	backup_p = rcu_dereference(p->backup_port);
p                 204 net/bridge/br_sysfs_if.c static int store_backup_port(struct net_bridge_port *p, char *buf)
p                 213 net/bridge/br_sysfs_if.c 		backup_dev = __dev_get_by_name(dev_net(p->dev), buf);
p                 218 net/bridge/br_sysfs_if.c 	return nbp_backup_change(p, backup_dev);
p                 235 net/bridge/br_sysfs_if.c static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
p                 237 net/bridge/br_sysfs_if.c 	return sprintf(buf, "%d\n", p->multicast_router);
p                 240 net/bridge/br_sysfs_if.c static int store_multicast_router(struct net_bridge_port *p,
p                 243 net/bridge/br_sysfs_if.c 	return br_multicast_set_port_router(p, v);
p                 295 net/bridge/br_sysfs_if.c 	struct net_bridge_port *p = kobj_to_brport(kobj);
p                 300 net/bridge/br_sysfs_if.c 	return brport_attr->show(p, buf);
p                 308 net/bridge/br_sysfs_if.c 	struct net_bridge_port *p = kobj_to_brport(kobj);
p                 313 net/bridge/br_sysfs_if.c 	if (!ns_capable(dev_net(p->dev)->user_ns, CAP_NET_ADMIN))
p                 327 net/bridge/br_sysfs_if.c 		spin_lock_bh(&p->br->lock);
p                 328 net/bridge/br_sysfs_if.c 		ret = brport_attr->store_raw(p, buf_copy);
p                 329 net/bridge/br_sysfs_if.c 		spin_unlock_bh(&p->br->lock);
p                 335 net/bridge/br_sysfs_if.c 		spin_lock_bh(&p->br->lock);
p                 336 net/bridge/br_sysfs_if.c 		ret = brport_attr->store(p, val);
p                 337 net/bridge/br_sysfs_if.c 		spin_unlock_bh(&p->br->lock);
p                 341 net/bridge/br_sysfs_if.c 		br_ifinfo_notify(RTM_NEWLINK, NULL, p);
p                 360 net/bridge/br_sysfs_if.c int br_sysfs_addif(struct net_bridge_port *p)
p                 362 net/bridge/br_sysfs_if.c 	struct net_bridge *br = p->br;
p                 366 net/bridge/br_sysfs_if.c 	err = sysfs_create_link(&p->kobj, &br->dev->dev.kobj,
p                 372 net/bridge/br_sysfs_if.c 		err = sysfs_create_file(&p->kobj, &((*a)->attr));
p                 377 net/bridge/br_sysfs_if.c 	strlcpy(p->sysfs_name, p->dev->name, IFNAMSIZ);
p                 378 net/bridge/br_sysfs_if.c 	return sysfs_create_link(br->ifobj, &p->kobj, p->sysfs_name);
p                 382 net/bridge/br_sysfs_if.c int br_sysfs_renameif(struct net_bridge_port *p)
p                 384 net/bridge/br_sysfs_if.c 	struct net_bridge *br = p->br;
p                 390 net/bridge/br_sysfs_if.c 	if (!strncmp(p->sysfs_name, p->dev->name, IFNAMSIZ))
p                 393 net/bridge/br_sysfs_if.c 	err = sysfs_rename_link(br->ifobj, &p->kobj,
p                 394 net/bridge/br_sysfs_if.c 				p->sysfs_name, p->dev->name);
p                 397 net/bridge/br_sysfs_if.c 			      p->sysfs_name, p->dev->name);
p                 399 net/bridge/br_sysfs_if.c 		strlcpy(p->sysfs_name, p->dev->name, IFNAMSIZ);
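
The br_sysfs_if.c excerpt above attaches each per-port sysfs file to a show/store callback pair and dispatches through a generic handler that parses the value and takes the bridge lock around stores. A userspace model of that attribute-table dispatch (hypothetical names, a plain string in place of the sysfs buffer, no capability check or locking):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct port {
	unsigned long flags;
	int priority;
};

struct port_attr {
	const char *name;
	int (*show)(const struct port *p, char *buf, size_t len);
	int (*store)(struct port *p, unsigned long v);
};

static int show_priority(const struct port *p, char *buf, size_t len)
{
	return snprintf(buf, len, "%d\n", p->priority);
}

static int store_priority(struct port *p, unsigned long v)
{
	p->priority = (int)v;
	return 0;
}

static const struct port_attr attrs[] = {
	{ "priority", show_priority, store_priority },
};

/* Generic store: find the attribute by name, parse the value, dispatch.
 * The kernel version additionally checks CAP_NET_ADMIN and takes br->lock. */
static int port_store(struct port *p, const char *name, const char *val)
{
	for (size_t i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		if (strcmp(attrs[i].name, name))
			continue;
		return attrs[i].store(p, strtoul(val, NULL, 0));
	}
	return -1;
}

int main(void)
{
	struct port p = { .priority = 32 };
	char buf[32];

	port_store(&p, "priority", "16");
	show_priority(&p, buf, sizeof(buf));
	fputs(buf, stdout);
	return 0;
}
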
p                  11 net/bridge/br_vlan.c static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);
p                 225 net/bridge/br_vlan.c 	struct net_bridge_port *p = NULL;
p                 236 net/bridge/br_vlan.c 		p = v->port;
p                 237 net/bridge/br_vlan.c 		br = p->br;
p                 238 net/bridge/br_vlan.c 		dev = p->dev;
p                 239 net/bridge/br_vlan.c 		vg = nbp_vlan_group(p);
p                 242 net/bridge/br_vlan.c 	if (p) {
p                 284 net/bridge/br_vlan.c 		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
p                 300 net/bridge/br_vlan.c 	if (p)
p                 301 net/bridge/br_vlan.c 		nbp_vlan_set_vlan_dev_state(p, v->vid);
p                 307 net/bridge/br_vlan.c 		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
p                 312 net/bridge/br_vlan.c 	if (p) {
p                 333 net/bridge/br_vlan.c 	struct net_bridge_port *p = NULL;
p                 339 net/bridge/br_vlan.c 		p = v->port;
p                 345 net/bridge/br_vlan.c 	if (p) {
p                 346 net/bridge/br_vlan.c 		err = __vlan_vid_del(p->dev, p->br, v);
p                 366 net/bridge/br_vlan.c 		nbp_vlan_set_vlan_dev_state(p, v->vid);
p                 393 net/bridge/br_vlan.c 			       const struct net_bridge_port *p,
p                 435 net/bridge/br_vlan.c 	if (p && (p->flags & BR_VLAN_TUNNEL) &&
p                 570 net/bridge/br_vlan.c bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
p                 573 net/bridge/br_vlan.c 	struct net_bridge *br = p->br;
p                 579 net/bridge/br_vlan.c 	vg = nbp_vlan_group_rcu(p);
p                 813 net/bridge/br_vlan.c 	struct net_bridge_port *p;
p                 822 net/bridge/br_vlan.c 	list_for_each_entry(p, &br->port_list, list) {
p                 823 net/bridge/br_vlan.c 		vg = nbp_vlan_group(p);
p                 825 net/bridge/br_vlan.c 			err = vlan_vid_add(p->dev, proto, vlan->vid);
p                 838 net/bridge/br_vlan.c 	list_for_each_entry(p, &br->port_list, list) {
p                 839 net/bridge/br_vlan.c 		vg = nbp_vlan_group(p);
p                 841 net/bridge/br_vlan.c 			vlan_vid_del(p->dev, oldproto, vlan->vid);
p                 848 net/bridge/br_vlan.c 		vlan_vid_del(p->dev, proto, vlan->vid);
p                 850 net/bridge/br_vlan.c 	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
p                 851 net/bridge/br_vlan.c 		vg = nbp_vlan_group(p);
p                 853 net/bridge/br_vlan.c 			vlan_vid_del(p->dev, proto, vlan->vid);
p                 883 net/bridge/br_vlan.c 	struct net_bridge_port *p;
p                 886 net/bridge/br_vlan.c 	list_for_each_entry(p, &br->port_list, list) {
p                 887 net/bridge/br_vlan.c 		struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
p                 922 net/bridge/br_vlan.c 	struct net_bridge_port *p;
p                 931 net/bridge/br_vlan.c 	list_for_each_entry(p, &br->port_list, list) {
p                 932 net/bridge/br_vlan.c 		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
p                 933 net/bridge/br_vlan.c 			nbp_vlan_delete(p, pvid);
p                 944 net/bridge/br_vlan.c 	struct net_bridge_port *p;
p                 979 net/bridge/br_vlan.c 	list_for_each_entry(p, &br->port_list, list) {
p                 983 net/bridge/br_vlan.c 		vg = nbp_vlan_group(p);
p                 989 net/bridge/br_vlan.c 		err = nbp_vlan_add(p, pvid,
p                 995 net/bridge/br_vlan.c 		nbp_vlan_delete(p, old_pvid);
p                 996 net/bridge/br_vlan.c 		set_bit(p->port_no, changed);
p                1006 net/bridge/br_vlan.c 	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
p                1007 net/bridge/br_vlan.c 		if (!test_bit(p->port_no, changed))
p                1011 net/bridge/br_vlan.c 			nbp_vlan_add(p, old_pvid,
p                1015 net/bridge/br_vlan.c 		nbp_vlan_delete(p, pvid);
p                1082 net/bridge/br_vlan.c int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
p                1085 net/bridge/br_vlan.c 		.orig_dev = p->br->dev,
p                1088 net/bridge/br_vlan.c 		.u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
p                1097 net/bridge/br_vlan.c 	ret = switchdev_port_attr_set(p->dev, &attr);
p                1108 net/bridge/br_vlan.c 	rcu_assign_pointer(p->vlgrp, vg);
p                1109 net/bridge/br_vlan.c 	if (p->br->default_pvid) {
p                1112 net/bridge/br_vlan.c 		ret = nbp_vlan_add(p, p->br->default_pvid,
p                1123 net/bridge/br_vlan.c 	RCU_INIT_POINTER(p->vlgrp, NULL);
p                1233 net/bridge/br_vlan.c 			      struct net_bridge_port *p, u16 *p_pvid)
p                1237 net/bridge/br_vlan.c 	if (p)
p                1238 net/bridge/br_vlan.c 		vg = nbp_vlan_group(p);
p                1267 net/bridge/br_vlan.c 	struct net_bridge_port *p;
p                1270 net/bridge/br_vlan.c 	p = br_port_get_check_rtnl(dev);
p                1271 net/bridge/br_vlan.c 	if (p)
p                1272 net/bridge/br_vlan.c 		vg = nbp_vlan_group(p);
p                1359 net/bridge/br_vlan.c 	struct net_bridge_port *p;
p                1367 net/bridge/br_vlan.c 	list_for_each_entry(p, &br->port_list, list) {
p                1368 net/bridge/br_vlan.c 		vg = nbp_vlan_group(p);
p                1369 net/bridge/br_vlan.c 		if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
p                1381 net/bridge/br_vlan.c static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
p                1383 net/bridge/br_vlan.c 	struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
p                1388 net/bridge/br_vlan.c 		vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
p                1391 net/bridge/br_vlan.c 			if (br_vlan_is_dev_up(p->dev)) {
p                1392 net/bridge/br_vlan.c 				if (netif_carrier_ok(p->br->dev))
p                1395 net/bridge/br_vlan.c 				br_vlan_set_vlan_dev_state(p->br, vlan_dev);
p                1448 net/bridge/br_vlan.c static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
p                1452 net/bridge/br_vlan.c 	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
p                1455 net/bridge/br_vlan.c 	vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
p                1457 net/bridge/br_vlan.c 		br_vlan_set_vlan_dev_state(p->br, vlan_dev);
p                1495 net/bridge/br_vlan.c void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
p                1497 net/bridge/br_vlan.c 	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
p                1504 net/bridge/br_vlan.c 		br_vlan_set_all_vlan_dev_state(p);
p                 156 net/bridge/br_vlan_tunnel.c 				  struct net_bridge_port *p,
p                 176 net/bridge/br_vlan_tunnel.c 	__vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan->vid);
p                  27 net/bridge/netfilter/ebt_among.c 	const struct ebt_mac_wormhash_tuple *p;
p                  37 net/bridge/netfilter/ebt_among.c 			p = &wh->pool[i];
p                  38 net/bridge/netfilter/ebt_among.c 			if (cmp[1] == p->cmp[1] && cmp[0] == p->cmp[0])
p                  39 net/bridge/netfilter/ebt_among.c 				if (p->ip == 0 || p->ip == ip)
p                  44 net/bridge/netfilter/ebt_among.c 			p = &wh->pool[i];
p                  45 net/bridge/netfilter/ebt_among.c 			if (cmp[1] == p->cmp[1] && cmp[0] == p->cmp[0])
p                  46 net/bridge/netfilter/ebt_among.c 				if (p->ip == 0)
p                  41 net/bridge/netfilter/ebt_stp.c #define NR16(p) (p[0] << 8 | p[1])
p                  42 net/bridge/netfilter/ebt_stp.c #define NR32(p) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3])
p                  57 net/bridge/netfilter/ebtable_broute.c 	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
p                  62 net/bridge/netfilter/ebtable_broute.c 	if (!p || p->state != BR_STATE_FORWARDING)
p                  89 net/bridge/netfilter/ebtable_broute.c 	     ether_addr_equal(p->br->dev->dev_addr, dest))
p                 122 net/bridge/netfilter/ebtables.c 	const struct net_bridge_port *p;
p                 142 net/bridge/netfilter/ebtables.c 	if (in && (p = br_port_get_rcu(in)) != NULL &&
p                 144 net/bridge/netfilter/ebtables.c 		    ebt_dev_check(e->logical_in, p->br->dev)))
p                 146 net/bridge/netfilter/ebtables.c 	if (out && (p = br_port_get_rcu(out)) != NULL &&
p                 148 net/bridge/netfilter/ebtables.c 		    ebt_dev_check(e->logical_out, p->br->dev)))
p                1150 net/bridge/netfilter/ebtables.c 	void *p;
p                1170 net/bridge/netfilter/ebtables.c 	p = vmalloc(repl->entries_size);
p                1171 net/bridge/netfilter/ebtables.c 	if (!p)
p                1174 net/bridge/netfilter/ebtables.c 	memcpy(p, repl->entries, repl->entries_size);
p                1175 net/bridge/netfilter/ebtables.c 	newinfo->entries = p;
p                1189 net/bridge/netfilter/ebtables.c 			newinfo->hook_entry[i] = p +
p                  62 net/caif/cfctrl.c 	struct cfctrl_request_info *p, *tmp;
p                  66 net/caif/cfctrl.c 	list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
p                  67 net/caif/cfctrl.c 		list_del(&p->list);
p                  68 net/caif/cfctrl.c 		kfree(p);
p                 143 net/caif/cfctrl.c 	struct cfctrl_request_info *p, *tmp, *first;
p                 147 net/caif/cfctrl.c 	list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
p                 148 net/caif/cfctrl.c 		if (cfctrl_req_eq(req, p)) {
p                 149 net/caif/cfctrl.c 			if (p != first)
p                 153 net/caif/cfctrl.c 					 p->sequence_no);
p                 154 net/caif/cfctrl.c 			list_del(&p->list);
p                 158 net/caif/cfctrl.c 	p = NULL;
p                 160 net/caif/cfctrl.c 	return p;
p                 333 net/caif/cfctrl.c 	struct cfctrl_request_info *p, *tmp;
p                 338 net/caif/cfctrl.c 	list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
p                 339 net/caif/cfctrl.c 		if (p->client_layer == adap_layer) {
p                 340 net/caif/cfctrl.c 			list_del(&p->list);
p                 341 net/caif/cfctrl.c 			kfree(p);
p                 564 net/caif/cfctrl.c 		struct cfctrl_request_info *p, *tmp;
p                 568 net/caif/cfctrl.c 		list_for_each_entry_safe(p, tmp, &this->list, list) {
p                 569 net/caif/cfctrl.c 			if (p->param.phyid == phyid) {
p                 570 net/caif/cfctrl.c 				list_del(&p->list);
p                 571 net/caif/cfctrl.c 				p->client_layer->ctrlcmd(p->client_layer,
p                 574 net/caif/cfctrl.c 				kfree(p);
p                  89 net/ceph/auth.c int ceph_auth_entity_name_encode(const char *name, void **p, void *end)
p                  93 net/ceph/auth.c 	if (*p + 2*sizeof(u32) + len > end)
p                  95 net/ceph/auth.c 	ceph_encode_32(p, CEPH_ENTITY_TYPE_CLIENT);
p                  96 net/ceph/auth.c 	ceph_encode_32(p, len);
p                  97 net/ceph/auth.c 	ceph_encode_copy(p, name, len);
p                 108 net/ceph/auth.c 	void *p = monhdr + 1, *end = buf + len, *lenp;
p                 118 net/ceph/auth.c 	ceph_encode_32(&p, CEPH_AUTH_UNKNOWN);  /* no protocol, yet */
p                 120 net/ceph/auth.c 	lenp = p;
p                 121 net/ceph/auth.c 	p += sizeof(u32);
p                 123 net/ceph/auth.c 	ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
p                 124 net/ceph/auth.c 	ceph_encode_8(&p, 1);
p                 126 net/ceph/auth.c 	ceph_encode_32(&p, num);
p                 127 net/ceph/auth.c 	ceph_decode_need(&p, end, num * sizeof(u32), bad);
p                 129 net/ceph/auth.c 		ceph_encode_32(&p, supported_protocols[i]);
p                 131 net/ceph/auth.c 	ret = ceph_auth_entity_name_encode(ac->name, &p, end);
p                 134 net/ceph/auth.c 	ceph_decode_need(&p, end, sizeof(u64), bad);
p                 135 net/ceph/auth.c 	ceph_encode_64(&p, ac->global_id);
p                 137 net/ceph/auth.c 	ceph_encode_32(&lenp, p - lenp - sizeof(u32));
p                 138 net/ceph/auth.c 	ret = p - buf;
p                 152 net/ceph/auth.c 	void *p = monhdr + 1;
p                 160 net/ceph/auth.c 	ceph_encode_32(&p, ac->protocol);
p                 162 net/ceph/auth.c 	ret = ac->ops->build_request(ac, p + sizeof(u32), end);
p                 169 net/ceph/auth.c 	ceph_encode_32(&p, ret);
p                 170 net/ceph/auth.c 	ret = p + ret - msg_buf;
p                 182 net/ceph/auth.c 	void *p = buf;
p                 194 net/ceph/auth.c 	dout("handle_auth_reply %p %p\n", p, end);
p                 195 net/ceph/auth.c 	ceph_decode_need(&p, end, sizeof(u32) * 3 + sizeof(u64), bad);
p                 196 net/ceph/auth.c 	protocol = ceph_decode_32(&p);
p                 197 net/ceph/auth.c 	result = ceph_decode_32(&p);
p                 198 net/ceph/auth.c 	global_id = ceph_decode_64(&p);
p                 199 net/ceph/auth.c 	payload_len = ceph_decode_32(&p);
p                 200 net/ceph/auth.c 	payload = p;
p                 201 net/ceph/auth.c 	p += payload_len;
p                 202 net/ceph/auth.c 	ceph_decode_need(&p, end, sizeof(u32), bad);
p                 203 net/ceph/auth.c 	result_msg_len = ceph_decode_32(&p);
p                 204 net/ceph/auth.c 	result_msg = p;
p                 205 net/ceph/auth.c 	p += result_msg_len;
p                 206 net/ceph/auth.c 	if (p != end)
p                  45 net/ceph/auth_none.c 	void *p = au->buf;
p                  46 net/ceph/auth_none.c 	void *const end = p + sizeof(au->buf);
p                  49 net/ceph/auth_none.c 	ceph_encode_8_safe(&p, end, 1, e_range);
p                  50 net/ceph/auth_none.c 	ret = ceph_auth_entity_name_encode(ac->name, &p, end);
p                  54 net/ceph/auth_none.c 	ceph_encode_64_safe(&p, end, ac->global_id, e_range);
p                  55 net/ceph/auth_none.c 	au->buf_len = p - (void *)au->buf;
p                  74 net/ceph/auth_x.c static int __ceph_x_decrypt(struct ceph_crypto_key *secret, void *p,
p                  77 net/ceph/auth_x.c 	struct ceph_x_encrypt_header *hdr = p;
p                  81 net/ceph/auth_x.c 	ret = ceph_crypt(secret, false, p, ciphertext_len, ciphertext_len,
p                  94 net/ceph/auth_x.c static int ceph_x_decrypt(struct ceph_crypto_key *secret, void **p, void *end)
p                  99 net/ceph/auth_x.c 	ceph_decode_32_safe(p, end, ciphertext_len, e_inval);
p                 100 net/ceph/auth_x.c 	ceph_decode_need(p, end, ciphertext_len, e_inval);
p                 102 net/ceph/auth_x.c 	ret = __ceph_x_decrypt(secret, *p, ciphertext_len);
p                 106 net/ceph/auth_x.c 	*p += ciphertext_len;
p                 121 net/ceph/auth_x.c 	struct rb_node *parent = NULL, **p = &xi->ticket_handlers.rb_node;
p                 123 net/ceph/auth_x.c 	while (*p) {
p                 124 net/ceph/auth_x.c 		parent = *p;
p                 127 net/ceph/auth_x.c 			p = &(*p)->rb_left;
p                 129 net/ceph/auth_x.c 			p = &(*p)->rb_right;
p                 139 net/ceph/auth_x.c 	rb_link_node(&th->node, parent, p);
p                 159 net/ceph/auth_x.c 			      void **p, void *end)
p                 177 net/ceph/auth_x.c 	ceph_decode_need(p, end, sizeof(u32) + 1, bad);
p                 179 net/ceph/auth_x.c 	type = ceph_decode_32(p);
p                 182 net/ceph/auth_x.c 	tkt_struct_v = ceph_decode_8(p);
p                 193 net/ceph/auth_x.c 	dp = *p + ceph_x_encrypt_offset();
p                 194 net/ceph/auth_x.c 	ret = ceph_x_decrypt(secret, p, end);
p                 216 net/ceph/auth_x.c 	ceph_decode_8_safe(p, end, is_enc, bad);
p                 219 net/ceph/auth_x.c 		tp = *p + ceph_x_encrypt_offset();
p                 220 net/ceph/auth_x.c 		ret = ceph_x_decrypt(&th->session_key, p, end);
p                 228 net/ceph/auth_x.c 		ptp = p;
p                 270 net/ceph/auth_x.c 	void *p = buf;
p                 275 net/ceph/auth_x.c 	ceph_decode_8_safe(&p, end, reply_struct_v, bad);
p                 279 net/ceph/auth_x.c 	ceph_decode_32_safe(&p, end, num, bad);
p                 283 net/ceph/auth_x.c 		ret = process_one_ticket(ac, secret, &p, end);
p                 304 net/ceph/auth_x.c 	void *p, *end;
p                 309 net/ceph/auth_x.c 	p = (void *)(msg_a + 1) + le32_to_cpu(msg_a->ticket_blob.blob_len);
p                 312 net/ceph/auth_x.c 	msg_b = p + ceph_x_encrypt_offset();
p                 324 net/ceph/auth_x.c 	ret = ceph_x_encrypt(&au->session_key, p, end - p, sizeof(*msg_b));
p                 328 net/ceph/auth_x.c 	p += ret;
p                 330 net/ceph/auth_x.c 		WARN_ON(p != end);
p                 332 net/ceph/auth_x.c 		WARN_ON(p > end);
p                 333 net/ceph/auth_x.c 		au->buf->vec.iov_len = p - au->buf->vec.iov_base;
p                 415 net/ceph/auth_x.c 				void **p, void *end)
p                 417 net/ceph/auth_x.c 	ceph_decode_need(p, end, 1 + sizeof(u64), bad);
p                 418 net/ceph/auth_x.c 	ceph_encode_8(p, 1);
p                 419 net/ceph/auth_x.c 	ceph_encode_64(p, th->secret_id);
p                 424 net/ceph/auth_x.c 		ceph_encode_32_safe(p, end, len, bad);
p                 425 net/ceph/auth_x.c 		ceph_encode_copy_safe(p, end, buf, len, bad);
p                 427 net/ceph/auth_x.c 		ceph_encode_32_safe(p, end, 0, bad);
p                 503 net/ceph/auth_x.c 		void *p = auth + 1;
p                 509 net/ceph/auth_x.c 		if (p > end)
p                 533 net/ceph/auth_x.c 		ret = ceph_x_encode_ticket(th, &p, end);
p                 537 net/ceph/auth_x.c 		return p - buf;
p                 541 net/ceph/auth_x.c 		void *p = head + 1;
p                 544 net/ceph/auth_x.c 		if (p > end)
p                 551 net/ceph/auth_x.c 		ceph_encode_copy(&p, xi->auth_authorizer.buf->vec.iov_base,
p                 554 net/ceph/auth_x.c 		req = p;
p                 556 net/ceph/auth_x.c 		p += sizeof(*req);
p                 557 net/ceph/auth_x.c 		return p - buf;
p                 733 net/ceph/auth_x.c 	void *p = au->enc_buf;
p                 734 net/ceph/auth_x.c 	struct ceph_x_authorize_reply *reply = p + ceph_x_encrypt_offset();
p                 737 net/ceph/auth_x.c 	ret = ceph_x_decrypt(&au->session_key, &p, p + CEPHX_AU_ENC_BUF_LEN);
p                 766 net/ceph/auth_x.c 	struct rb_node *p;
p                 771 net/ceph/auth_x.c 	while ((p = rb_first(&xi->ticket_handlers)) != NULL) {
p                 773 net/ceph/auth_x.c 			rb_entry(p, struct ceph_x_ticket_handler, node);
p                  44 net/ceph/buffer.c int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end)
p                  48 net/ceph/buffer.c 	ceph_decode_need(p, end, sizeof(u32), bad);
p                  49 net/ceph/buffer.c 	len = ceph_decode_32(p);
p                  51 net/ceph/buffer.c 	ceph_decode_need(p, end, len, bad);
p                  55 net/ceph/buffer.c 	ceph_decode_copy(p, (*b)->vec.iov_base, len);
p                 200 net/ceph/ceph_common.c 	void *p;
p                 203 net/ceph/ceph_common.c 		p = kvmalloc(size, flags);
p                 206 net/ceph/ceph_common.c 		p = kvmalloc(size, GFP_KERNEL);
p                 210 net/ceph/ceph_common.c 		p = kvmalloc(size, GFP_KERNEL);
p                 214 net/ceph/ceph_common.c 	return p;
p                  34 net/ceph/cls_lock_client.c 	void *p, *end;
p                  54 net/ceph/cls_lock_client.c 	p = page_address(lock_op_page);
p                  55 net/ceph/cls_lock_client.c 	end = p + lock_op_buf_size;
p                  58 net/ceph/cls_lock_client.c 	ceph_start_encoding(&p, 1, 1,
p                  60 net/ceph/cls_lock_client.c 	ceph_encode_string(&p, end, lock_name, name_len);
p                  61 net/ceph/cls_lock_client.c 	ceph_encode_8(&p, type);
p                  62 net/ceph/cls_lock_client.c 	ceph_encode_string(&p, end, cookie, cookie_len);
p                  63 net/ceph/cls_lock_client.c 	ceph_encode_string(&p, end, tag, tag_len);
p                  64 net/ceph/cls_lock_client.c 	ceph_encode_string(&p, end, desc, desc_len);
p                  67 net/ceph/cls_lock_client.c 	ceph_encode_timespec64(p, &mtime);
p                  68 net/ceph/cls_lock_client.c 	p += sizeof(struct ceph_timespec);
p                  69 net/ceph/cls_lock_client.c 	ceph_encode_8(&p, flags);
p                  97 net/ceph/cls_lock_client.c 	void *p, *end;
p                 111 net/ceph/cls_lock_client.c 	p = page_address(unlock_op_page);
p                 112 net/ceph/cls_lock_client.c 	end = p + unlock_op_buf_size;
p                 115 net/ceph/cls_lock_client.c 	ceph_start_encoding(&p, 1, 1,
p                 117 net/ceph/cls_lock_client.c 	ceph_encode_string(&p, end, lock_name, name_len);
p                 118 net/ceph/cls_lock_client.c 	ceph_encode_string(&p, end, cookie, cookie_len);
p                 148 net/ceph/cls_lock_client.c 	void *p, *end;
p                 162 net/ceph/cls_lock_client.c 	p = page_address(break_op_page);
p                 163 net/ceph/cls_lock_client.c 	end = p + break_op_buf_size;
p                 166 net/ceph/cls_lock_client.c 	ceph_start_encoding(&p, 1, 1,
p                 168 net/ceph/cls_lock_client.c 	ceph_encode_string(&p, end, lock_name, name_len);
p                 169 net/ceph/cls_lock_client.c 	ceph_encode_copy(&p, locker, sizeof(*locker));
p                 170 net/ceph/cls_lock_client.c 	ceph_encode_string(&p, end, cookie, cookie_len);
p                 195 net/ceph/cls_lock_client.c 	void *p, *end;
p                 211 net/ceph/cls_lock_client.c 	p = page_address(cookie_op_page);
p                 212 net/ceph/cls_lock_client.c 	end = p + cookie_op_buf_size;
p                 215 net/ceph/cls_lock_client.c 	ceph_start_encoding(&p, 1, 1,
p                 217 net/ceph/cls_lock_client.c 	ceph_encode_string(&p, end, lock_name, name_len);
p                 218 net/ceph/cls_lock_client.c 	ceph_encode_8(&p, type);
p                 219 net/ceph/cls_lock_client.c 	ceph_encode_string(&p, end, old_cookie, old_cookie_len);
p                 220 net/ceph/cls_lock_client.c 	ceph_encode_string(&p, end, tag, tag_len);
p                 221 net/ceph/cls_lock_client.c 	ceph_encode_string(&p, end, new_cookie, new_cookie_len);
p                 245 net/ceph/cls_lock_client.c static int decode_locker(void **p, void *end, struct ceph_locker *locker)
p                 252 net/ceph/cls_lock_client.c 	ret = ceph_start_decoding(p, end, 1, "locker_id_t", &struct_v, &len);
p                 256 net/ceph/cls_lock_client.c 	ceph_decode_copy(p, &locker->id.name, sizeof(locker->id.name));
p                 257 net/ceph/cls_lock_client.c 	s = ceph_extract_encoded_string(p, end, NULL, GFP_NOIO);
p                 263 net/ceph/cls_lock_client.c 	ret = ceph_start_decoding(p, end, 1, "locker_info_t", &struct_v, &len);
p                 267 net/ceph/cls_lock_client.c 	*p += sizeof(struct ceph_timespec); /* skip expiration */
p                 269 net/ceph/cls_lock_client.c 	ret = ceph_decode_entity_addr(p, end, &locker->info.addr);
p                 273 net/ceph/cls_lock_client.c 	len = ceph_decode_32(p);
p                 274 net/ceph/cls_lock_client.c 	*p += len; /* skip description */
p                 282 net/ceph/cls_lock_client.c static int decode_lockers(void **p, void *end, u8 *type, char **tag,
p                 291 net/ceph/cls_lock_client.c 	ret = ceph_start_decoding(p, end, 1, "cls_lock_get_info_reply",
p                 296 net/ceph/cls_lock_client.c 	*num_lockers = ceph_decode_32(p);
p                 302 net/ceph/cls_lock_client.c 		ret = decode_locker(p, end, *lockers + i);
p                 307 net/ceph/cls_lock_client.c 	*type = ceph_decode_8(p);
p                 308 net/ceph/cls_lock_client.c 	s = ceph_extract_encoded_string(p, end, NULL, GFP_NOIO);
p                 338 net/ceph/cls_lock_client.c 	void *p, *end;
p                 356 net/ceph/cls_lock_client.c 	p = page_address(get_info_op_page);
p                 357 net/ceph/cls_lock_client.c 	end = p + get_info_op_buf_size;
p                 360 net/ceph/cls_lock_client.c 	ceph_start_encoding(&p, 1, 1,
p                 362 net/ceph/cls_lock_client.c 	ceph_encode_string(&p, end, lock_name, name_len);
p                 371 net/ceph/cls_lock_client.c 		p = page_address(reply_page);
p                 372 net/ceph/cls_lock_client.c 		end = p + reply_len;
p                 374 net/ceph/cls_lock_client.c 		ret = decode_lockers(&p, end, type, tag, lockers, num_lockers);
p                 391 net/ceph/cls_lock_client.c 	void *p, *end;
p                 409 net/ceph/cls_lock_client.c 	p = page_address(pages[0]);
p                 410 net/ceph/cls_lock_client.c 	end = p + assert_op_buf_size;
p                 413 net/ceph/cls_lock_client.c 	ceph_start_encoding(&p, 1, 1,
p                 415 net/ceph/cls_lock_client.c 	ceph_encode_string(&p, end, lock_name, name_len);
p                 416 net/ceph/cls_lock_client.c 	ceph_encode_8(&p, type);
p                 417 net/ceph/cls_lock_client.c 	ceph_encode_string(&p, end, cookie, cookie_len);
p                 418 net/ceph/cls_lock_client.c 	ceph_encode_string(&p, end, tag, tag_len);
p                 419 net/ceph/cls_lock_client.c 	WARN_ON(p != end);
p                  28 net/ceph/crush/crush.c int crush_get_bucket_item_weight(const struct crush_bucket *b, int p)
p                  30 net/ceph/crush/crush.c 	if ((__u32)p >= b->size)
p                  37 net/ceph/crush/crush.c 		return ((struct crush_bucket_list *)b)->item_weights[p];
p                  39 net/ceph/crush/crush.c 		return ((struct crush_bucket_tree *)b)->node_weights[crush_calc_tree_node(p)];
p                  41 net/ceph/crush/crush.c 		return ((struct crush_bucket_straw *)b)->item_weights[p];
p                  43 net/ceph/crush/crush.c 		return ((struct crush_bucket_straw2 *)b)->item_weights[p];
p                 110 net/ceph/crush/mapper.c 		unsigned int p = work->perm_n;
p                 112 net/ceph/crush/mapper.c 		if (p < bucket->size - 1) {
p                 113 net/ceph/crush/mapper.c 			i = crush_hash32_3(bucket->hash, x, bucket->id, p) %
p                 114 net/ceph/crush/mapper.c 				(bucket->size - p);
p                 116 net/ceph/crush/mapper.c 				unsigned int t = work->perm[p + i];
p                 117 net/ceph/crush/mapper.c 				work->perm[p + i] = work->perm[p];
p                 118 net/ceph/crush/mapper.c 				work->perm[p] = t;
p                 120 net/ceph/crush/mapper.c 			dprintk(" perm_choose swap %d with %d\n", p, p+i);
p                  77 net/ceph/crypto.c int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
p                  79 net/ceph/crypto.c 	if (*p + sizeof(u16) + sizeof(key->created) +
p                  82 net/ceph/crypto.c 	ceph_encode_16(p, key->type);
p                  83 net/ceph/crypto.c 	ceph_encode_copy(p, &key->created, sizeof(key->created));
p                  84 net/ceph/crypto.c 	ceph_encode_16(p, key->len);
p                  85 net/ceph/crypto.c 	ceph_encode_copy(p, key->key, key->len);
p                  89 net/ceph/crypto.c int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
p                  93 net/ceph/crypto.c 	ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
p                  94 net/ceph/crypto.c 	key->type = ceph_decode_16(p);
p                  95 net/ceph/crypto.c 	ceph_decode_copy(p, &key->created, sizeof(key->created));
p                  96 net/ceph/crypto.c 	key->len = ceph_decode_16(p);
p                  97 net/ceph/crypto.c 	ceph_decode_need(p, end, key->len, bad);
p                  98 net/ceph/crypto.c 	ret = set_secret(key, *p);
p                  99 net/ceph/crypto.c 	*p += key->len;
p                 111 net/ceph/crypto.c 	void *buf, *p;
p                 124 net/ceph/crypto.c 	p = buf;
p                 125 net/ceph/crypto.c 	ret = ceph_crypto_key_decode(key, &p, p + blen);
p                 302 net/ceph/crypto.c 	void *p;
p                 314 net/ceph/crypto.c 	p = (void *)prep->data;
p                 315 net/ceph/crypto.c 	ret = ceph_crypto_key_decode(ckey, &p, (char*)prep->data+datalen);
p                  21 net/ceph/crypto.h int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end);
p                  22 net/ceph/crypto.h int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end);
p                  34 net/ceph/debugfs.c static int monmap_show(struct seq_file *s, void *p)
p                  54 net/ceph/debugfs.c static int osdmap_show(struct seq_file *s, void *p)
p                 136 net/ceph/debugfs.c static int monc_show(struct seq_file *s, void *p)
p                 373 net/ceph/debugfs.c static int client_options_show(struct seq_file *s, void *p)
p                   6 net/ceph/decode.c ceph_decode_entity_addr_versioned(void **p, void *end,
p                  14 net/ceph/decode.c 	ret = ceph_start_decoding(p, end, 1, "entity_addr_t", &struct_v,
p                  20 net/ceph/decode.c 	struct_end = *p + struct_len;
p                  22 net/ceph/decode.c 	ceph_decode_copy_safe(p, end, &addr->type, sizeof(addr->type), bad);
p                  24 net/ceph/decode.c 	ceph_decode_copy_safe(p, end, &addr->nonce, sizeof(addr->nonce), bad);
p                  26 net/ceph/decode.c 	ceph_decode_32_safe(p, end, addr_len, bad);
p                  32 net/ceph/decode.c 		ceph_decode_copy_safe(p, end, &addr->in_addr, addr_len, bad);
p                  39 net/ceph/decode.c 	*p = struct_end;
p                  46 net/ceph/decode.c ceph_decode_entity_addr_legacy(void **p, void *end,
p                  52 net/ceph/decode.c 	ceph_decode_skip_n(p, end, 3, bad);
p                  59 net/ceph/decode.c 	ceph_decode_copy_safe(p, end, &addr->nonce, sizeof(addr->nonce), bad);
p                  61 net/ceph/decode.c 	ceph_decode_copy_safe(p, end, &addr->in_addr,
p                  71 net/ceph/decode.c ceph_decode_entity_addr(void **p, void *end, struct ceph_entity_addr *addr)
p                  75 net/ceph/decode.c 	ceph_decode_8_safe(p, end, marker, bad);
p                  77 net/ceph/decode.c 		return ceph_decode_entity_addr_versioned(p, end, addr);
p                  79 net/ceph/decode.c 		return ceph_decode_entity_addr_legacy(p, end, addr);
p                1829 net/ceph/messenger.c static void addr_set_port(struct ceph_entity_addr *addr, int p)
p                1833 net/ceph/messenger.c 		put_unaligned(htons(p), &((struct sockaddr_in *)&addr->in_addr)->sin_port);
p                1836 net/ceph/messenger.c 		put_unaligned(htons(p), &((struct sockaddr_in6 *)&addr->in_addr)->sin6_port);
p                1943 net/ceph/messenger.c 	const char *p = c;
p                1951 net/ceph/messenger.c 		if (*p == '[') {
p                1953 net/ceph/messenger.c 			p++;
p                1956 net/ceph/messenger.c 		ret = ceph_parse_server_name(p, end - p, &addr[i], delim, &ipend);
p                1961 net/ceph/messenger.c 		p = ipend;
p                1964 net/ceph/messenger.c 			if (*p != ']') {
p                1968 net/ceph/messenger.c 			p++;
p                1972 net/ceph/messenger.c 		if (p < end && *p == ':') {
p                1974 net/ceph/messenger.c 			p++;
p                1975 net/ceph/messenger.c 			while (p < end && *p >= '0' && *p <= '9') {
p                1976 net/ceph/messenger.c 				port = (port * 10) + (*p - '0');
p                1977 net/ceph/messenger.c 				p++;
p                1992 net/ceph/messenger.c 		if (p == end)
p                1994 net/ceph/messenger.c 		if (*p != ',')
p                1996 net/ceph/messenger.c 		p++;
p                1999 net/ceph/messenger.c 	if (p != end)
p                  42 net/ceph/mon_client.c static struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
p                  50 net/ceph/mon_client.c 	ceph_decode_32_safe(&p, end, len, bad);
p                  51 net/ceph/mon_client.c 	ceph_decode_need(&p, end, len, bad);
p                  53 net/ceph/mon_client.c 	dout("monmap_decode %p %p len %d (%d)\n", p, end, len, (int)(end-p));
p                  54 net/ceph/mon_client.c 	p += sizeof(u16);  /* skip version */
p                  56 net/ceph/mon_client.c 	ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
p                  57 net/ceph/mon_client.c 	ceph_decode_copy(&p, &fsid, sizeof(fsid));
p                  58 net/ceph/mon_client.c 	epoch = ceph_decode_32(&p);
p                  60 net/ceph/mon_client.c 	num_mon = ceph_decode_32(&p);
p                  74 net/ceph/mon_client.c 		ceph_decode_copy_safe(&p, end, &inst->name,
p                  76 net/ceph/mon_client.c 		err = ceph_decode_entity_addr(&p, end, &inst->addr);
p                 262 net/ceph/mon_client.c 	void *p = msg->front.iov_base;
p                 263 net/ceph/mon_client.c 	void *const end = p + msg->front_alloc_len;
p                 281 net/ceph/mon_client.c 	ceph_encode_32(&p, num);
p                 297 net/ceph/mon_client.c 		ceph_encode_string(&p, end, buf, len);
p                 298 net/ceph/mon_client.c 		memcpy(p, &monc->subs[i].item, sizeof(monc->subs[i].item));
p                 299 net/ceph/mon_client.c 		p += sizeof(monc->subs[i].item);
p                 302 net/ceph/mon_client.c 	BUG_ON(p > end);
p                 303 net/ceph/mon_client.c 	msg->front.iov_len = p - msg->front.iov_base;
p                 471 net/ceph/mon_client.c 	void *p, *end;
p                 476 net/ceph/mon_client.c 	p = msg->front.iov_base;
p                 477 net/ceph/mon_client.c 	end = p + msg->front.iov_len;
p                 479 net/ceph/mon_client.c 	monmap = ceph_monmap_decode(p, end);
p                 748 net/ceph/mon_client.c 	void *p = msg->front.iov_base;
p                 749 net/ceph/mon_client.c 	void *end = p + msg->front_alloc_len;
p                 754 net/ceph/mon_client.c 	ceph_decode_need(&p, end, 2*sizeof(u64), bad);
p                 755 net/ceph/mon_client.c 	handle = ceph_decode_64(&p);
p                 767 net/ceph/mon_client.c 	req->u.newest = ceph_decode_64(&p);
p                 806 net/ceph/mon_client.c 		void *p = req->request->front.iov_base;
p                 807 net/ceph/mon_client.c 		void *const end = p + req->request->front_alloc_len;
p                 809 net/ceph/mon_client.c 		ceph_encode_64(&p, req->tid); /* handle */
p                 810 net/ceph/mon_client.c 		ceph_encode_string(&p, end, what, strlen(what));
p                 811 net/ceph/mon_client.c 		WARN_ON(p != end);
p                 870 net/ceph/mon_client.c 	void *p = msg->front.iov_base;
p                 871 net/ceph/mon_client.c 	void *const end = p + msg->front_alloc_len;
p                 876 net/ceph/mon_client.c 	ceph_decode_need(&p, end, sizeof(struct ceph_mon_request_header) +
p                 878 net/ceph/mon_client.c 	p += sizeof(struct ceph_mon_request_header);
p                 887 net/ceph/mon_client.c 	req->result = ceph_decode_32(&p);
p                 958 net/ceph/mon_client.c 	struct rb_node *p;
p                 960 net/ceph/mon_client.c 	for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) {
p                 961 net/ceph/mon_client.c 		req = rb_entry(p, struct ceph_mon_generic_request, node);
p                1156 net/ceph/osd_client.c 	struct rb_node *n, *p;
p                1161 net/ceph/osd_client.c 		for (p = rb_first(&osd->o_requests); p; ) {
p                1163 net/ceph/osd_client.c 			    rb_entry(p, struct ceph_osd_request, r_node);
p                1165 net/ceph/osd_client.c 			p = rb_next(p);
p                1171 net/ceph/osd_client.c 	for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
p                1173 net/ceph/osd_client.c 		    rb_entry(p, struct ceph_osd_request, r_node);
p                1175 net/ceph/osd_client.c 		p = rb_next(p);
p                1728 net/ceph/osd_client.c static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
p                1734 net/ceph/osd_client.c 	ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
p                1744 net/ceph/osd_client.c 	hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
p                1752 net/ceph/osd_client.c 	hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
p                1760 net/ceph/osd_client.c 	ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
p                1761 net/ceph/osd_client.c 	ceph_decode_32_safe(p, end, hoid->hash, e_inval);
p                1762 net/ceph/osd_client.c 	ceph_decode_8_safe(p, end, hoid->is_max, e_inval);
p                1764 net/ceph/osd_client.c 	hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
p                1772 net/ceph/osd_client.c 	ceph_decode_64_safe(p, end, hoid->pool, e_inval);
p                1787 net/ceph/osd_client.c static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
p                1789 net/ceph/osd_client.c 	ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
p                1790 net/ceph/osd_client.c 	ceph_encode_string(p, end, hoid->key, hoid->key_len);
p                1791 net/ceph/osd_client.c 	ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
p                1792 net/ceph/osd_client.c 	ceph_encode_64(p, hoid->snapid);
p                1793 net/ceph/osd_client.c 	ceph_encode_32(p, hoid->hash);
p                1794 net/ceph/osd_client.c 	ceph_encode_8(p, hoid->is_max);
p                1795 net/ceph/osd_client.c 	ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
p                1796 net/ceph/osd_client.c 	ceph_encode_64(p, hoid->pool);
p                2012 net/ceph/osd_client.c static void encode_pgid(void **p, const struct ceph_pg *pgid)
p                2014 net/ceph/osd_client.c 	ceph_encode_8(p, 1);
p                2015 net/ceph/osd_client.c 	ceph_encode_64(p, pgid->pool);
p                2016 net/ceph/osd_client.c 	ceph_encode_32(p, pgid->seed);
p                2017 net/ceph/osd_client.c 	ceph_encode_32(p, -1); /* preferred */
p                2020 net/ceph/osd_client.c static void encode_spgid(void **p, const struct ceph_spg *spgid)
p                2022 net/ceph/osd_client.c 	ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
p                2023 net/ceph/osd_client.c 	encode_pgid(p, &spgid->pgid);
p                2024 net/ceph/osd_client.c 	ceph_encode_8(p, spgid->shard);
p                2027 net/ceph/osd_client.c static void encode_oloc(void **p, void *end,
p                2030 net/ceph/osd_client.c 	ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
p                2031 net/ceph/osd_client.c 	ceph_encode_64(p, oloc->pool);
p                2032 net/ceph/osd_client.c 	ceph_encode_32(p, -1); /* preferred */
p                2033 net/ceph/osd_client.c 	ceph_encode_32(p, 0);  /* key len */
p                2035 net/ceph/osd_client.c 		ceph_encode_string(p, end, oloc->pool_ns->str,
p                2038 net/ceph/osd_client.c 		ceph_encode_32(p, 0);
p                2044 net/ceph/osd_client.c 	void *p = msg->front.iov_base;
p                2045 net/ceph/osd_client.c 	void *const end = p + msg->front_alloc_len;
p                2059 net/ceph/osd_client.c 	encode_spgid(&p, &req->r_t.spgid); /* actual spg */
p                2060 net/ceph/osd_client.c 	ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
p                2061 net/ceph/osd_client.c 	ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
p                2062 net/ceph/osd_client.c 	ceph_encode_32(&p, req->r_flags);
p                2065 net/ceph/osd_client.c 	ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
p                2066 net/ceph/osd_client.c 	memset(p, 0, sizeof(struct ceph_osd_reqid));
p                2067 net/ceph/osd_client.c 	p += sizeof(struct ceph_osd_reqid);
p                2070 net/ceph/osd_client.c 	memset(p, 0, sizeof(struct ceph_blkin_trace_info));
p                2071 net/ceph/osd_client.c 	p += sizeof(struct ceph_blkin_trace_info);
p                2073 net/ceph/osd_client.c 	ceph_encode_32(&p, 0); /* client_inc, always 0 */
p                2074 net/ceph/osd_client.c 	ceph_encode_timespec64(p, &req->r_mtime);
p                2075 net/ceph/osd_client.c 	p += sizeof(struct ceph_timespec);
p                2077 net/ceph/osd_client.c 	encode_oloc(&p, end, &req->r_t.target_oloc);
p                2078 net/ceph/osd_client.c 	ceph_encode_string(&p, end, req->r_t.target_oid.name,
p                2082 net/ceph/osd_client.c 	ceph_encode_16(&p, req->r_num_ops);
p                2084 net/ceph/osd_client.c 		data_len += osd_req_encode_op(p, &req->r_ops[i]);
p                2085 net/ceph/osd_client.c 		p += sizeof(struct ceph_osd_op);
p                2088 net/ceph/osd_client.c 	ceph_encode_64(&p, req->r_snapid); /* snapid */
p                2090 net/ceph/osd_client.c 		ceph_encode_64(&p, req->r_snapc->seq);
p                2091 net/ceph/osd_client.c 		ceph_encode_32(&p, req->r_snapc->num_snaps);
p                2093 net/ceph/osd_client.c 			ceph_encode_64(&p, req->r_snapc->snaps[i]);
p                2095 net/ceph/osd_client.c 		ceph_encode_64(&p, 0); /* snap_seq */
p                2096 net/ceph/osd_client.c 		ceph_encode_32(&p, 0); /* snaps len */
p                2099 net/ceph/osd_client.c 	ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
p                2100 net/ceph/osd_client.c 	BUG_ON(p > end - 8); /* space for features */
p                2104 net/ceph/osd_client.c 	msg->front.iov_len = p - msg->front.iov_base;
p                2120 net/ceph/osd_client.c 	void *p = msg->front.iov_base;
p                2121 net/ceph/osd_client.c 	void *const partial_end = p + msg->front.iov_len;
p                2122 net/ceph/osd_client.c 	void *const end = p + msg->front_alloc_len;
p                2126 net/ceph/osd_client.c 		p = partial_end;
p                2127 net/ceph/osd_client.c 		ceph_encode_64(&p, msg->con->peer_features);
p                2152 net/ceph/osd_client.c 		memcpy(&head, p, sizeof(head));
p                2153 net/ceph/osd_client.c 		p += sizeof(head);
p                2155 net/ceph/osd_client.c 		oloc = p;
p                2156 net/ceph/osd_client.c 		p += CEPH_ENCODING_START_BLK_LEN;
p                2157 net/ceph/osd_client.c 		pgid.pool = ceph_decode_64(&p);
p                2158 net/ceph/osd_client.c 		p += 4 + 4; /* preferred, key len */
p                2159 net/ceph/osd_client.c 		len = ceph_decode_32(&p);
p                2160 net/ceph/osd_client.c 		p += len;   /* nspace */
p                2161 net/ceph/osd_client.c 		oloc_len = p - oloc;
p                2163 net/ceph/osd_client.c 		oid = p;
p                2164 net/ceph/osd_client.c 		len = ceph_decode_32(&p);
p                2165 net/ceph/osd_client.c 		p += len;
p                2166 net/ceph/osd_client.c 		oid_len = p - oid;
p                2168 net/ceph/osd_client.c 		tail = p;
p                2169 net/ceph/osd_client.c 		tail_len = partial_end - p;
p                2171 net/ceph/osd_client.c 		p = msg->front.iov_base;
p                2172 net/ceph/osd_client.c 		ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
p                2173 net/ceph/osd_client.c 		ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
p                2174 net/ceph/osd_client.c 		ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
p                2175 net/ceph/osd_client.c 		ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));
p                2178 net/ceph/osd_client.c 		memset(p, 0, sizeof(struct ceph_eversion));
p                2179 net/ceph/osd_client.c 		p += sizeof(struct ceph_eversion);
p                2181 net/ceph/osd_client.c 		BUG_ON(p >= oloc);
p                2182 net/ceph/osd_client.c 		memmove(p, oloc, oloc_len);
p                2183 net/ceph/osd_client.c 		p += oloc_len;
p                2186 net/ceph/osd_client.c 		encode_pgid(&p, &pgid); /* raw pg */
p                2188 net/ceph/osd_client.c 		BUG_ON(p >= oid);
p                2189 net/ceph/osd_client.c 		memmove(p, oid, oid_len);
p                2190 net/ceph/osd_client.c 		p += oid_len;
p                2193 net/ceph/osd_client.c 		BUG_ON(p >= tail);
p                2194 net/ceph/osd_client.c 		memmove(p, tail, tail_len);
p                2195 net/ceph/osd_client.c 		p += tail_len;
p                2200 net/ceph/osd_client.c 	BUG_ON(p > end);
p                2201 net/ceph/osd_client.c 	msg->front.iov_len = p - msg->front.iov_base;
p                2947 net/ceph/osd_client.c 		void *p = page_address(osd_data->pages[0]);
p                2954 net/ceph/osd_client.c 			lreq->notify_id = ceph_decode_64(&p);
p                3278 net/ceph/osd_client.c 	struct rb_node *n, *p;
p                3292 net/ceph/osd_client.c 		for (p = rb_first(&osd->o_requests); p; ) {
p                3294 net/ceph/osd_client.c 			    rb_entry(p, struct ceph_osd_request, r_node);
p                3296 net/ceph/osd_client.c 			p = rb_next(p); /* abort_request() */
p                3310 net/ceph/osd_client.c 		for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
p                3312 net/ceph/osd_client.c 			    rb_entry(p, struct ceph_osd_linger_request, node);
p                3329 net/ceph/osd_client.c 		for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
p                3331 net/ceph/osd_client.c 			    rb_entry(p, struct ceph_osd_request, r_node);
p                3333 net/ceph/osd_client.c 			p = rb_next(p); /* abort_request() */
p                3383 net/ceph/osd_client.c static int ceph_oloc_decode(void **p, void *end,
p                3391 net/ceph/osd_client.c 	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
p                3392 net/ceph/osd_client.c 	struct_v = ceph_decode_8(p);
p                3393 net/ceph/osd_client.c 	struct_cv = ceph_decode_8(p);
p                3404 net/ceph/osd_client.c 	len = ceph_decode_32(p);
p                3405 net/ceph/osd_client.c 	ceph_decode_need(p, end, len, e_inval);
p                3406 net/ceph/osd_client.c 	struct_end = *p + len;
p                3408 net/ceph/osd_client.c 	oloc->pool = ceph_decode_64(p);
p                3409 net/ceph/osd_client.c 	*p += 4; /* skip preferred */
p                3411 net/ceph/osd_client.c 	len = ceph_decode_32(p);
p                3420 net/ceph/osd_client.c 		len = ceph_decode_32(p);
p                3422 net/ceph/osd_client.c 			ceph_decode_need(p, end, len, e_inval);
p                3424 net/ceph/osd_client.c 			    ceph_compare_string(oloc->pool_ns, *p, len))
p                3426 net/ceph/osd_client.c 			*p += len;
p                3439 net/ceph/osd_client.c 		s64 hash = ceph_decode_64(p);
p                3447 net/ceph/osd_client.c 	*p = struct_end;
p                3456 net/ceph/osd_client.c static int ceph_redirect_decode(void **p, void *end,
p                3464 net/ceph/osd_client.c 	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
p                3465 net/ceph/osd_client.c 	struct_v = ceph_decode_8(p);
p                3466 net/ceph/osd_client.c 	struct_cv = ceph_decode_8(p);
p                3472 net/ceph/osd_client.c 	len = ceph_decode_32(p);
p                3473 net/ceph/osd_client.c 	ceph_decode_need(p, end, len, e_inval);
p                3474 net/ceph/osd_client.c 	struct_end = *p + len;
p                3476 net/ceph/osd_client.c 	ret = ceph_oloc_decode(p, end, &redir->oloc);
p                3480 net/ceph/osd_client.c 	len = ceph_decode_32(p);
p                3486 net/ceph/osd_client.c 	len = ceph_decode_32(p);
p                3487 net/ceph/osd_client.c 	*p += len; /* skip osd_instructions */
p                3490 net/ceph/osd_client.c 	*p = struct_end;
p                3515 net/ceph/osd_client.c 	void *p = msg->front.iov_base;
p                3516 net/ceph/osd_client.c 	void *const end = p + msg->front.iov_len;
p                3524 net/ceph/osd_client.c 	ceph_decode_32_safe(&p, end, len, e_inval);
p                3525 net/ceph/osd_client.c 	ceph_decode_need(&p, end, len, e_inval);
p                3526 net/ceph/osd_client.c 	p += len; /* skip oid */
p                3528 net/ceph/osd_client.c 	ret = ceph_decode_pgid(&p, end, &m->pgid);
p                3532 net/ceph/osd_client.c 	ceph_decode_64_safe(&p, end, m->flags, e_inval);
p                3533 net/ceph/osd_client.c 	ceph_decode_32_safe(&p, end, m->result, e_inval);
p                3534 net/ceph/osd_client.c 	ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
p                3535 net/ceph/osd_client.c 	memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
p                3536 net/ceph/osd_client.c 	p += sizeof(bad_replay_version);
p                3537 net/ceph/osd_client.c 	ceph_decode_32_safe(&p, end, m->epoch, e_inval);
p                3539 net/ceph/osd_client.c 	ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
p                3543 net/ceph/osd_client.c 	ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
p                3546 net/ceph/osd_client.c 		struct ceph_osd_op *op = p;
p                3549 net/ceph/osd_client.c 		p += sizeof(*op);
p                3552 net/ceph/osd_client.c 	ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
p                3554 net/ceph/osd_client.c 		ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
p                3557 net/ceph/osd_client.c 		ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
p                3558 net/ceph/osd_client.c 		memcpy(&m->replay_version, p, sizeof(m->replay_version));
p                3559 net/ceph/osd_client.c 		p += sizeof(m->replay_version);
p                3560 net/ceph/osd_client.c 		ceph_decode_64_safe(&p, end, m->user_version, e_inval);
p                3568 net/ceph/osd_client.c 			ceph_decode_8_safe(&p, end, decode_redir, e_inval);
p                3576 net/ceph/osd_client.c 		ret = ceph_redirect_decode(&p, end, &m->redirect);
p                3830 net/ceph/osd_client.c 			  void *p, void *end, bool incremental,
p                3843 net/ceph/osd_client.c 		newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
p                3845 net/ceph/osd_client.c 		newmap = ceph_osdmap_decode(&p, end);
p                3957 net/ceph/osd_client.c 	void *p = msg->front.iov_base;
p                3958 net/ceph/osd_client.c 	void *const end = p + msg->front.iov_len;
p                3973 net/ceph/osd_client.c 	ceph_decode_need(&p, end, sizeof(fsid), bad);
p                3974 net/ceph/osd_client.c 	ceph_decode_copy(&p, &fsid, sizeof(fsid));
p                3984 net/ceph/osd_client.c 	ceph_decode_32_safe(&p, end, nr_maps, bad);
p                3987 net/ceph/osd_client.c 		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
p                3988 net/ceph/osd_client.c 		epoch = ceph_decode_32(&p);
p                3989 net/ceph/osd_client.c 		maplen = ceph_decode_32(&p);
p                3990 net/ceph/osd_client.c 		ceph_decode_need(&p, end, maplen, bad);
p                3995 net/ceph/osd_client.c 			err = handle_one_map(osdc, p, p + maplen, true,
p                4004 net/ceph/osd_client.c 		p += maplen;
p                4011 net/ceph/osd_client.c 	ceph_decode_32_safe(&p, end, nr_maps, bad);
p                4014 net/ceph/osd_client.c 		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
p                4015 net/ceph/osd_client.c 		epoch = ceph_decode_32(&p);
p                4016 net/ceph/osd_client.c 		maplen = ceph_decode_32(&p);
p                4017 net/ceph/osd_client.c 		ceph_decode_need(&p, end, maplen, bad);
p                4027 net/ceph/osd_client.c 			err = handle_one_map(osdc, p, p + maplen, false,
p                4032 net/ceph/osd_client.c 		p += maplen;
p                4130 net/ceph/osd_client.c 	void *p = msg->front.iov_base;
p                4131 net/ceph/osd_client.c 	void *const end = p + msg->front.iov_len;
p                4136 net/ceph/osd_client.c 	ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
p                4140 net/ceph/osd_client.c 	ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
p                4144 net/ceph/osd_client.c 	ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
p                4145 net/ceph/osd_client.c 	ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
p                4146 net/ceph/osd_client.c 	ceph_decode_8_safe(&p, end, m->op, e_inval);
p                4147 net/ceph/osd_client.c 	ceph_decode_64_safe(&p, end, m->id, e_inval);
p                4153 net/ceph/osd_client.c 	ret = decode_hoid(&p, end, m->begin);
p                4165 net/ceph/osd_client.c 	ret = decode_hoid(&p, end, m->end);
p                4183 net/ceph/osd_client.c 	void *p, *end;
p                4198 net/ceph/osd_client.c 	p = msg->front.iov_base;
p                4199 net/ceph/osd_client.c 	end = p + msg->front_alloc_len;
p                4201 net/ceph/osd_client.c 	encode_spgid(&p, &backoff->spgid);
p                4202 net/ceph/osd_client.c 	ceph_encode_32(&p, map_epoch);
p                4203 net/ceph/osd_client.c 	ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
p                4204 net/ceph/osd_client.c 	ceph_encode_64(&p, backoff->id);
p                4205 net/ceph/osd_client.c 	encode_hoid(&p, end, backoff->begin);
p                4206 net/ceph/osd_client.c 	encode_hoid(&p, end, backoff->end);
p                4207 net/ceph/osd_client.c 	BUG_ON(p != end);
p                4209 net/ceph/osd_client.c 	msg->front.iov_len = p - msg->front.iov_base;
p                4380 net/ceph/osd_client.c 	void *p = msg->front.iov_base;
p                4381 net/ceph/osd_client.c 	void *const end = p + msg->front.iov_len;
p                4391 net/ceph/osd_client.c 	ceph_decode_8_safe(&p, end, proto_ver, bad);
p                4392 net/ceph/osd_client.c 	ceph_decode_8_safe(&p, end, opcode, bad);
p                4393 net/ceph/osd_client.c 	ceph_decode_64_safe(&p, end, cookie, bad);
p                4394 net/ceph/osd_client.c 	p += 8; /* skip ver */
p                4395 net/ceph/osd_client.c 	ceph_decode_64_safe(&p, end, notify_id, bad);
p                4398 net/ceph/osd_client.c 		ceph_decode_32_safe(&p, end, payload_len, bad);
p                4399 net/ceph/osd_client.c 		ceph_decode_need(&p, end, payload_len, bad);
p                4400 net/ceph/osd_client.c 		payload = p;
p                4401 net/ceph/osd_client.c 		p += payload_len;
p                4405 net/ceph/osd_client.c 		ceph_decode_32_safe(&p, end, return_code, bad);
p                4408 net/ceph/osd_client.c 		ceph_decode_64_safe(&p, end, notifier_id, bad);
p                4539 net/ceph/osd_client.c 	struct rb_node *n, *p;
p                4548 net/ceph/osd_client.c 		for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
p                4550 net/ceph/osd_client.c 			    rb_entry(p, struct ceph_osd_request, r_node);
p                4922 net/ceph/osd_client.c static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
p                4928 net/ceph/osd_client.c 	ret = ceph_start_decoding(p, end, 2, "watch_item_t",
p                4934 net/ceph/osd_client.c 	ceph_decode_copy_safe(p, end, &item->name, sizeof(item->name), bad);
p                4935 net/ceph/osd_client.c 	ceph_decode_64_safe(p, end, item->cookie, bad);
p                4936 net/ceph/osd_client.c 	ceph_decode_skip_32(p, end, bad); /* skip timeout seconds */
p                4939 net/ceph/osd_client.c 		ret = ceph_decode_entity_addr(p, end, &item->addr);
p                4953 net/ceph/osd_client.c static int decode_watchers(void **p, void *end,
p                4962 net/ceph/osd_client.c 	ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
p                4967 net/ceph/osd_client.c 	*num_watchers = ceph_decode_32(p);
p                4973 net/ceph/osd_client.c 		ret = decode_watcher(p, end, *watchers + i);
p                5024 net/ceph/osd_client.c 		void *p = page_address(pages[0]);
p                5025 net/ceph/osd_client.c 		void *const end = p + req->r_ops[0].outdata_len;
p                5027 net/ceph/osd_client.c 		ret = decode_watchers(&p, end, watchers, num_watchers);
p                5322 net/ceph/osd_client.c 	void *p, *end;
p                5334 net/ceph/osd_client.c 	p = page_address(pages[0]);
p                5335 net/ceph/osd_client.c 	end = p + PAGE_SIZE;
p                5336 net/ceph/osd_client.c 	ceph_encode_string(&p, end, src_oid->name, src_oid->name_len);
p                5337 net/ceph/osd_client.c 	encode_oloc(&p, end, src_oloc);
p                5338 net/ceph/osd_client.c 	op->indata_len = PAGE_SIZE - (end - p);
p                  55 net/ceph/osdmap.c static int crush_decode_uniform_bucket(void **p, void *end,
p                  58 net/ceph/osdmap.c 	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
p                  59 net/ceph/osdmap.c 	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
p                  60 net/ceph/osdmap.c 	b->item_weight = ceph_decode_32(p);
p                  66 net/ceph/osdmap.c static int crush_decode_list_bucket(void **p, void *end,
p                  70 net/ceph/osdmap.c 	dout("crush_decode_list_bucket %p to %p\n", *p, end);
p                  77 net/ceph/osdmap.c 	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
p                  79 net/ceph/osdmap.c 		b->item_weights[j] = ceph_decode_32(p);
p                  80 net/ceph/osdmap.c 		b->sum_weights[j] = ceph_decode_32(p);
p                  87 net/ceph/osdmap.c static int crush_decode_tree_bucket(void **p, void *end,
p                  91 net/ceph/osdmap.c 	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
p                  92 net/ceph/osdmap.c 	ceph_decode_8_safe(p, end, b->num_nodes, bad);
p                  96 net/ceph/osdmap.c 	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
p                  98 net/ceph/osdmap.c 		b->node_weights[j] = ceph_decode_32(p);
p                 104 net/ceph/osdmap.c static int crush_decode_straw_bucket(void **p, void *end,
p                 108 net/ceph/osdmap.c 	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
p                 115 net/ceph/osdmap.c 	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
p                 117 net/ceph/osdmap.c 		b->item_weights[j] = ceph_decode_32(p);
p                 118 net/ceph/osdmap.c 		b->straws[j] = ceph_decode_32(p);
p                 125 net/ceph/osdmap.c static int crush_decode_straw2_bucket(void **p, void *end,
p                 129 net/ceph/osdmap.c 	dout("crush_decode_straw2_bucket %p to %p\n", *p, end);
p                 133 net/ceph/osdmap.c 	ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
p                 135 net/ceph/osdmap.c 		b->item_weights[j] = ceph_decode_32(p);
p                 188 net/ceph/osdmap.c static u32 *decode_array_32_alloc(void **p, void *end, u32 *plen)
p                 194 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, len, e_inval);
p                 204 net/ceph/osdmap.c 		ceph_decode_need(p, end, len * sizeof(u32), e_inval);
p                 206 net/ceph/osdmap.c 			a[i] = ceph_decode_32(p);
p                 222 net/ceph/osdmap.c static int decode_choose_arg(void **p, void *end, struct crush_choose_arg *arg)
p                 226 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, arg->weight_set_size, e_inval);
p                 239 net/ceph/osdmap.c 			w->weights = decode_array_32_alloc(p, end, &w->size);
p                 248 net/ceph/osdmap.c 	arg->ids = decode_array_32_alloc(p, end, &arg->ids_size);
p                 261 net/ceph/osdmap.c static int decode_choose_args(void **p, void *end, struct crush_map *c)
p                 267 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, num_choose_arg_maps, e_inval);
p                 275 net/ceph/osdmap.c 		ceph_decode_64_safe(p, end, arg_map->choose_args_index,
p                 285 net/ceph/osdmap.c 		ceph_decode_32_safe(p, end, num_buckets, e_inval);
p                 290 net/ceph/osdmap.c 			ceph_decode_32_safe(p, end, bucket_index, e_inval);
p                 295 net/ceph/osdmap.c 			ret = decode_choose_arg(p, end, arg);
p                 347 net/ceph/osdmap.c 	void **p = &pbyval;
p                 351 net/ceph/osdmap.c 	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));
p                 365 net/ceph/osdmap.c 	ceph_decode_need(p, end, 4*sizeof(u32), bad);
p                 366 net/ceph/osdmap.c 	magic = ceph_decode_32(p);
p                 372 net/ceph/osdmap.c 	c->max_buckets = ceph_decode_32(p);
p                 373 net/ceph/osdmap.c 	c->max_rules = ceph_decode_32(p);
p                 374 net/ceph/osdmap.c 	c->max_devices = ceph_decode_32(p);
p                 389 net/ceph/osdmap.c 		ceph_decode_32_safe(p, end, alg, bad);
p                 395 net/ceph/osdmap.c 		     i, (int)(*p-start), *p, end);
p                 421 net/ceph/osdmap.c 		ceph_decode_need(p, end, 4*sizeof(u32), bad);
p                 422 net/ceph/osdmap.c 		b->id = ceph_decode_32(p);
p                 423 net/ceph/osdmap.c 		b->type = ceph_decode_16(p);
p                 424 net/ceph/osdmap.c 		b->alg = ceph_decode_8(p);
p                 425 net/ceph/osdmap.c 		b->hash = ceph_decode_8(p);
p                 426 net/ceph/osdmap.c 		b->weight = ceph_decode_32(p);
p                 427 net/ceph/osdmap.c 		b->size = ceph_decode_32(p);
p                 430 net/ceph/osdmap.c 		     b->size, (int)(*p-start), *p, end);
p                 436 net/ceph/osdmap.c 		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
p                 438 net/ceph/osdmap.c 			b->items[j] = ceph_decode_32(p);
p                 442 net/ceph/osdmap.c 			err = crush_decode_uniform_bucket(p, end,
p                 448 net/ceph/osdmap.c 			err = crush_decode_list_bucket(p, end,
p                 454 net/ceph/osdmap.c 			err = crush_decode_tree_bucket(p, end,
p                 460 net/ceph/osdmap.c 			err = crush_decode_straw_bucket(p, end,
p                 466 net/ceph/osdmap.c 			err = crush_decode_straw2_bucket(p, end,
p                 480 net/ceph/osdmap.c 		ceph_decode_32_safe(p, end, yes, bad);
p                 483 net/ceph/osdmap.c 			     i, (int)(*p-start), *p, end);
p                 489 net/ceph/osdmap.c 		     i, (int)(*p-start), *p, end);
p                 492 net/ceph/osdmap.c 		ceph_decode_32_safe(p, end, yes, bad);
p                 504 net/ceph/osdmap.c 		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
p                 505 net/ceph/osdmap.c 		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
p                 507 net/ceph/osdmap.c 			r->steps[j].op = ceph_decode_32(p);
p                 508 net/ceph/osdmap.c 			r->steps[j].arg1 = ceph_decode_32(p);
p                 509 net/ceph/osdmap.c 			r->steps[j].arg2 = ceph_decode_32(p);
p                 513 net/ceph/osdmap.c 	ceph_decode_skip_map(p, end, 32, string, bad); /* type_map */
p                 514 net/ceph/osdmap.c 	ceph_decode_skip_map(p, end, 32, string, bad); /* name_map */
p                 515 net/ceph/osdmap.c 	ceph_decode_skip_map(p, end, 32, string, bad); /* rule_name_map */
p                 518 net/ceph/osdmap.c         ceph_decode_need(p, end, 3*sizeof(u32), done);
p                 519 net/ceph/osdmap.c         c->choose_local_tries = ceph_decode_32(p);
p                 520 net/ceph/osdmap.c         c->choose_local_fallback_tries =  ceph_decode_32(p);
p                 521 net/ceph/osdmap.c         c->choose_total_tries = ceph_decode_32(p);
p                 529 net/ceph/osdmap.c 	ceph_decode_need(p, end, sizeof(u32), done);
p                 530 net/ceph/osdmap.c 	c->chooseleaf_descend_once = ceph_decode_32(p);
p                 534 net/ceph/osdmap.c 	ceph_decode_need(p, end, sizeof(u8), done);
p                 535 net/ceph/osdmap.c 	c->chooseleaf_vary_r = ceph_decode_8(p);
p                 540 net/ceph/osdmap.c 	ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done);
p                 541 net/ceph/osdmap.c 	*p += sizeof(u8) + sizeof(u32);
p                 543 net/ceph/osdmap.c 	ceph_decode_need(p, end, sizeof(u8), done);
p                 544 net/ceph/osdmap.c 	c->chooseleaf_stable = ceph_decode_8(p);
p                 548 net/ceph/osdmap.c 	if (*p != end) {
p                 550 net/ceph/osdmap.c 		ceph_decode_skip_map(p, end, 32, 32, bad);
p                 552 net/ceph/osdmap.c 		ceph_decode_skip_map(p, end, 32, string, bad);
p                 554 net/ceph/osdmap.c 		ceph_decode_skip_map_of_map(p, end, 32, 32, 32, bad);
p                 557 net/ceph/osdmap.c 	if (*p != end) {
p                 558 net/ceph/osdmap.c 		err = decode_choose_args(p, end, c);
p                 641 net/ceph/osdmap.c 	struct rb_node **p = &root->rb_node;
p                 645 net/ceph/osdmap.c 	while (*p) {
p                 646 net/ceph/osdmap.c 		parent = *p;
p                 649 net/ceph/osdmap.c 			p = &(*p)->rb_left;
p                 651 net/ceph/osdmap.c 			p = &(*p)->rb_right;
p                 656 net/ceph/osdmap.c 	rb_link_node(&new->node, parent, p);
p                 729 net/ceph/osdmap.c static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
p                 735 net/ceph/osdmap.c 	ceph_decode_need(p, end, 2 + 4, bad);
p                 736 net/ceph/osdmap.c 	ev = ceph_decode_8(p);  /* encoding version */
p                 737 net/ceph/osdmap.c 	cv = ceph_decode_8(p); /* compat version */
p                 746 net/ceph/osdmap.c 	len = ceph_decode_32(p);
p                 747 net/ceph/osdmap.c 	ceph_decode_need(p, end, len, bad);
p                 748 net/ceph/osdmap.c 	pool_end = *p + len;
p                 750 net/ceph/osdmap.c 	pi->type = ceph_decode_8(p);
p                 751 net/ceph/osdmap.c 	pi->size = ceph_decode_8(p);
p                 752 net/ceph/osdmap.c 	pi->crush_ruleset = ceph_decode_8(p);
p                 753 net/ceph/osdmap.c 	pi->object_hash = ceph_decode_8(p);
p                 755 net/ceph/osdmap.c 	pi->pg_num = ceph_decode_32(p);
p                 756 net/ceph/osdmap.c 	pi->pgp_num = ceph_decode_32(p);
p                 758 net/ceph/osdmap.c 	*p += 4 + 4;  /* skip lpg* */
p                 759 net/ceph/osdmap.c 	*p += 4;      /* skip last_change */
p                 760 net/ceph/osdmap.c 	*p += 8 + 4;  /* skip snap_seq, snap_epoch */
p                 763 net/ceph/osdmap.c 	num = ceph_decode_32(p);
p                 765 net/ceph/osdmap.c 		*p += 8;  /* snapid key */
p                 766 net/ceph/osdmap.c 		*p += 1 + 1; /* versions */
p                 767 net/ceph/osdmap.c 		len = ceph_decode_32(p);
p                 768 net/ceph/osdmap.c 		*p += len;
p                 772 net/ceph/osdmap.c 	num = ceph_decode_32(p);
p                 773 net/ceph/osdmap.c 	*p += num * (8 + 8);
p                 775 net/ceph/osdmap.c 	*p += 8;  /* skip auid */
p                 776 net/ceph/osdmap.c 	pi->flags = ceph_decode_64(p);
p                 777 net/ceph/osdmap.c 	*p += 4;  /* skip crash_replay_interval */
p                 780 net/ceph/osdmap.c 		pi->min_size = ceph_decode_8(p);
p                 785 net/ceph/osdmap.c 		*p += 8 + 8;  /* skip quota_max_* */
p                 789 net/ceph/osdmap.c 		num = ceph_decode_32(p);
p                 790 net/ceph/osdmap.c 		*p += num * 8;
p                 792 net/ceph/osdmap.c 		*p += 8;  /* skip tier_of */
p                 793 net/ceph/osdmap.c 		*p += 1;  /* skip cache_mode */
p                 795 net/ceph/osdmap.c 		pi->read_tier = ceph_decode_64(p);
p                 796 net/ceph/osdmap.c 		pi->write_tier = ceph_decode_64(p);
p                 804 net/ceph/osdmap.c 		num = ceph_decode_32(p);
p                 806 net/ceph/osdmap.c 			len = ceph_decode_32(p);
p                 807 net/ceph/osdmap.c 			*p += len; /* key */
p                 808 net/ceph/osdmap.c 			len = ceph_decode_32(p);
p                 809 net/ceph/osdmap.c 			*p += len; /* val */
p                 815 net/ceph/osdmap.c 		*p += 1 + 1; /* versions */
p                 816 net/ceph/osdmap.c 		len = ceph_decode_32(p);
p                 817 net/ceph/osdmap.c 		*p += len;
p                 819 net/ceph/osdmap.c 		*p += 4; /* skip hit_set_period */
p                 820 net/ceph/osdmap.c 		*p += 4; /* skip hit_set_count */
p                 824 net/ceph/osdmap.c 		*p += 4; /* skip stripe_width */
p                 827 net/ceph/osdmap.c 		*p += 8; /* skip target_max_bytes */
p                 828 net/ceph/osdmap.c 		*p += 8; /* skip target_max_objects */
p                 829 net/ceph/osdmap.c 		*p += 4; /* skip cache_target_dirty_ratio_micro */
p                 830 net/ceph/osdmap.c 		*p += 4; /* skip cache_target_full_ratio_micro */
p                 831 net/ceph/osdmap.c 		*p += 4; /* skip cache_min_flush_age */
p                 832 net/ceph/osdmap.c 		*p += 4; /* skip cache_min_evict_age */
p                 837 net/ceph/osdmap.c 		len = ceph_decode_32(p);
p                 838 net/ceph/osdmap.c 		*p += len;
p                 846 net/ceph/osdmap.c 		pi->last_force_request_resend = ceph_decode_32(p);
p                 851 net/ceph/osdmap.c 		*p += 4; /* skip min_read_recency_for_promote */
p                 854 net/ceph/osdmap.c 		*p += 8; /* skip expected_num_objects */
p                 857 net/ceph/osdmap.c 		*p += 4; /* skip cache_target_dirty_high_ratio_micro */
p                 860 net/ceph/osdmap.c 		*p += 4; /* skip min_write_recency_for_promote */
p                 863 net/ceph/osdmap.c 		*p += 1; /* skip use_gmt_hitset */
p                 866 net/ceph/osdmap.c 		*p += 1; /* skip fast_read */
p                 869 net/ceph/osdmap.c 		*p += 4; /* skip hit_set_grade_decay_rate */
p                 870 net/ceph/osdmap.c 		*p += 4; /* skip hit_set_search_last_n */
p                 875 net/ceph/osdmap.c 		*p += 1 + 1; /* versions */
p                 876 net/ceph/osdmap.c 		len = ceph_decode_32(p);
p                 877 net/ceph/osdmap.c 		*p += len;
p                 881 net/ceph/osdmap.c 		pi->last_force_request_resend = ceph_decode_32(p);
p                 885 net/ceph/osdmap.c 	*p = pool_end;
p                 893 net/ceph/osdmap.c static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
p                 899 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, num, bad);
p                 902 net/ceph/osdmap.c 		ceph_decode_64_safe(p, end, pool, bad);
p                 903 net/ceph/osdmap.c 		ceph_decode_32_safe(p, end, len, bad);
p                 905 net/ceph/osdmap.c 		ceph_decode_need(p, end, len, bad);
p                 908 net/ceph/osdmap.c 			char *name = kstrndup(*p, len, GFP_NOFS);
p                 916 net/ceph/osdmap.c 		*p += len;
p                1095 net/ceph/osdmap.c static int get_osdmap_client_data_v(void **p, void *end,
p                1100 net/ceph/osdmap.c 	ceph_decode_8_safe(p, end, struct_v, e_inval);
p                1104 net/ceph/osdmap.c 		ceph_decode_8_safe(p, end, struct_compat, e_inval);
p                1111 net/ceph/osdmap.c 		*p += 4; /* ignore wrapper struct_len */
p                1113 net/ceph/osdmap.c 		ceph_decode_8_safe(p, end, struct_v, e_inval);
p                1114 net/ceph/osdmap.c 		ceph_decode_8_safe(p, end, struct_compat, e_inval);
p                1121 net/ceph/osdmap.c 		*p += 4; /* ignore client data struct_len */
p                1125 net/ceph/osdmap.c 		*p -= 1;
p                1126 net/ceph/osdmap.c 		ceph_decode_16_safe(p, end, version, e_inval);
p                1144 net/ceph/osdmap.c static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
p                1149 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, n, e_inval);
p                1155 net/ceph/osdmap.c 		ceph_decode_64_safe(p, end, pool, e_inval);
p                1172 net/ceph/osdmap.c 		ret = decode_pool(p, end, pi);
p                1183 net/ceph/osdmap.c static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
p                1185 net/ceph/osdmap.c 	return __decode_pools(p, end, map, false);
p                1188 net/ceph/osdmap.c static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
p                1190 net/ceph/osdmap.c 	return __decode_pools(p, end, map, true);
p                1195 net/ceph/osdmap.c static int decode_pg_mapping(void **p, void *end, struct rb_root *mapping_root,
p                1202 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, n, e_inval);
p                1208 net/ceph/osdmap.c 		ret = ceph_decode_pgid(p, end, &pgid);
p                1220 net/ceph/osdmap.c 			pg = fn(p, end, incremental);
p                1237 net/ceph/osdmap.c static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end,
p                1243 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, len, e_inval);
p                1249 net/ceph/osdmap.c 	ceph_decode_need(p, end, len * sizeof(u32), e_inval);
p                1256 net/ceph/osdmap.c 		pg->pg_temp.osds[i] = ceph_decode_32(p);
p                1264 net/ceph/osdmap.c static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
p                1266 net/ceph/osdmap.c 	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
p                1270 net/ceph/osdmap.c static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
p                1272 net/ceph/osdmap.c 	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
p                1276 net/ceph/osdmap.c static struct ceph_pg_mapping *__decode_primary_temp(void **p, void *end,
p                1282 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, osd, e_inval);
p                1297 net/ceph/osdmap.c static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
p                1299 net/ceph/osdmap.c 	return decode_pg_mapping(p, end, &map->primary_temp,
p                1303 net/ceph/osdmap.c static int decode_new_primary_temp(void **p, void *end,
p                1306 net/ceph/osdmap.c 	return decode_pg_mapping(p, end, &map->primary_temp,
p                1343 net/ceph/osdmap.c static int decode_primary_affinity(void **p, void *end,
p                1348 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, len, e_inval);
p                1357 net/ceph/osdmap.c 	ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);
p                1362 net/ceph/osdmap.c 		ret = set_primary_affinity(map, i, ceph_decode_32(p));
p                1373 net/ceph/osdmap.c static int decode_new_primary_affinity(void **p, void *end,
p                1378 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, n, e_inval);
p                1383 net/ceph/osdmap.c 		ceph_decode_32_safe(p, end, osd, e_inval);
p                1384 net/ceph/osdmap.c 		ceph_decode_32_safe(p, end, aff, e_inval);
p                1399 net/ceph/osdmap.c static struct ceph_pg_mapping *__decode_pg_upmap(void **p, void *end,
p                1402 net/ceph/osdmap.c 	return __decode_pg_temp(p, end, false);
p                1405 net/ceph/osdmap.c static int decode_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
p                1407 net/ceph/osdmap.c 	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
p                1411 net/ceph/osdmap.c static int decode_new_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
p                1413 net/ceph/osdmap.c 	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
p                1417 net/ceph/osdmap.c static int decode_old_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
p                1419 net/ceph/osdmap.c 	return decode_pg_mapping(p, end, &map->pg_upmap, NULL, true);
p                1422 net/ceph/osdmap.c static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end,
p                1428 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, len, e_inval);
p                1432 net/ceph/osdmap.c 	ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval);
p                1439 net/ceph/osdmap.c 		pg->pg_upmap_items.from_to[i][0] = ceph_decode_32(p);
p                1440 net/ceph/osdmap.c 		pg->pg_upmap_items.from_to[i][1] = ceph_decode_32(p);
p                1449 net/ceph/osdmap.c static int decode_pg_upmap_items(void **p, void *end, struct ceph_osdmap *map)
p                1451 net/ceph/osdmap.c 	return decode_pg_mapping(p, end, &map->pg_upmap_items,
p                1455 net/ceph/osdmap.c static int decode_new_pg_upmap_items(void **p, void *end,
p                1458 net/ceph/osdmap.c 	return decode_pg_mapping(p, end, &map->pg_upmap_items,
p                1462 net/ceph/osdmap.c static int decode_old_pg_upmap_items(void **p, void *end,
p                1465 net/ceph/osdmap.c 	return decode_pg_mapping(p, end, &map->pg_upmap_items, NULL, true);
p                1471 net/ceph/osdmap.c static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map)
p                1475 net/ceph/osdmap.c 	void *start = *p;
p                1480 net/ceph/osdmap.c 	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));
p                1482 net/ceph/osdmap.c 	err = get_osdmap_client_data_v(p, end, "full", &struct_v);
p                1487 net/ceph/osdmap.c 	ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
p                1489 net/ceph/osdmap.c 	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
p                1490 net/ceph/osdmap.c 	epoch = map->epoch = ceph_decode_32(p);
p                1491 net/ceph/osdmap.c 	ceph_decode_copy(p, &map->created, sizeof(map->created));
p                1492 net/ceph/osdmap.c 	ceph_decode_copy(p, &map->modified, sizeof(map->modified));
p                1495 net/ceph/osdmap.c 	err = decode_pools(p, end, map);
p                1500 net/ceph/osdmap.c 	err = decode_pool_names(p, end, map);
p                1504 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, map->pool_max, e_inval);
p                1506 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, map->flags, e_inval);
p                1509 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, max, e_inval);
p                1517 net/ceph/osdmap.c 	ceph_decode_need(p, end, 3*sizeof(u32) +
p                1521 net/ceph/osdmap.c 	if (ceph_decode_32(p) != map->max_osd)
p                1526 net/ceph/osdmap.c 			map->osd_state[i] = ceph_decode_32(p);
p                1529 net/ceph/osdmap.c 			map->osd_state[i] = ceph_decode_8(p);
p                1532 net/ceph/osdmap.c 	if (ceph_decode_32(p) != map->max_osd)
p                1536 net/ceph/osdmap.c 		map->osd_weight[i] = ceph_decode_32(p);
p                1538 net/ceph/osdmap.c 	if (ceph_decode_32(p) != map->max_osd)
p                1542 net/ceph/osdmap.c 		err = ceph_decode_entity_addr(p, end, &map->osd_addr[i]);
p                1548 net/ceph/osdmap.c 	err = decode_pg_temp(p, end, map);
p                1554 net/ceph/osdmap.c 		err = decode_primary_temp(p, end, map);
p                1561 net/ceph/osdmap.c 		err = decode_primary_affinity(p, end, map);
p                1569 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, len, e_inval);
p                1570 net/ceph/osdmap.c 	err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end)));
p                1574 net/ceph/osdmap.c 	*p += len;
p                1577 net/ceph/osdmap.c 		ceph_decode_skip_map_of_map(p, end, string, string, string,
p                1582 net/ceph/osdmap.c 		err = decode_pg_upmap(p, end, map);
p                1586 net/ceph/osdmap.c 		err = decode_pg_upmap_items(p, end, map);
p                1595 net/ceph/osdmap.c 	*p = end;
p                1604 net/ceph/osdmap.c 	       err, epoch, (int)(*p - start), *p, start, end);
p                1614 net/ceph/osdmap.c struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
p                1623 net/ceph/osdmap.c 	ret = osdmap_decode(p, end, map);
p                1640 net/ceph/osdmap.c static int decode_new_up_state_weight(void **p, void *end, u8 struct_v,
p                1649 net/ceph/osdmap.c 	new_up_client = *p;
p                1650 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, len, e_inval);
p                1654 net/ceph/osdmap.c 		ceph_decode_skip_32(p, end, e_inval);
p                1655 net/ceph/osdmap.c 		if (ceph_decode_entity_addr(p, end, &addr))
p                1659 net/ceph/osdmap.c 	new_state = *p;
p                1660 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, len, e_inval);
p                1662 net/ceph/osdmap.c 	ceph_decode_need(p, end, len, e_inval);
p                1663 net/ceph/osdmap.c 	*p += len;
p                1666 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, len, e_inval);
p                1671 net/ceph/osdmap.c 		ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
p                1672 net/ceph/osdmap.c 		osd = ceph_decode_32(p);
p                1673 net/ceph/osdmap.c 		w = ceph_decode_32(p);
p                1690 net/ceph/osdmap.c 	new_weight_end = *p;
p                1693 net/ceph/osdmap.c 	*p = new_state;
p                1694 net/ceph/osdmap.c 	len = ceph_decode_32(p);
p                1700 net/ceph/osdmap.c 		osd = ceph_decode_32(p);
p                1702 net/ceph/osdmap.c 			xorstate = ceph_decode_32(p);
p                1704 net/ceph/osdmap.c 			xorstate = ceph_decode_8(p);
p                1726 net/ceph/osdmap.c 	*p = new_up_client;
p                1727 net/ceph/osdmap.c 	len = ceph_decode_32(p);
p                1732 net/ceph/osdmap.c 		osd = ceph_decode_32(p);
p                1734 net/ceph/osdmap.c 		if (ceph_decode_entity_addr(p, end, &addr))
p                1741 net/ceph/osdmap.c 	*p = new_weight_end;
p                1751 net/ceph/osdmap.c struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
p                1761 net/ceph/osdmap.c 	void *start = *p;
p                1765 net/ceph/osdmap.c 	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));
p                1767 net/ceph/osdmap.c 	err = get_osdmap_client_data_v(p, end, "inc", &struct_v);
p                1772 net/ceph/osdmap.c 	ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) +
p                1774 net/ceph/osdmap.c 	ceph_decode_copy(p, &fsid, sizeof(fsid));
p                1775 net/ceph/osdmap.c 	epoch = ceph_decode_32(p);
p                1777 net/ceph/osdmap.c 	ceph_decode_copy(p, &modified, sizeof(modified));
p                1778 net/ceph/osdmap.c 	new_pool_max = ceph_decode_64(p);
p                1779 net/ceph/osdmap.c 	new_flags = ceph_decode_32(p);
p                1782 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, len, e_inval);
p                1785 net/ceph/osdmap.c 		     len, *p, end);
p                1786 net/ceph/osdmap.c 		return ceph_osdmap_decode(p, min(*p+len, end));
p                1790 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, len, e_inval);
p                1793 net/ceph/osdmap.c 				       crush_decode(*p, min(*p + len, end)));
p                1796 net/ceph/osdmap.c 		*p += len;
p                1806 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, max, e_inval);
p                1817 net/ceph/osdmap.c 	err = decode_new_pools(p, end, map);
p                1822 net/ceph/osdmap.c 	err = decode_pool_names(p, end, map);
p                1827 net/ceph/osdmap.c 	ceph_decode_32_safe(p, end, len, e_inval);
p                1831 net/ceph/osdmap.c 		ceph_decode_64_safe(p, end, pool, e_inval);
p                1838 net/ceph/osdmap.c 	err = decode_new_up_state_weight(p, end, struct_v, map);
p                1843 net/ceph/osdmap.c 	err = decode_new_pg_temp(p, end, map);
p                1849 net/ceph/osdmap.c 		err = decode_new_primary_temp(p, end, map);
p                1856 net/ceph/osdmap.c 		err = decode_new_primary_affinity(p, end, map);
p                1863 net/ceph/osdmap.c 		ceph_decode_skip_map_of_map(p, end, string, string, string,
p                1866 net/ceph/osdmap.c 		ceph_decode_skip_set(p, end, string, e_inval);
p                1870 net/ceph/osdmap.c 		err = decode_new_pg_upmap(p, end, map);
p                1874 net/ceph/osdmap.c 		err = decode_old_pg_upmap(p, end, map);
p                1878 net/ceph/osdmap.c 		err = decode_new_pg_upmap_items(p, end, map);
p                1882 net/ceph/osdmap.c 		err = decode_old_pg_upmap_items(p, end, map);
p                1888 net/ceph/osdmap.c 	*p = end;
p                1897 net/ceph/osdmap.c 	       err, epoch, (int)(*p - start), *p, start, end);
p                  14 net/ceph/string_table.c 	struct rb_node **p, *parent;
p                  19 net/ceph/string_table.c 	p = &string_tree.rb_node;
p                  20 net/ceph/string_table.c 	while (*p) {
p                  21 net/ceph/string_table.c 		exist = rb_entry(*p, struct ceph_string, node);
p                  24 net/ceph/string_table.c 			p = &(*p)->rb_left;
p                  26 net/ceph/string_table.c 			p = &(*p)->rb_right;
p                  52 net/ceph/string_table.c 	p = &string_tree.rb_node;
p                  54 net/ceph/string_table.c 	while (*p) {
p                  55 net/ceph/string_table.c 		parent = *p;
p                  56 net/ceph/string_table.c 		exist = rb_entry(*p, struct ceph_string, node);
p                  59 net/ceph/string_table.c 			p = &(*p)->rb_left;
p                  61 net/ceph/string_table.c 			p = &(*p)->rb_right;
p                  68 net/ceph/string_table.c 		rb_link_node(&cs->node, parent, p);
p                 978 net/core/dev.c 	const char *p;
p                 986 net/core/dev.c 	p = strchr(name, '%');
p                 987 net/core/dev.c 	if (p) {
p                 993 net/core/dev.c 		if (p[1] != 'd' || strchr(p + 2, '%'))
p                5330 net/core/dev.c 	struct sk_buff *skb, *p;
p                5332 net/core/dev.c 	list_for_each_entry_safe_reverse(skb, p, head, list) {
p                5367 net/core/dev.c 	struct sk_buff *p;
p                5370 net/core/dev.c 	list_for_each_entry(p, head, list) {
p                5373 net/core/dev.c 		NAPI_GRO_CB(p)->flush = 0;
p                5375 net/core/dev.c 		if (hash != skb_get_hash_raw(p)) {
p                5376 net/core/dev.c 			NAPI_GRO_CB(p)->same_flow = 0;
p                5380 net/core/dev.c 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
p                5381 net/core/dev.c 		diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
p                5382 net/core/dev.c 		if (skb_vlan_tag_present(p))
p                5383 net/core/dev.c 			diffs |= p->vlan_tci ^ skb->vlan_tci;
p                5384 net/core/dev.c 		diffs |= skb_metadata_dst_cmp(p, skb);
p                5385 net/core/dev.c 		diffs |= skb_metadata_differs(p, skb);
p                5387 net/core/dev.c 			diffs |= compare_ether_header(skb_mac_header(p),
p                5390 net/core/dev.c 			diffs = memcmp(skb_mac_header(p),
p                5393 net/core/dev.c 		NAPI_GRO_CB(p)->same_flow = !diffs;
p                9475 net/core/dev.c 	struct net_device *p;
p                9498 net/core/dev.c 	p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
p                9499 net/core/dev.c 	if (!p)
p                9502 net/core/dev.c 	dev = PTR_ALIGN(p, NETDEV_ALIGN);
p                9503 net/core/dev.c 	dev->padded = (char *)dev - (char *)p;
p                9586 net/core/dev.c 	struct napi_struct *p, *n;
p                9597 net/core/dev.c 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
p                9598 net/core/dev.c 		netif_napi_del(p);
p                 201 net/core/dst.c 	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);
p                 203 net/core/dst.c 	if (p) {
p                 207 net/core/dst.c 		refcount_set(&p->refcnt, 1);
p                 208 net/core/dst.c 		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));
p                 210 net/core/dst.c 		new = (unsigned long) p;
p                 214 net/core/dst.c 			kfree(p);
p                 215 net/core/dst.c 			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
p                 217 net/core/dst.c 				p = NULL;
p                 224 net/core/dst.c 	return (u32 *)p;
p                 337 net/core/lwt_bpf.c 	struct bpf_prog *p;
p                 354 net/core/lwt_bpf.c 	p = bpf_prog_get_type(fd, type);
p                 355 net/core/lwt_bpf.c 	if (IS_ERR(p))
p                 356 net/core/lwt_bpf.c 		return PTR_ERR(p);
p                 358 net/core/lwt_bpf.c 	prog->prog = p;
p                 904 net/core/neighbour.c 		struct neigh_parms *p;
p                 906 net/core/neighbour.c 		list_for_each_entry(p, &tbl->parms_list, list)
p                 907 net/core/neighbour.c 			p->reachable_time =
p                 908 net/core/neighbour.c 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
p                 969 net/core/neighbour.c 	struct neigh_parms *p = n->parms;
p                 970 net/core/neighbour.c 	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
p                 971 net/core/neighbour.c 	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
p                 972 net/core/neighbour.c 	        NEIGH_VAR(p, MCAST_PROBES));
p                1565 net/core/neighbour.c void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
p                1571 net/core/neighbour.c 					  NEIGH_VAR(p, PROXY_DELAY));
p                1573 net/core/neighbour.c 	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
p                1597 net/core/neighbour.c 	struct neigh_parms *p;
p                1599 net/core/neighbour.c 	list_for_each_entry(p, &tbl->parms_list, list) {
p                1600 net/core/neighbour.c 		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
p                1601 net/core/neighbour.c 		    (!p->dev && !ifindex && net_eq(net, &init_net)))
p                1602 net/core/neighbour.c 			return p;
p                1611 net/core/neighbour.c 	struct neigh_parms *p;
p                1615 net/core/neighbour.c 	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
p                1616 net/core/neighbour.c 	if (p) {
p                1617 net/core/neighbour.c 		p->tbl		  = tbl;
p                1618 net/core/neighbour.c 		refcount_set(&p->refcnt, 1);
p                1619 net/core/neighbour.c 		p->reachable_time =
p                1620 net/core/neighbour.c 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
p                1622 net/core/neighbour.c 		p->dev = dev;
p                1623 net/core/neighbour.c 		write_pnet(&p->net, net);
p                1624 net/core/neighbour.c 		p->sysctl_table = NULL;
p                1626 net/core/neighbour.c 		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
p                1628 net/core/neighbour.c 			kfree(p);
p                1633 net/core/neighbour.c 		list_add(&p->list, &tbl->parms.list);
p                1636 net/core/neighbour.c 		neigh_parms_data_state_cleanall(p);
p                1638 net/core/neighbour.c 	return p;
p                2220 net/core/neighbour.c 		struct neigh_parms *p;
p                2232 net/core/neighbour.c 		p = lookup_neigh_parms(tbl, net, ifindex);
p                2233 net/core/neighbour.c 		if (p == NULL) {
p                2244 net/core/neighbour.c 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
p                2249 net/core/neighbour.c 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
p                2253 net/core/neighbour.c 				NEIGH_VAR_SET(p, PROXY_QLEN,
p                2257 net/core/neighbour.c 				NEIGH_VAR_SET(p, APP_PROBES,
p                2261 net/core/neighbour.c 				NEIGH_VAR_SET(p, UCAST_PROBES,
p                2265 net/core/neighbour.c 				NEIGH_VAR_SET(p, MCAST_PROBES,
p                2269 net/core/neighbour.c 				NEIGH_VAR_SET(p, MCAST_REPROBES,
p                2273 net/core/neighbour.c 				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
p                2279 net/core/neighbour.c 				p->reachable_time =
p                2280 net/core/neighbour.c 					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
p                2283 net/core/neighbour.c 				NEIGH_VAR_SET(p, GC_STALETIME,
p                2287 net/core/neighbour.c 				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
p                2289 net/core/neighbour.c 				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
p                2292 net/core/neighbour.c 				NEIGH_VAR_SET(p, RETRANS_TIME,
p                2296 net/core/neighbour.c 				NEIGH_VAR_SET(p, ANYCAST_DELAY,
p                2300 net/core/neighbour.c 				NEIGH_VAR_SET(p, PROXY_DELAY,
p                2304 net/core/neighbour.c 				NEIGH_VAR_SET(p, LOCKTIME,
p                2380 net/core/neighbour.c 		struct neigh_parms *p;
p                2395 net/core/neighbour.c 		p = list_next_entry(&tbl->parms, list);
p                2396 net/core/neighbour.c 		list_for_each_entry_from(p, &tbl->parms_list, list) {
p                2397 net/core/neighbour.c 			if (!net_eq(neigh_parms_net(p), net))
p                2403 net/core/neighbour.c 			if (neightbl_fill_param_info(skb, tbl, p,
p                3408 net/core/neighbour.c static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
p                3412 net/core/neighbour.c 	int family = neigh_parms_family(p);
p                3420 net/core/neighbour.c 			dst_p->data[index] = p->data[index];
p                3428 net/core/neighbour.c 	struct neigh_parms *p = ctl->extra2;
p                3429 net/core/neighbour.c 	struct net *net = neigh_parms_net(p);
p                3430 net/core/neighbour.c 	int index = (int *) ctl->data - p->data;
p                3435 net/core/neighbour.c 	set_bit(index, p->data_state);
p                3437 net/core/neighbour.c 		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
p                3439 net/core/neighbour.c 		neigh_copy_dflt_parms(net, p, index);
p                3513 net/core/neighbour.c 	struct neigh_parms *p = ctl->extra2;
p                3528 net/core/neighbour.c 		p->reachable_time =
p                3529 net/core/neighbour.c 			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
p                3619 net/core/neighbour.c int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
p                3633 net/core/neighbour.c 		t->neigh_vars[i].data += (long) p;
p                3635 net/core/neighbour.c 		t->neigh_vars[i].extra2 = p;
p                3644 net/core/neighbour.c 		struct neigh_table *tbl = p->tbl;
p                3677 net/core/neighbour.c 	if (neigh_parms_net(p)->user_ns != &init_user_ns)
p                3680 net/core/neighbour.c 	switch (neigh_parms_family(p)) {
p                3694 net/core/neighbour.c 		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
p                3698 net/core/neighbour.c 	p->sysctl_table = t;
p                3708 net/core/neighbour.c void neigh_sysctl_unregister(struct neigh_parms *p)
p                3710 net/core/neighbour.c 	if (p->sysctl_table) {
p                3711 net/core/neighbour.c 		struct neigh_sysctl_table *t = p->sysctl_table;
p                3712 net/core/neighbour.c 		p->sysctl_table = NULL;
p                 442 net/core/net_namespace.c void net_drop_ns(void *p)
p                 444 net/core/net_namespace.c 	struct net *ns = p;
p                  21 net/core/netclassid_cgroup.c struct cgroup_cls_state *task_cls_state(struct task_struct *p)
p                  23 net/core/netclassid_cgroup.c 	return css_cls_state(task_css_check(p, net_cls_cgrp_id,
p                  87 net/core/netclassid_cgroup.c static void update_classid_task(struct task_struct *p, u32 classid)
p                  96 net/core/netclassid_cgroup.c 		task_lock(p);
p                  97 net/core/netclassid_cgroup.c 		fd = iterate_fd(p->files, fd, update_classid_sock, &ctx);
p                  98 net/core/netclassid_cgroup.c 		task_unlock(p);
p                 106 net/core/netclassid_cgroup.c 	struct task_struct *p;
p                 108 net/core/netclassid_cgroup.c 	cgroup_taskset_for_each(p, css, tset) {
p                 109 net/core/netclassid_cgroup.c 		update_classid_task(p, css_cls_state(css)->classid);
p                 123 net/core/netclassid_cgroup.c 	struct task_struct *p;
p                 130 net/core/netclassid_cgroup.c 	while ((p = css_task_iter_next(&it)))
p                 131 net/core/netclassid_cgroup.c 		update_classid_task(p, cs->classid);
p                 236 net/core/netprio_cgroup.c 	struct task_struct *p;
p                 241 net/core/netprio_cgroup.c 	cgroup_taskset_for_each(p, css, tset) {
p                 244 net/core/netprio_cgroup.c 		task_lock(p);
p                 245 net/core/netprio_cgroup.c 		iterate_fd(p->files, 0, update_netprio, v);
p                 246 net/core/netprio_cgroup.c 		task_unlock(p);
p                  29 net/core/page_pool.c 	memcpy(&pool->p, params, sizeof(pool->p));
p                  32 net/core/page_pool.c 	if (pool->p.flags & ~(PP_FLAG_ALL))
p                  35 net/core/page_pool.c 	if (pool->p.pool_size)
p                  36 net/core/page_pool.c 		ring_qsize = pool->p.pool_size;
p                  46 net/core/page_pool.c 	if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
p                  47 net/core/page_pool.c 	    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
p                  58 net/core/page_pool.c 	if (pool->p.flags & PP_FLAG_DMA_MAP)
p                  59 net/core/page_pool.c 		get_device(pool->p.dev);
p                 130 net/core/page_pool.c 	if (pool->p.order)
p                 141 net/core/page_pool.c 	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
p                 145 net/core/page_pool.c 	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
p                 153 net/core/page_pool.c 	dma = dma_map_page_attrs(pool->p.dev, page, 0,
p                 154 net/core/page_pool.c 				 (PAGE_SIZE << pool->p.order),
p                 155 net/core/page_pool.c 				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
p                 156 net/core/page_pool.c 	if (dma_mapping_error(pool->p.dev, dma)) {
p                 216 net/core/page_pool.c 	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
p                 221 net/core/page_pool.c 	dma_unmap_page_attrs(pool->p.dev, dma,
p                 222 net/core/page_pool.c 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
p                 346 net/core/page_pool.c 	if (pool->p.flags & PP_FLAG_DMA_MAP)
p                 347 net/core/page_pool.c 		put_device(pool->p.dev);
p                3145 net/core/pktgen.c 	char *p = pkt_dev->result;
p                3150 net/core/pktgen.c 	p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n",
p                3164 net/core/pktgen.c 	p += sprintf(p, "  %llupps %lluMb/sec (%llubps) errors: %llu",
p                3537 net/core/pktgen.c 	struct pktgen_dev *p, *pkt_dev = NULL;
p                3541 net/core/pktgen.c 	list_for_each_entry_rcu(p, &t->if_list, list)
p                3542 net/core/pktgen.c 		if (strncmp(p->odevname, ifname, len) == 0) {
p                3543 net/core/pktgen.c 			if (p->odevname[len]) {
p                3544 net/core/pktgen.c 				if (exact || p->odevname[len] != '@')
p                3547 net/core/pktgen.c 			pkt_dev = p;
p                3681 net/core/pktgen.c 	struct task_struct *p;
p                3698 net/core/pktgen.c 	p = kthread_create_on_node(pktgen_thread_worker,
p                3702 net/core/pktgen.c 	if (IS_ERR(p)) {
p                3706 net/core/pktgen.c 		return PTR_ERR(p);
p                3708 net/core/pktgen.c 	kthread_bind(p, cpu);
p                3709 net/core/pktgen.c 	t->tsk = p;
p                3716 net/core/pktgen.c 		kthread_stop(p);
p                3723 net/core/pktgen.c 	get_task_struct(p);
p                3724 net/core/pktgen.c 	wake_up_process(p);
p                3737 net/core/pktgen.c 	struct pktgen_dev *p;
p                3741 net/core/pktgen.c 		p = list_entry(q, struct pktgen_dev, list);
p                3742 net/core/pktgen.c 		if (p == pkt_dev)
p                3743 net/core/pktgen.c 			list_del_rcu(&p->list);
p                 131 net/core/scm.c int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
p                 158 net/core/scm.c 			err=scm_fp_copy(cmsg, &p->fp);
p                 174 net/core/scm.c 			p->creds.pid = creds.pid;
p                 175 net/core/scm.c 			if (!p->pid || pid_vnr(p->pid) != creds.pid) {
p                 181 net/core/scm.c 				put_pid(p->pid);
p                 182 net/core/scm.c 				p->pid = pid;
p                 191 net/core/scm.c 			p->creds.uid = uid;
p                 192 net/core/scm.c 			p->creds.gid = gid;
p                 200 net/core/scm.c 	if (p->fp && !p->fp->count)
p                 202 net/core/scm.c 		kfree(p->fp);
p                 203 net/core/scm.c 		p->fp = NULL;
p                 208 net/core/scm.c 	scm_destroy(p);
p                 785 net/core/skbuff.c 		struct page *p;
p                 789 net/core/skbuff.c 				      skb_frag_size(frag), p, p_off, p_len,
p                 792 net/core/skbuff.c 			vaddr = kmap_atomic(p);
p                1375 net/core/skbuff.c 		struct page *p;
p                1379 net/core/skbuff.c 				      p, p_off, p_len, copied) {
p                1381 net/core/skbuff.c 			vaddr = kmap_atomic(p);
p                2217 net/core/skbuff.c 			struct page *p;
p                2225 net/core/skbuff.c 					      copy, p, p_off, p_len, copied) {
p                2226 net/core/skbuff.c 				vaddr = kmap_atomic(p);
p                2578 net/core/skbuff.c 			struct page *p;
p                2586 net/core/skbuff.c 					      copy, p, p_off, p_len, copied) {
p                2587 net/core/skbuff.c 				vaddr = kmap_atomic(p);
p                2657 net/core/skbuff.c 			struct page *p;
p                2666 net/core/skbuff.c 					      copy, p, p_off, p_len, copied) {
p                2667 net/core/skbuff.c 				vaddr = kmap_atomic(p);
p                2756 net/core/skbuff.c 			struct page *p;
p                2765 net/core/skbuff.c 					      copy, p, p_off, p_len, copied) {
p                2766 net/core/skbuff.c 				vaddr = kmap_atomic(p);
p                3094 net/core/skbuff.c 	struct rb_node *p = rb_first(root);
p                3097 net/core/skbuff.c 	while (p) {
p                3098 net/core/skbuff.c 		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
p                3100 net/core/skbuff.c 		p = rb_next(p);
p                3986 net/core/skbuff.c int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
p                3995 net/core/skbuff.c 	if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
p                3998 net/core/skbuff.c 	lp = NAPI_GRO_CB(p)->last;
p                4075 net/core/skbuff.c 	if (NAPI_GRO_CB(p)->last == p)
p                4076 net/core/skbuff.c 		skb_shinfo(p)->frag_list = skb;
p                4078 net/core/skbuff.c 		NAPI_GRO_CB(p)->last->next = skb;
p                4079 net/core/skbuff.c 	NAPI_GRO_CB(p)->last = skb;
p                4081 net/core/skbuff.c 	lp = p;
p                4084 net/core/skbuff.c 	NAPI_GRO_CB(p)->count++;
p                4085 net/core/skbuff.c 	p->data_len += len;
p                4086 net/core/skbuff.c 	p->truesize += delta_truesize;
p                4087 net/core/skbuff.c 	p->len += len;
p                4088 net/core/skbuff.c 	if (lp != p) {
p                3602 net/core/sock.c bool sk_busy_loop_end(void *p, unsigned long start_time)
p                3604 net/core/sock.c 	struct sock *sk = p;
p                 669 net/dccp/ccids/ccid3.c 	u32 x_recv, p;
p                 693 net/dccp/ccids/ccid3.c 	p = tfrc_calc_x_reverse_lookup(fval);
p                 696 net/dccp/ccids/ccid3.c 		       "loss rate=%u\n", dccp_role(sk), sk, x_recv, p);
p                 698 net/dccp/ccids/ccid3.c 	return p == 0 ? ~0U : scaled_div(1, p);
p                  54 net/dccp/ccids/lib/tfrc.h u32 tfrc_calc_x(u16 s, u32 R, u32 p);
p                 613 net/dccp/ccids/lib/tfrc_equation.c u32 tfrc_calc_x(u16 s, u32 R, u32 p)
p                 620 net/dccp/ccids/lib/tfrc_equation.c 	BUG_ON(p >  1000000);		/* p must not exceed 100%   */
p                 621 net/dccp/ccids/lib/tfrc_equation.c 	BUG_ON(p == 0);			/* f(0) = 0, divide by zero */
p                 627 net/dccp/ccids/lib/tfrc_equation.c 	if (p <= TFRC_CALC_X_SPLIT)		{     /* 0.0000 < p <= 0.05   */
p                 628 net/dccp/ccids/lib/tfrc_equation.c 		if (p < TFRC_SMALLEST_P) {	      /* 0.0000 < p <  0.0001 */
p                 630 net/dccp/ccids/lib/tfrc_equation.c 				  "Substituting %d\n", p, TFRC_SMALLEST_P);
p                 633 net/dccp/ccids/lib/tfrc_equation.c 			index =  p/TFRC_SMALLEST_P - 1;
p                 638 net/dccp/ccids/lib/tfrc_equation.c 		index = p/(1000000/TFRC_CALC_X_ARRSIZE) - 1;
p                1082 net/decnet/dn_dev.c 	struct dn_dev_parms *p = dn_dev_list;
p                1085 net/decnet/dn_dev.c 	for(i = 0; i < DN_DEV_LIST_SIZE; i++, p++) {
p                1086 net/decnet/dn_dev.c 		if (p->type == dev->type)
p                1098 net/decnet/dn_dev.c 	memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
p                 179 net/dsa/dsa.c  static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
p                 182 net/dsa/dsa.c  	struct dsa_switch *ds = p->dp->ds;
p                 198 net/dsa/dsa.c  		return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);
p                 209 net/dsa/dsa.c  	struct dsa_slave_priv *p;
p                 227 net/dsa/dsa.c  	p = netdev_priv(skb->dev);
p                 232 net/dsa/dsa.c  	s = this_cpu_ptr(p->stats64);
p                 238 net/dsa/dsa.c  	if (dsa_skb_defer_rx_timestamp(p, skb))
p                 247 net/dsa/dsa.c  static bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
p                 249 net/dsa/dsa.c  	return dsa_is_user_port(ds, p) && ds->ports[p].slave;
p                 198 net/dsa/dsa_priv.h 	struct dsa_slave_priv *p = netdev_priv(dev);
p                 200 net/dsa/dsa_priv.h 	return p->dp;
p                 256 net/dsa/slave.c 	struct dsa_slave_priv *p = netdev_priv(dev);
p                 257 net/dsa/slave.c 	struct dsa_switch *ds = p->dp->ds;
p                 258 net/dsa/slave.c 	int port = p->dp->index;
p                 272 net/dsa/slave.c 	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
p                 451 net/dsa/slave.c 	struct dsa_slave_priv *p = netdev_priv(dev);
p                 453 net/dsa/slave.c 	if (p->netpoll)
p                 454 net/dsa/slave.c 		netpoll_send_skb(p->netpoll, skb);
p                 461 net/dsa/slave.c static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
p                 464 net/dsa/slave.c 	struct dsa_switch *ds = p->dp->ds;
p                 481 net/dsa/slave.c 	if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type))
p                 507 net/dsa/slave.c 	struct dsa_slave_priv *p = netdev_priv(dev);
p                 511 net/dsa/slave.c 	s = this_cpu_ptr(p->stats64);
p                 523 net/dsa/slave.c 	dsa_skb_tx_timestamp(p, skb);
p                 528 net/dsa/slave.c 	nskb = p->xmit(skb, dev);
p                 663 net/dsa/slave.c 	struct dsa_slave_priv *p = netdev_priv(dev);
p                 672 net/dsa/slave.c 		s = per_cpu_ptr(p->stats64, i);
p                 793 net/dsa/slave.c 	struct dsa_slave_priv *p = netdev_priv(dev);
p                 807 net/dsa/slave.c 	p->netpoll = netpoll;
p                 814 net/dsa/slave.c 	struct dsa_slave_priv *p = netdev_priv(dev);
p                 815 net/dsa/slave.c 	struct netpoll *netpoll = p->netpoll;
p                 820 net/dsa/slave.c 	p->netpoll = NULL;
p                 851 net/dsa/slave.c 	struct dsa_slave_priv *p = netdev_priv(dev);
p                 854 net/dsa/slave.c 	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
p                 866 net/dsa/slave.c 	struct dsa_slave_priv *p = netdev_priv(dev);
p                 910 net/dsa/slave.c 		list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
p                1049 net/dsa/slave.c 	struct dsa_slave_priv *p = netdev_priv(dev);
p                1058 net/dsa/slave.c 		s = per_cpu_ptr(p->stats64, i);
p                1101 net/dsa/slave.c 	struct dsa_slave_priv *p = netdev_priv(dev);
p                1102 net/dsa/slave.c 	struct dsa_switch *ds = p->dp->ds;
p                1107 net/dsa/slave.c 	return ds->ops->get_ts_info(ds, p->dp->index, ts);
p                1396 net/dsa/slave.c 	struct dsa_slave_priv *p;
p                1428 net/dsa/slave.c 	p = netdev_priv(slave_dev);
p                1429 net/dsa/slave.c 	p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
p                1430 net/dsa/slave.c 	if (!p->stats64) {
p                1434 net/dsa/slave.c 	p->dp = port;
p                1435 net/dsa/slave.c 	INIT_LIST_HEAD(&p->mall_tc_list);
p                1438 net/dsa/slave.c 	p->xmit = cpu_dp->tag_ops->xmit;
p                1462 net/dsa/slave.c 	phylink_disconnect_phy(p->dp->pl);
p                1464 net/dsa/slave.c 	phylink_destroy(p->dp->pl);
p                1466 net/dsa/slave.c 	free_percpu(p->stats64);
p                1475 net/dsa/slave.c 	struct dsa_slave_priv *p = netdev_priv(slave_dev);
p                1485 net/dsa/slave.c 	free_percpu(p->stats64);
p                 291 net/ethernet/eth.c int eth_prepare_mac_addr_change(struct net_device *dev, void *p)
p                 293 net/ethernet/eth.c 	struct sockaddr *addr = p;
p                 308 net/ethernet/eth.c void eth_commit_mac_addr_change(struct net_device *dev, void *p)
p                 310 net/ethernet/eth.c 	struct sockaddr *addr = p;
p                 326 net/ethernet/eth.c int eth_mac_addr(struct net_device *dev, void *p)
p                 330 net/ethernet/eth.c 	ret = eth_prepare_mac_addr_change(dev, p);
p                 333 net/ethernet/eth.c 	eth_commit_mac_addr_change(dev, p);
p                 459 net/ethernet/eth.c 	struct sk_buff *p;
p                 474 net/ethernet/eth.c 	list_for_each_entry(p, head, list) {
p                 475 net/ethernet/eth.c 		if (!NAPI_GRO_CB(p)->same_flow)
p                 478 net/ethernet/eth.c 		eh2 = (struct ethhdr *)(p->data + off_eth);
p                 480 net/ethernet/eth.c 			NAPI_GRO_CB(p)->same_flow = 0;
p                 918 net/ipv4/af_inet.c 	void __user *p = (void __user *)arg;
p                 925 net/ipv4/af_inet.c 		if (copy_from_user(&rt, p, sizeof(struct rtentry)))
p                 942 net/ipv4/af_inet.c 		if (copy_from_user(&ifr, p, sizeof(struct ifreq)))
p                 945 net/ipv4/af_inet.c 		if (!err && copy_to_user(p, &ifr, sizeof(struct ifreq)))
p                 955 net/ipv4/af_inet.c 		if (copy_from_user(&ifr, p, sizeof(struct ifreq)))
p                1126 net/ipv4/af_inet.c void inet_register_protosw(struct inet_protosw *p)
p                1130 net/ipv4/af_inet.c 	int protocol = p->protocol;
p                1135 net/ipv4/af_inet.c 	if (p->type >= SOCK_MAX)
p                1139 net/ipv4/af_inet.c 	last_perm = &inetsw[p->type];
p                1140 net/ipv4/af_inet.c 	list_for_each(lh, &inetsw[p->type]) {
p                1156 net/ipv4/af_inet.c 	list_add_rcu(&p->list, last_perm);
p                1168 net/ipv4/af_inet.c 	       p->type);
p                1173 net/ipv4/af_inet.c void inet_unregister_protosw(struct inet_protosw *p)
p                1175 net/ipv4/af_inet.c 	if (INET_PROTOSW_PERMANENT & p->flags) {
p                1177 net/ipv4/af_inet.c 		       p->protocol);
p                1180 net/ipv4/af_inet.c 		list_del_rcu(&p->list);
p                1412 net/ipv4/af_inet.c 	struct sk_buff *p;
p                1448 net/ipv4/af_inet.c 	list_for_each_entry(p, head, list) {
p                1452 net/ipv4/af_inet.c 		if (!NAPI_GRO_CB(p)->same_flow)
p                1455 net/ipv4/af_inet.c 		iph2 = (struct iphdr *)(p->data + off);
p                1464 net/ipv4/af_inet.c 			NAPI_GRO_CB(p)->same_flow = 0;
p                1469 net/ipv4/af_inet.c 		NAPI_GRO_CB(p)->flush |=
p                1474 net/ipv4/af_inet.c 		NAPI_GRO_CB(p)->flush |= flush;
p                1488 net/ipv4/af_inet.c 		if (!NAPI_GRO_CB(p)->is_atomic ||
p                1490 net/ipv4/af_inet.c 			flush_id ^= NAPI_GRO_CB(p)->count;
p                1498 net/ipv4/af_inet.c 			NAPI_GRO_CB(p)->flush_id = flush_id;
p                1500 net/ipv4/af_inet.c 			NAPI_GRO_CB(p)->flush_id |= flush_id;
p                2557 net/ipv4/devinet.c 				     int ifindex, struct ipv4_devconf *p)
p                2568 net/ipv4/devinet.c 		t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
p                2569 net/ipv4/devinet.c 		t->devinet_vars[i].extra1 = p;
p                2579 net/ipv4/devinet.c 	p->sysctl = t;
p                2582 net/ipv4/devinet.c 				    ifindex, p);
p                 867 net/ipv4/esp4.c 	char *p;
p                 910 net/ipv4/esp4.c 	p = key;
p                 911 net/ipv4/esp4.c 	rta = (void *)p;
p                 915 net/ipv4/esp4.c 	p += RTA_SPACE(sizeof(*param));
p                 920 net/ipv4/esp4.c 		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
p                 921 net/ipv4/esp4.c 		p += (x->aalg->alg_key_len + 7) / 8;
p                 943 net/ipv4/esp4.c 	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
p                 208 net/ipv4/fib_trie.c #define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER(tn_info(n)->parent, p)
p                2259 net/ipv4/fib_trie.c 	struct seq_net_private p;
p                2659 net/ipv4/fib_trie.c 	struct seq_net_private p;
p                 317 net/ipv4/fou.c 	struct sk_buff *p;
p                 402 net/ipv4/fou.c 	list_for_each_entry(p, head, list) {
p                 405 net/ipv4/fou.c 		if (!NAPI_GRO_CB(p)->same_flow)
p                 408 net/ipv4/fou.c 		guehdr2 = (struct guehdr *)(p->data + off);
p                 414 net/ipv4/fou.c 			NAPI_GRO_CB(p)->same_flow = 0;
p                 421 net/ipv4/fou.c 			NAPI_GRO_CB(p)->same_flow = 0;
p                 111 net/ipv4/gre_offload.c 	struct sk_buff *p;
p                 181 net/ipv4/gre_offload.c 	list_for_each_entry(p, head, list) {
p                 184 net/ipv4/gre_offload.c 		if (!NAPI_GRO_CB(p)->same_flow)
p                 195 net/ipv4/gre_offload.c 		greh2 = (struct gre_base_hdr *)(p->data + off);
p                 199 net/ipv4/gre_offload.c 			NAPI_GRO_CB(p)->same_flow = 0;
p                 205 net/ipv4/gre_offload.c 				NAPI_GRO_CB(p)->same_flow = 0;
p                2743 net/ipv4/igmp.c 	struct seq_net_private p;
p                2871 net/ipv4/igmp.c 	struct seq_net_private p;
p                 234 net/ipv4/inet_fragment.c 	struct rb_node *p = rb_first(root);
p                 237 net/ipv4/inet_fragment.c 	while (p) {
p                 238 net/ipv4/inet_fragment.c 		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
p                 240 net/ipv4/inet_fragment.c 		p = rb_next(p);
p                 107 net/ipv4/inetpeer.c 	struct inet_peer *p;
p                 118 net/ipv4/inetpeer.c 		p = rb_entry(parent, struct inet_peer, rb_node);
p                 119 net/ipv4/inetpeer.c 		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
p                 121 net/ipv4/inetpeer.c 			if (!refcount_inc_not_zero(&p->refcnt))
p                 123 net/ipv4/inetpeer.c 			return p;
p                 127 net/ipv4/inetpeer.c 				gc_stack[(*gc_cnt)++] = p;
p                 151 net/ipv4/inetpeer.c 	struct inet_peer *p;
p                 162 net/ipv4/inetpeer.c 		p = gc_stack[i];
p                 167 net/ipv4/inetpeer.c 		delta = (__u32)jiffies - READ_ONCE(p->dtime);
p                 169 net/ipv4/inetpeer.c 		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
p                 173 net/ipv4/inetpeer.c 		p = gc_stack[i];
p                 174 net/ipv4/inetpeer.c 		if (p) {
p                 175 net/ipv4/inetpeer.c 			rb_erase(&p->rb_node, &base->rb_root);
p                 177 net/ipv4/inetpeer.c 			call_rcu(&p->rcu, inetpeer_free_rcu);
p                 186 net/ipv4/inetpeer.c 	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
p                 196 net/ipv4/inetpeer.c 	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
p                 200 net/ipv4/inetpeer.c 	if (p)
p                 201 net/ipv4/inetpeer.c 		return p;
p                 214 net/ipv4/inetpeer.c 	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
p                 215 net/ipv4/inetpeer.c 	if (!p && create) {
p                 216 net/ipv4/inetpeer.c 		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
p                 217 net/ipv4/inetpeer.c 		if (p) {
p                 218 net/ipv4/inetpeer.c 			p->daddr = *daddr;
p                 219 net/ipv4/inetpeer.c 			p->dtime = (__u32)jiffies;
p                 220 net/ipv4/inetpeer.c 			refcount_set(&p->refcnt, 2);
p                 221 net/ipv4/inetpeer.c 			atomic_set(&p->rid, 0);
p                 222 net/ipv4/inetpeer.c 			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
p                 223 net/ipv4/inetpeer.c 			p->rate_tokens = 0;
p                 224 net/ipv4/inetpeer.c 			p->n_redirects = 0;
p                 228 net/ipv4/inetpeer.c 			p->rate_last = jiffies - 60*HZ;
p                 230 net/ipv4/inetpeer.c 			rb_link_node(&p->rb_node, parent, pp);
p                 231 net/ipv4/inetpeer.c 			rb_insert_color(&p->rb_node, &base->rb_root);
p                 239 net/ipv4/inetpeer.c 	return p;
p                 243 net/ipv4/inetpeer.c void inet_putpeer(struct inet_peer *p)
p                 248 net/ipv4/inetpeer.c 	WRITE_ONCE(p->dtime, (__u32)jiffies);
p                 250 net/ipv4/inetpeer.c 	if (refcount_dec_and_test(&p->refcnt))
p                 251 net/ipv4/inetpeer.c 		call_rcu(&p->rcu, inetpeer_free_rcu);
p                 298 net/ipv4/inetpeer.c 	struct rb_node *p = rb_first(&base->rb_root);
p                 300 net/ipv4/inetpeer.c 	while (p) {
p                 301 net/ipv4/inetpeer.c 		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);
p                 303 net/ipv4/inetpeer.c 		p = rb_next(p);
p                 749 net/ipv4/ip_gre.c 	struct ip_tunnel_parm p;
p                 752 net/ipv4/ip_gre.c 	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
p                 756 net/ipv4/ip_gre.c 		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
p                 757 net/ipv4/ip_gre.c 		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
p                 758 net/ipv4/ip_gre.c 		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
p                 762 net/ipv4/ip_gre.c 	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
p                 763 net/ipv4/ip_gre.c 	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);
p                 765 net/ipv4/ip_gre.c 	err = ip_tunnel_ioctl(dev, &p, cmd);
p                 772 net/ipv4/ip_gre.c 		t->parms.i_flags = p.i_flags;
p                 773 net/ipv4/ip_gre.c 		t->parms.o_flags = p.o_flags;
p                 779 net/ipv4/ip_gre.c 	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
p                 780 net/ipv4/ip_gre.c 	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);
p                 782 net/ipv4/ip_gre.c 	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
p                1313 net/ipv4/ip_gre.c 	struct ip_tunnel_parm p;
p                1321 net/ipv4/ip_gre.c 	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
p                1324 net/ipv4/ip_gre.c 	return ip_tunnel_newlink(dev, tb, &p, fwmark);
p                1331 net/ipv4/ip_gre.c 	struct ip_tunnel_parm p;
p                1339 net/ipv4/ip_gre.c 	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
p                1342 net/ipv4/ip_gre.c 	return ip_tunnel_newlink(dev, tb, &p, fwmark);
p                1351 net/ipv4/ip_gre.c 	struct ip_tunnel_parm p;
p                1358 net/ipv4/ip_gre.c 	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
p                1362 net/ipv4/ip_gre.c 	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
p                1366 net/ipv4/ip_gre.c 	t->parms.i_flags = p.i_flags;
p                1367 net/ipv4/ip_gre.c 	t->parms.o_flags = p.o_flags;
p                1380 net/ipv4/ip_gre.c 	struct ip_tunnel_parm p;
p                1387 net/ipv4/ip_gre.c 	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
p                1391 net/ipv4/ip_gre.c 	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
p                1395 net/ipv4/ip_gre.c 	t->parms.i_flags = p.i_flags;
p                1396 net/ipv4/ip_gre.c 	t->parms.o_flags = p.o_flags;
p                1452 net/ipv4/ip_gre.c 	struct ip_tunnel_parm *p = &t->parms;
p                1453 net/ipv4/ip_gre.c 	__be16 o_flags = p->o_flags;
p                1473 net/ipv4/ip_gre.c 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
p                1475 net/ipv4/ip_gre.c 			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
p                1478 net/ipv4/ip_gre.c 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
p                1479 net/ipv4/ip_gre.c 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
p                1480 net/ipv4/ip_gre.c 	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
p                1481 net/ipv4/ip_gre.c 	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
p                1482 net/ipv4/ip_gre.c 	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
p                1483 net/ipv4/ip_gre.c 	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
p                1485 net/ipv4/ip_gre.c 		       !!(p->iph.frag_off & htons(IP_DF))) ||
p                  59 net/ipv4/ip_tunnel.c static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
p                  62 net/ipv4/ip_tunnel.c 	if (p->i_flags & TUNNEL_KEY) {
p                  64 net/ipv4/ip_tunnel.c 			return key == p->i_key;
p                 831 net/ipv4/ip_tunnel.c 			     struct ip_tunnel_parm *p,
p                 836 net/ipv4/ip_tunnel.c 	t->parms.iph.saddr = p->iph.saddr;
p                 837 net/ipv4/ip_tunnel.c 	t->parms.iph.daddr = p->iph.daddr;
p                 838 net/ipv4/ip_tunnel.c 	t->parms.i_key = p->i_key;
p                 839 net/ipv4/ip_tunnel.c 	t->parms.o_key = p->o_key;
p                 841 net/ipv4/ip_tunnel.c 		memcpy(dev->dev_addr, &p->iph.saddr, 4);
p                 842 net/ipv4/ip_tunnel.c 		memcpy(dev->broadcast, &p->iph.daddr, 4);
p                 846 net/ipv4/ip_tunnel.c 	t->parms.iph.ttl = p->iph.ttl;
p                 847 net/ipv4/ip_tunnel.c 	t->parms.iph.tos = p->iph.tos;
p                 848 net/ipv4/ip_tunnel.c 	t->parms.iph.frag_off = p->iph.frag_off;
p                 850 net/ipv4/ip_tunnel.c 	if (t->parms.link != p->link || t->fwmark != fwmark) {
p                 853 net/ipv4/ip_tunnel.c 		t->parms.link = p->link;
p                 863 net/ipv4/ip_tunnel.c int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
p                 873 net/ipv4/ip_tunnel.c 			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
p                 877 net/ipv4/ip_tunnel.c 		memcpy(p, &t->parms, sizeof(*p));
p                 885 net/ipv4/ip_tunnel.c 		if (p->iph.ttl)
p                 886 net/ipv4/ip_tunnel.c 			p->iph.frag_off |= htons(IP_DF);
p                 887 net/ipv4/ip_tunnel.c 		if (!(p->i_flags & VTI_ISVTI)) {
p                 888 net/ipv4/ip_tunnel.c 			if (!(p->i_flags & TUNNEL_KEY))
p                 889 net/ipv4/ip_tunnel.c 				p->i_key = 0;
p                 890 net/ipv4/ip_tunnel.c 			if (!(p->o_flags & TUNNEL_KEY))
p                 891 net/ipv4/ip_tunnel.c 				p->o_key = 0;
p                 894 net/ipv4/ip_tunnel.c 		t = ip_tunnel_find(itn, p, itn->type);
p                 898 net/ipv4/ip_tunnel.c 				t = ip_tunnel_create(net, itn, p);
p                 915 net/ipv4/ip_tunnel.c 				if (ipv4_is_multicast(p->iph.daddr))
p                 917 net/ipv4/ip_tunnel.c 				else if (p->iph.daddr)
p                 931 net/ipv4/ip_tunnel.c 			ip_tunnel_update(itn, t, dev, p, true, 0);
p                 944 net/ipv4/ip_tunnel.c 			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
p                1115 net/ipv4/ip_tunnel.c 		      struct ip_tunnel_parm *p, __u32 fwmark)
p                1130 net/ipv4/ip_tunnel.c 		if (ip_tunnel_find(itn, p, dev->type))
p                1135 net/ipv4/ip_tunnel.c 	nt->parms = *p;
p                1167 net/ipv4/ip_tunnel.c 			 struct ip_tunnel_parm *p, __u32 fwmark)
p                1177 net/ipv4/ip_tunnel.c 	t = ip_tunnel_find(itn, p, dev->type);
p                1188 net/ipv4/ip_tunnel.c 			if (ipv4_is_multicast(p->iph.daddr))
p                1190 net/ipv4/ip_tunnel.c 			else if (p->iph.daddr)
p                1199 net/ipv4/ip_tunnel.c 	ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU], fwmark);
p                 405 net/ipv4/ip_vti.c 	struct ip_tunnel_parm p;
p                 407 net/ipv4/ip_vti.c 	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
p                 411 net/ipv4/ip_vti.c 		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
p                 412 net/ipv4/ip_vti.c 		    p.iph.ihl != 5)
p                 416 net/ipv4/ip_vti.c 	if (!(p.i_flags & GRE_KEY))
p                 417 net/ipv4/ip_vti.c 		p.i_key = 0;
p                 418 net/ipv4/ip_vti.c 	if (!(p.o_flags & GRE_KEY))
p                 419 net/ipv4/ip_vti.c 		p.o_key = 0;
p                 421 net/ipv4/ip_vti.c 	p.i_flags = VTI_ISVTI;
p                 423 net/ipv4/ip_vti.c 	err = ip_tunnel_ioctl(dev, &p, cmd);
p                 428 net/ipv4/ip_vti.c 		p.i_flags |= GRE_KEY;
p                 429 net/ipv4/ip_vti.c 		p.o_flags |= GRE_KEY;
p                 432 net/ipv4/ip_vti.c 	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
p                 591 net/ipv4/ip_vti.c 	struct ip_tunnel_parm p;
p                 593 net/ipv4/ip_vti.c 	vti_netlink_parms(data, &p, &fwmark);
p                 594 net/ipv4/ip_vti.c 	return ip_tunnel_changelink(dev, tb, &p, fwmark);
p                 618 net/ipv4/ip_vti.c 	struct ip_tunnel_parm *p = &t->parms;
p                 620 net/ipv4/ip_vti.c 	if (nla_put_u32(skb, IFLA_VTI_LINK, p->link) ||
p                 621 net/ipv4/ip_vti.c 	    nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key) ||
p                 622 net/ipv4/ip_vti.c 	    nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key) ||
p                 623 net/ipv4/ip_vti.c 	    nla_put_in_addr(skb, IFLA_VTI_LOCAL, p->iph.saddr) ||
p                 624 net/ipv4/ip_vti.c 	    nla_put_in_addr(skb, IFLA_VTI_REMOTE, p->iph.daddr) ||
p                1340 net/ipv4/ipconfig.c 	struct proc_dir_entry *p;
p                1349 net/ipv4/ipconfig.c 	p = proc_create(pname, 0444, init_net.proc_net, fops);
p                1351 net/ipv4/ipconfig.c 	if (!p)
p                 333 net/ipv4/ipip.c 	struct ip_tunnel_parm p;
p                 335 net/ipv4/ipip.c 	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
p                 339 net/ipv4/ipip.c 		if (p.iph.version != 4 ||
p                 340 net/ipv4/ipip.c 		    !ipip_tunnel_ioctl_verify_protocol(p.iph.protocol) ||
p                 341 net/ipv4/ipip.c 		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
p                 345 net/ipv4/ipip.c 	p.i_key = p.o_key = 0;
p                 346 net/ipv4/ipip.c 	p.i_flags = p.o_flags = 0;
p                 347 net/ipv4/ipip.c 	err = ip_tunnel_ioctl(dev, &p, cmd);
p                 351 net/ipv4/ipip.c 	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
p                 499 net/ipv4/ipip.c 	struct ip_tunnel_parm p;
p                 510 net/ipv4/ipip.c 	ipip_netlink_parms(data, &p, &t->collect_md, &fwmark);
p                 511 net/ipv4/ipip.c 	return ip_tunnel_newlink(dev, tb, &p, fwmark);
p                 519 net/ipv4/ipip.c 	struct ip_tunnel_parm p;
p                 531 net/ipv4/ipip.c 	ipip_netlink_parms(data, &p, &collect_md, &fwmark);
p                 535 net/ipv4/ipip.c 	if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) ||
p                 536 net/ipv4/ipip.c 	    (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr))
p                 539 net/ipv4/ipip.c 	return ip_tunnel_changelink(dev, tb, &p, fwmark);
p                 430 net/ipv4/ipmr.c 		struct ip_tunnel_parm p;
p                 432 net/ipv4/ipmr.c 		memset(&p, 0, sizeof(p));
p                 433 net/ipv4/ipmr.c 		p.iph.daddr = v->vifc_rmt_addr.s_addr;
p                 434 net/ipv4/ipmr.c 		p.iph.saddr = v->vifc_lcl_addr.s_addr;
p                 435 net/ipv4/ipmr.c 		p.iph.version = 4;
p                 436 net/ipv4/ipmr.c 		p.iph.ihl = 5;
p                 437 net/ipv4/ipmr.c 		p.iph.protocol = IPPROTO_IPIP;
p                 438 net/ipv4/ipmr.c 		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
p                 439 net/ipv4/ipmr.c 		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
p                 478 net/ipv4/ipmr.c 		struct ip_tunnel_parm p;
p                 480 net/ipv4/ipmr.c 		memset(&p, 0, sizeof(p));
p                 481 net/ipv4/ipmr.c 		p.iph.daddr = v->vifc_rmt_addr.s_addr;
p                 482 net/ipv4/ipmr.c 		p.iph.saddr = v->vifc_lcl_addr.s_addr;
p                 483 net/ipv4/ipmr.c 		p.iph.version = 4;
p                 484 net/ipv4/ipmr.c 		p.iph.ihl = 5;
p                 485 net/ipv4/ipmr.c 		p.iph.protocol = IPPROTO_IPIP;
p                 486 net/ipv4/ipmr.c 		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
p                 487 net/ipv4/ipmr.c 		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
p                 501 net/ipv4/ipmr.c 		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
p                 299 net/ipv4/netfilter/nf_log_ipv4.c 		const unsigned char *p = skb_mac_header(skb);
p                 302 net/ipv4/netfilter/nf_log_ipv4.c 		nf_log_buf_add(m, "%02x", *p++);
p                 303 net/ipv4/netfilter/nf_log_ipv4.c 		for (i = 1; i < dev->hard_header_len; i++, p++)
p                 304 net/ipv4/netfilter/nf_log_ipv4.c 			nf_log_buf_add(m, ":%02x", *p);
p                 186 net/ipv4/nexthop.c 	struct nexthop_grp *p;
p                 187 net/ipv4/nexthop.c 	size_t len = nhg->num_nh * sizeof(*p);
p                 202 net/ipv4/nexthop.c 	p = nla_data(nla);
p                 204 net/ipv4/nexthop.c 		p->id = nhg->nh_entries[i].nh->id;
p                 205 net/ipv4/nexthop.c 		p->weight = nhg->nh_entries[i].weight - 1;
p                 206 net/ipv4/nexthop.c 		p += 1;
p                1468 net/ipv4/route.c 	struct rtable *orig, *prev, **p;
p                1472 net/ipv4/route.c 		p = (struct rtable **)&nhc->nhc_rth_input;
p                1474 net/ipv4/route.c 		p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
p                1476 net/ipv4/route.c 	orig = *p;
p                1482 net/ipv4/route.c 	prev = cmpxchg(p, orig, rt);
p                2532 net/ipv4/tcp.c 	struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
p                2535 net/ipv4/tcp.c 	while (p) {
p                2536 net/ipv4/tcp.c 		struct sk_buff *skb = rb_to_skb(p);
p                2538 net/ipv4/tcp.c 		p = rb_next(p);
p                1606 net/ipv4/tcp_input.c 	struct rb_node *parent, **p = &sk->tcp_rtx_queue.rb_node;
p                1609 net/ipv4/tcp_input.c 	while (*p) {
p                1610 net/ipv4/tcp_input.c 		parent = *p;
p                1613 net/ipv4/tcp_input.c 			p = &parent->rb_left;
p                1617 net/ipv4/tcp_input.c 			p = &parent->rb_right;
p                4484 net/ipv4/tcp_input.c 	struct rb_node *p;
p                4486 net/ipv4/tcp_input.c 	p = rb_first(&tp->out_of_order_queue);
p                4487 net/ipv4/tcp_input.c 	while (p) {
p                4488 net/ipv4/tcp_input.c 		skb = rb_to_skb(p);
p                4498 net/ipv4/tcp_input.c 		p = rb_next(p);
p                4548 net/ipv4/tcp_input.c 	struct rb_node **p, *parent;
p                4570 net/ipv4/tcp_input.c 	p = &tp->out_of_order_queue.rb_node;
p                4578 net/ipv4/tcp_input.c 		rb_link_node(&skb->rbnode, NULL, p);
p                4598 net/ipv4/tcp_input.c 		p = &parent->rb_right;
p                4604 net/ipv4/tcp_input.c 	while (*p) {
p                4605 net/ipv4/tcp_input.c 		parent = *p;
p                4608 net/ipv4/tcp_input.c 			p = &parent->rb_left;
p                4642 net/ipv4/tcp_input.c 		p = &parent->rb_right;
p                4646 net/ipv4/tcp_input.c 	rb_link_node(&skb->rbnode, parent, p);
p                4888 net/ipv4/tcp_input.c 	struct rb_node **p = &root->rb_node;
p                4892 net/ipv4/tcp_input.c 	while (*p) {
p                4893 net/ipv4/tcp_input.c 		parent = *p;
p                4896 net/ipv4/tcp_input.c 			p = &parent->rb_left;
p                4898 net/ipv4/tcp_input.c 			p = &parent->rb_right;
p                4900 net/ipv4/tcp_input.c 	rb_link_node(&skb->rbnode, parent, p);
p                 143 net/ipv4/tcp_metrics.c #define deref_locked(p)	\
p                 144 net/ipv4/tcp_metrics.c 	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))
p                 183 net/ipv4/tcp_offload.c 	struct sk_buff *p;
p                 220 net/ipv4/tcp_offload.c 	list_for_each_entry(p, head, list) {
p                 221 net/ipv4/tcp_offload.c 		if (!NAPI_GRO_CB(p)->same_flow)
p                 224 net/ipv4/tcp_offload.c 		th2 = tcp_hdr(p);
p                 227 net/ipv4/tcp_offload.c 			NAPI_GRO_CB(p)->same_flow = 0;
p                 233 net/ipv4/tcp_offload.c 	p = NULL;
p                 238 net/ipv4/tcp_offload.c 	flush = NAPI_GRO_CB(p)->flush;
p                 251 net/ipv4/tcp_offload.c 	if (NAPI_GRO_CB(p)->flush_id != 1 ||
p                 252 net/ipv4/tcp_offload.c 	    NAPI_GRO_CB(p)->count != 1 ||
p                 253 net/ipv4/tcp_offload.c 	    !NAPI_GRO_CB(p)->is_atomic)
p                 254 net/ipv4/tcp_offload.c 		flush |= NAPI_GRO_CB(p)->flush_id;
p                 256 net/ipv4/tcp_offload.c 		NAPI_GRO_CB(p)->is_atomic = false;
p                 258 net/ipv4/tcp_offload.c 	mss = skb_shinfo(p)->gso_size;
p                 261 net/ipv4/tcp_offload.c 	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
p                 263 net/ipv4/tcp_offload.c 	flush |= p->decrypted ^ skb->decrypted;
p                 266 net/ipv4/tcp_offload.c 	if (flush || skb_gro_receive(p, skb)) {
p                 279 net/ipv4/tcp_offload.c 	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
p                 280 net/ipv4/tcp_offload.c 		pp = p;
p                 529 net/ipv4/tcp_output.c 		u8 *p = (u8 *)ptr;
p                 536 net/ipv4/tcp_output.c 			p += TCPOLEN_EXP_FASTOPEN_BASE;
p                 539 net/ipv4/tcp_output.c 			*p++ = TCPOPT_FASTOPEN;
p                 540 net/ipv4/tcp_output.c 			*p++ = len;
p                 543 net/ipv4/tcp_output.c 		memcpy(p, foc->val, foc->len);
p                 545 net/ipv4/tcp_output.c 			p[foc->len] = TCPOPT_NOP;
p                 546 net/ipv4/tcp_output.c 			p[foc->len + 1] = TCPOPT_NOP;
p                 355 net/ipv4/udp_offload.c 	struct sk_buff *p;
p                 374 net/ipv4/udp_offload.c 	list_for_each_entry(p, head, list) {
p                 375 net/ipv4/udp_offload.c 		if (!NAPI_GRO_CB(p)->same_flow)
p                 378 net/ipv4/udp_offload.c 		uh2 = udp_hdr(p);
p                 382 net/ipv4/udp_offload.c 			NAPI_GRO_CB(p)->same_flow = 0;
p                 392 net/ipv4/udp_offload.c 		if (ulen > ntohs(uh2->len) || skb_gro_receive(p, skb) ||
p                 394 net/ipv4/udp_offload.c 		    NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
p                 395 net/ipv4/udp_offload.c 			pp = p;
p                 410 net/ipv4/udp_offload.c 	struct sk_buff *p;
p                 440 net/ipv4/udp_offload.c 	list_for_each_entry(p, head, list) {
p                 441 net/ipv4/udp_offload.c 		if (!NAPI_GRO_CB(p)->same_flow)
p                 444 net/ipv4/udp_offload.c 		uh2 = (struct udphdr   *)(p->data + off);
p                 451 net/ipv4/udp_offload.c 			NAPI_GRO_CB(p)->same_flow = 0;
p                 840 net/ipv6/addrconf.c static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
p                 849 net/ipv6/addrconf.c 	old = *p;
p                 850 net/ipv6/addrconf.c 	*p = newf;
p                 852 net/ipv6/addrconf.c 	if (p == &net->ipv6.devconf_dflt->forwarding) {
p                 862 net/ipv6/addrconf.c 	if (p == &net->ipv6.devconf_all->forwarding) {
p                 908 net/ipv6/addrconf.c static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
p                 917 net/ipv6/addrconf.c 	old = *p;
p                 918 net/ipv6/addrconf.c 	*p = newf;
p                 920 net/ipv6/addrconf.c 	if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
p                 931 net/ipv6/addrconf.c 	if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
p                 974 net/ipv6/addrconf.c 	struct list_head *p;
p                 981 net/ipv6/addrconf.c 	list_for_each(p, &idev->addr_list) {
p                 983 net/ipv6/addrconf.c 			= list_entry(p, struct inet6_ifaddr, if_list);
p                 988 net/ipv6/addrconf.c 	list_add_tail_rcu(&ifp->if_list, p);
p                2837 net/ipv6/addrconf.c 		struct ip_tunnel_parm p;
p                2843 net/ipv6/addrconf.c 		memset(&p, 0, sizeof(p));
p                2844 net/ipv6/addrconf.c 		p.iph.daddr = ireq.ifr6_addr.s6_addr32[3];
p                2845 net/ipv6/addrconf.c 		p.iph.saddr = 0;
p                2846 net/ipv6/addrconf.c 		p.iph.version = 4;
p                2847 net/ipv6/addrconf.c 		p.iph.ihl = 5;
p                2848 net/ipv6/addrconf.c 		p.iph.protocol = IPPROTO_IPV6;
p                2849 net/ipv6/addrconf.c 		p.iph.ttl = 64;
p                2850 net/ipv6/addrconf.c 		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
p                2863 net/ipv6/addrconf.c 			dev = __dev_get_by_name(net, p.name);
p                4249 net/ipv6/addrconf.c 	struct seq_net_private p;
p                4259 net/ipv6/addrconf.c 	int p = 0;
p                4273 net/ipv6/addrconf.c 			if (p < state->offset) {
p                4274 net/ipv6/addrconf.c 				p++;
p                4282 net/ipv6/addrconf.c 		p = 0;
p                6128 net/ipv6/addrconf.c static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
p                6137 net/ipv6/addrconf.c 	old = *p;
p                6138 net/ipv6/addrconf.c 	*p = newf;
p                6140 net/ipv6/addrconf.c 	if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
p                6145 net/ipv6/addrconf.c 	if (p == &net->ipv6.devconf_all->disable_ipv6) {
p                6863 net/ipv6/addrconf.c 		struct inet6_dev *idev, struct ipv6_devconf *p)
p                6874 net/ipv6/addrconf.c 		table[i].data += (char *)p - (char *)&ipv6_devconf;
p                6887 net/ipv6/addrconf.c 	p->sysctl_header = register_net_sysctl(net, path, table);
p                6888 net/ipv6/addrconf.c 	if (!p->sysctl_header)
p                6898 net/ipv6/addrconf.c 				     ifindex, p);
p                6908 net/ipv6/addrconf.c 					 struct ipv6_devconf *p, int ifindex)
p                6912 net/ipv6/addrconf.c 	if (!p->sysctl_header)
p                6915 net/ipv6/addrconf.c 	table = p->sysctl_header->ctl_table_arg;
p                6916 net/ipv6/addrconf.c 	unregister_net_sysctl_table(p->sysctl_header);
p                6917 net/ipv6/addrconf.c 	p->sysctl_header = NULL;
p                 114 net/ipv6/addrlabel.c static bool __ip6addrlbl_match(const struct ip6addrlbl_entry *p,
p                 118 net/ipv6/addrlabel.c 	if (p->ifindex && p->ifindex != ifindex)
p                 120 net/ipv6/addrlabel.c 	if (p->addrtype && p->addrtype != addrtype)
p                 122 net/ipv6/addrlabel.c 	if (!ipv6_prefix_equal(addr, &p->prefix, p->prefixlen))
p                 131 net/ipv6/addrlabel.c 	struct ip6addrlbl_entry *p;
p                 133 net/ipv6/addrlabel.c 	hlist_for_each_entry_rcu(p, &net->ipv6.ip6addrlbl_table.head, list) {
p                 134 net/ipv6/addrlabel.c 		if (__ip6addrlbl_match(p, addr, type, ifindex))
p                 135 net/ipv6/addrlabel.c 			return p;
p                 144 net/ipv6/addrlabel.c 	struct ip6addrlbl_entry *p;
p                 149 net/ipv6/addrlabel.c 	p = __ipv6_addr_label(net, addr, type, ifindex);
p                 150 net/ipv6/addrlabel.c 	label = p ? p->label : IPV6_ADDR_LABEL_DEFAULT;
p                 206 net/ipv6/addrlabel.c 	struct ip6addrlbl_entry *last = NULL, *p = NULL;
p                 213 net/ipv6/addrlabel.c 	hlist_for_each_entry_safe(p, n,	&net->ipv6.ip6addrlbl_table.head, list) {
p                 214 net/ipv6/addrlabel.c 		if (p->prefixlen == newp->prefixlen &&
p                 215 net/ipv6/addrlabel.c 		    p->ifindex == newp->ifindex &&
p                 216 net/ipv6/addrlabel.c 		    ipv6_addr_equal(&p->prefix, &newp->prefix)) {
p                 221 net/ipv6/addrlabel.c 			hlist_replace_rcu(&p->list, &newp->list);
p                 222 net/ipv6/addrlabel.c 			kfree_rcu(p, rcu);
p                 224 net/ipv6/addrlabel.c 		} else if ((p->prefixlen == newp->prefixlen && !p->ifindex) ||
p                 225 net/ipv6/addrlabel.c 			   (p->prefixlen < newp->prefixlen)) {
p                 226 net/ipv6/addrlabel.c 			hlist_add_before_rcu(&newp->list, &p->list);
p                 229 net/ipv6/addrlabel.c 		last = p;
p                 269 net/ipv6/addrlabel.c 	struct ip6addrlbl_entry *p = NULL;
p                 276 net/ipv6/addrlabel.c 	hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) {
p                 277 net/ipv6/addrlabel.c 		if (p->prefixlen == prefixlen &&
p                 278 net/ipv6/addrlabel.c 		    p->ifindex == ifindex &&
p                 279 net/ipv6/addrlabel.c 		    ipv6_addr_equal(&p->prefix, prefix)) {
p                 280 net/ipv6/addrlabel.c 			hlist_del_rcu(&p->list);
p                 281 net/ipv6/addrlabel.c 			kfree_rcu(p, rcu);
p                 332 net/ipv6/addrlabel.c 	struct ip6addrlbl_entry *p = NULL;
p                 337 net/ipv6/addrlabel.c 	hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) {
p                 338 net/ipv6/addrlabel.c 		hlist_del_rcu(&p->list);
p                 339 net/ipv6/addrlabel.c 		kfree_rcu(p, rcu);
p                 439 net/ipv6/addrlabel.c 			   struct ip6addrlbl_entry *p,
p                 449 net/ipv6/addrlabel.c 	ip6addrlbl_putmsg(nlh, p->prefixlen, p->ifindex, lseq);
p                 451 net/ipv6/addrlabel.c 	if (nla_put_in6_addr(skb, IFAL_ADDRESS, &p->prefix) < 0 ||
p                 452 net/ipv6/addrlabel.c 	    nla_put_u32(skb, IFAL_LABEL, p->label) < 0) {
p                 490 net/ipv6/addrlabel.c 	struct ip6addrlbl_entry *p;
p                 501 net/ipv6/addrlabel.c 	hlist_for_each_entry_rcu(p, &net->ipv6.ip6addrlbl_table.head, list) {
p                 503 net/ipv6/addrlabel.c 			err = ip6addrlbl_fill(skb, p,
p                 579 net/ipv6/addrlabel.c 	struct ip6addrlbl_entry *p;
p                 607 net/ipv6/addrlabel.c 	p = __ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index);
p                 609 net/ipv6/addrlabel.c 	if (p)
p                 610 net/ipv6/addrlabel.c 		err = ip6addrlbl_fill(skb, p, lseq,
p                 667 net/ipv6/af_inet6.c int inet6_register_protosw(struct inet_protosw *p)
p                 672 net/ipv6/af_inet6.c 	int protocol = p->protocol;
p                 678 net/ipv6/af_inet6.c 	if (p->type >= SOCK_MAX)
p                 684 net/ipv6/af_inet6.c 	last_perm = &inetsw6[p->type];
p                 685 net/ipv6/af_inet6.c 	list_for_each(lh, &inetsw6[p->type]) {
p                 706 net/ipv6/af_inet6.c 	list_add_rcu(&p->list, last_perm);
p                 718 net/ipv6/af_inet6.c 	       p->type);
p                 724 net/ipv6/af_inet6.c inet6_unregister_protosw(struct inet_protosw *p)
p                 726 net/ipv6/af_inet6.c 	if (INET_PROTOSW_PERMANENT & p->flags) {
p                 728 net/ipv6/af_inet6.c 		       p->protocol);
p                 731 net/ipv6/af_inet6.c 		list_del_rcu(&p->list);
p                 471 net/ipv6/anycast.c 	struct seq_net_private p;
p                 762 net/ipv6/esp6.c 	char *p;
p                 805 net/ipv6/esp6.c 	p = key;
p                 806 net/ipv6/esp6.c 	rta = (void *)p;
p                 810 net/ipv6/esp6.c 	p += RTA_SPACE(sizeof(*param));
p                 815 net/ipv6/esp6.c 		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
p                 816 net/ipv6/esp6.c 		p += (x->aalg->alg_key_len + 7) / 8;
p                 838 net/ipv6/esp6.c 	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
p                1018 net/ipv6/exthdrs.c 			      int newtype, char **p)
p                1026 net/ipv6/exthdrs.c 	memcpy(*p, src, ipv6_optlen(src));
p                1027 net/ipv6/exthdrs.c 	*dest = (struct ipv6_opt_hdr *)*p;
p                1028 net/ipv6/exthdrs.c 	*p += CMSG_ALIGN(ipv6_optlen(*dest));
p                1057 net/ipv6/exthdrs.c 	char *p;
p                1085 net/ipv6/exthdrs.c 	p = (char *)(opt2 + 1);
p                1089 net/ipv6/exthdrs.c 			  newopt, newtype, &p);
p                1092 net/ipv6/exthdrs.c 			  newopt, newtype, &p);
p                1096 net/ipv6/exthdrs.c 			  newopt, newtype, &p);
p                1099 net/ipv6/exthdrs.c 			  newopt, newtype, &p);
p                  98 net/ipv6/ila/ila.h void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p,
p                 101 net/ipv6/ila/ila.h void ila_init_saved_csum(struct ila_params *p);
p                  16 net/ipv6/ila/ila_common.c void ila_init_saved_csum(struct ila_params *p)
p                  18 net/ipv6/ila/ila_common.c 	if (!p->locator_match.v64)
p                  21 net/ipv6/ila/ila_common.c 	p->csum_diff = compute_csum_diff8(
p                  22 net/ipv6/ila/ila_common.c 				(__be32 *)&p->locator,
p                  23 net/ipv6/ila/ila_common.c 				(__be32 *)&p->locator_match);
p                  26 net/ipv6/ila/ila_common.c static __wsum get_csum_diff_iaddr(struct ila_addr *iaddr, struct ila_params *p)
p                  28 net/ipv6/ila/ila_common.c 	if (p->locator_match.v64)
p                  29 net/ipv6/ila/ila_common.c 		return p->csum_diff;
p                  31 net/ipv6/ila/ila_common.c 		return compute_csum_diff8((__be32 *)&p->locator,
p                  35 net/ipv6/ila/ila_common.c static __wsum get_csum_diff(struct ipv6hdr *ip6h, struct ila_params *p)
p                  37 net/ipv6/ila/ila_common.c 	return get_csum_diff_iaddr(ila_a2i(&ip6h->daddr), p);
p                  41 net/ipv6/ila/ila_common.c 				    struct ila_params *p)
p                  46 net/ipv6/ila/ila_common.c 	diff = get_csum_diff_iaddr(iaddr, p);
p                  64 net/ipv6/ila/ila_common.c 				      struct ila_params *p)
p                  69 net/ipv6/ila/ila_common.c 	diff = get_csum_diff_iaddr(iaddr, p);
p                  75 net/ipv6/ila/ila_common.c 				      struct ila_params *p)
p                  87 net/ipv6/ila/ila_common.c 			diff = get_csum_diff(ip6h, p);
p                  98 net/ipv6/ila/ila_common.c 				diff = get_csum_diff(ip6h, p);
p                 112 net/ipv6/ila/ila_common.c 			diff = get_csum_diff(ip6h, p);
p                 120 net/ipv6/ila/ila_common.c void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p,
p                 126 net/ipv6/ila/ila_common.c 	switch (p->csum_mode) {
p                 128 net/ipv6/ila/ila_common.c 		ila_csum_adjust_transport(skb, p);
p                 144 net/ipv6/ila/ila_common.c 		ila_csum_do_neutral_fmt(iaddr, p);
p                 147 net/ipv6/ila/ila_common.c 		ila_csum_do_neutral_nofmt(iaddr, p);
p                 154 net/ipv6/ila/ila_common.c 	iaddr->loc = p->locator;
p                  20 net/ipv6/ila/ila_lwt.c 	struct ila_params p;
p                  35 net/ipv6/ila/ila_lwt.c 	return &ila_lwt_lwtunnel(lwt)->p;
p                 134 net/ipv6/ila/ila_lwt.c 	struct ila_params *p;
p                 232 net/ipv6/ila/ila_lwt.c 	p = ila_params_lwtunnel(newts);
p                 234 net/ipv6/ila/ila_lwt.c 	p->csum_mode = csum_mode;
p                 235 net/ipv6/ila/ila_lwt.c 	p->ident_type = ident_type;
p                 236 net/ipv6/ila/ila_lwt.c 	p->locator.v64 = (__force __be64)nla_get_u64(tb[ILA_ATTR_LOCATOR]);
p                 241 net/ipv6/ila/ila_lwt.c 	p->locator_match = iaddr->loc;
p                 243 net/ipv6/ila/ila_lwt.c 	ila_init_saved_csum(p);
p                 265 net/ipv6/ila/ila_lwt.c 	struct ila_params *p = ila_params_lwtunnel(lwtstate);
p                 268 net/ipv6/ila/ila_lwt.c 	if (nla_put_u64_64bit(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator.v64,
p                 272 net/ipv6/ila/ila_lwt.c 	if (nla_put_u8(skb, ILA_ATTR_CSUM_MODE, (__force u8)p->csum_mode))
p                 275 net/ipv6/ila/ila_lwt.c 	if (nla_put_u8(skb, ILA_ATTR_IDENT_TYPE, (__force u8)p->ident_type))
p                 348 net/ipv6/ila/ila_xlat.c 	struct ila_xlat_params p;
p                 351 net/ipv6/ila/ila_xlat.c 	err = parse_nl_config(info, &p);
p                 355 net/ipv6/ila/ila_xlat.c 	return ila_add_mapping(net, &p);
p                 665 net/ipv6/ip6_fib.c 		struct dst_metrics *p = kzalloc(sizeof(*p), GFP_ATOMIC);
p                 667 net/ipv6/ip6_fib.c 		if (!p)
p                 670 net/ipv6/ip6_fib.c 		refcount_set(&p->refcnt, 1);
p                 671 net/ipv6/ip6_fib.c 		f6i->fib6_metrics = p;
p                 720 net/ipv6/ip6_flowlabel.c 	struct seq_net_private p;
p                 249 net/ipv6/ip6_gre.c 		const struct __ip6_tnl_parm *p)
p                 251 net/ipv6/ip6_gre.c 	const struct in6_addr *remote = &p->raddr;
p                 252 net/ipv6/ip6_gre.c 	const struct in6_addr *local = &p->laddr;
p                 253 net/ipv6/ip6_gre.c 	unsigned int h = HASH_KEY(p->i_key);
p                1072 net/ipv6/ip6_gre.c 	struct __ip6_tnl_parm *p = &t->parms;
p                1076 net/ipv6/ip6_gre.c 		memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
p                1077 net/ipv6/ip6_gre.c 		memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
p                1081 net/ipv6/ip6_gre.c 	fl6->saddr = p->laddr;
p                1082 net/ipv6/ip6_gre.c 	fl6->daddr = p->raddr;
p                1083 net/ipv6/ip6_gre.c 	fl6->flowi6_oif = p->link;
p                1087 net/ipv6/ip6_gre.c 	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
p                1088 net/ipv6/ip6_gre.c 		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
p                1089 net/ipv6/ip6_gre.c 	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
p                1090 net/ipv6/ip6_gre.c 		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
p                1092 net/ipv6/ip6_gre.c 	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
p                1093 net/ipv6/ip6_gre.c 	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
p                1095 net/ipv6/ip6_gre.c 	if (p->flags&IP6_TNL_F_CAP_XMIT &&
p                1096 net/ipv6/ip6_gre.c 			p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
p                1105 net/ipv6/ip6_gre.c 	const struct __ip6_tnl_parm *p = &t->parms;
p                1108 net/ipv6/ip6_gre.c 	if (p->flags & IP6_TNL_F_CAP_XMIT) {
p                1109 net/ipv6/ip6_gre.c 		int strict = (ipv6_addr_type(&p->raddr) &
p                1113 net/ipv6/ip6_gre.c 						 &p->raddr, &p->laddr,
p                1114 net/ipv6/ip6_gre.c 						 p->link, NULL, strict);
p                1157 net/ipv6/ip6_gre.c 				     const struct __ip6_tnl_parm *p)
p                1159 net/ipv6/ip6_gre.c 	t->parms.laddr = p->laddr;
p                1160 net/ipv6/ip6_gre.c 	t->parms.raddr = p->raddr;
p                1161 net/ipv6/ip6_gre.c 	t->parms.flags = p->flags;
p                1162 net/ipv6/ip6_gre.c 	t->parms.hop_limit = p->hop_limit;
p                1163 net/ipv6/ip6_gre.c 	t->parms.encap_limit = p->encap_limit;
p                1164 net/ipv6/ip6_gre.c 	t->parms.flowinfo = p->flowinfo;
p                1165 net/ipv6/ip6_gre.c 	t->parms.link = p->link;
p                1166 net/ipv6/ip6_gre.c 	t->parms.proto = p->proto;
p                1167 net/ipv6/ip6_gre.c 	t->parms.i_key = p->i_key;
p                1168 net/ipv6/ip6_gre.c 	t->parms.o_key = p->o_key;
p                1169 net/ipv6/ip6_gre.c 	t->parms.i_flags = p->i_flags;
p                1170 net/ipv6/ip6_gre.c 	t->parms.o_flags = p->o_flags;
p                1171 net/ipv6/ip6_gre.c 	t->parms.fwmark = p->fwmark;
p                1172 net/ipv6/ip6_gre.c 	t->parms.erspan_ver = p->erspan_ver;
p                1173 net/ipv6/ip6_gre.c 	t->parms.index = p->index;
p                1174 net/ipv6/ip6_gre.c 	t->parms.dir = p->dir;
p                1175 net/ipv6/ip6_gre.c 	t->parms.hwid = p->hwid;
p                1179 net/ipv6/ip6_gre.c static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
p                1182 net/ipv6/ip6_gre.c 	ip6gre_tnl_copy_tnl_parm(t, p);
p                1187 net/ipv6/ip6_gre.c static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
p                1190 net/ipv6/ip6_gre.c 	p->laddr = u->laddr;
p                1191 net/ipv6/ip6_gre.c 	p->raddr = u->raddr;
p                1192 net/ipv6/ip6_gre.c 	p->flags = u->flags;
p                1193 net/ipv6/ip6_gre.c 	p->hop_limit = u->hop_limit;
p                1194 net/ipv6/ip6_gre.c 	p->encap_limit = u->encap_limit;
p                1195 net/ipv6/ip6_gre.c 	p->flowinfo = u->flowinfo;
p                1196 net/ipv6/ip6_gre.c 	p->link = u->link;
p                1197 net/ipv6/ip6_gre.c 	p->i_key = u->i_key;
p                1198 net/ipv6/ip6_gre.c 	p->o_key = u->o_key;
p                1199 net/ipv6/ip6_gre.c 	p->i_flags = gre_flags_to_tnl_flags(u->i_flags);
p                1200 net/ipv6/ip6_gre.c 	p->o_flags = gre_flags_to_tnl_flags(u->o_flags);
p                1201 net/ipv6/ip6_gre.c 	memcpy(p->name, u->name, sizeof(u->name));
p                1205 net/ipv6/ip6_gre.c 	const struct __ip6_tnl_parm *p)
p                1208 net/ipv6/ip6_gre.c 	u->laddr = p->laddr;
p                1209 net/ipv6/ip6_gre.c 	u->raddr = p->raddr;
p                1210 net/ipv6/ip6_gre.c 	u->flags = p->flags;
p                1211 net/ipv6/ip6_gre.c 	u->hop_limit = p->hop_limit;
p                1212 net/ipv6/ip6_gre.c 	u->encap_limit = p->encap_limit;
p                1213 net/ipv6/ip6_gre.c 	u->flowinfo = p->flowinfo;
p                1214 net/ipv6/ip6_gre.c 	u->link = p->link;
p                1215 net/ipv6/ip6_gre.c 	u->i_key = p->i_key;
p                1216 net/ipv6/ip6_gre.c 	u->o_key = p->o_key;
p                1217 net/ipv6/ip6_gre.c 	u->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
p                1218 net/ipv6/ip6_gre.c 	u->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
p                1219 net/ipv6/ip6_gre.c 	memcpy(u->name, p->name, sizeof(u->name));
p                1226 net/ipv6/ip6_gre.c 	struct ip6_tnl_parm2 p;
p                1237 net/ipv6/ip6_gre.c 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
p                1241 net/ipv6/ip6_gre.c 			ip6gre_tnl_parm_from_user(&p1, &p);
p                1246 net/ipv6/ip6_gre.c 		memset(&p, 0, sizeof(p));
p                1247 net/ipv6/ip6_gre.c 		ip6gre_tnl_parm_to_user(&p, &t->parms);
p                1248 net/ipv6/ip6_gre.c 		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
p                1259 net/ipv6/ip6_gre.c 		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
p                1263 net/ipv6/ip6_gre.c 		if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
p                1266 net/ipv6/ip6_gre.c 		if (!(p.i_flags&GRE_KEY))
p                1267 net/ipv6/ip6_gre.c 			p.i_key = 0;
p                1268 net/ipv6/ip6_gre.c 		if (!(p.o_flags&GRE_KEY))
p                1269 net/ipv6/ip6_gre.c 			p.o_key = 0;
p                1271 net/ipv6/ip6_gre.c 		ip6gre_tnl_parm_from_user(&p1, &p);
p                1294 net/ipv6/ip6_gre.c 			memset(&p, 0, sizeof(p));
p                1295 net/ipv6/ip6_gre.c 			ip6gre_tnl_parm_to_user(&p, &t->parms);
p                1296 net/ipv6/ip6_gre.c 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
p                1309 net/ipv6/ip6_gre.c 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
p                1312 net/ipv6/ip6_gre.c 			ip6gre_tnl_parm_from_user(&p1, &p);
p                1339 net/ipv6/ip6_gre.c 	__be16 *p;
p                1350 net/ipv6/ip6_gre.c 	p = (__be16 *)(ipv6h + 1);
p                1351 net/ipv6/ip6_gre.c 	p[0] = t->parms.o_flags;
p                1352 net/ipv6/ip6_gre.c 	p[1] = htons(type);
p                2039 net/ipv6/ip6_gre.c 	struct __ip6_tnl_parm p;
p                2041 net/ipv6/ip6_gre.c 	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
p                2047 net/ipv6/ip6_gre.c 	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
p                2107 net/ipv6/ip6_gre.c 	struct __ip6_tnl_parm *p = &t->parms;
p                2108 net/ipv6/ip6_gre.c 	__be16 o_flags = p->o_flags;
p                2110 net/ipv6/ip6_gre.c 	if (p->erspan_ver == 1 || p->erspan_ver == 2) {
p                2111 net/ipv6/ip6_gre.c 		if (!p->collect_md)
p                2114 net/ipv6/ip6_gre.c 		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
p                2117 net/ipv6/ip6_gre.c 		if (p->erspan_ver == 1) {
p                2118 net/ipv6/ip6_gre.c 			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
p                2121 net/ipv6/ip6_gre.c 			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
p                2123 net/ipv6/ip6_gre.c 			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
p                2128 net/ipv6/ip6_gre.c 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
p                2130 net/ipv6/ip6_gre.c 			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
p                2133 net/ipv6/ip6_gre.c 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
p                2134 net/ipv6/ip6_gre.c 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
p                2135 net/ipv6/ip6_gre.c 	    nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
p                2136 net/ipv6/ip6_gre.c 	    nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) ||
p                2137 net/ipv6/ip6_gre.c 	    nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
p                2138 net/ipv6/ip6_gre.c 	    nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
p                2139 net/ipv6/ip6_gre.c 	    nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
p                2140 net/ipv6/ip6_gre.c 	    nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
p                2141 net/ipv6/ip6_gre.c 	    nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
p                2154 net/ipv6/ip6_gre.c 	if (p->collect_md) {
p                2240 net/ipv6/ip6_gre.c 				const struct __ip6_tnl_parm *p, int set_mtu)
p                2242 net/ipv6/ip6_gre.c 	ip6gre_tnl_copy_tnl_parm(t, p);
p                2252 net/ipv6/ip6_gre.c 	struct __ip6_tnl_parm p;
p                2255 net/ipv6/ip6_gre.c 	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
p                2259 net/ipv6/ip6_gre.c 	ip6erspan_set_version(data, &p);
p                2262 net/ipv6/ip6_gre.c 	ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
p                 189 net/ipv6/ip6_offload.c 	struct sk_buff *p;
p                 235 net/ipv6/ip6_offload.c 	list_for_each_entry(p, head, list) {
p                 239 net/ipv6/ip6_offload.c 		if (!NAPI_GRO_CB(p)->same_flow)
p                 242 net/ipv6/ip6_offload.c 		iph2 = (struct ipv6hdr *)(p->data + off);
p                 256 net/ipv6/ip6_offload.c 			NAPI_GRO_CB(p)->same_flow = 0;
p                 265 net/ipv6/ip6_offload.c 		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
p                 266 net/ipv6/ip6_offload.c 		NAPI_GRO_CB(p)->flush |= flush;
p                 272 net/ipv6/ip6_offload.c 			NAPI_GRO_CB(p)->flush_id = 0;
p                 191 net/ipv6/ip6_tunnel.c ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
p                 193 net/ipv6/ip6_tunnel.c 	const struct in6_addr *remote = &p->raddr;
p                 194 net/ipv6/ip6_tunnel.c 	const struct in6_addr *local = &p->laddr;
p                 290 net/ipv6/ip6_tunnel.c static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
p                 297 net/ipv6/ip6_tunnel.c 	if (p->name[0]) {
p                 298 net/ipv6/ip6_tunnel.c 		if (!dev_valid_name(p->name))
p                 300 net/ipv6/ip6_tunnel.c 		strlcpy(name, p->name, IFNAMSIZ);
p                 313 net/ipv6/ip6_tunnel.c 	t->parms = *p;
p                 342 net/ipv6/ip6_tunnel.c 		struct __ip6_tnl_parm *p, int create)
p                 344 net/ipv6/ip6_tunnel.c 	const struct in6_addr *remote = &p->raddr;
p                 345 net/ipv6/ip6_tunnel.c 	const struct in6_addr *local = &p->laddr;
p                 350 net/ipv6/ip6_tunnel.c 	for (tp = ip6_tnl_bucket(ip6n, p);
p                 363 net/ipv6/ip6_tunnel.c 	return ip6_tnl_create(net, p);
p                 721 net/ipv6/ip6_tunnel.c 	struct __ip6_tnl_parm *p = &t->parms;
p                 731 net/ipv6/ip6_tunnel.c 		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
p                 746 net/ipv6/ip6_tunnel.c 	struct __ip6_tnl_parm *p = &t->parms;
p                 750 net/ipv6/ip6_tunnel.c 	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
p                 751 net/ipv6/ip6_tunnel.c 	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
p                 755 net/ipv6/ip6_tunnel.c 		if (p->link)
p                 756 net/ipv6/ip6_tunnel.c 			ldev = dev_get_by_index_rcu(net, p->link);
p                 761 net/ipv6/ip6_tunnel.c 		    ((p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) ||
p                 978 net/ipv6/ip6_tunnel.c 	struct __ip6_tnl_parm *p = &t->parms;
p                 985 net/ipv6/ip6_tunnel.c 	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
p                 986 net/ipv6/ip6_tunnel.c 	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
p                 991 net/ipv6/ip6_tunnel.c 		if (p->link)
p                 992 net/ipv6/ip6_tunnel.c 			ldev = dev_get_by_index_rcu(net, p->link);
p                 997 net/ipv6/ip6_tunnel.c 				p->name);
p                 998 net/ipv6/ip6_tunnel.c 		else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
p                1003 net/ipv6/ip6_tunnel.c 				p->name);
p                1423 net/ipv6/ip6_tunnel.c 	struct __ip6_tnl_parm *p = &t->parms;
p                1427 net/ipv6/ip6_tunnel.c 	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
p                1428 net/ipv6/ip6_tunnel.c 	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
p                1431 net/ipv6/ip6_tunnel.c 	fl6->saddr = p->laddr;
p                1432 net/ipv6/ip6_tunnel.c 	fl6->daddr = p->raddr;
p                1433 net/ipv6/ip6_tunnel.c 	fl6->flowi6_oif = p->link;
p                1436 net/ipv6/ip6_tunnel.c 	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
p                1437 net/ipv6/ip6_tunnel.c 		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
p                1438 net/ipv6/ip6_tunnel.c 	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
p                1439 net/ipv6/ip6_tunnel.c 		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
p                1441 net/ipv6/ip6_tunnel.c 	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
p                1442 net/ipv6/ip6_tunnel.c 	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
p                1444 net/ipv6/ip6_tunnel.c 	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
p                1453 net/ipv6/ip6_tunnel.c 	if (p->flags & IP6_TNL_F_CAP_XMIT) {
p                1454 net/ipv6/ip6_tunnel.c 		int strict = (ipv6_addr_type(&p->raddr) &
p                1458 net/ipv6/ip6_tunnel.c 						 &p->raddr, &p->laddr,
p                1459 net/ipv6/ip6_tunnel.c 						 p->link, NULL, strict);
p                1489 net/ipv6/ip6_tunnel.c ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
p                1491 net/ipv6/ip6_tunnel.c 	t->parms.laddr = p->laddr;
p                1492 net/ipv6/ip6_tunnel.c 	t->parms.raddr = p->raddr;
p                1493 net/ipv6/ip6_tunnel.c 	t->parms.flags = p->flags;
p                1494 net/ipv6/ip6_tunnel.c 	t->parms.hop_limit = p->hop_limit;
p                1495 net/ipv6/ip6_tunnel.c 	t->parms.encap_limit = p->encap_limit;
p                1496 net/ipv6/ip6_tunnel.c 	t->parms.flowinfo = p->flowinfo;
p                1497 net/ipv6/ip6_tunnel.c 	t->parms.link = p->link;
p                1498 net/ipv6/ip6_tunnel.c 	t->parms.proto = p->proto;
p                1499 net/ipv6/ip6_tunnel.c 	t->parms.fwmark = p->fwmark;
p                1505 net/ipv6/ip6_tunnel.c static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
p                1513 net/ipv6/ip6_tunnel.c 	err = ip6_tnl_change(t, p);
p                1519 net/ipv6/ip6_tunnel.c static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
p                1522 net/ipv6/ip6_tunnel.c 	t->parms.proto = p->proto;
p                1528 net/ipv6/ip6_tunnel.c ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
p                1530 net/ipv6/ip6_tunnel.c 	p->laddr = u->laddr;
p                1531 net/ipv6/ip6_tunnel.c 	p->raddr = u->raddr;
p                1532 net/ipv6/ip6_tunnel.c 	p->flags = u->flags;
p                1533 net/ipv6/ip6_tunnel.c 	p->hop_limit = u->hop_limit;
p                1534 net/ipv6/ip6_tunnel.c 	p->encap_limit = u->encap_limit;
p                1535 net/ipv6/ip6_tunnel.c 	p->flowinfo = u->flowinfo;
p                1536 net/ipv6/ip6_tunnel.c 	p->link = u->link;
p                1537 net/ipv6/ip6_tunnel.c 	p->proto = u->proto;
p                1538 net/ipv6/ip6_tunnel.c 	memcpy(p->name, u->name, sizeof(u->name));
p                1542 net/ipv6/ip6_tunnel.c ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
p                1544 net/ipv6/ip6_tunnel.c 	u->laddr = p->laddr;
p                1545 net/ipv6/ip6_tunnel.c 	u->raddr = p->raddr;
p                1546 net/ipv6/ip6_tunnel.c 	u->flags = p->flags;
p                1547 net/ipv6/ip6_tunnel.c 	u->hop_limit = p->hop_limit;
p                1548 net/ipv6/ip6_tunnel.c 	u->encap_limit = p->encap_limit;
p                1549 net/ipv6/ip6_tunnel.c 	u->flowinfo = p->flowinfo;
p                1550 net/ipv6/ip6_tunnel.c 	u->link = p->link;
p                1551 net/ipv6/ip6_tunnel.c 	u->proto = p->proto;
p                1552 net/ipv6/ip6_tunnel.c 	memcpy(u->name, p->name, sizeof(u->name));
p                1587 net/ipv6/ip6_tunnel.c 	struct ip6_tnl_parm p;
p                1598 net/ipv6/ip6_tunnel.c 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
p                1602 net/ipv6/ip6_tunnel.c 			ip6_tnl_parm_from_user(&p1, &p);
p                1607 net/ipv6/ip6_tunnel.c 			memset(&p, 0, sizeof(p));
p                1609 net/ipv6/ip6_tunnel.c 		ip6_tnl_parm_to_user(&p, &t->parms);
p                1610 net/ipv6/ip6_tunnel.c 		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) {
p                1620 net/ipv6/ip6_tunnel.c 		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
p                1623 net/ipv6/ip6_tunnel.c 		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
p                1624 net/ipv6/ip6_tunnel.c 		    p.proto != 0)
p                1626 net/ipv6/ip6_tunnel.c 		ip6_tnl_parm_from_user(&p1, &p);
p                1643 net/ipv6/ip6_tunnel.c 			ip6_tnl_parm_to_user(&p, &t->parms);
p                1644 net/ipv6/ip6_tunnel.c 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
p                1658 net/ipv6/ip6_tunnel.c 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
p                1661 net/ipv6/ip6_tunnel.c 			ip6_tnl_parm_from_user(&p1, &p);
p                2035 net/ipv6/ip6_tunnel.c 	struct __ip6_tnl_parm p;
p                2049 net/ipv6/ip6_tunnel.c 	ip6_tnl_netlink_parms(data, &p);
p                2050 net/ipv6/ip6_tunnel.c 	if (p.collect_md)
p                2053 net/ipv6/ip6_tunnel.c 	t = ip6_tnl_locate(net, &p, 0);
p                2060 net/ipv6/ip6_tunnel.c 	return ip6_tnl_update(t, &p);
p                 137 net/ipv6/ip6_vti.c vti6_tnl_bucket(struct vti6_net *ip6n, const struct __ip6_tnl_parm *p)
p                 139 net/ipv6/ip6_vti.c 	const struct in6_addr *remote = &p->raddr;
p                 140 net/ipv6/ip6_vti.c 	const struct in6_addr *local = &p->laddr;
p                 204 net/ipv6/ip6_vti.c static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
p                 211 net/ipv6/ip6_vti.c 	if (p->name[0]) {
p                 212 net/ipv6/ip6_vti.c 		if (!dev_valid_name(p->name))
p                 214 net/ipv6/ip6_vti.c 		strlcpy(name, p->name, IFNAMSIZ);
p                 226 net/ipv6/ip6_vti.c 	t->parms = *p;
p                 255 net/ipv6/ip6_vti.c static struct ip6_tnl *vti6_locate(struct net *net, struct __ip6_tnl_parm *p,
p                 258 net/ipv6/ip6_vti.c 	const struct in6_addr *remote = &p->raddr;
p                 259 net/ipv6/ip6_vti.c 	const struct in6_addr *local = &p->laddr;
p                 264 net/ipv6/ip6_vti.c 	for (tp = vti6_tnl_bucket(ip6n, p);
p                 277 net/ipv6/ip6_vti.c 	return vti6_tnl_create(net, p);
p                 648 net/ipv6/ip6_vti.c 	struct __ip6_tnl_parm *p = &t->parms;
p                 652 net/ipv6/ip6_vti.c 	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
p                 653 net/ipv6/ip6_vti.c 	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
p                 655 net/ipv6/ip6_vti.c 	p->flags &= ~(IP6_TNL_F_CAP_XMIT | IP6_TNL_F_CAP_RCV |
p                 657 net/ipv6/ip6_vti.c 	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
p                 659 net/ipv6/ip6_vti.c 	if (p->flags & IP6_TNL_F_CAP_XMIT && p->flags & IP6_TNL_F_CAP_RCV)
p                 669 net/ipv6/ip6_vti.c 	if (p->flags & IP6_TNL_F_CAP_XMIT) {
p                 670 net/ipv6/ip6_vti.c 		int strict = (ipv6_addr_type(&p->raddr) &
p                 673 net/ipv6/ip6_vti.c 						 &p->raddr, &p->laddr,
p                 674 net/ipv6/ip6_vti.c 						 p->link, NULL, strict);
p                 681 net/ipv6/ip6_vti.c 	if (!tdev && p->link)
p                 682 net/ipv6/ip6_vti.c 		tdev = __dev_get_by_index(t->net, p->link);
p                 702 net/ipv6/ip6_vti.c vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
p                 705 net/ipv6/ip6_vti.c 	t->parms.laddr = p->laddr;
p                 706 net/ipv6/ip6_vti.c 	t->parms.raddr = p->raddr;
p                 707 net/ipv6/ip6_vti.c 	t->parms.link = p->link;
p                 708 net/ipv6/ip6_vti.c 	t->parms.i_key = p->i_key;
p                 709 net/ipv6/ip6_vti.c 	t->parms.o_key = p->o_key;
p                 710 net/ipv6/ip6_vti.c 	t->parms.proto = p->proto;
p                 711 net/ipv6/ip6_vti.c 	t->parms.fwmark = p->fwmark;
p                 717 net/ipv6/ip6_vti.c static int vti6_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p,
p                 726 net/ipv6/ip6_vti.c 	err = vti6_tnl_change(t, p, keep_mtu);
p                 733 net/ipv6/ip6_vti.c vti6_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm2 *u)
p                 735 net/ipv6/ip6_vti.c 	p->laddr = u->laddr;
p                 736 net/ipv6/ip6_vti.c 	p->raddr = u->raddr;
p                 737 net/ipv6/ip6_vti.c 	p->link = u->link;
p                 738 net/ipv6/ip6_vti.c 	p->i_key = u->i_key;
p                 739 net/ipv6/ip6_vti.c 	p->o_key = u->o_key;
p                 740 net/ipv6/ip6_vti.c 	p->proto = u->proto;
p                 742 net/ipv6/ip6_vti.c 	memcpy(p->name, u->name, sizeof(u->name));
p                 746 net/ipv6/ip6_vti.c vti6_parm_to_user(struct ip6_tnl_parm2 *u, const struct __ip6_tnl_parm *p)
p                 748 net/ipv6/ip6_vti.c 	u->laddr = p->laddr;
p                 749 net/ipv6/ip6_vti.c 	u->raddr = p->raddr;
p                 750 net/ipv6/ip6_vti.c 	u->link = p->link;
p                 751 net/ipv6/ip6_vti.c 	u->i_key = p->i_key;
p                 752 net/ipv6/ip6_vti.c 	u->o_key = p->o_key;
p                 757 net/ipv6/ip6_vti.c 	u->proto = p->proto;
p                 759 net/ipv6/ip6_vti.c 	memcpy(u->name, p->name, sizeof(u->name));
p                 793 net/ipv6/ip6_vti.c 	struct ip6_tnl_parm2 p;
p                 802 net/ipv6/ip6_vti.c 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
p                 806 net/ipv6/ip6_vti.c 			vti6_parm_from_user(&p1, &p);
p                 809 net/ipv6/ip6_vti.c 			memset(&p, 0, sizeof(p));
p                 813 net/ipv6/ip6_vti.c 		vti6_parm_to_user(&p, &t->parms);
p                 814 net/ipv6/ip6_vti.c 		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
p                 823 net/ipv6/ip6_vti.c 		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
p                 826 net/ipv6/ip6_vti.c 		if (p.proto != IPPROTO_IPV6  && p.proto != 0)
p                 828 net/ipv6/ip6_vti.c 		vti6_parm_from_user(&p1, &p);
p                 843 net/ipv6/ip6_vti.c 			vti6_parm_to_user(&p, &t->parms);
p                 844 net/ipv6/ip6_vti.c 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
p                 857 net/ipv6/ip6_vti.c 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
p                 860 net/ipv6/ip6_vti.c 			vti6_parm_from_user(&p1, &p);
p                1026 net/ipv6/ip6_vti.c 	struct __ip6_tnl_parm p;
p                1033 net/ipv6/ip6_vti.c 	vti6_netlink_parms(data, &p);
p                1035 net/ipv6/ip6_vti.c 	t = vti6_locate(net, &p, 0);
p                1043 net/ipv6/ip6_vti.c 	return vti6_update(t, &p, tb && tb[IFLA_MTU]);
p                2667 net/ipv6/mcast.c 	struct seq_net_private p;
p                2780 net/ipv6/mcast.c 	struct seq_net_private p;
p                1439 net/ipv6/ndisc.c 		struct nd_opt_hdr *p;
p                1440 net/ipv6/ndisc.c 		for (p = ndopts.nd_opts_ri;
p                1441 net/ipv6/ndisc.c 		     p;
p                1442 net/ipv6/ndisc.c 		     p = ndisc_next_option(p, ndopts.nd_opts_ri_end)) {
p                1443 net/ipv6/ndisc.c 			struct route_info *ri = (struct route_info *)p;
p                1456 net/ipv6/ndisc.c 			rt6_route_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3,
p                1475 net/ipv6/ndisc.c 		struct nd_opt_hdr *p;
p                1476 net/ipv6/ndisc.c 		for (p = ndopts.nd_opts_pi;
p                1477 net/ipv6/ndisc.c 		     p;
p                1478 net/ipv6/ndisc.c 		     p = ndisc_next_option(p, ndopts.nd_opts_pi_end)) {
p                1479 net/ipv6/ndisc.c 			addrconf_prefix_rcv(skb->dev, (u8 *)p,
p                1480 net/ipv6/ndisc.c 					    (p->nd_opt_len) << 3,
p                1502 net/ipv6/ndisc.c 		struct nd_opt_hdr *p;
p                1503 net/ipv6/ndisc.c 		for (p = ndopts.nd_useropts;
p                1504 net/ipv6/ndisc.c 		     p;
p                1505 net/ipv6/ndisc.c 		     p = ndisc_next_useropt(skb->dev, p,
p                1507 net/ipv6/ndisc.c 			ndisc_ra_useropt(skb, p);
p                 312 net/ipv6/netfilter/nf_log_ipv6.c 		const unsigned char *p = skb_mac_header(skb);
p                 317 net/ipv6/netfilter/nf_log_ipv6.c 			p -= ETH_HLEN;
p                 319 net/ipv6/netfilter/nf_log_ipv6.c 			if (p < skb->head)
p                 320 net/ipv6/netfilter/nf_log_ipv6.c 				p = NULL;
p                 323 net/ipv6/netfilter/nf_log_ipv6.c 		if (p != NULL) {
p                 324 net/ipv6/netfilter/nf_log_ipv6.c 			nf_log_buf_add(m, "%02x", *p++);
p                 326 net/ipv6/netfilter/nf_log_ipv6.c 				nf_log_buf_add(m, ":%02x", *p++);
p                 151 net/ipv6/proc.c 		const char *p;
p                 154 net/ipv6/proc.c 		p = icmp6type2name[icmptype];
p                 155 net/ipv6/proc.c 		if (!p)	/* don't print un-named types here */
p                 158 net/ipv6/proc.c 			i & 0x100 ? "Out" : "In", p);
p                 244 net/ipv6/proc.c 	struct proc_dir_entry *p;
p                 254 net/ipv6/proc.c 	p = proc_create_single_data(idev->dev->name, 0444,
p                 256 net/ipv6/proc.c 	if (!p)
p                 259 net/ipv6/proc.c 	idev->stats.proc_dir_entry = p;
p                 189 net/ipv6/route.c static inline const void *choose_neigh_daddr(const struct in6_addr *p,
p                 193 net/ipv6/route.c 	if (!ipv6_addr_any(p))
p                 194 net/ipv6/route.c 		return (const void *) p;
p                1411 net/ipv6/route.c 		struct rt6_info *prev, **p;
p                1413 net/ipv6/route.c 		p = this_cpu_ptr(res->nh->rt6i_pcpu);
p                1414 net/ipv6/route.c 		prev = xchg(p, NULL);
p                1429 net/ipv6/route.c 	struct rt6_info *pcpu_rt, *prev, **p;
p                1435 net/ipv6/route.c 	p = this_cpu_ptr(res->nh->rt6i_pcpu);
p                1436 net/ipv6/route.c 	prev = cmpxchg(p, NULL, pcpu_rt);
p                1626 net/ipv6/route.c 		unsigned long p = (unsigned long)bucket;
p                1628 net/ipv6/route.c 		p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
p                1629 net/ipv6/route.c 		bucket = (struct rt6_exception_bucket *)p;
p                1637 net/ipv6/route.c 	unsigned long p = (unsigned long)bucket;
p                1639 net/ipv6/route.c 	return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);
p                1647 net/ipv6/route.c 	unsigned long p;
p                1652 net/ipv6/route.c 	p = (unsigned long)bucket;
p                1653 net/ipv6/route.c 	p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
p                1654 net/ipv6/route.c 	bucket = (struct rt6_exception_bucket *)p;
p                 830 net/ipv6/seg6_local.c 	struct bpf_prog *p;
p                 848 net/ipv6/seg6_local.c 	p = bpf_prog_get_type(fd, BPF_PROG_TYPE_LWT_SEG6LOCAL);
p                 849 net/ipv6/seg6_local.c 	if (IS_ERR(p)) {
p                 851 net/ipv6/seg6_local.c 		return PTR_ERR(p);
p                 854 net/ipv6/seg6_local.c 	slwt->bpf.prog = p;
p                 360 net/ipv6/sit.c 	struct ip_tunnel_prl_entry *p;
p                 368 net/ipv6/sit.c 	for (p = rtnl_dereference(t->prl); p; p = rtnl_dereference(p->next)) {
p                 369 net/ipv6/sit.c 		if (p->addr == a->addr) {
p                 371 net/ipv6/sit.c 				p->flags = a->flags;
p                 384 net/ipv6/sit.c 	p = kzalloc(sizeof(struct ip_tunnel_prl_entry), GFP_KERNEL);
p                 385 net/ipv6/sit.c 	if (!p) {
p                 390 net/ipv6/sit.c 	p->next = t->prl;
p                 391 net/ipv6/sit.c 	p->addr = a->addr;
p                 392 net/ipv6/sit.c 	p->flags = a->flags;
p                 394 net/ipv6/sit.c 	rcu_assign_pointer(t->prl, p);
p                 401 net/ipv6/sit.c 	struct ip_tunnel_prl_entry *p, *n;
p                 403 net/ipv6/sit.c 	p = container_of(head, struct ip_tunnel_prl_entry, rcu_head);
p                 405 net/ipv6/sit.c 		n = rcu_dereference_protected(p->next, 1);
p                 406 net/ipv6/sit.c 		kfree(p);
p                 407 net/ipv6/sit.c 		p = n;
p                 408 net/ipv6/sit.c 	} while (p);
p                 415 net/ipv6/sit.c 	struct ip_tunnel_prl_entry __rcu **p;
p                 421 net/ipv6/sit.c 		for (p = &t->prl;
p                 422 net/ipv6/sit.c 		     (x = rtnl_dereference(*p)) != NULL;
p                 423 net/ipv6/sit.c 		     p = &x->next) {
p                 425 net/ipv6/sit.c 				*p = x->next;
p                 447 net/ipv6/sit.c 	struct ip_tunnel_prl_entry *p;
p                 451 net/ipv6/sit.c 	p = __ipip6_tunnel_locate_prl(t, iph->saddr);
p                 452 net/ipv6/sit.c 	if (p) {
p                 453 net/ipv6/sit.c 		if (p->flags & PRL_DEFAULT)
p                1098 net/ipv6/sit.c static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p,
p                1106 net/ipv6/sit.c 	t->parms.iph.saddr = p->iph.saddr;
p                1107 net/ipv6/sit.c 	t->parms.iph.daddr = p->iph.daddr;
p                1108 net/ipv6/sit.c 	memcpy(t->dev->dev_addr, &p->iph.saddr, 4);
p                1109 net/ipv6/sit.c 	memcpy(t->dev->broadcast, &p->iph.daddr, 4);
p                1111 net/ipv6/sit.c 	t->parms.iph.ttl = p->iph.ttl;
p                1112 net/ipv6/sit.c 	t->parms.iph.tos = p->iph.tos;
p                1113 net/ipv6/sit.c 	t->parms.iph.frag_off = p->iph.frag_off;
p                1114 net/ipv6/sit.c 	if (t->parms.link != p->link || t->fwmark != fwmark) {
p                1115 net/ipv6/sit.c 		t->parms.link = p->link;
p                1170 net/ipv6/sit.c 	struct ip_tunnel_parm p;
p                1185 net/ipv6/sit.c 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
p                1189 net/ipv6/sit.c 			t = ipip6_tunnel_locate(net, &p, 0);
p                1196 net/ipv6/sit.c 			memcpy(&p, &t->parms, sizeof(p));
p                1197 net/ipv6/sit.c 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p,
p                1198 net/ipv6/sit.c 					 sizeof(p)))
p                1221 net/ipv6/sit.c 		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
p                1225 net/ipv6/sit.c 		if (!ipip6_valid_ip_proto(p.iph.protocol))
p                1227 net/ipv6/sit.c 		if (p.iph.version != 4 ||
p                1228 net/ipv6/sit.c 		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
p                1230 net/ipv6/sit.c 		if (p.iph.ttl)
p                1231 net/ipv6/sit.c 			p.iph.frag_off |= htons(IP_DF);
p                1233 net/ipv6/sit.c 		t = ipip6_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
p                1242 net/ipv6/sit.c 				if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) ||
p                1243 net/ipv6/sit.c 				    (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) {
p                1250 net/ipv6/sit.c 			ipip6_tunnel_update(t, &p, t->fwmark);
p                1255 net/ipv6/sit.c 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
p                1268 net/ipv6/sit.c 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
p                1271 net/ipv6/sit.c 			t = ipip6_tunnel_locate(net, &p, 0);
p                1614 net/ipv6/sit.c 	struct ip_tunnel_parm p;
p                1633 net/ipv6/sit.c 	ipip6_netlink_parms(data, &p, &fwmark);
p                1635 net/ipv6/sit.c 	if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) ||
p                1636 net/ipv6/sit.c 	    (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr))
p                1639 net/ipv6/sit.c 	t = ipip6_tunnel_locate(net, &p, 0);
p                1647 net/ipv6/sit.c 	ipip6_tunnel_update(t, &p, fwmark);
p                1365 net/iucv/af_iucv.c 	struct sock_msg_q *p, *n;
p                1367 net/iucv/af_iucv.c 	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
p                1368 net/iucv/af_iucv.c 		skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
p                1371 net/iucv/af_iucv.c 		iucv_process_message(sk, skb, p->path, &p->msg);
p                1372 net/iucv/af_iucv.c 		list_del(&p->list);
p                1373 net/iucv/af_iucv.c 		kfree(p);
p                 734 net/iucv/iucv.c 	struct iucv_irq_list *p, *n;
p                 747 net/iucv/iucv.c 	list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
p                 749 net/iucv/iucv.c 		if (iucv_path_table[p->data.ippathid] == NULL) {
p                 750 net/iucv/iucv.c 			list_del(&p->list);
p                 751 net/iucv/iucv.c 			kfree(p);
p                 803 net/iucv/iucv.c 	struct iucv_path *p, *n;
p                 810 net/iucv/iucv.c 	list_for_each_entry_safe(p, n, &handler->paths, list) {
p                 811 net/iucv/iucv.c 		iucv_sever_pathid(p->pathid, NULL);
p                 812 net/iucv/iucv.c 		iucv_path_table[p->pathid] = NULL;
p                 813 net/iucv/iucv.c 		list_del(&p->list);
p                 814 net/iucv/iucv.c 		iucv_path_free(p);
p                1743 net/iucv/iucv.c 	struct iucv_irq_list *p, *n;
p                1756 net/iucv/iucv.c 	list_for_each_entry_safe(p, n, &task_queue, list) {
p                1757 net/iucv/iucv.c 		list_del_init(&p->list);
p                1758 net/iucv/iucv.c 		irq_fn[p->data.iptype](&p->data);
p                1759 net/iucv/iucv.c 		kfree(p);
p                1776 net/iucv/iucv.c 	struct iucv_irq_list *p, *n;
p                1787 net/iucv/iucv.c 	list_for_each_entry_safe(p, n, &work_queue, list) {
p                1788 net/iucv/iucv.c 		list_del_init(&p->list);
p                1789 net/iucv/iucv.c 		iucv_path_pending(&p->data);
p                1790 net/iucv/iucv.c 		kfree(p);
p                1807 net/iucv/iucv.c 	struct iucv_irq_data *p;
p                1811 net/iucv/iucv.c 	p = iucv_irq_data[smp_processor_id()];
p                1812 net/iucv/iucv.c 	if (p->ippathid >= iucv_max_pathid) {
p                1813 net/iucv/iucv.c 		WARN_ON(p->ippathid >= iucv_max_pathid);
p                1814 net/iucv/iucv.c 		iucv_sever_pathid(p->ippathid, iucv_error_no_listener);
p                1817 net/iucv/iucv.c 	BUG_ON(p->iptype  < 0x01 || p->iptype > 0x09);
p                1823 net/iucv/iucv.c 	memcpy(&work->data, p, sizeof(work->data));
p                1825 net/iucv/iucv.c 	if (p->iptype == 0x01) {
p                1886 net/iucv/iucv.c 	struct iucv_irq_list *p, *n;
p                1897 net/iucv/iucv.c 		list_for_each_entry_safe(p, n, &iucv_work_queue, list) {
p                1898 net/iucv/iucv.c 			list_del_init(&p->list);
p                1899 net/iucv/iucv.c 			iucv_sever_pathid(p->data.ippathid,
p                1901 net/iucv/iucv.c 			kfree(p);
p                2072 net/iucv/iucv.c 	struct iucv_irq_list *p, *n;
p                2075 net/iucv/iucv.c 	list_for_each_entry_safe(p, n, &iucv_task_queue, list)
p                2076 net/iucv/iucv.c 		kfree(p);
p                2077 net/iucv/iucv.c 	list_for_each_entry_safe(p, n, &iucv_work_queue, list)
p                2078 net/iucv/iucv.c 		kfree(p);
p                  51 net/kcm/kcmproc.c 	void *p;
p                  54 net/kcm/kcmproc.c 		p = kcm_get_first(seq);
p                  56 net/kcm/kcmproc.c 		p = kcm_get_next(v);
p                  58 net/kcm/kcmproc.c 	return p;
p                  79 net/kcm/kcmproc.c 	struct seq_net_private p;
p                 380 net/key/af_key.c static int verify_address_len(const void *p)
p                 382 net/key/af_key.c 	const struct sadb_address *sp = p;
p                 434 net/key/af_key.c static int verify_key_len(const void *p)
p                 436 net/key/af_key.c 	const struct sadb_key *key = p;
p                 451 net/key/af_key.c static inline int verify_sec_ctx_len(const void *p)
p                 453 net/key/af_key.c 	const struct sadb_x_sec_ctx *sec_ctx = p;
p                 513 net/key/af_key.c 	const char *p = (char *) hdr;
p                 517 net/key/af_key.c 	p += sizeof(*hdr);
p                 519 net/key/af_key.c 		const struct sadb_ext *ehdr = (const struct sadb_ext *) p;
p                 545 net/key/af_key.c 				if (verify_address_len(p))
p                 549 net/key/af_key.c 				if (verify_sec_ctx_len(p))
p                 554 net/key/af_key.c 				if (verify_key_len(p))
p                 560 net/key/af_key.c 			ext_hdrs[ext_type-1] = (void *) p;
p                 562 net/key/af_key.c 		p   += ext_len;
p                2936 net/key/af_key.c 	struct sadb_prop *p;
p                2939 net/key/af_key.c 	p = skb_put(skb, sizeof(struct sadb_prop));
p                2940 net/key/af_key.c 	p->sadb_prop_len = sizeof(struct sadb_prop)/8;
p                2941 net/key/af_key.c 	p->sadb_prop_exttype = SADB_EXT_PROPOSAL;
p                2942 net/key/af_key.c 	p->sadb_prop_replay = 32;
p                2943 net/key/af_key.c 	memset(p->sadb_prop_reserved, 0, sizeof(p->sadb_prop_reserved));
p                2956 net/key/af_key.c 			p->sadb_prop_len += sizeof(struct sadb_comb)/8;
p                2970 net/key/af_key.c 	struct sadb_prop *p;
p                2973 net/key/af_key.c 	p = skb_put(skb, sizeof(struct sadb_prop));
p                2974 net/key/af_key.c 	p->sadb_prop_len = sizeof(struct sadb_prop)/8;
p                2975 net/key/af_key.c 	p->sadb_prop_exttype = SADB_EXT_PROPOSAL;
p                2976 net/key/af_key.c 	p->sadb_prop_replay = 32;
p                2977 net/key/af_key.c 	memset(p->sadb_prop_reserved, 0, sizeof(p->sadb_prop_reserved));
p                3001 net/key/af_key.c 			p->sadb_prop_len += sizeof(struct sadb_comb)/8;
p                3299 net/key/af_key.c 		char *p = (char *)pol;
p                3302 net/key/af_key.c 		p += pol->sadb_x_policy_len*8;
p                3303 net/key/af_key.c 		sec_ctx = (struct sadb_x_sec_ctx *)p;
p                3309 net/key/af_key.c 		if ((*dir = verify_sec_ctx_len(p)))
p                 100 net/l2tp/l2tp_debugfs.c static void l2tp_dfs_seq_stop(struct seq_file *p, void *v)
p                1432 net/l2tp/l2tp_ppp.c 	struct seq_net_private p;
p                1504 net/l2tp/l2tp_ppp.c static void pppol2tp_seq_stop(struct seq_file *p, void *v)
p                 222 net/llc/llc_proc.c 	struct proc_dir_entry *p;
p                 228 net/llc/llc_proc.c 	p = proc_create_seq("socket", 0444, llc_proc_dir, &llc_seq_socket_ops);
p                 229 net/llc/llc_proc.c 	if (!p)
p                 232 net/llc/llc_proc.c 	p = proc_create_seq("core", 0444, llc_proc_dir, &llc_seq_core_ops);
p                 233 net/llc/llc_proc.c 	if (!p)
p                2239 net/mac80211/cfg.c 	struct ieee80211_tx_queue_params p;
p                2247 net/mac80211/cfg.c 	memset(&p, 0, sizeof(p));
p                2248 net/mac80211/cfg.c 	p.aifs = params->aifs;
p                2249 net/mac80211/cfg.c 	p.cw_max = params->cwmax;
p                2250 net/mac80211/cfg.c 	p.cw_min = params->cwmin;
p                2251 net/mac80211/cfg.c 	p.txop = params->txop;
p                2257 net/mac80211/cfg.c 	p.uapsd = false;
p                2259 net/mac80211/cfg.c 	ieee80211_regulatory_limit_wmm_params(sdata, &p, params->ac);
p                2261 net/mac80211/cfg.c 	sdata->tx_conf[params->ac] = p;
p                2262 net/mac80211/cfg.c 	if (drv_conf_tx(local, sdata, params->ac, &p)) {
p                 161 net/mac80211/debugfs_key.c 	char buf[14*IEEE80211_NUM_TIDS+1], *p = buf;
p                 172 net/mac80211/debugfs_key.c 			p += scnprintf(p, sizeof(buf)+buf-p,
p                 176 net/mac80211/debugfs_key.c 		len = p - buf;
p                 182 net/mac80211/debugfs_key.c 			p += scnprintf(p, sizeof(buf)+buf-p,
p                 187 net/mac80211/debugfs_key.c 		len = p - buf;
p                 192 net/mac80211/debugfs_key.c 		p += scnprintf(p, sizeof(buf)+buf-p,
p                 196 net/mac80211/debugfs_key.c 		len = p - buf;
p                 201 net/mac80211/debugfs_key.c 		p += scnprintf(p, sizeof(buf)+buf-p,
p                 205 net/mac80211/debugfs_key.c 		len = p - buf;
p                 211 net/mac80211/debugfs_key.c 			p += scnprintf(p, sizeof(buf)+buf-p,
p                 216 net/mac80211/debugfs_key.c 		len = p - buf;
p                 305 net/mac80211/debugfs_key.c 	char *p = buf;
p                 312 net/mac80211/debugfs_key.c 		p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
p                 313 net/mac80211/debugfs_key.c 	p += scnprintf(p, bufsize+buf-p, "\n");
p                 314 net/mac80211/debugfs_key.c 	res = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
p                  88 net/mac80211/debugfs_netdev.c 	char *p = buf;							\
p                  91 net/mac80211/debugfs_netdev.c 		p += scnprintf(p, buflen + buf - p, "%.2x ",		\
p                  94 net/mac80211/debugfs_netdev.c 	p += scnprintf(p, buflen + buf - p, "\n");			\
p                  95 net/mac80211/debugfs_netdev.c 	return p - buf;							\
p                 110 net/mac80211/debugfs_sta.c 	char buf[17*IEEE80211_NUM_ACS], *p = buf;
p                 114 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf)+buf-p, "AC%d: %d\n", ac,
p                 117 net/mac80211/debugfs_sta.c 	return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
p                 124 net/mac80211/debugfs_sta.c 	char buf[15*IEEE80211_NUM_TIDS], *p = buf;
p                 128 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf)+buf-p, "%x ",
p                 130 net/mac80211/debugfs_sta.c 	p += scnprintf(p, sizeof(buf)+buf-p, "\n");
p                 131 net/mac80211/debugfs_sta.c 	return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
p                 143 net/mac80211/debugfs_sta.c 	char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
p                 154 net/mac80211/debugfs_sta.c 	p += scnprintf(p,
p                 155 net/mac80211/debugfs_sta.c 		       bufsz+buf-p,
p                 160 net/mac80211/debugfs_sta.c 	p += scnprintf(p,
p                 161 net/mac80211/debugfs_sta.c 		       bufsz+buf-p,
p                 168 net/mac80211/debugfs_sta.c 		p += scnprintf(p, bufsz+buf-p,
p                 190 net/mac80211/debugfs_sta.c 	rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
p                 202 net/mac80211/debugfs_sta.c 	char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
p                 219 net/mac80211/debugfs_sta.c 	p += scnprintf(p, bufsz + buf - p,
p                 230 net/mac80211/debugfs_sta.c 	rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
p                 257 net/mac80211/debugfs_sta.c 	char buf[71 + IEEE80211_NUM_TIDS * 40], *p = buf;
p                 265 net/mac80211/debugfs_sta.c 	p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
p                 267 net/mac80211/debugfs_sta.c 	p += scnprintf(p, sizeof(buf) + buf - p,
p                 277 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i);
p                 278 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
p                 280 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
p                 283 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
p                 286 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", !!tid_tx);
p                 287 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
p                 289 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d",
p                 291 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf) + buf - p, "\n");
p                 295 net/mac80211/debugfs_sta.c 	return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
p                 375 net/mac80211/debugfs_sta.c 			p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \
p                 377 net/mac80211/debugfs_sta.c 	char buf[512], *p = buf;
p                 382 net/mac80211/debugfs_sta.c 	p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
p                 385 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap);
p                 427 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n",
p                 429 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:");
p                 432 net/mac80211/debugfs_sta.c 			p += scnprintf(p, sizeof(buf)+buf-p, " %.2x",
p                 434 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf)+buf-p, "\n");
p                 438 net/mac80211/debugfs_sta.c 			p += scnprintf(p, sizeof(buf)+buf-p,
p                 443 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n",
p                 447 net/mac80211/debugfs_sta.c 	return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
p                 454 net/mac80211/debugfs_sta.c 	char buf[512], *p = buf;
p                 458 net/mac80211/debugfs_sta.c 	p += scnprintf(p, sizeof(buf) + buf - p, "VHT %ssupported\n",
p                 461 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf) + buf - p, "cap: %#.8x\n",
p                 466 net/mac80211/debugfs_sta.c 				p += scnprintf(p, sizeof(buf) + buf - p, \
p                 472 net/mac80211/debugfs_sta.c 			p += scnprintf(p, sizeof(buf) + buf - p,
p                 476 net/mac80211/debugfs_sta.c 			p += scnprintf(p, sizeof(buf) + buf - p,
p                 480 net/mac80211/debugfs_sta.c 			p += scnprintf(p, sizeof(buf) + buf - p,
p                 484 net/mac80211/debugfs_sta.c 			p += scnprintf(p, sizeof(buf) + buf - p,
p                 489 net/mac80211/debugfs_sta.c 			p += scnprintf(p, sizeof(buf) + buf - p,
p                 493 net/mac80211/debugfs_sta.c 			p += scnprintf(p, sizeof(buf) + buf - p,
p                 497 net/mac80211/debugfs_sta.c 			p += scnprintf(p, sizeof(buf) + buf - p,
p                 501 net/mac80211/debugfs_sta.c 			p += scnprintf(p, sizeof(buf) + buf - p,
p                 509 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf) + buf - p,
p                 513 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf) + buf - p,
p                 517 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf) + buf - p,
p                 525 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf) + buf - p,
p                 531 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf) + buf - p,
p                 537 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf)+buf-p, "RX MCS: %.4x\n",
p                 540 net/mac80211/debugfs_sta.c 			p += scnprintf(p, sizeof(buf)+buf-p,
p                 543 net/mac80211/debugfs_sta.c 		p += scnprintf(p, sizeof(buf)+buf-p, "TX MCS: %.4x\n",
p                 546 net/mac80211/debugfs_sta.c 			p += scnprintf(p, sizeof(buf)+buf-p,
p                 552 net/mac80211/debugfs_sta.c 	return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
p                 559 net/mac80211/debugfs_sta.c 	char *buf, *p;
p                 572 net/mac80211/debugfs_sta.c 	p = buf;
p                 574 net/mac80211/debugfs_sta.c 	p += scnprintf(p, buf_sz + buf - p, "HE %ssupported\n",
p                 580 net/mac80211/debugfs_sta.c 	p += scnprintf(p, buf_sz + buf - p,
p                 585 net/mac80211/debugfs_sta.c 	p += scnprintf(p, buf_sz + buf - p, "\t\t" fmt "\n",		\
p                 691 net/mac80211/debugfs_sta.c 	p += scnprintf(p, buf_sz + buf - p,
p                 871 net/mac80211/debugfs_sta.c 		p += scnprintf(p, buf_sz + buf - p, n ": %#.4x\n", v);	\
p                 910 net/mac80211/debugfs_sta.c 	p += scnprintf(p, buf_sz + buf - p, "PPE-THRESHOLDS: %#.2x",
p                 915 net/mac80211/debugfs_sta.c 		p += scnprintf(p, buf_sz + buf - p, " %#.2x",
p                 918 net/mac80211/debugfs_sta.c 	p += scnprintf(p, buf_sz + buf - p, "\n");
p                 921 net/mac80211/debugfs_sta.c 	ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
p                 992 net/mac80211/ieee80211_i.h struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
p                 994 net/mac80211/ieee80211_i.h 	return container_of(p, struct ieee80211_sub_if_data, vif);
p                1011 net/mac80211/ieee80211_i.h #define sdata_dereference(p, sdata) \
p                1012 net/mac80211/ieee80211_i.h 	rcu_dereference_protected(p, lockdep_is_held(&sdata->wdev.mtx))
p                1360 net/mac80211/main.c static int ieee80211_free_ack_frame(int id, void *p, void *data)
p                1363 net/mac80211/main.c 	kfree_skb(p);
p                 179 net/mac80211/mesh.c 	struct rmc_entry *p;
p                 187 net/mac80211/mesh.c 		hlist_for_each_entry_safe(p, n, &rmc->bucket[i], list) {
p                 188 net/mac80211/mesh.c 			hlist_del(&p->list);
p                 189 net/mac80211/mesh.c 			kmem_cache_free(rm_cache, p);
p                 217 net/mac80211/mesh.c 	struct rmc_entry *p;
p                 226 net/mac80211/mesh.c 	hlist_for_each_entry_safe(p, n, &rmc->bucket[idx], list) {
p                 228 net/mac80211/mesh.c 		if (time_after(jiffies, p->exp_time) ||
p                 230 net/mac80211/mesh.c 			hlist_del(&p->list);
p                 231 net/mac80211/mesh.c 			kmem_cache_free(rm_cache, p);
p                 233 net/mac80211/mesh.c 		} else if ((seqnum == p->seqnum) && ether_addr_equal(sa, p->sa))
p                 237 net/mac80211/mesh.c 	p = kmem_cache_alloc(rm_cache, GFP_ATOMIC);
p                 238 net/mac80211/mesh.c 	if (!p)
p                 241 net/mac80211/mesh.c 	p->seqnum = seqnum;
p                 242 net/mac80211/mesh.c 	p->exp_time = jiffies + RMC_TIMEOUT;
p                 243 net/mac80211/mesh.c 	memcpy(p->sa, sa, ETH_ALEN);
p                 244 net/mac80211/mesh.c 	hlist_add_head(&p->list, &rmc->bucket[idx]);
p                  17 net/mac80211/mesh_plink.c #define PLINK_GET_LLID(p) (p + 2)
p                  18 net/mac80211/mesh_plink.c #define PLINK_GET_PLID(p) (p + 4)
p                  63 net/mac80211/rc80211_minstrel_debugfs.c 	char *p;
p                  70 net/mac80211/rc80211_minstrel_debugfs.c 	p = ms->buf;
p                  71 net/mac80211/rc80211_minstrel_debugfs.c 	p += sprintf(p, "\n");
p                  72 net/mac80211/rc80211_minstrel_debugfs.c 	p += sprintf(p,
p                  74 net/mac80211/rc80211_minstrel_debugfs.c 	p += sprintf(p,
p                  81 net/mac80211/rc80211_minstrel_debugfs.c 		*(p++) = (i == mi->max_tp_rate[0]) ? 'A' : ' ';
p                  82 net/mac80211/rc80211_minstrel_debugfs.c 		*(p++) = (i == mi->max_tp_rate[1]) ? 'B' : ' ';
p                  83 net/mac80211/rc80211_minstrel_debugfs.c 		*(p++) = (i == mi->max_tp_rate[2]) ? 'C' : ' ';
p                  84 net/mac80211/rc80211_minstrel_debugfs.c 		*(p++) = (i == mi->max_tp_rate[3]) ? 'D' : ' ';
p                  85 net/mac80211/rc80211_minstrel_debugfs.c 		*(p++) = (i == mi->max_prob_rate) ? 'P' : ' ';
p                  87 net/mac80211/rc80211_minstrel_debugfs.c 		p += sprintf(p, " %3u%s ", mr->bitrate / 2,
p                  89 net/mac80211/rc80211_minstrel_debugfs.c 		p += sprintf(p, "%3u  ", i);
p                  90 net/mac80211/rc80211_minstrel_debugfs.c 		p += sprintf(p, "%6u ", mr->perfect_tx_time);
p                  96 net/mac80211/rc80211_minstrel_debugfs.c 		p += sprintf(p, "%4u.%1u    %4u.%1u     %3u.%1u"
p                 108 net/mac80211/rc80211_minstrel_debugfs.c 	p += sprintf(p, "\nTotal packet count::    ideal %d      "
p                 112 net/mac80211/rc80211_minstrel_debugfs.c 	ms->len = p - ms->buf;
p                 125 net/mac80211/rc80211_minstrel_debugfs.c 	char *p;
p                 132 net/mac80211/rc80211_minstrel_debugfs.c 	p = ms->buf;
p                 138 net/mac80211/rc80211_minstrel_debugfs.c 		p += sprintf(p, "%s" ,((i == mi->max_tp_rate[0]) ? "A" : ""));
p                 139 net/mac80211/rc80211_minstrel_debugfs.c 		p += sprintf(p, "%s" ,((i == mi->max_tp_rate[1]) ? "B" : ""));
p                 140 net/mac80211/rc80211_minstrel_debugfs.c 		p += sprintf(p, "%s" ,((i == mi->max_tp_rate[2]) ? "C" : ""));
p                 141 net/mac80211/rc80211_minstrel_debugfs.c 		p += sprintf(p, "%s" ,((i == mi->max_tp_rate[3]) ? "D" : ""));
p                 142 net/mac80211/rc80211_minstrel_debugfs.c 		p += sprintf(p, "%s" ,((i == mi->max_prob_rate) ? "P" : ""));
p                 144 net/mac80211/rc80211_minstrel_debugfs.c 		p += sprintf(p, ",%u%s", mr->bitrate / 2,
p                 146 net/mac80211/rc80211_minstrel_debugfs.c 		p += sprintf(p, "%u,", i);
p                 147 net/mac80211/rc80211_minstrel_debugfs.c 		p += sprintf(p, "%u,",mr->perfect_tx_time);
p                 153 net/mac80211/rc80211_minstrel_debugfs.c 		p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u,%u,%u,"
p                 167 net/mac80211/rc80211_minstrel_debugfs.c 	ms->len = p - ms->buf;
p                  32 net/mac80211/rc80211_minstrel_ht_debugfs.c minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
p                  41 net/mac80211/rc80211_minstrel_ht_debugfs.c 		return p;
p                  63 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "HT%c0  ", htmode);
p                  64 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "%cGI  ", gimode);
p                  65 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "%d  ", mg->streams);
p                  67 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "VHT%c0 ", htmode);
p                  68 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "%cGI ", gimode);
p                  69 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "%d  ", mg->streams);
p                  71 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "CCK    ");
p                  72 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "%cP  ", j < 4 ? 'L' : 'S');
p                  73 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "1 ");
p                  76 net/mac80211/rc80211_minstrel_ht_debugfs.c 		*(p++) = (idx == mi->max_tp_rate[0]) ? 'A' : ' ';
p                  77 net/mac80211/rc80211_minstrel_ht_debugfs.c 		*(p++) = (idx == mi->max_tp_rate[1]) ? 'B' : ' ';
p                  78 net/mac80211/rc80211_minstrel_ht_debugfs.c 		*(p++) = (idx == mi->max_tp_rate[2]) ? 'C' : ' ';
p                  79 net/mac80211/rc80211_minstrel_ht_debugfs.c 		*(p++) = (idx == mi->max_tp_rate[3]) ? 'D' : ' ';
p                  80 net/mac80211/rc80211_minstrel_ht_debugfs.c 		*(p++) = (idx == mi->max_prob_rate) ? 'P' : ' ';
p                  83 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "  MCS%-2u", (mg->streams - 1) * 8 + j);
p                  85 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "  MCS%-1u/%1u", j, mg->streams);
p                  89 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "   %2u.%1uM", r / 10, r % 10);
p                  92 net/mac80211/rc80211_minstrel_ht_debugfs.c 		p += sprintf(p, "  %3u  ", idx);
p                  98 net/mac80211/rc80211_minstrel_ht_debugfs.c 		p += sprintf(p, "%6u  ", tx_time);
p                 104 net/mac80211/rc80211_minstrel_ht_debugfs.c 		p += sprintf(p, "%4u.%1u    %4u.%1u     %3u.%1u"
p                 117 net/mac80211/rc80211_minstrel_ht_debugfs.c 	return p;
p                 128 net/mac80211/rc80211_minstrel_ht_debugfs.c 	char *p;
p                 142 net/mac80211/rc80211_minstrel_ht_debugfs.c 	p = ms->buf;
p                 144 net/mac80211/rc80211_minstrel_ht_debugfs.c 	p += sprintf(p, "\n");
p                 145 net/mac80211/rc80211_minstrel_ht_debugfs.c 	p += sprintf(p,
p                 147 net/mac80211/rc80211_minstrel_ht_debugfs.c 	p += sprintf(p,
p                 150 net/mac80211/rc80211_minstrel_ht_debugfs.c 	p = minstrel_ht_stats_dump(mi, MINSTREL_CCK_GROUP, p);
p                 152 net/mac80211/rc80211_minstrel_ht_debugfs.c 		p = minstrel_ht_stats_dump(mi, i, p);
p                 154 net/mac80211/rc80211_minstrel_ht_debugfs.c 		p = minstrel_ht_stats_dump(mi, i, p);
p                 156 net/mac80211/rc80211_minstrel_ht_debugfs.c 	p += sprintf(p, "\nTotal packet count::    ideal %d      "
p                 161 net/mac80211/rc80211_minstrel_ht_debugfs.c 		p += sprintf(p, "Average # of aggregated frames per A-MPDU: %d.%d\n",
p                 164 net/mac80211/rc80211_minstrel_ht_debugfs.c 	ms->len = p - ms->buf;
p                 179 net/mac80211/rc80211_minstrel_ht_debugfs.c minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p)
p                 188 net/mac80211/rc80211_minstrel_ht_debugfs.c 		return p;
p                 210 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "HT%c0,", htmode);
p                 211 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "%cGI,", gimode);
p                 212 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "%d,", mg->streams);
p                 214 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "VHT%c0,", htmode);
p                 215 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "%cGI,", gimode);
p                 216 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "%d,", mg->streams);
p                 218 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "CCK,");
p                 219 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "%cP,", j < 4 ? 'L' : 'S');
p                 220 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, "1,");
p                 223 net/mac80211/rc80211_minstrel_ht_debugfs.c 		p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[0]) ? "A" : ""));
p                 224 net/mac80211/rc80211_minstrel_ht_debugfs.c 		p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[1]) ? "B" : ""));
p                 225 net/mac80211/rc80211_minstrel_ht_debugfs.c 		p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[2]) ? "C" : ""));
p                 226 net/mac80211/rc80211_minstrel_ht_debugfs.c 		p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[3]) ? "D" : ""));
p                 227 net/mac80211/rc80211_minstrel_ht_debugfs.c 		p += sprintf(p, "%s" ,((idx == mi->max_prob_rate) ? "P" : ""));
p                 230 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, ",MCS%-2u,", (mg->streams - 1) * 8 + j);
p                 232 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, ",MCS%-1u/%1u,", j, mg->streams);
p                 235 net/mac80211/rc80211_minstrel_ht_debugfs.c 			p += sprintf(p, ",%2u.%1uM,", r / 10, r % 10);
p                 238 net/mac80211/rc80211_minstrel_ht_debugfs.c 		p += sprintf(p, "%u,", idx);
p                 243 net/mac80211/rc80211_minstrel_ht_debugfs.c 		p += sprintf(p, "%u,", tx_time);
p                 249 net/mac80211/rc80211_minstrel_ht_debugfs.c 		p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u,%u,"
p                 259 net/mac80211/rc80211_minstrel_ht_debugfs.c 		p += sprintf(p, "%d,%d,%d.%d\n",
p                 267 net/mac80211/rc80211_minstrel_ht_debugfs.c 	return p;
p                 278 net/mac80211/rc80211_minstrel_ht_debugfs.c 	char *p;
p                 294 net/mac80211/rc80211_minstrel_ht_debugfs.c 	p = ms->buf;
p                 296 net/mac80211/rc80211_minstrel_ht_debugfs.c 	p = minstrel_ht_stats_csv_dump(mi, MINSTREL_CCK_GROUP, p);
p                 298 net/mac80211/rc80211_minstrel_ht_debugfs.c 		p = minstrel_ht_stats_csv_dump(mi, i, p);
p                 300 net/mac80211/rc80211_minstrel_ht_debugfs.c 		p = minstrel_ht_stats_csv_dump(mi, i, p);
p                 302 net/mac80211/rc80211_minstrel_ht_debugfs.c 	ms->len = p - ms->buf;
p                  86 net/mac80211/status.c 		u8 *p = ieee80211_get_qos_ctl(hdr);
p                  87 net/mac80211/status.c 		int tid = *p & IEEE80211_QOS_CTL_TID_MASK;
p                  94 net/mac80211/status.c 		if (*p & IEEE80211_QOS_CTL_EOSP)
p                  95 net/mac80211/status.c 			*p &= ~IEEE80211_QOS_CTL_EOSP;
p                2277 net/mac80211/tx.c 		u8 *p = ieee80211_get_qos_ctl(hdr);
p                2278 net/mac80211/tx.c 		skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
p                 121 net/mac80211/wme.c 	u8 *p;
p                 135 net/mac80211/wme.c 	p = ieee80211_get_qos_ctl(hdr);
p                 136 net/mac80211/wme.c 	skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
p                 245 net/mac80211/wme.c 	u8 *p;
p                 250 net/mac80211/wme.c 	p = ieee80211_get_qos_ctl(hdr);
p                 258 net/mac80211/wme.c 	flags = *p & ~(IEEE80211_QOS_CTL_TID_MASK |
p                 267 net/mac80211/wme.c 	*p = flags | tid;
p                 270 net/mac80211/wme.c 	p++;
p                 274 net/mac80211/wme.c 		*p &= ((IEEE80211_QOS_CTL_RSPI |
p                 279 net/mac80211/wme.c 			*p |= (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8);
p                 281 net/mac80211/wme.c 		*p = 0;
p                 111 net/mac802154/iface.c static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
p                 114 net/mac802154/iface.c 	struct sockaddr *addr = p;
p                1070 net/mpls/af_mpls.c 	struct mpls_pcpu_stats *p;
p                1079 net/mpls/af_mpls.c 		p = per_cpu_ptr(mdev->stats, i);
p                1081 net/mpls/af_mpls.c 			start = u64_stats_fetch_begin(&p->syncp);
p                1082 net/mpls/af_mpls.c 			local = p->stats;
p                1083 net/mpls/af_mpls.c 		} while (u64_stats_fetch_retry(&p->syncp, start));
p                 185 net/ncsi/internal.h #define NCSI_TO_CHANNEL(p, c)	(((p) << NCSI_PACKAGE_SHIFT) | (c))
p                 329 net/ncsi/ncsi-manage.c 	struct ncsi_package *p;
p                 332 net/ncsi/ncsi-manage.c 	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
p                 333 net/ncsi/ncsi-manage.c 	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
p                 336 net/ncsi/ncsi-manage.c 		*np = p;
p                 189 net/netfilter/core.c 	struct nf_hook_entries *p;
p                 191 net/netfilter/core.c 	p = rcu_dereference_raw(*pp);
p                 192 net/netfilter/core.c 	new_hooks = nf_hook_entries_grow(p, reg);
p                 200 net/netfilter/core.c 	BUG_ON(p == new_hooks);
p                 201 net/netfilter/core.c 	nf_hook_entries_free(p);
p                 317 net/netfilter/core.c 	struct nf_hook_entries *p, *new_hooks;
p                 336 net/netfilter/core.c 	p = nf_entry_dereference(*pp);
p                 337 net/netfilter/core.c 	new_hooks = nf_hook_entries_grow(p, reg);
p                 354 net/netfilter/core.c 	BUG_ON(p == new_hooks);
p                 355 net/netfilter/core.c 	nf_hook_entries_free(p);
p                 390 net/netfilter/core.c 	struct nf_hook_entries *p;
p                 398 net/netfilter/core.c 	p = nf_entry_dereference(*pp);
p                 399 net/netfilter/core.c 	if (WARN_ON_ONCE(!p)) {
p                 404 net/netfilter/core.c 	if (nf_remove_net_hook(p, reg)) {
p                 416 net/netfilter/core.c 	p = __nf_hook_entries_try_shrink(p, pp);
p                 418 net/netfilter/core.c 	if (!p)
p                 422 net/netfilter/core.c 	nf_hook_entries_free(p);
p                 439 net/netfilter/core.c 	struct nf_hook_entries *p;
p                 441 net/netfilter/core.c 	p = rcu_dereference_raw(*pp);
p                 442 net/netfilter/core.c 	if (nf_remove_net_hook(p, reg)) {
p                 443 net/netfilter/core.c 		p = __nf_hook_entries_try_shrink(p, pp);
p                 444 net/netfilter/core.c 		nf_hook_entries_free(p);
p                  56 net/netfilter/ipset/ip_set_core.c #define ip_set_dereference(p)		\
p                  57 net/netfilter/ipset/ip_set_core.c 	rcu_dereference_protected(p,	\
p                  13 net/netfilter/ipset/ip_set_hash_gen.h #define __ipset_dereference(p)		\
p                  14 net/netfilter/ipset/ip_set_hash_gen.h 	rcu_dereference_protected(p, 1)
p                  15 net/netfilter/ipset/ip_set_hash_gen.h #define ipset_dereference_nfnl(p)	\
p                  16 net/netfilter/ipset/ip_set_hash_gen.h 	rcu_dereference_protected(p,	\
p                  18 net/netfilter/ipset/ip_set_hash_gen.h #define ipset_dereference_set(p, set) 	\
p                  19 net/netfilter/ipset/ip_set_hash_gen.h 	rcu_dereference_protected(p,	\
p                  22 net/netfilter/ipset/ip_set_hash_gen.h #define ipset_dereference_bh_nfnl(p)	\
p                  23 net/netfilter/ipset/ip_set_hash_gen.h 	rcu_dereference_bh_check(p, 	\
p                 111 net/netfilter/ipset/ip_set_hash_ipport.c 	u32 ip, ip_to = 0, p = 0, port, port_to;
p                 178 net/netfilter/ipset/ip_set_hash_ipport.c 		p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
p                 180 net/netfilter/ipset/ip_set_hash_ipport.c 		for (; p <= port_to; p++) {
p                 182 net/netfilter/ipset/ip_set_hash_ipport.c 			e.port = htons(p);
p                 114 net/netfilter/ipset/ip_set_hash_ipportip.c 	u32 ip, ip_to = 0, p = 0, port, port_to;
p                 185 net/netfilter/ipset/ip_set_hash_ipportip.c 		p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
p                 187 net/netfilter/ipset/ip_set_hash_ipportip.c 		for (; p <= port_to; p++) {
p                 189 net/netfilter/ipset/ip_set_hash_ipportip.c 			e.port = htons(p);
p                 166 net/netfilter/ipset/ip_set_hash_ipportnet.c 	u32 ip = 0, ip_to = 0, p = 0, port, port_to;
p                 270 net/netfilter/ipset/ip_set_hash_ipportnet.c 		p = ntohs(h->next.port);
p                 273 net/netfilter/ipset/ip_set_hash_ipportnet.c 		p = port;
p                 278 net/netfilter/ipset/ip_set_hash_ipportnet.c 		for (; p <= port_to; p++) {
p                 279 net/netfilter/ipset/ip_set_hash_ipportnet.c 			e.port = htons(p);
p                 293 net/netfilter/ipset/ip_set_hash_ipportnet.c 		p = port;
p                 160 net/netfilter/ipset/ip_set_hash_netport.c 	u32 port, port_to, p = 0, ip = 0, ip_to = 0;
p                 240 net/netfilter/ipset/ip_set_hash_netport.c 		p = ntohs(h->next.port);
p                 242 net/netfilter/ipset/ip_set_hash_netport.c 		p = port;
p                 248 net/netfilter/ipset/ip_set_hash_netport.c 		for (; p <= port_to; p++) {
p                 249 net/netfilter/ipset/ip_set_hash_netport.c 			e.port = htons(p);
p                 256 net/netfilter/ipset/ip_set_hash_netport.c 		p = port;
p                 183 net/netfilter/ipset/ip_set_hash_netportnet.c 	u32 ip = 0, ip_to = 0, p = 0, port, port_to;
p                 289 net/netfilter/ipset/ip_set_hash_netportnet.c 		p = ntohs(h->next.port);
p                 292 net/netfilter/ipset/ip_set_hash_netportnet.c 		p = port;
p                 299 net/netfilter/ipset/ip_set_hash_netportnet.c 		for (; p <= port_to; p++) {
p                 300 net/netfilter/ipset/ip_set_hash_netportnet.c 			e.port = htons(p);
p                 313 net/netfilter/ipset/ip_set_hash_netportnet.c 		p = port;
p                 122 net/netfilter/ipvs/ip_vs_conn.c static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
p                 128 net/netfilter/ipvs/ip_vs_conn.c 	if (p->pe_data && p->pe->hashkey_raw)
p                 129 net/netfilter/ipvs/ip_vs_conn.c 		return p->pe->hashkey_raw(p, ip_vs_conn_rnd, inverse) &
p                 133 net/netfilter/ipvs/ip_vs_conn.c 		addr = p->caddr;
p                 134 net/netfilter/ipvs/ip_vs_conn.c 		port = p->cport;
p                 136 net/netfilter/ipvs/ip_vs_conn.c 		addr = p->vaddr;
p                 137 net/netfilter/ipvs/ip_vs_conn.c 		port = p->vport;
p                 140 net/netfilter/ipvs/ip_vs_conn.c 	return ip_vs_conn_hashkey(p->ipvs, p->af, p->protocol, addr, port);
p                 145 net/netfilter/ipvs/ip_vs_conn.c 	struct ip_vs_conn_param p;
p                 148 net/netfilter/ipvs/ip_vs_conn.c 			      &cp->caddr, cp->cport, NULL, 0, &p);
p                 151 net/netfilter/ipvs/ip_vs_conn.c 		p.pe = cp->pe;
p                 152 net/netfilter/ipvs/ip_vs_conn.c 		p.pe_data = cp->pe_data;
p                 153 net/netfilter/ipvs/ip_vs_conn.c 		p.pe_data_len = cp->pe_data_len;
p                 156 net/netfilter/ipvs/ip_vs_conn.c 	return ip_vs_conn_hashkey_param(&p, false);
p                 263 net/netfilter/ipvs/ip_vs_conn.c __ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
p                 268 net/netfilter/ipvs/ip_vs_conn.c 	hash = ip_vs_conn_hashkey_param(p, false);
p                 273 net/netfilter/ipvs/ip_vs_conn.c 		if (p->cport == cp->cport && p->vport == cp->vport &&
p                 274 net/netfilter/ipvs/ip_vs_conn.c 		    cp->af == p->af &&
p                 275 net/netfilter/ipvs/ip_vs_conn.c 		    ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
p                 276 net/netfilter/ipvs/ip_vs_conn.c 		    ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) &&
p                 277 net/netfilter/ipvs/ip_vs_conn.c 		    ((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
p                 278 net/netfilter/ipvs/ip_vs_conn.c 		    p->protocol == cp->protocol &&
p                 279 net/netfilter/ipvs/ip_vs_conn.c 		    cp->ipvs == p->ipvs) {
p                 293 net/netfilter/ipvs/ip_vs_conn.c struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
p                 297 net/netfilter/ipvs/ip_vs_conn.c 	cp = __ip_vs_conn_in_get(p);
p                 299 net/netfilter/ipvs/ip_vs_conn.c 		struct ip_vs_conn_param cport_zero_p = *p;
p                 305 net/netfilter/ipvs/ip_vs_conn.c 		      ip_vs_proto_name(p->protocol),
p                 306 net/netfilter/ipvs/ip_vs_conn.c 		      IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
p                 307 net/netfilter/ipvs/ip_vs_conn.c 		      IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
p                 317 net/netfilter/ipvs/ip_vs_conn.c 			    struct ip_vs_conn_param *p)
p                 327 net/netfilter/ipvs/ip_vs_conn.c 				      pptr[0], &iph->daddr, pptr[1], p);
p                 330 net/netfilter/ipvs/ip_vs_conn.c 				      pptr[1], &iph->saddr, pptr[0], p);
p                 339 net/netfilter/ipvs/ip_vs_conn.c 	struct ip_vs_conn_param p;
p                 341 net/netfilter/ipvs/ip_vs_conn.c 	if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
p                 344 net/netfilter/ipvs/ip_vs_conn.c 	return ip_vs_conn_in_get(&p);
p                 349 net/netfilter/ipvs/ip_vs_conn.c struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
p                 354 net/netfilter/ipvs/ip_vs_conn.c 	hash = ip_vs_conn_hashkey_param(p, false);
p                 359 net/netfilter/ipvs/ip_vs_conn.c 		if (unlikely(p->pe_data && p->pe->ct_match)) {
p                 360 net/netfilter/ipvs/ip_vs_conn.c 			if (cp->ipvs != p->ipvs)
p                 362 net/netfilter/ipvs/ip_vs_conn.c 			if (p->pe == cp->pe && p->pe->ct_match(p, cp)) {
p                 369 net/netfilter/ipvs/ip_vs_conn.c 		if (cp->af == p->af &&
p                 370 net/netfilter/ipvs/ip_vs_conn.c 		    ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
p                 373 net/netfilter/ipvs/ip_vs_conn.c 		    ip_vs_addr_equal(p->protocol == IPPROTO_IP ? AF_UNSPEC :
p                 374 net/netfilter/ipvs/ip_vs_conn.c 				     p->af, p->vaddr, &cp->vaddr) &&
p                 375 net/netfilter/ipvs/ip_vs_conn.c 		    p->vport == cp->vport && p->cport == cp->cport &&
p                 377 net/netfilter/ipvs/ip_vs_conn.c 		    p->protocol == cp->protocol &&
p                 378 net/netfilter/ipvs/ip_vs_conn.c 		    cp->ipvs == p->ipvs) {
p                 389 net/netfilter/ipvs/ip_vs_conn.c 		      ip_vs_proto_name(p->protocol),
p                 390 net/netfilter/ipvs/ip_vs_conn.c 		      IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
p                 391 net/netfilter/ipvs/ip_vs_conn.c 		      IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
p                 401 net/netfilter/ipvs/ip_vs_conn.c struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
p                 409 net/netfilter/ipvs/ip_vs_conn.c 	hash = ip_vs_conn_hashkey_param(p, true);
p                 414 net/netfilter/ipvs/ip_vs_conn.c 		if (p->vport == cp->cport && p->cport == cp->dport &&
p                 415 net/netfilter/ipvs/ip_vs_conn.c 		    cp->af == p->af &&
p                 416 net/netfilter/ipvs/ip_vs_conn.c 		    ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
p                 417 net/netfilter/ipvs/ip_vs_conn.c 		    ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) &&
p                 418 net/netfilter/ipvs/ip_vs_conn.c 		    p->protocol == cp->protocol &&
p                 419 net/netfilter/ipvs/ip_vs_conn.c 		    cp->ipvs == p->ipvs) {
p                 431 net/netfilter/ipvs/ip_vs_conn.c 		      ip_vs_proto_name(p->protocol),
p                 432 net/netfilter/ipvs/ip_vs_conn.c 		      IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
p                 433 net/netfilter/ipvs/ip_vs_conn.c 		      IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
p                 444 net/netfilter/ipvs/ip_vs_conn.c 	struct ip_vs_conn_param p;
p                 446 net/netfilter/ipvs/ip_vs_conn.c 	if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
p                 449 net/netfilter/ipvs/ip_vs_conn.c 	return ip_vs_conn_out_get(&p);
p                 901 net/netfilter/ipvs/ip_vs_conn.c ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
p                 906 net/netfilter/ipvs/ip_vs_conn.c 	struct netns_ipvs *ipvs = p->ipvs;
p                 907 net/netfilter/ipvs/ip_vs_conn.c 	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->ipvs,
p                 908 net/netfilter/ipvs/ip_vs_conn.c 							   p->protocol);
p                 919 net/netfilter/ipvs/ip_vs_conn.c 	cp->af		   = p->af;
p                 921 net/netfilter/ipvs/ip_vs_conn.c 	cp->protocol	   = p->protocol;
p                 922 net/netfilter/ipvs/ip_vs_conn.c 	ip_vs_addr_set(p->af, &cp->caddr, p->caddr);
p                 923 net/netfilter/ipvs/ip_vs_conn.c 	cp->cport	   = p->cport;
p                 925 net/netfilter/ipvs/ip_vs_conn.c 	ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
p                 926 net/netfilter/ipvs/ip_vs_conn.c 		       &cp->vaddr, p->vaddr);
p                 927 net/netfilter/ipvs/ip_vs_conn.c 	cp->vport	   = p->vport;
p                 932 net/netfilter/ipvs/ip_vs_conn.c 	if (flags & IP_VS_CONN_F_TEMPLATE && p->pe) {
p                 933 net/netfilter/ipvs/ip_vs_conn.c 		ip_vs_pe_get(p->pe);
p                 934 net/netfilter/ipvs/ip_vs_conn.c 		cp->pe = p->pe;
p                 935 net/netfilter/ipvs/ip_vs_conn.c 		cp->pe_data = p->pe_data;
p                 936 net/netfilter/ipvs/ip_vs_conn.c 		cp->pe_data_len = p->pe_data_len;
p                 978 net/netfilter/ipvs/ip_vs_conn.c 	if (p->af == AF_INET6)
p                1008 net/netfilter/ipvs/ip_vs_conn.c 	struct seq_net_private	p;
p                 246 net/netfilter/ipvs/ip_vs_core.c 			      struct ip_vs_conn_param *p)
p                 249 net/netfilter/ipvs/ip_vs_core.c 			      vport, p);
p                 250 net/netfilter/ipvs/ip_vs_core.c 	p->pe = rcu_dereference(svc->pe);
p                 251 net/netfilter/ipvs/ip_vs_core.c 	if (p->pe && p->pe->fill_param)
p                 252 net/netfilter/ipvs/ip_vs_core.c 		return p->pe->fill_param(p, skb);
p                 557 net/netfilter/ipvs/ip_vs_core.c 		struct ip_vs_conn_param p;
p                 560 net/netfilter/ipvs/ip_vs_core.c 				      caddr, cport, vaddr, vport, &p);
p                 561 net/netfilter/ipvs/ip_vs_core.c 		cp = ip_vs_conn_new(&p, dest->af, &dest->addr,
p                 625 net/netfilter/ipvs/ip_vs_core.c 			struct ip_vs_conn_param p;
p                 628 net/netfilter/ipvs/ip_vs_core.c 					      &iph->daddr, pptr[1], &p);
p                 629 net/netfilter/ipvs/ip_vs_core.c 			cp = ip_vs_conn_new(&p, svc->af, &daddr, 0,
p                1994 net/netfilter/ipvs/ip_vs_ctl.c 	struct seq_net_private p;  /* Do not move this, netns depends upon it*/
p                 102 net/netfilter/ipvs/ip_vs_dh.c 	struct list_head *p;
p                 107 net/netfilter/ipvs/ip_vs_dh.c 	p = &svc->destinations;
p                 108 net/netfilter/ipvs/ip_vs_dh.c 	empty = list_empty(p);
p                 116 net/netfilter/ipvs/ip_vs_dh.c 			if (p == &svc->destinations)
p                 117 net/netfilter/ipvs/ip_vs_dh.c 				p = p->next;
p                 119 net/netfilter/ipvs/ip_vs_dh.c 			dest = list_entry(p, struct ip_vs_dest, n_list);
p                 123 net/netfilter/ipvs/ip_vs_dh.c 			p = p->next;
p                 103 net/netfilter/ipvs/ip_vs_ftp.c 	unsigned char p[6];
p                 141 net/netfilter/ipvs/ip_vs_ftp.c 		p[0] = 0;
p                 147 net/netfilter/ipvs/ip_vs_ftp.c 				p[i] = p[i]*10 + c - '0';
p                 150 net/netfilter/ipvs/ip_vs_ftp.c 				p[i] = 0;
p                 162 net/netfilter/ipvs/ip_vs_ftp.c 		addr->ip = get_unaligned((__be32 *) p);
p                 163 net/netfilter/ipvs/ip_vs_ftp.c 		*port = get_unaligned((__be16 *) (p + 4));
p                 318 net/netfilter/ipvs/ip_vs_ftp.c 		struct ip_vs_conn_param p;
p                 322 net/netfilter/ipvs/ip_vs_ftp.c 				      &cp->caddr, 0, &p);
p                 323 net/netfilter/ipvs/ip_vs_ftp.c 		n_cp = ip_vs_conn_out_get(&p);
p                 326 net/netfilter/ipvs/ip_vs_ftp.c 		struct ip_vs_conn_param p;
p                 330 net/netfilter/ipvs/ip_vs_ftp.c 				      0, &cp->vaddr, port, &p);
p                 331 net/netfilter/ipvs/ip_vs_ftp.c 		n_cp = ip_vs_conn_new(&p, cp->af, &from, port,
p                 530 net/netfilter/ipvs/ip_vs_ftp.c 		struct ip_vs_conn_param p;
p                 533 net/netfilter/ipvs/ip_vs_ftp.c 				      htons(ntohs(cp->vport)-1), &p);
p                 534 net/netfilter/ipvs/ip_vs_ftp.c 		n_cp = ip_vs_conn_in_get(&p);
p                 536 net/netfilter/ipvs/ip_vs_ftp.c 			n_cp = ip_vs_conn_new(&p, cp->af, &cp->daddr,
p                 125 net/netfilter/ipvs/ip_vs_mh.c 	struct list_head *p;
p                 138 net/netfilter/ipvs/ip_vs_mh.c 	p = &svc->destinations;
p                 140 net/netfilter/ipvs/ip_vs_mh.c 	while ((p = p->next) != &svc->destinations) {
p                 141 net/netfilter/ipvs/ip_vs_mh.c 		dest = list_entry(p, struct ip_vs_dest, n_list);
p                 164 net/netfilter/ipvs/ip_vs_mh.c 	struct list_head *p;
p                 182 net/netfilter/ipvs/ip_vs_mh.c 	p = &svc->destinations;
p                 186 net/netfilter/ipvs/ip_vs_mh.c 		if (p == &svc->destinations)
p                 187 net/netfilter/ipvs/ip_vs_mh.c 			p = p->next;
p                 190 net/netfilter/ipvs/ip_vs_mh.c 		while (p != &svc->destinations) {
p                 193 net/netfilter/ipvs/ip_vs_mh.c 				p = p->next;
p                 210 net/netfilter/ipvs/ip_vs_mh.c 			new_dest = list_entry(p, struct ip_vs_dest, n_list);
p                 223 net/netfilter/ipvs/ip_vs_mh.c 				p = p->next;
p                 145 net/netfilter/ipvs/ip_vs_nfct.c 	struct ip_vs_conn_param p;
p                 160 net/netfilter/ipvs/ip_vs_nfct.c 			      &orig->dst.u3, orig->dst.u.tcp.port, &p);
p                 161 net/netfilter/ipvs/ip_vs_nfct.c 	cp = ip_vs_conn_out_get(&p);
p                 177 net/netfilter/ipvs/ip_vs_nfct.c 	cp = ip_vs_conn_in_get(&p);
p                  67 net/netfilter/ipvs/ip_vs_pe_sip.c ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
p                  74 net/netfilter/ipvs/ip_vs_pe_sip.c 	retc = ip_vs_fill_iph_skb(p->af, skb, false, &iph);
p                  98 net/netfilter/ipvs/ip_vs_pe_sip.c 	p->pe_data = kmemdup(dptr + matchoff, matchlen, GFP_ATOMIC);
p                  99 net/netfilter/ipvs/ip_vs_pe_sip.c 	if (!p->pe_data)
p                 102 net/netfilter/ipvs/ip_vs_pe_sip.c 	p->pe_data_len = matchlen;
p                 107 net/netfilter/ipvs/ip_vs_pe_sip.c static bool ip_vs_sip_ct_match(const struct ip_vs_conn_param *p,
p                 113 net/netfilter/ipvs/ip_vs_pe_sip.c 	if (ct->af == p->af &&
p                 114 net/netfilter/ipvs/ip_vs_pe_sip.c 	    ip_vs_addr_equal(p->af, p->caddr, &ct->caddr) &&
p                 117 net/netfilter/ipvs/ip_vs_pe_sip.c 	    ip_vs_addr_equal(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
p                 118 net/netfilter/ipvs/ip_vs_pe_sip.c 			     p->vaddr, &ct->vaddr) &&
p                 119 net/netfilter/ipvs/ip_vs_pe_sip.c 	    ct->vport == p->vport &&
p                 121 net/netfilter/ipvs/ip_vs_pe_sip.c 	    ct->protocol == p->protocol &&
p                 122 net/netfilter/ipvs/ip_vs_pe_sip.c 	    ct->pe_data && ct->pe_data_len == p->pe_data_len &&
p                 123 net/netfilter/ipvs/ip_vs_pe_sip.c 	    !memcmp(ct->pe_data, p->pe_data, p->pe_data_len))
p                 127 net/netfilter/ipvs/ip_vs_pe_sip.c 		      ip_vs_proto_name(p->protocol),
p                 128 net/netfilter/ipvs/ip_vs_pe_sip.c 		      IP_VS_DEBUG_CALLID(p->pe_data, p->pe_data_len),
p                 129 net/netfilter/ipvs/ip_vs_pe_sip.c 		      IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
p                 135 net/netfilter/ipvs/ip_vs_pe_sip.c static u32 ip_vs_sip_hashkey_raw(const struct ip_vs_conn_param *p,
p                 138 net/netfilter/ipvs/ip_vs_pe_sip.c 	return jhash(p->pe_data, p->pe_data_len, initval);
p                 344 net/netfilter/ipvs/ip_vs_proto.c #define REGISTER_PROTOCOL(p)			\
p                 346 net/netfilter/ipvs/ip_vs_proto.c 		register_ip_vs_protocol(p);	\
p                 348 net/netfilter/ipvs/ip_vs_proto.c 		strcat(protocols, (p)->name);	\
p                  42 net/netfilter/ipvs/ip_vs_proto_ah_esp.c 			     struct ip_vs_conn_param *p)
p                  47 net/netfilter/ipvs/ip_vs_proto_ah_esp.c 				      &iph->daddr, htons(PORT_ISAKMP), p);
p                  51 net/netfilter/ipvs/ip_vs_proto_ah_esp.c 				      &iph->saddr, htons(PORT_ISAKMP), p);
p                  59 net/netfilter/ipvs/ip_vs_proto_ah_esp.c 	struct ip_vs_conn_param p;
p                  61 net/netfilter/ipvs/ip_vs_proto_ah_esp.c 	ah_esp_conn_fill_param_proto(ipvs, af, iph, &p);
p                  62 net/netfilter/ipvs/ip_vs_proto_ah_esp.c 	cp = ip_vs_conn_in_get(&p);
p                  85 net/netfilter/ipvs/ip_vs_proto_ah_esp.c 	struct ip_vs_conn_param p;
p                  87 net/netfilter/ipvs/ip_vs_proto_ah_esp.c 	ah_esp_conn_fill_param_proto(ipvs, af, iph, &p);
p                  88 net/netfilter/ipvs/ip_vs_proto_ah_esp.c 	cp = ip_vs_conn_out_get(&p);
p                  35 net/netfilter/ipvs/ip_vs_rr.c 	struct list_head *p;
p                  38 net/netfilter/ipvs/ip_vs_rr.c 	p = (struct list_head *) svc->sched_data;
p                  42 net/netfilter/ipvs/ip_vs_rr.c 	if (p == &dest->n_list)
p                  43 net/netfilter/ipvs/ip_vs_rr.c 		svc->sched_data = p->next->prev;
p                  56 net/netfilter/ipvs/ip_vs_rr.c 	struct list_head *p;
p                  63 net/netfilter/ipvs/ip_vs_rr.c 	p = (struct list_head *) svc->sched_data;
p                  64 net/netfilter/ipvs/ip_vs_rr.c 	last = dest = list_entry(p, struct ip_vs_dest, n_list);
p                  81 net/netfilter/ipvs/ip_vs_rr.c 	} while (pass < 2 && p != &svc->destinations);
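ip_vs_rr.c above stores the previous pick as a bare struct list_head * in svc->sched_data and advances it on every scheduling call, wrapping past the list head sentinel. A rough userspace sketch of that cursor follows; it assumes the destination list stays non-empty and leaves out the weight and overload checks of the real scheduler.

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct dest {
	struct list_head n_list;
	const char *name;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* Advance the saved cursor one destination, skipping the head sentinel. */
static struct dest *rr_pick(struct list_head *head, struct list_head **cursor)
{
	struct list_head *p = (*cursor)->next;

	if (p == head) {
		if (head->next == head)
			return NULL;		/* empty list */
		p = p->next;
	}
	*cursor = p;
	return container_of(p, struct dest, n_list);
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct dest a = { .name = "A" }, b = { .name = "B" }, c = { .name = "C" };
	struct list_head *cursor = &head;	/* like svc->sched_data at setup */

	list_add_tail(&a.n_list, &head);
	list_add_tail(&b.n_list, &head);
	list_add_tail(&c.n_list, &head);

	for (int i = 0; i < 5; i++)
		printf("%s ", rr_pick(&head, &cursor)->name);
	printf("\n");				/* prints: A B C A B */
	return 0;
}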
p                 167 net/netfilter/ipvs/ip_vs_sh.c 	struct list_head *p;
p                 173 net/netfilter/ipvs/ip_vs_sh.c 	p = &svc->destinations;
p                 174 net/netfilter/ipvs/ip_vs_sh.c 	empty = list_empty(p);
p                 183 net/netfilter/ipvs/ip_vs_sh.c 			if (p == &svc->destinations)
p                 184 net/netfilter/ipvs/ip_vs_sh.c 				p = p->next;
p                 186 net/netfilter/ipvs/ip_vs_sh.c 			dest = list_entry(p, struct ip_vs_dest, n_list);
p                 196 net/netfilter/ipvs/ip_vs_sh.c 				p = p->next;
p                 640 net/netfilter/ipvs/ip_vs_sync.c 	__u8 *p;
p                 715 net/netfilter/ipvs/ip_vs_sync.c 	p = buff->head;
p                 720 net/netfilter/ipvs/ip_vs_sync.c 		*(p++) = 0;
p                 722 net/netfilter/ipvs/ip_vs_sync.c 	s = (union ip_vs_sync_conn *)p;
p                 739 net/netfilter/ipvs/ip_vs_sync.c 		p += sizeof(struct ip_vs_sync_v6);
p                 746 net/netfilter/ipvs/ip_vs_sync.c 		p += sizeof(struct ip_vs_sync_v4);	/* options ptr */
p                 752 net/netfilter/ipvs/ip_vs_sync.c 		*(p++) = IPVS_OPT_SEQ_DATA;
p                 753 net/netfilter/ipvs/ip_vs_sync.c 		*(p++) = sizeof(struct ip_vs_sync_conn_options);
p                 754 net/netfilter/ipvs/ip_vs_sync.c 		hton_seq((struct ip_vs_seq *)p, &cp->in_seq);
p                 755 net/netfilter/ipvs/ip_vs_sync.c 		p += sizeof(struct ip_vs_seq);
p                 756 net/netfilter/ipvs/ip_vs_sync.c 		hton_seq((struct ip_vs_seq *)p, &cp->out_seq);
p                 757 net/netfilter/ipvs/ip_vs_sync.c 		p += sizeof(struct ip_vs_seq);
p                 761 net/netfilter/ipvs/ip_vs_sync.c 		*(p++) = IPVS_OPT_PE_DATA;
p                 762 net/netfilter/ipvs/ip_vs_sync.c 		*(p++) = cp->pe_data_len;
p                 763 net/netfilter/ipvs/ip_vs_sync.c 		memcpy(p, cp->pe_data, cp->pe_data_len);
p                 764 net/netfilter/ipvs/ip_vs_sync.c 		p += cp->pe_data_len;
p                 767 net/netfilter/ipvs/ip_vs_sync.c 			*(p++) = IPVS_OPT_PE_NAME;
p                 768 net/netfilter/ipvs/ip_vs_sync.c 			*(p++) = pe_name_len;
p                 769 net/netfilter/ipvs/ip_vs_sync.c 			memcpy(p, cp->pe->name, pe_name_len);
p                 770 net/netfilter/ipvs/ip_vs_sync.c 			p += pe_name_len;
p                 793 net/netfilter/ipvs/ip_vs_sync.c 			   struct ip_vs_conn_param *p,
p                 803 net/netfilter/ipvs/ip_vs_sync.c 				      sc->v6.vport, p);
p                 810 net/netfilter/ipvs/ip_vs_sync.c 				      sc->v4.vport, p);
p                 818 net/netfilter/ipvs/ip_vs_sync.c 			p->pe = __ip_vs_pe_getbyname(buff);
p                 819 net/netfilter/ipvs/ip_vs_sync.c 			if (!p->pe) {
p                 829 net/netfilter/ipvs/ip_vs_sync.c 		p->pe_data = kmemdup(pe_data, pe_data_len, GFP_ATOMIC);
p                 830 net/netfilter/ipvs/ip_vs_sync.c 		if (!p->pe_data) {
p                 831 net/netfilter/ipvs/ip_vs_sync.c 			module_put(p->pe->module);
p                 834 net/netfilter/ipvs/ip_vs_sync.c 		p->pe_data_len = pe_data_len;
p                 970 net/netfilter/ipvs/ip_vs_sync.c 	char *p;
p                 973 net/netfilter/ipvs/ip_vs_sync.c 	p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0);
p                 977 net/netfilter/ipvs/ip_vs_sync.c 		if (p + SIMPLE_CONN_SIZE > buffer+buflen) {
p                 981 net/netfilter/ipvs/ip_vs_sync.c 		s = (struct ip_vs_sync_conn_v0 *) p;
p                 986 net/netfilter/ipvs/ip_vs_sync.c 			p += FULL_CONN_SIZE;
p                 987 net/netfilter/ipvs/ip_vs_sync.c 			if (p > buffer+buflen) {
p                 993 net/netfilter/ipvs/ip_vs_sync.c 			p += SIMPLE_CONN_SIZE;
p                1031 net/netfilter/ipvs/ip_vs_sync.c static inline int ip_vs_proc_seqopt(__u8 *p, unsigned int plen,
p                1037 net/netfilter/ipvs/ip_vs_sync.c 	topt = (struct ip_vs_sync_conn_options *)p;
p                1053 net/netfilter/ipvs/ip_vs_sync.c static int ip_vs_proc_str(__u8 *p, unsigned int plen, unsigned int *data_len,
p                1066 net/netfilter/ipvs/ip_vs_sync.c 	*data = p;
p                1073 net/netfilter/ipvs/ip_vs_sync.c static inline int ip_vs_proc_sync_conn(struct netns_ipvs *ipvs, __u8 *p, __u8 *msg_end)
p                1085 net/netfilter/ipvs/ip_vs_sync.c 	s = (union ip_vs_sync_conn *) p;
p                1090 net/netfilter/ipvs/ip_vs_sync.c 		p += sizeof(struct ip_vs_sync_v6);
p                1098 net/netfilter/ipvs/ip_vs_sync.c 		p += sizeof(struct ip_vs_sync_v4);
p                1102 net/netfilter/ipvs/ip_vs_sync.c 	if (p > msg_end)
p                1106 net/netfilter/ipvs/ip_vs_sync.c 	while (p < msg_end) {
p                1110 net/netfilter/ipvs/ip_vs_sync.c 		if (p+2 > msg_end)
p                1112 net/netfilter/ipvs/ip_vs_sync.c 		ptype = *(p++);
p                1113 net/netfilter/ipvs/ip_vs_sync.c 		plen  = *(p++);
p                1115 net/netfilter/ipvs/ip_vs_sync.c 		if (!plen || ((p + plen) > msg_end))
p                1120 net/netfilter/ipvs/ip_vs_sync.c 			if (ip_vs_proc_seqopt(p, plen, &opt_flags, &opt))
p                1125 net/netfilter/ipvs/ip_vs_sync.c 			if (ip_vs_proc_str(p, plen, &pe_data_len, &pe_data,
p                1132 net/netfilter/ipvs/ip_vs_sync.c 			if (ip_vs_proc_str(p, plen,&pe_name_len, &pe_name,
p                1147 net/netfilter/ipvs/ip_vs_sync.c 		p += plen;  /* Next option */
p                1211 net/netfilter/ipvs/ip_vs_sync.c 	__u8 *p, *msg_end;
p                1240 net/netfilter/ipvs/ip_vs_sync.c 			p = msg_end;
p                1241 net/netfilter/ipvs/ip_vs_sync.c 			if (p + sizeof(s->v4) > buffer+buflen) {
p                1245 net/netfilter/ipvs/ip_vs_sync.c 			s = (union ip_vs_sync_conn *)p;
p                1247 net/netfilter/ipvs/ip_vs_sync.c 			msg_end = p + size;
p                1259 net/netfilter/ipvs/ip_vs_sync.c 			retc = ip_vs_proc_sync_conn(ipvs, p, msg_end);
p                1266 net/netfilter/ipvs/ip_vs_sync.c 			msg_end = p + ((size + 3) & ~3);
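The ip_vs_proc_sync_conn() excerpts above walk trailing options as one-byte type / one-byte length pairs, validating every access against msg_end before moving on. A self-contained sketch of that bounded TLV walk follows; the option codes 1 and 2 are invented here and merely stand in for IPVS_OPT_SEQ_DATA and the PE options.

#include <stdio.h>
#include <stdint.h>

static int parse_options(const uint8_t *p, const uint8_t *msg_end)
{
	while (p < msg_end) {
		uint8_t ptype, plen;

		if (p + 2 > msg_end)		/* need room for type + length */
			return -1;
		ptype = *p++;
		plen  = *p++;
		if (!plen || p + plen > msg_end)
			return -1;		/* empty or truncated option */

		switch (ptype) {
		case 1:
			printf("option 1, %u bytes\n", plen);
			break;
		case 2:
			printf("option 2, %u bytes\n", plen);
			break;
		default:
			printf("unknown option %u skipped\n", ptype);
			break;
		}
		p += plen;			/* next option */
	}
	return 0;
}

int main(void)
{
	uint8_t buf[] = { 1, 3, 'a', 'b', 'c', 2, 1, 'x' };

	return parse_options(buf, buf + sizeof(buf)) ? 1 : 0;
}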
p                 775 net/netfilter/ipvs/ip_vs_xmit.c 		__be16 _pt, *p;
p                 777 net/netfilter/ipvs/ip_vs_xmit.c 		p = skb_header_pointer(skb, ipvsh->len, sizeof(_pt), &_pt);
p                 778 net/netfilter/ipvs/ip_vs_xmit.c 		if (p == NULL)
p                 780 net/netfilter/ipvs/ip_vs_xmit.c 		ip_vs_conn_fill_cport(cp, *p);
p                 781 net/netfilter/ipvs/ip_vs_xmit.c 		IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
p                 863 net/netfilter/ipvs/ip_vs_xmit.c 		__be16 _pt, *p;
p                 864 net/netfilter/ipvs/ip_vs_xmit.c 		p = skb_header_pointer(skb, ipvsh->len, sizeof(_pt), &_pt);
p                 865 net/netfilter/ipvs/ip_vs_xmit.c 		if (p == NULL)
p                 867 net/netfilter/ipvs/ip_vs_xmit.c 		ip_vs_conn_fill_cport(cp, *p);
p                 868 net/netfilter/ipvs/ip_vs_xmit.c 		IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
p                 545 net/netfilter/nf_conntrack_core.c 	struct nf_conn *tmpl, *p;
p                 552 net/netfilter/nf_conntrack_core.c 		p = tmpl;
p                 553 net/netfilter/nf_conntrack_core.c 		tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
p                 554 net/netfilter/nf_conntrack_core.c 		if (tmpl != p) {
p                 555 net/netfilter/nf_conntrack_core.c 			tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
p                 556 net/netfilter/nf_conntrack_core.c 			tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
p                 416 net/netfilter/nf_conntrack_expect.c 	const struct nf_conntrack_expect_policy *p;
p                 449 net/netfilter/nf_conntrack_expect.c 		p = &helper->expect_policy[expect->class];
p                 450 net/netfilter/nf_conntrack_expect.c 		if (p->max_expected &&
p                 451 net/netfilter/nf_conntrack_expect.c 		    master_help->expecting[expect->class] >= p->max_expected) {
p                 454 net/netfilter/nf_conntrack_expect.c 						>= p->max_expected) {
p                 546 net/netfilter/nf_conntrack_expect.c 	struct seq_net_private p;
p                 873 net/netfilter/nf_conntrack_h323_asn1.c 	unsigned char *p = buf;
p                 876 net/netfilter/nf_conntrack_h323_asn1.c 	if (!p || sz < 1)
p                 880 net/netfilter/nf_conntrack_h323_asn1.c 	if (*p != 0x08) {
p                 884 net/netfilter/nf_conntrack_h323_asn1.c 	p++;
p                 890 net/netfilter/nf_conntrack_h323_asn1.c 	len = *p++;
p                 894 net/netfilter/nf_conntrack_h323_asn1.c 	p += len;
p                 900 net/netfilter/nf_conntrack_h323_asn1.c 	q931->MessageType = *p++;
p                 903 net/netfilter/nf_conntrack_h323_asn1.c 	if (*p & 0x80) {
p                 904 net/netfilter/nf_conntrack_h323_asn1.c 		p++;
p                 910 net/netfilter/nf_conntrack_h323_asn1.c 		if (*p == 0x7e) {	/* UserUserIE */
p                 913 net/netfilter/nf_conntrack_h323_asn1.c 			p++;
p                 914 net/netfilter/nf_conntrack_h323_asn1.c 			len = *p++ << 8;
p                 915 net/netfilter/nf_conntrack_h323_asn1.c 			len |= *p++;
p                 919 net/netfilter/nf_conntrack_h323_asn1.c 			p++;
p                 921 net/netfilter/nf_conntrack_h323_asn1.c 			return DecodeH323_UserInformation(buf, p, len,
p                 924 net/netfilter/nf_conntrack_h323_asn1.c 		p++;
p                 928 net/netfilter/nf_conntrack_h323_asn1.c 		len = *p++;
p                 932 net/netfilter/nf_conntrack_h323_asn1.c 		p += len;
p                 225 net/netfilter/nf_conntrack_h323_main.c 	const unsigned char *p;
p                 235 net/netfilter/nf_conntrack_h323_main.c 		p = data + taddr->unicastAddress.iPAddress.network;
p                 241 net/netfilter/nf_conntrack_h323_main.c 		p = data + taddr->unicastAddress.iP6Address.network;
p                 248 net/netfilter/nf_conntrack_h323_main.c 	memcpy(addr, p, len);
p                 250 net/netfilter/nf_conntrack_h323_main.c 	memcpy(port, p + len, sizeof(__be16));
p                 640 net/netfilter/nf_conntrack_h323_main.c 	const unsigned char *p;
p                 647 net/netfilter/nf_conntrack_h323_main.c 		p = data + taddr->ipAddress.ip;
p                 653 net/netfilter/nf_conntrack_h323_main.c 		p = data + taddr->ip6Address.ip;
p                 660 net/netfilter/nf_conntrack_h323_main.c 	memcpy(addr, p, len);
p                 662 net/netfilter/nf_conntrack_h323_main.c 	memcpy(port, p + len, sizeof(__be16));
p                 244 net/netfilter/nf_conntrack_sip.c 	unsigned int p;
p                 272 net/netfilter/nf_conntrack_sip.c 		p = simple_strtoul(end, (char **)&end, 10);
p                 273 net/netfilter/nf_conntrack_sip.c 		if (p < 1024 || p > 65535)
p                 275 net/netfilter/nf_conntrack_sip.c 		*port = htons(p);
p                 512 net/netfilter/nf_conntrack_sip.c 	unsigned int p;
p                 525 net/netfilter/nf_conntrack_sip.c 		p = simple_strtoul(c, (char **)&c, 10);
p                 526 net/netfilter/nf_conntrack_sip.c 		if (p < 1024 || p > 65535)
p                 528 net/netfilter/nf_conntrack_sip.c 		*port = htons(p);
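Both nf_conntrack_sip.c fragments above decode a decimal port with simple_strtoul(), reject anything outside 1024..65535 and store the result with htons(). A userspace equivalent, with strtoul() standing in for the kernel-only helper:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <arpa/inet.h>

static int parse_port(const char *s, uint16_t *port_be)
{
	char *end;
	unsigned long p = strtoul(s, &end, 10);

	if (end == s || p < 1024 || p > 65535)
		return -1;			/* not a usable port */
	*port_be = htons((uint16_t)p);
	return (int)(end - s);			/* bytes consumed */
}

int main(void)
{
	uint16_t port;

	if (parse_port("5060", &port) > 0)
		printf("port (host order): %u\n", ntohs(port));
	return 0;
}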
p                  98 net/netfilter/nf_conntrack_standalone.c 	struct seq_net_private p;
p                 249 net/netfilter/nf_nat_sip.c 			__be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port;
p                 250 net/netfilter/nf_nat_sip.c 			buflen = sprintf(buffer, "%u", ntohs(p));
p                3228 net/netfilter/nf_tables_api.c 	const char *p;
p                3232 net/netfilter/nf_tables_api.c 	p = strchr(name, '%');
p                3233 net/netfilter/nf_tables_api.c 	if (p != NULL) {
p                3234 net/netfilter/nf_tables_api.c 		if (p[1] != 'd' || strchr(p + 2, '%'))
p                1012 net/netfilter/nfnetlink_log.c 	struct seq_net_private p;
p                1404 net/netfilter/nfnetlink_queue.c 	struct seq_net_private p;
p                 224 net/netfilter/nft_set_rbtree.c 	struct rb_node *parent, **p;
p                 228 net/netfilter/nft_set_rbtree.c 	p = &priv->root.rb_node;
p                 229 net/netfilter/nft_set_rbtree.c 	while (*p != NULL) {
p                 230 net/netfilter/nft_set_rbtree.c 		parent = *p;
p                 236 net/netfilter/nft_set_rbtree.c 			p = &parent->rb_left;
p                 238 net/netfilter/nft_set_rbtree.c 			p = &parent->rb_right;
p                 242 net/netfilter/nft_set_rbtree.c 				p = &parent->rb_left;
p                 245 net/netfilter/nft_set_rbtree.c 				p = &parent->rb_right;
p                 250 net/netfilter/nft_set_rbtree.c 				p = &parent->rb_left;
p                 254 net/netfilter/nft_set_rbtree.c 	rb_link_node_rcu(&new->node, parent, p);
p                 406 net/netfilter/x_tables.c 	char *p = buf;
p                 413 net/netfilter/x_tables.c 	*p = '\0';
p                 417 net/netfilter/x_tables.c 		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
p                 420 net/netfilter/x_tables.c 			p += res;
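The x_tables.c lines above build a '/'-separated list of table names by calling snprintf() repeatedly and advancing p by the returned length. A small sketch of that join loop, simplified to treat truncation as a plain error:

#include <stdio.h>

static int join_names(char *buf, size_t size, const char *const *names, int n)
{
	char *p = buf;

	*p = '\0';
	for (int i = 0; i < n; i++) {
		int res = snprintf(p, size, "%s%s", i ? "/" : "", names[i]);

		if (res < 0 || (size_t)res >= size)
			return -1;		/* would not fit */
		p += res;
		size -= (size_t)res;
	}
	return (int)(p - buf);
}

int main(void)
{
	const char *names[] = { "mangle", "filter", "nat" };
	char buf[64];

	if (join_names(buf, sizeof(buf), names, 3) > 0)
		printf("%s\n", buf);		/* mangle/filter/nat */
	return 0;
}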
p                 612 net/netfilter/xt_hashlimit.c static void hashlimit_ipv6_mask(__be32 *i, unsigned int p)
p                 614 net/netfilter/xt_hashlimit.c 	switch (p) {
p                 616 net/netfilter/xt_hashlimit.c 		i[0] = maskl(i[0], p);
p                 620 net/netfilter/xt_hashlimit.c 		i[1] = maskl(i[1], p - 32);
p                 624 net/netfilter/xt_hashlimit.c 		i[2] = maskl(i[2], p - 64);
p                 628 net/netfilter/xt_hashlimit.c 		i[3] = maskl(i[3], p - 96);
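hashlimit_ipv6_mask() above keeps the first p bits of a 128-bit address and clears the rest, switching over four big-endian 32-bit words. The same masking written as a byte-wise loop for illustration; the result is equivalent even though the layout differs from the kernel's __be32 words.

#include <stdio.h>
#include <stdint.h>

static void ipv6_prefix_mask(uint8_t addr[16], unsigned int plen)
{
	for (unsigned int i = 0; i < 16; i++) {
		if (plen >= 8) {
			plen -= 8;	/* byte lies fully inside the prefix */
		} else {
			addr[i] &= plen ? (uint8_t)(0xffu << (8 - plen)) : 0;
			plen = 0;	/* everything after this is cleared */
		}
	}
}

int main(void)
{
	uint8_t a[16] = { 0x20, 0x01, 0x0d, 0xb8, 0xff, 0xff, 0xff, 0xff };

	ipv6_prefix_mask(a, 36);
	printf("%02x%02x:%02x%02x:%02x%02x\n", a[0], a[1], a[2], a[3], a[4], a[5]);
	/* prints 2001:0db8:f000 -- only the top 36 bits survive */
	return 0;
}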
p                  28 net/netfilter/xt_osf.c xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
p                  30 net/netfilter/xt_osf.c 	return nf_osf_match(skb, xt_family(p), xt_hooknum(p), xt_in(p),
p                  31 net/netfilter/xt_osf.c 			    xt_out(p), p->matchinfo, xt_net(p), nf_osf_fingers);
p                 477 net/netfilter/xt_recent.c 	loff_t p = *pos;
p                 483 net/netfilter/xt_recent.c 			if (p-- == 0)
p                  38 net/netfilter/xt_set.c #define ADT_OPT(n, f, d, fs, cfs, t, p, b, po, bo)	\
p                  45 net/netfilter/xt_set.c 	.ext.packets = p,				\
p                  44 net/netlabel/netlabel_domainhash.c #define netlbl_domhsh_rcu_deref(p) \
p                  45 net/netlabel/netlabel_domainhash.c 	rcu_dereference_check(p, lockdep_is_held(&netlbl_domhsh_lock))
p                 103 net/netlabel/netlabel_unlabeled.c #define netlbl_unlhsh_rcu_deref(p) \
p                 104 net/netlabel/netlabel_unlabeled.c 	rcu_dereference_check(p, lockdep_is_held(&netlbl_unlhsh_lock))
p                1404 net/netlink/af_netlink.c 				    struct netlink_broadcast_data *p)
p                1409 net/netlink/af_netlink.c 	if (p->exclude_sk == sk)
p                1412 net/netlink/af_netlink.c 	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
p                1413 net/netlink/af_netlink.c 	    !test_bit(p->group - 1, nlk->groups))
p                1416 net/netlink/af_netlink.c 	if (!net_eq(sock_net(sk), p->net)) {
p                1420 net/netlink/af_netlink.c 		if (!peernet_has_id(sock_net(sk), p->net))
p                1423 net/netlink/af_netlink.c 		if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
p                1428 net/netlink/af_netlink.c 	if (p->failure) {
p                1434 net/netlink/af_netlink.c 	if (p->skb2 == NULL) {
p                1435 net/netlink/af_netlink.c 		if (skb_shared(p->skb)) {
p                1436 net/netlink/af_netlink.c 			p->skb2 = skb_clone(p->skb, p->allocation);
p                1438 net/netlink/af_netlink.c 			p->skb2 = skb_get(p->skb);
p                1443 net/netlink/af_netlink.c 			skb_orphan(p->skb2);
p                1446 net/netlink/af_netlink.c 	if (p->skb2 == NULL) {
p                1449 net/netlink/af_netlink.c 		p->failure = 1;
p                1451 net/netlink/af_netlink.c 			p->delivery_failure = 1;
p                1454 net/netlink/af_netlink.c 	if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
p                1455 net/netlink/af_netlink.c 		kfree_skb(p->skb2);
p                1456 net/netlink/af_netlink.c 		p->skb2 = NULL;
p                1459 net/netlink/af_netlink.c 	if (sk_filter(sk, p->skb2)) {
p                1460 net/netlink/af_netlink.c 		kfree_skb(p->skb2);
p                1461 net/netlink/af_netlink.c 		p->skb2 = NULL;
p                1464 net/netlink/af_netlink.c 	NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
p                1465 net/netlink/af_netlink.c 	if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED)
p                1466 net/netlink/af_netlink.c 		NETLINK_CB(p->skb2).nsid_is_set = true;
p                1467 net/netlink/af_netlink.c 	val = netlink_broadcast_deliver(sk, p->skb2);
p                1471 net/netlink/af_netlink.c 			p->delivery_failure = 1;
p                1473 net/netlink/af_netlink.c 		p->congested |= val;
p                1474 net/netlink/af_netlink.c 		p->delivered = 1;
p                1475 net/netlink/af_netlink.c 		p->skb2 = NULL;
p                1547 net/netlink/af_netlink.c static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
p                1552 net/netlink/af_netlink.c 	if (sk == p->exclude_sk)
p                1555 net/netlink/af_netlink.c 	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
p                1558 net/netlink/af_netlink.c 	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
p                1559 net/netlink/af_netlink.c 	    !test_bit(p->group - 1, nlk->groups))
p                1562 net/netlink/af_netlink.c 	if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
p                1567 net/netlink/af_netlink.c 	sk->sk_err = p->code;
p                2538 net/netlink/af_netlink.c 	struct seq_net_private p;
p                 204 net/openvswitch/datapath.c void ovs_dp_detach_port(struct vport *p)
p                 209 net/openvswitch/datapath.c 	hlist_del_rcu(&p->dp_hash_node);
p                 212 net/openvswitch/datapath.c 	ovs_vport_del(p);
p                 218 net/openvswitch/datapath.c 	const struct vport *p = OVS_CB(skb)->input_vport;
p                 219 net/openvswitch/datapath.c 	struct datapath *dp = p->dp;
p                 236 net/openvswitch/datapath.c 		upcall.portid = ovs_vport_find_upcall_portid(p, skb);
p                 153 net/openvswitch/datapath.h #define ovsl_dereference(p)					\
p                 154 net/openvswitch/datapath.h 	rcu_dereference_protected(p, lockdep_ovsl_is_held())
p                 155 net/openvswitch/datapath.h #define rcu_dereference_ovsl(p)					\
p                 156 net/openvswitch/datapath.h 	rcu_dereference_check(p, lockdep_ovsl_is_held())
p                 107 net/rds/cong.c 	struct rb_node **p = &rds_cong_tree.rb_node;
p                 111 net/rds/cong.c 	while (*p) {
p                 114 net/rds/cong.c 		parent = *p;
p                 119 net/rds/cong.c 			p = &(*p)->rb_left;
p                 121 net/rds/cong.c 			p = &(*p)->rb_right;
p                 127 net/rds/cong.c 		rb_link_node(&insert->m_rb_node, parent, p);
p                  68 net/rds/rdma.c 	struct rb_node **p = &root->rb_node;
p                  72 net/rds/rdma.c 	while (*p) {
p                  73 net/rds/rdma.c 		parent = *p;
p                  77 net/rds/rdma.c 			p = &(*p)->rb_left;
p                  79 net/rds/rdma.c 			p = &(*p)->rb_right;
p                  85 net/rds/rdma.c 		rb_link_node(&insert->r_rb_node, parent, p);
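nft_set_rbtree.c, rds/cong.c and rds/rdma.c above all descend their trees through a struct rb_node **p that always points at the link which will receive the new node, so inserting at the root needs no special case. A plain, unbalanced userspace version of the same pointer-to-pointer insertion; the rebalancing that rb_insert_color() performs in the kernel is deliberately omitted.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *left, *right;
};

static void insert(struct node **root, struct node *new)
{
	struct node **p = root;

	while (*p) {
		struct node *parent = *p;

		if (new->key < parent->key)
			p = &parent->left;
		else
			p = &parent->right;
	}
	*p = new;		/* link the node into the empty slot we found */
}

int main(void)
{
	struct node *root = NULL;
	int keys[] = { 5, 2, 8, 1 };

	for (unsigned int i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
		struct node *n = calloc(1, sizeof(*n));

		n->key = keys[i];
		insert(&root, n);
	}
	printf("root key: %d\n", root->key);	/* 5 */
	return 0;
}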
p                 239 net/rose/rose_subr.c static int rose_parse_national(unsigned char *p, struct rose_facilities_struct *facilities, int len)
p                 246 net/rose/rose_subr.c 		switch (*p & 0xC0) {
p                 250 net/rose/rose_subr.c 			p   += 2;
p                 258 net/rose/rose_subr.c 			if (*p == FAC_NATIONAL_RAND)
p                 259 net/rose/rose_subr.c 				facilities->rand = ((p[1] << 8) & 0xFF00) + ((p[2] << 0) & 0x00FF);
p                 260 net/rose/rose_subr.c 			p   += 3;
p                 268 net/rose/rose_subr.c 			p   += 4;
p                 276 net/rose/rose_subr.c 			l = p[1];
p                 279 net/rose/rose_subr.c 			if (*p == FAC_NATIONAL_DEST_DIGI) {
p                 283 net/rose/rose_subr.c 					memcpy(&facilities->source_digis[0], p + 2, AX25_ADDR_LEN);
p                 287 net/rose/rose_subr.c 			else if (*p == FAC_NATIONAL_SRC_DIGI) {
p                 291 net/rose/rose_subr.c 					memcpy(&facilities->dest_digis[0], p + 2, AX25_ADDR_LEN);
p                 295 net/rose/rose_subr.c 			else if (*p == FAC_NATIONAL_FAIL_CALL) {
p                 298 net/rose/rose_subr.c 				memcpy(&facilities->fail_call, p + 2, AX25_ADDR_LEN);
p                 300 net/rose/rose_subr.c 			else if (*p == FAC_NATIONAL_FAIL_ADD) {
p                 303 net/rose/rose_subr.c 				memcpy(&facilities->fail_addr, p + 3, ROSE_ADDR_LEN);
p                 305 net/rose/rose_subr.c 			else if (*p == FAC_NATIONAL_DIGIS) {
p                 311 net/rose/rose_subr.c 				for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) {
p                 323 net/rose/rose_subr.c 			p   += l + 2;
p                 328 net/rose/rose_subr.c 	} while (*p != 0x00 && len > 0);
p                 333 net/rose/rose_subr.c static int rose_parse_ccitt(unsigned char *p, struct rose_facilities_struct *facilities, int len)
p                 339 net/rose/rose_subr.c 		switch (*p & 0xC0) {
p                 343 net/rose/rose_subr.c 			p   += 2;
p                 351 net/rose/rose_subr.c 			p   += 3;
p                 359 net/rose/rose_subr.c 			p   += 4;
p                 367 net/rose/rose_subr.c 			l = p[1];
p                 373 net/rose/rose_subr.c 			if (*p == FAC_CCITT_DEST_NSAP) {
p                 374 net/rose/rose_subr.c 				memcpy(&facilities->source_addr, p + 7, ROSE_ADDR_LEN);
p                 375 net/rose/rose_subr.c 				memcpy(callsign, p + 12,   l - 10);
p                 379 net/rose/rose_subr.c 			if (*p == FAC_CCITT_SRC_NSAP) {
p                 380 net/rose/rose_subr.c 				memcpy(&facilities->dest_addr, p + 7, ROSE_ADDR_LEN);
p                 381 net/rose/rose_subr.c 				memcpy(callsign, p + 12, l - 10);
p                 385 net/rose/rose_subr.c 			p   += l + 2;
p                 390 net/rose/rose_subr.c 	} while (*p != 0x00 && len > 0);
p                 395 net/rose/rose_subr.c int rose_parse_facilities(unsigned char *p, unsigned packet_len,
p                 400 net/rose/rose_subr.c 	facilities_len = *p++;
p                 405 net/rose/rose_subr.c 	while (facilities_len >= 3 && *p == 0x00) {
p                 407 net/rose/rose_subr.c 		p++;
p                 409 net/rose/rose_subr.c 		switch (*p) {
p                 411 net/rose/rose_subr.c 			len = rose_parse_national(p + 1, facilities, facilities_len - 1);
p                 415 net/rose/rose_subr.c 			len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1);
p                 419 net/rose/rose_subr.c 			printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p);
p                 429 net/rose/rose_subr.c 		p += len + 1;
p                 437 net/rose/rose_subr.c 	unsigned char *p = buffer + 1;
p                 444 net/rose/rose_subr.c 		*p++ = 0x00;
p                 445 net/rose/rose_subr.c 		*p++ = FAC_NATIONAL;
p                 448 net/rose/rose_subr.c 			*p++ = FAC_NATIONAL_RAND;
p                 449 net/rose/rose_subr.c 			*p++ = (rose->rand >> 8) & 0xFF;
p                 450 net/rose/rose_subr.c 			*p++ = (rose->rand >> 0) & 0xFF;
p                 456 net/rose/rose_subr.c 			*p++ = FAC_NATIONAL_DIGIS;
p                 457 net/rose/rose_subr.c 			*p++ = AX25_ADDR_LEN * (rose->source_ndigis + rose->dest_ndigis);
p                 461 net/rose/rose_subr.c 				memcpy(p, &rose->source_digis[nb], AX25_ADDR_LEN);
p                 462 net/rose/rose_subr.c 				p[6] |= AX25_HBIT;
p                 463 net/rose/rose_subr.c 				p += AX25_ADDR_LEN;
p                 468 net/rose/rose_subr.c 				memcpy(p, &rose->dest_digis[nb], AX25_ADDR_LEN);
p                 469 net/rose/rose_subr.c 				p[6] &= ~AX25_HBIT;
p                 470 net/rose/rose_subr.c 				p += AX25_ADDR_LEN;
p                 476 net/rose/rose_subr.c 			*p++ = FAC_NATIONAL_SRC_DIGI;
p                 477 net/rose/rose_subr.c 			*p++ = AX25_ADDR_LEN;
p                 478 net/rose/rose_subr.c 			memcpy(p, &rose->source_digis[0], AX25_ADDR_LEN);
p                 479 net/rose/rose_subr.c 			p   += AX25_ADDR_LEN;
p                 484 net/rose/rose_subr.c 			*p++ = FAC_NATIONAL_DEST_DIGI;
p                 485 net/rose/rose_subr.c 			*p++ = AX25_ADDR_LEN;
p                 486 net/rose/rose_subr.c 			memcpy(p, &rose->dest_digis[0], AX25_ADDR_LEN);
p                 487 net/rose/rose_subr.c 			p   += AX25_ADDR_LEN;
p                 491 net/rose/rose_subr.c 	*p++ = 0x00;
p                 492 net/rose/rose_subr.c 	*p++ = FAC_CCITT;
p                 494 net/rose/rose_subr.c 	*p++ = FAC_CCITT_DEST_NSAP;
p                 498 net/rose/rose_subr.c 	*p++ = strlen(callsign) + 10;
p                 499 net/rose/rose_subr.c 	*p++ = (strlen(callsign) + 9) * 2;		/* ??? */
p                 501 net/rose/rose_subr.c 	*p++ = 0x47; *p++ = 0x00; *p++ = 0x11;
p                 502 net/rose/rose_subr.c 	*p++ = ROSE_ADDR_LEN * 2;
p                 503 net/rose/rose_subr.c 	memcpy(p, &rose->dest_addr, ROSE_ADDR_LEN);
p                 504 net/rose/rose_subr.c 	p   += ROSE_ADDR_LEN;
p                 506 net/rose/rose_subr.c 	memcpy(p, callsign, strlen(callsign));
p                 507 net/rose/rose_subr.c 	p   += strlen(callsign);
p                 509 net/rose/rose_subr.c 	*p++ = FAC_CCITT_SRC_NSAP;
p                 513 net/rose/rose_subr.c 	*p++ = strlen(callsign) + 10;
p                 514 net/rose/rose_subr.c 	*p++ = (strlen(callsign) + 9) * 2;		/* ??? */
p                 516 net/rose/rose_subr.c 	*p++ = 0x47; *p++ = 0x00; *p++ = 0x11;
p                 517 net/rose/rose_subr.c 	*p++ = ROSE_ADDR_LEN * 2;
p                 518 net/rose/rose_subr.c 	memcpy(p, &rose->source_addr, ROSE_ADDR_LEN);
p                 519 net/rose/rose_subr.c 	p   += ROSE_ADDR_LEN;
p                 521 net/rose/rose_subr.c 	memcpy(p, callsign, strlen(callsign));
p                 522 net/rose/rose_subr.c 	p   += strlen(callsign);
p                 524 net/rose/rose_subr.c 	len       = p - buffer;
p                 292 net/rxrpc/af_rxrpc.c 	struct rxrpc_call_params p;
p                 310 net/rxrpc/af_rxrpc.c 	memset(&p, 0, sizeof(p));
p                 311 net/rxrpc/af_rxrpc.c 	p.user_call_ID = user_call_ID;
p                 312 net/rxrpc/af_rxrpc.c 	p.tx_total_len = tx_total_len;
p                 313 net/rxrpc/af_rxrpc.c 	p.interruptibility = interruptibility;
p                 322 net/rxrpc/af_rxrpc.c 	call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp, debug_id);
p                  66 net/rxrpc/call_object.c 	struct rb_node *p;
p                  72 net/rxrpc/call_object.c 	p = rx->calls.rb_node;
p                  73 net/rxrpc/call_object.c 	while (p) {
p                  74 net/rxrpc/call_object.c 		call = rb_entry(p, struct rxrpc_call, sock_node);
p                  77 net/rxrpc/call_object.c 			p = p->rb_left;
p                  79 net/rxrpc/call_object.c 			p = p->rb_right;
p                 219 net/rxrpc/call_object.c 					 struct rxrpc_call_params *p,
p                 231 net/rxrpc/call_object.c 	_enter("%p,%lx", rx, p->user_call_ID);
p                 240 net/rxrpc/call_object.c 	call->interruptibility = p->interruptibility;
p                 241 net/rxrpc/call_object.c 	call->tx_total_len = p->tx_total_len;
p                 244 net/rxrpc/call_object.c 			 here, (const void *)p->user_call_ID);
p                 260 net/rxrpc/call_object.c 		if (p->user_call_ID < xcall->user_call_ID)
p                 262 net/rxrpc/call_object.c 		else if (p->user_call_ID > xcall->user_call_ID)
p                 269 net/rxrpc/call_object.c 	call->user_call_ID = p->user_call_ID;
p                 283 net/rxrpc/conn_client.c 	struct rb_node *p, **pp, *parent;
p                 305 net/rxrpc/conn_client.c 		p = local->client_conns.rb_node;
p                 306 net/rxrpc/conn_client.c 		while (p) {
p                 307 net/rxrpc/conn_client.c 			conn = rb_entry(p, struct rxrpc_connection, client_node);
p                 316 net/rxrpc/conn_client.c 				p = p->rb_left;
p                 318 net/rxrpc/conn_client.c 				p = p->rb_right;
p                  27 net/rxrpc/conn_service.c 	struct rb_node *p;
p                  40 net/rxrpc/conn_service.c 		p = rcu_dereference_raw(peer->service_conns.rb_node);
p                  41 net/rxrpc/conn_service.c 		while (p) {
p                  42 net/rxrpc/conn_service.c 			conn = rb_entry(p, struct rxrpc_connection, service_node);
p                  45 net/rxrpc/conn_service.c 				p = rcu_dereference_raw(p->rb_left);
p                  47 net/rxrpc/conn_service.c 				p = rcu_dereference_raw(p->rb_right);
p                  73 net/rxrpc/key.c 	char *p;
p                  75 net/rxrpc/key.c 	num = simple_strtoul(desc, &p, 10);
p                  76 net/rxrpc/key.c 	if (*p != ':' || num > 65535)
p                  78 net/rxrpc/key.c 	num = simple_strtoul(p + 1, &p, 10);
p                  79 net/rxrpc/key.c 	if (*p || num < 1 || num > 255)
p                  31 net/rxrpc/peer_object.c 	const u16 *p;
p                  46 net/rxrpc/peer_object.c 		p = (u16 *)&srx->transport.sin.sin_addr;
p                  52 net/rxrpc/peer_object.c 		p = (u16 *)&srx->transport.sin6.sin6_addr;
p                  61 net/rxrpc/peer_object.c 	for (i = 0; i < size; i += sizeof(*p), p++)
p                  62 net/rxrpc/peer_object.c 		hash_key += *p;
p                 258 net/rxrpc/proc.c 	void *p;
p                 279 net/rxrpc/proc.c 		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
p                 280 net/rxrpc/proc.c 		if (p)
p                 281 net/rxrpc/proc.c 			return p;
p                 293 net/rxrpc/proc.c 	void *p;
p                 300 net/rxrpc/proc.c 	p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos);
p                 301 net/rxrpc/proc.c 	if (p)
p                 302 net/rxrpc/proc.c 		return p;
p                 318 net/rxrpc/proc.c 		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
p                 319 net/rxrpc/proc.c 		if (p)
p                 320 net/rxrpc/proc.c 			return p;
p                 769 net/rxrpc/rxkad.c 	u8 *p = (u8 *) response;
p                 772 net/rxrpc/rxkad.c 		csum = csum * 0x10204081 + *p++;
p                 912 net/rxrpc/rxkad.c 	u8 *p, *q, *name, *end;
p                 947 net/rxrpc/rxkad.c 	p = ticket;
p                 948 net/rxrpc/rxkad.c 	end = p + ticket_len;
p                 952 net/rxrpc/rxkad.c 		u8 *__str = p;				\
p                 954 net/rxrpc/rxkad.c 		q = memchr(p, 0, end - p);		\
p                 955 net/rxrpc/rxkad.c 		if (!q || q - p > (field##_SZ))		\
p                 957 net/rxrpc/rxkad.c 		for (; p < q; p++)			\
p                 958 net/rxrpc/rxkad.c 			if (!isprint(*p))		\
p                 960 net/rxrpc/rxkad.c 		p++;					\
p                 965 net/rxrpc/rxkad.c 	_debug("KIV FLAGS: %x", *p);
p                 966 net/rxrpc/rxkad.c 	little_endian = *p & 1;
p                 967 net/rxrpc/rxkad.c 	p++;
p                 982 net/rxrpc/rxkad.c 	if (end - p < 4 + 8 + 4 + 2)
p                 986 net/rxrpc/rxkad.c 	memcpy(&addr, p, sizeof(addr));
p                 987 net/rxrpc/rxkad.c 	p += 4;
p                 991 net/rxrpc/rxkad.c 	memcpy(&key, p, sizeof(key));
p                 992 net/rxrpc/rxkad.c 	p += 8;
p                 997 net/rxrpc/rxkad.c 	life = *p++ * 5 * 60;
p                1003 net/rxrpc/rxkad.c 		memcpy(&stamp, p, 4);
p                1007 net/rxrpc/rxkad.c 		memcpy(&stamp, p, 4);
p                1010 net/rxrpc/rxkad.c 	p += 4;
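The rxkad.c excerpt above folds every byte of the response into a checksum with csum = csum * 0x10204081 + *p++. A tiny demonstration of that multiply-accumulate; the starting value below is arbitrary for the demo and is not claimed to match the seeding used by the real code.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t rolling_csum(const void *data, size_t len)
{
	const uint8_t *p = data;
	uint32_t csum = 1;		/* demo seed only */

	while (len--)
		csum = csum * 0x10204081u + *p++;	/* wraps mod 2^32 */
	return csum;
}

int main(void)
{
	const char msg[] = "response";

	printf("csum = %08x\n", rolling_csum(msg, strlen(msg)));
	return 0;
}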
p                 489 net/rxrpc/sendmsg.c static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
p                 514 net/rxrpc/sendmsg.c 				p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg);
p                 518 net/rxrpc/sendmsg.c 				p->call.user_call_ID = *(unsigned long *)
p                 525 net/rxrpc/sendmsg.c 			if (p->command != RXRPC_CMD_SEND_DATA)
p                 527 net/rxrpc/sendmsg.c 			p->command = RXRPC_CMD_SEND_ABORT;
p                 528 net/rxrpc/sendmsg.c 			if (len != sizeof(p->abort_code))
p                 530 net/rxrpc/sendmsg.c 			p->abort_code = *(unsigned int *)CMSG_DATA(cmsg);
p                 531 net/rxrpc/sendmsg.c 			if (p->abort_code == 0)
p                 536 net/rxrpc/sendmsg.c 			if (p->command != RXRPC_CMD_SEND_DATA)
p                 538 net/rxrpc/sendmsg.c 			p->command = RXRPC_CMD_ACCEPT;
p                 544 net/rxrpc/sendmsg.c 			p->exclusive = true;
p                 550 net/rxrpc/sendmsg.c 			p->upgrade = true;
p                 556 net/rxrpc/sendmsg.c 			if (p->call.tx_total_len != -1 || len != sizeof(__s64))
p                 558 net/rxrpc/sendmsg.c 			p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
p                 559 net/rxrpc/sendmsg.c 			if (p->call.tx_total_len < 0)
p                 566 net/rxrpc/sendmsg.c 			memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
p                 567 net/rxrpc/sendmsg.c 			p->call.nr_timeouts = len / 4;
p                 568 net/rxrpc/sendmsg.c 			if (p->call.timeouts.hard > INT_MAX / HZ)
p                 570 net/rxrpc/sendmsg.c 			if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
p                 572 net/rxrpc/sendmsg.c 			if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
p                 583 net/rxrpc/sendmsg.c 	if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
p                 596 net/rxrpc/sendmsg.c 				  struct rxrpc_send_params *p)
p                 621 net/rxrpc/sendmsg.c 	cp.exclusive		= rx->exclusive | p->exclusive;
p                 622 net/rxrpc/sendmsg.c 	cp.upgrade		= p->upgrade;
p                 624 net/rxrpc/sendmsg.c 	call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL,
p                 647 net/rxrpc/sendmsg.c 	struct rxrpc_send_params p = {
p                 660 net/rxrpc/sendmsg.c 	ret = rxrpc_sendmsg_cmsg(msg, &p);
p                 664 net/rxrpc/sendmsg.c 	if (p.command == RXRPC_CMD_ACCEPT) {
p                 668 net/rxrpc/sendmsg.c 		call = rxrpc_accept_call(rx, p.call.user_call_ID, NULL);
p                 676 net/rxrpc/sendmsg.c 	call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
p                 679 net/rxrpc/sendmsg.c 		if (p.command != RXRPC_CMD_SEND_DATA)
p                 681 net/rxrpc/sendmsg.c 		call = rxrpc_new_client_call_for_sendmsg(rx, msg, &p);
p                 707 net/rxrpc/sendmsg.c 		if (p.call.tx_total_len != -1) {
p                 713 net/rxrpc/sendmsg.c 			call->tx_total_len = p.call.tx_total_len;
p                 717 net/rxrpc/sendmsg.c 	switch (p.call.nr_timeouts) {
p                 719 net/rxrpc/sendmsg.c 		j = msecs_to_jiffies(p.call.timeouts.normal);
p                 720 net/rxrpc/sendmsg.c 		if (p.call.timeouts.normal > 0 && j == 0)
p                 725 net/rxrpc/sendmsg.c 		j = msecs_to_jiffies(p.call.timeouts.idle);
p                 726 net/rxrpc/sendmsg.c 		if (p.call.timeouts.idle > 0 && j == 0)
p                 731 net/rxrpc/sendmsg.c 		if (p.call.timeouts.hard > 0) {
p                 732 net/rxrpc/sendmsg.c 			j = msecs_to_jiffies(p.call.timeouts.hard);
p                 749 net/rxrpc/sendmsg.c 	} else if (p.command == RXRPC_CMD_SEND_ABORT) {
p                 751 net/rxrpc/sendmsg.c 		if (rxrpc_abort_call("CMD", call, 0, p.abort_code, -ECONNABORTED))
p                 753 net/rxrpc/sendmsg.c 	} else if (p.command != RXRPC_CMD_SEND_DATA) {
p                  33 net/sched/act_api.c static void tcf_free_cookie_rcu(struct rcu_head *p)
p                  35 net/sched/act_api.c 	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);
p                 101 net/sched/act_api.c static void free_tcf(struct tc_action *p)
p                 103 net/sched/act_api.c 	struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);
p                 105 net/sched/act_api.c 	free_percpu(p->cpu_bstats);
p                 106 net/sched/act_api.c 	free_percpu(p->cpu_bstats_hw);
p                 107 net/sched/act_api.c 	free_percpu(p->cpu_qstats);
p                 109 net/sched/act_api.c 	tcf_set_action_cookie(&p->act_cookie, NULL);
p                 113 net/sched/act_api.c 	kfree(p);
p                 116 net/sched/act_api.c static void tcf_action_cleanup(struct tc_action *p)
p                 118 net/sched/act_api.c 	if (p->ops->cleanup)
p                 119 net/sched/act_api.c 		p->ops->cleanup(p);
p                 121 net/sched/act_api.c 	gen_kill_estimator(&p->tcfa_rate_est);
p                 122 net/sched/act_api.c 	free_tcf(p);
p                 125 net/sched/act_api.c static int __tcf_action_put(struct tc_action *p, bool bind)
p                 127 net/sched/act_api.c 	struct tcf_idrinfo *idrinfo = p->idrinfo;
p                 129 net/sched/act_api.c 	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
p                 131 net/sched/act_api.c 			atomic_dec(&p->tcfa_bindcnt);
p                 132 net/sched/act_api.c 		idr_remove(&idrinfo->action_idr, p->tcfa_index);
p                 135 net/sched/act_api.c 		tcf_action_cleanup(p);
p                 140 net/sched/act_api.c 		atomic_dec(&p->tcfa_bindcnt);
p                 145 net/sched/act_api.c int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
p                 161 net/sched/act_api.c 	if (p) {
p                 162 net/sched/act_api.c 		if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
p                 165 net/sched/act_api.c 		if (__tcf_action_put(p, bind))
p                 222 net/sched/act_api.c 	struct tc_action *p;
p                 230 net/sched/act_api.c 	idr_for_each_entry_ul(idr, p, tmp, id) {
p                 237 net/sched/act_api.c 			       (unsigned long)p->tcfa_tm.lastuse))
p                 245 net/sched/act_api.c 		err = tcf_action_dump_1(skb, p, 0, 0);
p                 273 net/sched/act_api.c static int tcf_idr_release_unsafe(struct tc_action *p)
p                 275 net/sched/act_api.c 	if (atomic_read(&p->tcfa_bindcnt) > 0)
p                 278 net/sched/act_api.c 	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
p                 279 net/sched/act_api.c 		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
p                 280 net/sched/act_api.c 		tcf_action_cleanup(p);
p                 294 net/sched/act_api.c 	struct tc_action *p;
p                 305 net/sched/act_api.c 	idr_for_each_entry_ul(idr, p, tmp, id) {
p                 306 net/sched/act_api.c 		ret = tcf_idr_release_unsafe(p);
p                 349 net/sched/act_api.c 	struct tc_action *p;
p                 352 net/sched/act_api.c 	p = idr_find(&idrinfo->action_idr, index);
p                 353 net/sched/act_api.c 	if (IS_ERR(p))
p                 354 net/sched/act_api.c 		p = NULL;
p                 355 net/sched/act_api.c 	else if (p)
p                 356 net/sched/act_api.c 		refcount_inc(&p->tcfa_refcnt);
p                 359 net/sched/act_api.c 	if (p) {
p                 360 net/sched/act_api.c 		*a = p;
p                 369 net/sched/act_api.c 	struct tc_action *p;
p                 373 net/sched/act_api.c 	p = idr_find(&idrinfo->action_idr, index);
p                 374 net/sched/act_api.c 	if (!p) {
p                 379 net/sched/act_api.c 	if (!atomic_read(&p->tcfa_bindcnt)) {
p                 380 net/sched/act_api.c 		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
p                 381 net/sched/act_api.c 			struct module *owner = p->ops->owner;
p                 383 net/sched/act_api.c 			WARN_ON(p != idr_remove(&idrinfo->action_idr,
p                 384 net/sched/act_api.c 						p->tcfa_index));
p                 387 net/sched/act_api.c 			tcf_action_cleanup(p);
p                 404 net/sched/act_api.c 	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
p                 408 net/sched/act_api.c 	if (unlikely(!p))
p                 410 net/sched/act_api.c 	refcount_set(&p->tcfa_refcnt, 1);
p                 412 net/sched/act_api.c 		atomic_set(&p->tcfa_bindcnt, 1);
p                 415 net/sched/act_api.c 		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
p                 416 net/sched/act_api.c 		if (!p->cpu_bstats)
p                 418 net/sched/act_api.c 		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
p                 419 net/sched/act_api.c 		if (!p->cpu_bstats_hw)
p                 421 net/sched/act_api.c 		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
p                 422 net/sched/act_api.c 		if (!p->cpu_qstats)
p                 425 net/sched/act_api.c 	spin_lock_init(&p->tcfa_lock);
p                 426 net/sched/act_api.c 	p->tcfa_index = index;
p                 427 net/sched/act_api.c 	p->tcfa_tm.install = jiffies;
p                 428 net/sched/act_api.c 	p->tcfa_tm.lastuse = jiffies;
p                 429 net/sched/act_api.c 	p->tcfa_tm.firstuse = 0;
p                 431 net/sched/act_api.c 		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
p                 432 net/sched/act_api.c 					&p->tcfa_rate_est,
p                 433 net/sched/act_api.c 					&p->tcfa_lock, NULL, est);
p                 438 net/sched/act_api.c 	p->idrinfo = idrinfo;
p                 439 net/sched/act_api.c 	p->ops = ops;
p                 440 net/sched/act_api.c 	*a = p;
p                 443 net/sched/act_api.c 	free_percpu(p->cpu_qstats);
p                 445 net/sched/act_api.c 	free_percpu(p->cpu_bstats_hw);
p                 447 net/sched/act_api.c 	free_percpu(p->cpu_bstats);
p                 449 net/sched/act_api.c 	kfree(p);
p                 488 net/sched/act_api.c 	struct tc_action *p;
p                 494 net/sched/act_api.c 		p = idr_find(&idrinfo->action_idr, *index);
p                 495 net/sched/act_api.c 		if (IS_ERR(p)) {
p                 503 net/sched/act_api.c 		if (p) {
p                 504 net/sched/act_api.c 			refcount_inc(&p->tcfa_refcnt);
p                 506 net/sched/act_api.c 				atomic_inc(&p->tcfa_bindcnt);
p                 507 net/sched/act_api.c 			*a = p;
p                 535 net/sched/act_api.c 	struct tc_action *p;
p                 540 net/sched/act_api.c 	idr_for_each_entry_ul(idr, p, tmp, id) {
p                 541 net/sched/act_api.c 		ret = __tcf_idr_release(p, false, true);
p                 725 net/sched/act_api.c static int tcf_action_put(struct tc_action *p)
p                 727 net/sched/act_api.c 	return __tcf_action_put(p, false);
p                 992 net/sched/act_api.c int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
p                 998 net/sched/act_api.c 	if (p == NULL)
p                1005 net/sched/act_api.c 		if (p->type == TCA_OLD_COMPAT)
p                1009 net/sched/act_api.c 							   &p->tcfa_lock, &d,
p                1015 net/sched/act_api.c 					    &p->tcfa_lock, &d, TCA_ACT_PAD);
p                1020 net/sched/act_api.c 	if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
p                1021 net/sched/act_api.c 	    gnet_stats_copy_basic_hw(NULL, &d, p->cpu_bstats_hw,
p                1022 net/sched/act_api.c 				     &p->tcfa_bstats_hw) < 0 ||
p                1023 net/sched/act_api.c 	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
p                1024 net/sched/act_api.c 	    gnet_stats_copy_queue(&d, p->cpu_qstats,
p                1025 net/sched/act_api.c 				  &p->tcfa_qstats,
p                1026 net/sched/act_api.c 				  p->tcfa_qstats.qlen) < 0)
p                  53 net/sched/act_csum.c 	struct tcf_csum *p;
p                  93 net/sched/act_csum.c 	p = to_tcf_csum(*a);
p                 102 net/sched/act_csum.c 	spin_lock_bh(&p->tcf_lock);
p                 104 net/sched/act_csum.c 	rcu_swap_protected(p->params, params_new,
p                 105 net/sched/act_csum.c 			   lockdep_is_held(&p->tcf_lock));
p                 106 net/sched/act_csum.c 	spin_unlock_bh(&p->tcf_lock);
p                 572 net/sched/act_csum.c 	struct tcf_csum *p = to_tcf_csum(a);
p                 580 net/sched/act_csum.c 	params = rcu_dereference_bh(p->params);
p                 582 net/sched/act_csum.c 	tcf_lastuse_update(&p->tcf_tm);
p                 583 net/sched/act_csum.c 	bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);
p                 585 net/sched/act_csum.c 	action = READ_ONCE(p->tcf_action);
p                 627 net/sched/act_csum.c 	qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
p                 636 net/sched/act_csum.c 	struct tcf_csum *p = to_tcf_csum(a);
p                 639 net/sched/act_csum.c 		.index   = p->tcf_index,
p                 640 net/sched/act_csum.c 		.refcnt  = refcount_read(&p->tcf_refcnt) - ref,
p                 641 net/sched/act_csum.c 		.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
p                 645 net/sched/act_csum.c 	spin_lock_bh(&p->tcf_lock);
p                 646 net/sched/act_csum.c 	params = rcu_dereference_protected(p->params,
p                 647 net/sched/act_csum.c 					   lockdep_is_held(&p->tcf_lock));
p                 648 net/sched/act_csum.c 	opt.action = p->tcf_action;
p                 654 net/sched/act_csum.c 	tcf_tm_dump(&t, &p->tcf_tm);
p                 657 net/sched/act_csum.c 	spin_unlock_bh(&p->tcf_lock);
p                 662 net/sched/act_csum.c 	spin_unlock_bh(&p->tcf_lock);
p                 669 net/sched/act_csum.c 	struct tcf_csum *p = to_tcf_csum(a);
p                 672 net/sched/act_csum.c 	params = rcu_dereference_protected(p->params, 1);
p                 389 net/sched/act_ct.c 	struct tcf_ct_params *p;
p                 393 net/sched/act_ct.c 	p = rcu_dereference_bh(c->params);
p                 396 net/sched/act_ct.c 	commit = p->ct_action & TCA_CT_ACT_COMMIT;
p                 397 net/sched/act_ct.c 	clear = p->ct_action & TCA_CT_ACT_CLEAR;
p                 398 net/sched/act_ct.c 	force = p->ct_action & TCA_CT_ACT_FORCE;
p                 399 net/sched/act_ct.c 	tmpl = p->tmpl;
p                 420 net/sched/act_ct.c 	err = tcf_ct_handle_fragments(net, skb, family, p->zone);
p                 437 net/sched/act_ct.c 	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
p                 461 net/sched/act_ct.c 	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
p                 466 net/sched/act_ct.c 		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
p                 467 net/sched/act_ct.c 		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);
p                 508 net/sched/act_ct.c static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
p                 515 net/sched/act_ct.c 	if (!(p->ct_action & TCA_CT_ACT_NAT))
p                 523 net/sched/act_ct.c 	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
p                 526 net/sched/act_ct.c 	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
p                 527 net/sched/act_ct.c 	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
p                 532 net/sched/act_ct.c 	range = &p->range;
p                 536 net/sched/act_ct.c 		p->ipv4_range = true;
p                 547 net/sched/act_ct.c 		p->ipv4_range = false;
p                 588 net/sched/act_ct.c 			      struct tcf_ct_params *p,
p                 598 net/sched/act_ct.c 	p->zone = NF_CT_DEFAULT_ZONE_ID;
p                 601 net/sched/act_ct.c 			   &p->ct_action, TCA_CT_ACTION,
p                 603 net/sched/act_ct.c 			   sizeof(p->ct_action));
p                 605 net/sched/act_ct.c 	if (p->ct_action & TCA_CT_ACT_CLEAR)
p                 608 net/sched/act_ct.c 	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
p                 618 net/sched/act_ct.c 				   &p->mark, TCA_CT_MARK,
p                 619 net/sched/act_ct.c 				   &p->mark_mask, TCA_CT_MARK_MASK,
p                 620 net/sched/act_ct.c 				   sizeof(p->mark));
p                 634 net/sched/act_ct.c 				   p->labels, TCA_CT_LABELS,
p                 635 net/sched/act_ct.c 				   p->labels_mask, TCA_CT_LABELS_MASK,
p                 636 net/sched/act_ct.c 				   sizeof(p->labels));
p                 646 net/sched/act_ct.c 				   &p->zone, TCA_CT_ZONE,
p                 648 net/sched/act_ct.c 				   sizeof(p->zone));
p                 651 net/sched/act_ct.c 	if (p->zone == NF_CT_DEFAULT_ZONE_ID)
p                 654 net/sched/act_ct.c 	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
p                 662 net/sched/act_ct.c 	p->tmpl = tmpl;
p                 789 net/sched/act_ct.c static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
p                 791 net/sched/act_ct.c 	struct nf_nat_range2 *range = &p->range;
p                 793 net/sched/act_ct.c 	if (!(p->ct_action & TCA_CT_ACT_NAT))
p                 796 net/sched/act_ct.c 	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
p                 800 net/sched/act_ct.c 		if (p->ipv4_range) {
p                 834 net/sched/act_ct.c 	struct tcf_ct_params *p;
p                 844 net/sched/act_ct.c 	p = rcu_dereference_protected(c->params,
p                 849 net/sched/act_ct.c 				&p->ct_action, TCA_CT_ACTION,
p                 851 net/sched/act_ct.c 				sizeof(p->ct_action)))
p                 854 net/sched/act_ct.c 	if (p->ct_action & TCA_CT_ACT_CLEAR)
p                 859 net/sched/act_ct.c 				&p->mark, TCA_CT_MARK,
p                 860 net/sched/act_ct.c 				&p->mark_mask, TCA_CT_MARK_MASK,
p                 861 net/sched/act_ct.c 				sizeof(p->mark)))
p                 866 net/sched/act_ct.c 				p->labels, TCA_CT_LABELS,
p                 867 net/sched/act_ct.c 				p->labels_mask, TCA_CT_LABELS_MASK,
p                 868 net/sched/act_ct.c 				sizeof(p->labels)))
p                 873 net/sched/act_ct.c 				&p->zone, TCA_CT_ZONE,
p                 875 net/sched/act_ct.c 				sizeof(p->zone)))
p                 878 net/sched/act_ct.c 	if (tcf_ct_dump_nat(skb, p))
p                 428 net/sched/act_ife.c 	struct tcf_ife_params *p;
p                 434 net/sched/act_ife.c 	p = rcu_dereference_protected(ife->params, 1);
p                 435 net/sched/act_ife.c 	if (p)
p                 436 net/sched/act_ife.c 		kfree_rcu(p, rcu);
p                 474 net/sched/act_ife.c 	struct tcf_ife_params *p;
p                 507 net/sched/act_ife.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 508 net/sched/act_ife.c 	if (!p)
p                 514 net/sched/act_ife.c 		kfree(p);
p                 519 net/sched/act_ife.c 		kfree(p);
p                 528 net/sched/act_ife.c 			kfree(p);
p                 534 net/sched/act_ife.c 		kfree(p);
p                 546 net/sched/act_ife.c 	p->flags = parm->flags;
p                 559 net/sched/act_ife.c 			ether_addr_copy(p->eth_dst, daddr);
p                 561 net/sched/act_ife.c 			eth_zero_addr(p->eth_dst);
p                 564 net/sched/act_ife.c 			ether_addr_copy(p->eth_src, saddr);
p                 566 net/sched/act_ife.c 			eth_zero_addr(p->eth_src);
p                 568 net/sched/act_ife.c 		p->eth_type = ife_type;
p                 596 net/sched/act_ife.c 	rcu_swap_protected(ife->params, p, 1);
p                 602 net/sched/act_ife.c 	if (p)
p                 603 net/sched/act_ife.c 		kfree_rcu(p, rcu);
p                 613 net/sched/act_ife.c 	kfree(p);
p                 623 net/sched/act_ife.c 	struct tcf_ife_params *p;
p                 633 net/sched/act_ife.c 	p = rcu_dereference_protected(ife->params,
p                 635 net/sched/act_ife.c 	opt.flags = p->flags;
p                 644 net/sched/act_ife.c 	if (!is_zero_ether_addr(p->eth_dst)) {
p                 645 net/sched/act_ife.c 		if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, p->eth_dst))
p                 649 net/sched/act_ife.c 	if (!is_zero_ether_addr(p->eth_src)) {
p                 650 net/sched/act_ife.c 		if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, p->eth_src))
p                 654 net/sched/act_ife.c 	if (nla_put(skb, TCA_IFE_TYPE, 2, &p->eth_type))
p                 763 net/sched/act_ife.c 			  struct tcf_result *res, struct tcf_ife_params *p)
p                 830 net/sched/act_ife.c 	if (!is_zero_ether_addr(p->eth_src))
p                 831 net/sched/act_ife.c 		ether_addr_copy(oethh->h_source, p->eth_src);
p                 832 net/sched/act_ife.c 	if (!is_zero_ether_addr(p->eth_dst))
p                 833 net/sched/act_ife.c 		ether_addr_copy(oethh->h_dest, p->eth_dst);
p                 834 net/sched/act_ife.c 	oethh->h_proto = htons(p->eth_type);
p                 846 net/sched/act_ife.c 	struct tcf_ife_params *p;
p                 849 net/sched/act_ife.c 	p = rcu_dereference_bh(ife->params);
p                 850 net/sched/act_ife.c 	if (p->flags & IFE_ENCODE) {
p                 851 net/sched/act_ife.c 		ret = tcf_ife_encode(skb, a, res, p);
p                  24 net/sched/act_mpls.c 			       struct tcf_mpls_params *p, bool set_bos)
p                  31 net/sched/act_mpls.c 	if (p->tcfm_label != ACT_MPLS_LABEL_NOT_SET) {
p                  33 net/sched/act_mpls.c 		new_lse |= p->tcfm_label << MPLS_LS_LABEL_SHIFT;
p                  35 net/sched/act_mpls.c 	if (p->tcfm_ttl) {
p                  37 net/sched/act_mpls.c 		new_lse |= p->tcfm_ttl << MPLS_LS_TTL_SHIFT;
p                  39 net/sched/act_mpls.c 	if (p->tcfm_tc != ACT_MPLS_TC_NOT_SET) {
p                  41 net/sched/act_mpls.c 		new_lse |= p->tcfm_tc << MPLS_LS_TC_SHIFT;
p                  43 net/sched/act_mpls.c 	if (p->tcfm_bos != ACT_MPLS_BOS_NOT_SET) {
p                  45 net/sched/act_mpls.c 		new_lse |= p->tcfm_bos << MPLS_LS_S_SHIFT;
p                  57 net/sched/act_mpls.c 	struct tcf_mpls_params *p;
p                  76 net/sched/act_mpls.c 	p = rcu_dereference_bh(m->mpls_p);
p                  78 net/sched/act_mpls.c 	switch (p->tcfm_action) {
p                  80 net/sched/act_mpls.c 		if (skb_mpls_pop(skb, p->tcfm_proto, mac_len,
p                  85 net/sched/act_mpls.c 		new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol));
p                  86 net/sched/act_mpls.c 		if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len,
p                  91 net/sched/act_mpls.c 		new_lse = tcf_mpls_get_lse(mpls_hdr(skb), p, false);
p                 142 net/sched/act_mpls.c 	struct tcf_mpls_params *p;
p                 248 net/sched/act_mpls.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 249 net/sched/act_mpls.c 	if (!p) {
p                 254 net/sched/act_mpls.c 	p->tcfm_action = parm->m_action;
p                 255 net/sched/act_mpls.c 	p->tcfm_label = tb[TCA_MPLS_LABEL] ? nla_get_u32(tb[TCA_MPLS_LABEL]) :
p                 257 net/sched/act_mpls.c 	p->tcfm_tc = tb[TCA_MPLS_TC] ? nla_get_u8(tb[TCA_MPLS_TC]) :
p                 259 net/sched/act_mpls.c 	p->tcfm_ttl = tb[TCA_MPLS_TTL] ? nla_get_u8(tb[TCA_MPLS_TTL]) :
p                 261 net/sched/act_mpls.c 	p->tcfm_bos = tb[TCA_MPLS_BOS] ? nla_get_u8(tb[TCA_MPLS_BOS]) :
p                 263 net/sched/act_mpls.c 	p->tcfm_proto = tb[TCA_MPLS_PROTO] ? nla_get_be16(tb[TCA_MPLS_PROTO]) :
p                 268 net/sched/act_mpls.c 	rcu_swap_protected(m->mpls_p, p, lockdep_is_held(&m->tcf_lock));
p                 273 net/sched/act_mpls.c 	if (p)
p                 274 net/sched/act_mpls.c 		kfree_rcu(p, rcu);
p                 290 net/sched/act_mpls.c 	struct tcf_mpls_params *p;
p                 292 net/sched/act_mpls.c 	p = rcu_dereference_protected(m->mpls_p, 1);
p                 293 net/sched/act_mpls.c 	if (p)
p                 294 net/sched/act_mpls.c 		kfree_rcu(p, rcu);
p                 302 net/sched/act_mpls.c 	struct tcf_mpls_params *p;
p                 312 net/sched/act_mpls.c 	p = rcu_dereference_protected(m->mpls_p, lockdep_is_held(&m->tcf_lock));
p                 313 net/sched/act_mpls.c 	opt.m_action = p->tcfm_action;
p                 318 net/sched/act_mpls.c 	if (p->tcfm_label != ACT_MPLS_LABEL_NOT_SET &&
p                 319 net/sched/act_mpls.c 	    nla_put_u32(skb, TCA_MPLS_LABEL, p->tcfm_label))
p                 322 net/sched/act_mpls.c 	if (p->tcfm_tc != ACT_MPLS_TC_NOT_SET &&
p                 323 net/sched/act_mpls.c 	    nla_put_u8(skb, TCA_MPLS_TC, p->tcfm_tc))
p                 326 net/sched/act_mpls.c 	if (p->tcfm_ttl && nla_put_u8(skb, TCA_MPLS_TTL, p->tcfm_ttl))
p                 329 net/sched/act_mpls.c 	if (p->tcfm_bos != ACT_MPLS_BOS_NOT_SET &&
p                 330 net/sched/act_mpls.c 	    nla_put_u8(skb, TCA_MPLS_BOS, p->tcfm_bos))
p                 333 net/sched/act_mpls.c 	if (nla_put_be16(skb, TCA_MPLS_PROTO, p->tcfm_proto))
p                  46 net/sched/act_nat.c 	struct tcf_nat *p;
p                  83 net/sched/act_nat.c 	p = to_tcf_nat(*a);
p                  85 net/sched/act_nat.c 	spin_lock_bh(&p->tcf_lock);
p                  86 net/sched/act_nat.c 	p->old_addr = parm->old_addr;
p                  87 net/sched/act_nat.c 	p->new_addr = parm->new_addr;
p                  88 net/sched/act_nat.c 	p->mask = parm->mask;
p                  89 net/sched/act_nat.c 	p->flags = parm->flags;
p                  92 net/sched/act_nat.c 	spin_unlock_bh(&p->tcf_lock);
p                 108 net/sched/act_nat.c 	struct tcf_nat *p = to_tcf_nat(a);
p                 119 net/sched/act_nat.c 	spin_lock(&p->tcf_lock);
p                 121 net/sched/act_nat.c 	tcf_lastuse_update(&p->tcf_tm);
p                 122 net/sched/act_nat.c 	old_addr = p->old_addr;
p                 123 net/sched/act_nat.c 	new_addr = p->new_addr;
p                 124 net/sched/act_nat.c 	mask = p->mask;
p                 125 net/sched/act_nat.c 	egress = p->flags & TCA_NAT_FLAG_EGRESS;
p                 126 net/sched/act_nat.c 	action = p->tcf_action;
p                 128 net/sched/act_nat.c 	bstats_update(&p->tcf_bstats, skb);
p                 130 net/sched/act_nat.c 	spin_unlock(&p->tcf_lock);
p                 256 net/sched/act_nat.c 	spin_lock(&p->tcf_lock);
p                 257 net/sched/act_nat.c 	p->tcf_qstats.drops++;
p                 258 net/sched/act_nat.c 	spin_unlock(&p->tcf_lock);
p                 266 net/sched/act_nat.c 	struct tcf_nat *p = to_tcf_nat(a);
p                 268 net/sched/act_nat.c 		.index    = p->tcf_index,
p                 269 net/sched/act_nat.c 		.refcnt   = refcount_read(&p->tcf_refcnt) - ref,
p                 270 net/sched/act_nat.c 		.bindcnt  = atomic_read(&p->tcf_bindcnt) - bind,
p                 274 net/sched/act_nat.c 	spin_lock_bh(&p->tcf_lock);
p                 275 net/sched/act_nat.c 	opt.old_addr = p->old_addr;
p                 276 net/sched/act_nat.c 	opt.new_addr = p->new_addr;
p                 277 net/sched/act_nat.c 	opt.mask = p->mask;
p                 278 net/sched/act_nat.c 	opt.flags = p->flags;
p                 279 net/sched/act_nat.c 	opt.action = p->tcf_action;
p                 284 net/sched/act_nat.c 	tcf_tm_dump(&t, &p->tcf_tm);
p                 287 net/sched/act_nat.c 	spin_unlock_bh(&p->tcf_lock);
p                 292 net/sched/act_nat.c 	spin_unlock_bh(&p->tcf_lock);
p                 149 net/sched/act_pedit.c 	struct tcf_pedit *p;
p                 214 net/sched/act_pedit.c 	p = to_pedit(*a);
p                 215 net/sched/act_pedit.c 	spin_lock_bh(&p->tcf_lock);
p                 218 net/sched/act_pedit.c 	    (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys)) {
p                 221 net/sched/act_pedit.c 			spin_unlock_bh(&p->tcf_lock);
p                 225 net/sched/act_pedit.c 		kfree(p->tcfp_keys);
p                 226 net/sched/act_pedit.c 		p->tcfp_keys = keys;
p                 227 net/sched/act_pedit.c 		p->tcfp_nkeys = parm->nkeys;
p                 229 net/sched/act_pedit.c 	memcpy(p->tcfp_keys, parm->keys, ksize);
p                 231 net/sched/act_pedit.c 	p->tcfp_flags = parm->flags;
p                 234 net/sched/act_pedit.c 	kfree(p->tcfp_keys_ex);
p                 235 net/sched/act_pedit.c 	p->tcfp_keys_ex = keys_ex;
p                 237 net/sched/act_pedit.c 	spin_unlock_bh(&p->tcf_lock);
p                 257 net/sched/act_pedit.c 	struct tcf_pedit *p = to_pedit(a);
p                 258 net/sched/act_pedit.c 	struct tc_pedit_key *keys = p->tcfp_keys;
p                 261 net/sched/act_pedit.c 	kfree(p->tcfp_keys_ex);
p                 311 net/sched/act_pedit.c 	struct tcf_pedit *p = to_pedit(a);
p                 315 net/sched/act_pedit.c 		return p->tcf_action;
p                 317 net/sched/act_pedit.c 	spin_lock(&p->tcf_lock);
p                 319 net/sched/act_pedit.c 	tcf_lastuse_update(&p->tcf_tm);
p                 321 net/sched/act_pedit.c 	if (p->tcfp_nkeys > 0) {
p                 322 net/sched/act_pedit.c 		struct tc_pedit_key *tkey = p->tcfp_keys;
p                 323 net/sched/act_pedit.c 		struct tcf_pedit_key_ex *tkey_ex = p->tcfp_keys_ex;
p                 328 net/sched/act_pedit.c 		for (i = p->tcfp_nkeys; i > 0; i--, tkey++) {
p                 400 net/sched/act_pedit.c 		WARN(1, "pedit BUG: index %d\n", p->tcf_index);
p                 404 net/sched/act_pedit.c 	p->tcf_qstats.overlimits++;
p                 406 net/sched/act_pedit.c 	bstats_update(&p->tcf_bstats, skb);
p                 407 net/sched/act_pedit.c 	spin_unlock(&p->tcf_lock);
p                 408 net/sched/act_pedit.c 	return p->tcf_action;
p                 415 net/sched/act_pedit.c 	struct tcf_pedit *p = to_pedit(a);
p                 420 net/sched/act_pedit.c 	s = struct_size(opt, keys, p->tcfp_nkeys);
p                 427 net/sched/act_pedit.c 	spin_lock_bh(&p->tcf_lock);
p                 428 net/sched/act_pedit.c 	memcpy(opt->keys, p->tcfp_keys,
p                 429 net/sched/act_pedit.c 	       p->tcfp_nkeys * sizeof(struct tc_pedit_key));
p                 430 net/sched/act_pedit.c 	opt->index = p->tcf_index;
p                 431 net/sched/act_pedit.c 	opt->nkeys = p->tcfp_nkeys;
p                 432 net/sched/act_pedit.c 	opt->flags = p->tcfp_flags;
p                 433 net/sched/act_pedit.c 	opt->action = p->tcf_action;
p                 434 net/sched/act_pedit.c 	opt->refcnt = refcount_read(&p->tcf_refcnt) - ref;
p                 435 net/sched/act_pedit.c 	opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind;
p                 437 net/sched/act_pedit.c 	if (p->tcfp_keys_ex) {
p                 439 net/sched/act_pedit.c 					  p->tcfp_keys_ex,
p                 440 net/sched/act_pedit.c 					  p->tcfp_nkeys))
p                 450 net/sched/act_pedit.c 	tcf_tm_dump(&t, &p->tcf_tm);
p                 453 net/sched/act_pedit.c 	spin_unlock_bh(&p->tcf_lock);
p                 459 net/sched/act_pedit.c 	spin_unlock_bh(&p->tcf_lock);
p                 222 net/sched/act_police.c 	struct tcf_police_params *p;
p                 230 net/sched/act_police.c 	p = rcu_dereference_bh(police->params);
p                 232 net/sched/act_police.c 	if (p->tcfp_ewma_rate) {
p                 236 net/sched/act_police.c 		    sample.bps >= p->tcfp_ewma_rate)
p                 240 net/sched/act_police.c 	if (qdisc_pkt_len(skb) <= p->tcfp_mtu) {
p                 241 net/sched/act_police.c 		if (!p->rate_present) {
p                 242 net/sched/act_police.c 			ret = p->tcfp_result;
p                 248 net/sched/act_police.c 		toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst);
p                 249 net/sched/act_police.c 		if (p->peak_present) {
p                 251 net/sched/act_police.c 			if (ptoks > p->tcfp_mtu_ptoks)
p                 252 net/sched/act_police.c 				ptoks = p->tcfp_mtu_ptoks;
p                 253 net/sched/act_police.c 			ptoks -= (s64)psched_l2t_ns(&p->peak,
p                 257 net/sched/act_police.c 		if (toks > p->tcfp_burst)
p                 258 net/sched/act_police.c 			toks = p->tcfp_burst;
p                 259 net/sched/act_police.c 		toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
p                 265 net/sched/act_police.c 			ret = p->tcfp_result;
p                 283 net/sched/act_police.c 	struct tcf_police_params *p;
p                 285 net/sched/act_police.c 	p = rcu_dereference_protected(police->params, 1);
p                 286 net/sched/act_police.c 	if (p)
p                 287 net/sched/act_police.c 		kfree_rcu(p, rcu);
p                 309 net/sched/act_police.c 	struct tcf_police_params *p;
p                 319 net/sched/act_police.c 	p = rcu_dereference_protected(police->params,
p                 321 net/sched/act_police.c 	opt.mtu = p->tcfp_mtu;
p                 322 net/sched/act_police.c 	opt.burst = PSCHED_NS2TICKS(p->tcfp_burst);
p                 323 net/sched/act_police.c 	if (p->rate_present) {
p                 324 net/sched/act_police.c 		psched_ratecfg_getrate(&opt.rate, &p->rate);
p                 331 net/sched/act_police.c 	if (p->peak_present) {
p                 332 net/sched/act_police.c 		psched_ratecfg_getrate(&opt.peakrate, &p->peak);
p                 341 net/sched/act_police.c 	if (p->tcfp_result &&
p                 342 net/sched/act_police.c 	    nla_put_u32(skb, TCA_POLICE_RESULT, p->tcfp_result))
p                 344 net/sched/act_police.c 	if (p->tcfp_ewma_rate &&
p                 345 net/sched/act_police.c 	    nla_put_u32(skb, TCA_POLICE_AVRATE, p->tcfp_ewma_rate))
p                  60 net/sched/act_simple.c 			struct tc_defact *p, struct tcf_proto *tp,
p                  67 net/sched/act_simple.c 	err = tcf_action_check_ctrlact(p->action, tp, &goto_ch, extack);
p                  72 net/sched/act_simple.c 	goto_ch = tcf_action_set_ctrlact(a, p->action, goto_ch);
p                  29 net/sched/act_skbmod.c 	struct tcf_skbmod_params *p;
p                  48 net/sched/act_skbmod.c 	p = rcu_dereference_bh(d->skbmod_p);
p                  49 net/sched/act_skbmod.c 	flags = p->flags;
p                  51 net/sched/act_skbmod.c 		ether_addr_copy(eth_hdr(skb)->h_dest, p->eth_dst);
p                  53 net/sched/act_skbmod.c 		ether_addr_copy(eth_hdr(skb)->h_source, p->eth_src);
p                  55 net/sched/act_skbmod.c 		eth_hdr(skb)->h_proto = p->eth_type;
p                  87 net/sched/act_skbmod.c 	struct tcf_skbmod_params *p, *p_old;
p                 163 net/sched/act_skbmod.c 	p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
p                 164 net/sched/act_skbmod.c 	if (unlikely(!p)) {
p                 169 net/sched/act_skbmod.c 	p->flags = lflags;
p                 178 net/sched/act_skbmod.c 		ether_addr_copy(p->eth_dst, daddr);
p                 180 net/sched/act_skbmod.c 		ether_addr_copy(p->eth_src, saddr);
p                 182 net/sched/act_skbmod.c 		p->eth_type = htons(eth_type);
p                 184 net/sched/act_skbmod.c 	rcu_assign_pointer(d->skbmod_p, p);
p                 207 net/sched/act_skbmod.c 	struct tcf_skbmod_params  *p;
p                 209 net/sched/act_skbmod.c 	p = rcu_dereference_protected(d->skbmod_p, 1);
p                 210 net/sched/act_skbmod.c 	if (p)
p                 211 net/sched/act_skbmod.c 		kfree_rcu(p, rcu);
p                 219 net/sched/act_skbmod.c 	struct tcf_skbmod_params  *p;
p                 229 net/sched/act_skbmod.c 	p = rcu_dereference_protected(d->skbmod_p,
p                 231 net/sched/act_skbmod.c 	opt.flags  = p->flags;
p                 234 net/sched/act_skbmod.c 	if ((p->flags & SKBMOD_F_DMAC) &&
p                 235 net/sched/act_skbmod.c 	    nla_put(skb, TCA_SKBMOD_DMAC, ETH_ALEN, p->eth_dst))
p                 237 net/sched/act_skbmod.c 	if ((p->flags & SKBMOD_F_SMAC) &&
p                 238 net/sched/act_skbmod.c 	    nla_put(skb, TCA_SKBMOD_SMAC, ETH_ALEN, p->eth_src))
p                 240 net/sched/act_skbmod.c 	if ((p->flags & SKBMOD_F_ETYPE) &&
p                 241 net/sched/act_skbmod.c 	    nla_put_u16(skb, TCA_SKBMOD_ETYPE, ntohs(p->eth_type)))
p                 202 net/sched/act_tunnel_key.c static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
p                 204 net/sched/act_tunnel_key.c 	if (!p)
p                 206 net/sched/act_tunnel_key.c 	if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
p                 207 net/sched/act_tunnel_key.c 		dst_release(&p->tcft_enc_metadata->dst);
p                 209 net/sched/act_tunnel_key.c 	kfree_rcu(p, rcu);
p                  26 net/sched/act_vlan.c 	struct tcf_vlan_params *p;
p                  42 net/sched/act_vlan.c 	p = rcu_dereference_bh(v->vlan_p);
p                  44 net/sched/act_vlan.c 	switch (p->tcfv_action) {
p                  51 net/sched/act_vlan.c 		err = skb_vlan_push(skb, p->tcfv_push_proto, p->tcfv_push_vid |
p                  52 net/sched/act_vlan.c 				    (p->tcfv_push_prio << VLAN_PRIO_SHIFT));
p                  71 net/sched/act_vlan.c 		tci = (tci & ~VLAN_VID_MASK) | p->tcfv_push_vid;
p                  73 net/sched/act_vlan.c 		if (p->tcfv_push_prio) {
p                  75 net/sched/act_vlan.c 			tci |= p->tcfv_push_prio << VLAN_PRIO_SHIFT;
p                  78 net/sched/act_vlan.c 		__vlan_hwaccel_put_tag(skb, p->tcfv_push_proto, tci);
p                 110 net/sched/act_vlan.c 	struct tcf_vlan_params *p;
p                 210 net/sched/act_vlan.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 211 net/sched/act_vlan.c 	if (!p) {
p                 216 net/sched/act_vlan.c 	p->tcfv_action = action;
p                 217 net/sched/act_vlan.c 	p->tcfv_push_vid = push_vid;
p                 218 net/sched/act_vlan.c 	p->tcfv_push_prio = push_prio;
p                 219 net/sched/act_vlan.c 	p->tcfv_push_proto = push_proto;
p                 223 net/sched/act_vlan.c 	rcu_swap_protected(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
p                 228 net/sched/act_vlan.c 	if (p)
p                 229 net/sched/act_vlan.c 		kfree_rcu(p, rcu);
p                 245 net/sched/act_vlan.c 	struct tcf_vlan_params *p;
p                 247 net/sched/act_vlan.c 	p = rcu_dereference_protected(v->vlan_p, 1);
p                 248 net/sched/act_vlan.c 	if (p)
p                 249 net/sched/act_vlan.c 		kfree_rcu(p, rcu);
p                 257 net/sched/act_vlan.c 	struct tcf_vlan_params *p;
p                 267 net/sched/act_vlan.c 	p = rcu_dereference_protected(v->vlan_p, lockdep_is_held(&v->tcf_lock));
p                 268 net/sched/act_vlan.c 	opt.v_action = p->tcfv_action;
p                 272 net/sched/act_vlan.c 	if ((p->tcfv_action == TCA_VLAN_ACT_PUSH ||
p                 273 net/sched/act_vlan.c 	     p->tcfv_action == TCA_VLAN_ACT_MODIFY) &&
p                 274 net/sched/act_vlan.c 	    (nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, p->tcfv_push_vid) ||
p                 276 net/sched/act_vlan.c 			  p->tcfv_push_proto) ||
p                 278 net/sched/act_vlan.c 					      p->tcfv_push_prio))))
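For the act_vlan MODIFY branch above, the new tag control information is rebuilt from the existing one: the VID bits are replaced, and the priority bits are only touched when a priority was configured. A small self-contained sketch of that bit manipulation, using the 802.1Q layout (the stand-ins for the kernel's VLAN_VID_MASK and VLAN_PRIO_SHIFT, and the helper name, are made up for the example):

	#include <stdint.h>

	#define VID_MASK	0x0fffU			/* stand-in for VLAN_VID_MASK */
	#define PRIO_SHIFT	13			/* stand-in for VLAN_PRIO_SHIFT */
	#define PRIO_MASK	(0x7U << PRIO_SHIFT)

	static uint16_t rebuild_tci(uint16_t old_tci, uint16_t vid,
				    uint8_t prio, int set_prio)
	{
		/* keep everything except the VID bits, then or in the new VID */
		uint16_t tci = (old_tci & ~VID_MASK) | (vid & VID_MASK);

		if (set_prio) {
			tci &= ~PRIO_MASK;		/* drop the old PCP */
			tci |= (uint16_t)(prio & 0x7) << PRIO_SHIFT;
		}
		return tci;
	}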
p                  35 net/sched/cls_tcindex.c 	struct tcindex_data	*p;
p                  65 net/sched/cls_tcindex.c static void tcindex_data_get(struct tcindex_data *p)
p                  67 net/sched/cls_tcindex.c 	refcount_inc(&p->refcnt);
p                  70 net/sched/cls_tcindex.c static void tcindex_data_put(struct tcindex_data *p)
p                  72 net/sched/cls_tcindex.c 	if (refcount_dec_and_test(&p->refcnt)) {
p                  73 net/sched/cls_tcindex.c 		kfree(p->perfect);
p                  74 net/sched/cls_tcindex.c 		kfree(p->h);
p                  75 net/sched/cls_tcindex.c 		kfree(p);
p                  79 net/sched/cls_tcindex.c static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
p                  82 net/sched/cls_tcindex.c 	if (p->perfect) {
p                  83 net/sched/cls_tcindex.c 		struct tcindex_filter_result *f = p->perfect + key;
p                  86 net/sched/cls_tcindex.c 	} else if (p->h) {
p                  90 net/sched/cls_tcindex.c 		fp = &p->h[key % p->hash];
p                 105 net/sched/cls_tcindex.c 	struct tcindex_data *p = rcu_dereference_bh(tp->root);
p                 107 net/sched/cls_tcindex.c 	int key = (skb->tc_index & p->mask) >> p->shift;
p                 110 net/sched/cls_tcindex.c 		 skb, tp, res, p);
p                 112 net/sched/cls_tcindex.c 	f = tcindex_lookup(p, key);
p                 116 net/sched/cls_tcindex.c 		if (!p->fall_through)
p                 132 net/sched/cls_tcindex.c 	struct tcindex_data *p = rtnl_dereference(tp->root);
p                 136 net/sched/cls_tcindex.c 	if (p->perfect && handle >= p->alloc_hash)
p                 138 net/sched/cls_tcindex.c 	r = tcindex_lookup(p, handle);
p                 144 net/sched/cls_tcindex.c 	struct tcindex_data *p;
p                 147 net/sched/cls_tcindex.c 	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
p                 148 net/sched/cls_tcindex.c 	if (!p)
p                 151 net/sched/cls_tcindex.c 	p->mask = 0xffff;
p                 152 net/sched/cls_tcindex.c 	p->hash = DEFAULT_HASH_SIZE;
p                 153 net/sched/cls_tcindex.c 	p->fall_through = 1;
p                 154 net/sched/cls_tcindex.c 	refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */
p                 156 net/sched/cls_tcindex.c 	rcu_assign_pointer(tp->root, p);
p                 164 net/sched/cls_tcindex.c 	tcindex_data_put(r->p);
p                 200 net/sched/cls_tcindex.c 	struct tcindex_data *p = rtnl_dereference(tp->root);
p                 205 net/sched/cls_tcindex.c 	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
p                 206 net/sched/cls_tcindex.c 	if (p->perfect) {
p                 212 net/sched/cls_tcindex.c 		for (i = 0; i < p->hash; i++) {
p                 213 net/sched/cls_tcindex.c 			walk = p->h + i;
p                 236 net/sched/cls_tcindex.c 		tcindex_data_get(p);
p                 250 net/sched/cls_tcindex.c 	struct tcindex_data *p = container_of(to_rcu_work(work),
p                 254 net/sched/cls_tcindex.c 	tcindex_data_put(p);
p                 258 net/sched/cls_tcindex.c valid_perfect_hash(struct tcindex_data *p)
p                 260 net/sched/cls_tcindex.c 	return  p->hash > (p->mask >> p->shift);
p                 272 net/sched/cls_tcindex.c 				      struct tcindex_data *p,
p                 276 net/sched/cls_tcindex.c 	r->p = p;
p                 283 net/sched/cls_tcindex.c 	struct tcindex_data *p = container_of(to_rcu_work(work),
p                 288 net/sched/cls_tcindex.c 	kfree(p->perfect);
p                 289 net/sched/cls_tcindex.c 	kfree(p);
p                 316 net/sched/cls_tcindex.c 		cp->perfect[i].p = cp;
p                 328 net/sched/cls_tcindex.c 		  u32 handle, struct tcindex_data *p,
p                 355 net/sched/cls_tcindex.c 	cp->mask = p->mask;
p                 356 net/sched/cls_tcindex.c 	cp->shift = p->shift;
p                 357 net/sched/cls_tcindex.c 	cp->hash = p->hash;
p                 358 net/sched/cls_tcindex.c 	cp->alloc_hash = p->alloc_hash;
p                 359 net/sched/cls_tcindex.c 	cp->fall_through = p->fall_through;
p                 382 net/sched/cls_tcindex.c 	if (p->perfect) {
p                 388 net/sched/cls_tcindex.c 		for (i = 0; i < min(cp->hash, p->hash); i++)
p                 389 net/sched/cls_tcindex.c 			cp->perfect[i].res = p->perfect[i].res;
p                 392 net/sched/cls_tcindex.c 	cp->h = p->h;
p                 482 net/sched/cls_tcindex.c 	oldp = p;
p                 530 net/sched/cls_tcindex.c 	struct tcindex_data *p = rtnl_dereference(tp->root);
p                 536 net/sched/cls_tcindex.c 	    tp, handle, tca, arg, opt, p, r, arg ? *arg : NULL);
p                 546 net/sched/cls_tcindex.c 	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
p                 553 net/sched/cls_tcindex.c 	struct tcindex_data *p = rtnl_dereference(tp->root);
p                 557 net/sched/cls_tcindex.c 	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
p                 558 net/sched/cls_tcindex.c 	if (p->perfect) {
p                 559 net/sched/cls_tcindex.c 		for (i = 0; i < p->hash; i++) {
p                 560 net/sched/cls_tcindex.c 			if (!p->perfect[i].res.class)
p                 563 net/sched/cls_tcindex.c 				if (walker->fn(tp, p->perfect + i, walker) < 0) {
p                 571 net/sched/cls_tcindex.c 	if (!p->h)
p                 573 net/sched/cls_tcindex.c 	for (i = 0; i < p->hash; i++) {
p                 574 net/sched/cls_tcindex.c 		for (f = rtnl_dereference(p->h[i]); f; f = next) {
p                 590 net/sched/cls_tcindex.c 	struct tcindex_data *p = rtnl_dereference(tp->root);
p                 593 net/sched/cls_tcindex.c 	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
p                 595 net/sched/cls_tcindex.c 	if (p->perfect) {
p                 596 net/sched/cls_tcindex.c 		for (i = 0; i < p->hash; i++) {
p                 597 net/sched/cls_tcindex.c 			struct tcindex_filter_result *r = p->perfect + i;
p                 605 net/sched/cls_tcindex.c 			tcindex_data_get(p);
p                 616 net/sched/cls_tcindex.c 	for (i = 0; p->h && i < p->hash; i++) {
p                 620 net/sched/cls_tcindex.c 		for (f = rtnl_dereference(p->h[i]); f; f = next) {
p                 626 net/sched/cls_tcindex.c 	tcf_queue_work(&p->rwork, tcindex_destroy_work);
p                 633 net/sched/cls_tcindex.c 	struct tcindex_data *p = rtnl_dereference(tp->root);
p                 638 net/sched/cls_tcindex.c 		 tp, fh, skb, t, p, r);
p                 639 net/sched/cls_tcindex.c 	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);
p                 647 net/sched/cls_tcindex.c 		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
p                 648 net/sched/cls_tcindex.c 		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
p                 649 net/sched/cls_tcindex.c 		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
p                 650 net/sched/cls_tcindex.c 		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
p                 654 net/sched/cls_tcindex.c 		if (p->perfect) {
p                 655 net/sched/cls_tcindex.c 			t->tcm_handle = r - p->perfect;
p                 662 net/sched/cls_tcindex.c 			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
p                 663 net/sched/cls_tcindex.c 				fp = &p->h[i];
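The cls_tcindex entries above show a single refcount_t protecting one shared tcindex_data block: the initial reference is taken at init time, each deferred destroy path takes another via tcindex_data_get(), and whoever drops the last reference frees the lookup tables. Stripped of the classifier details, the idiom is simply the following (illustrative struct and names, not the real cls_tcindex types):

	struct shared_tables {
		refcount_t	refcnt;
		void		*perfect;
		void		*hash;
	};

	static void shared_tables_get(struct shared_tables *p)
	{
		refcount_inc(&p->refcnt);
	}

	static void shared_tables_put(struct shared_tables *p)
	{
		if (refcount_dec_and_test(&p->refcnt)) {
			kfree(p->perfect);
			kfree(p->hash);
			kfree(p);
		}
	}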
p                 331 net/sched/sch_api.c static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
p                 334 net/sched/sch_api.c 	const struct Qdisc_class_ops *cops = p->ops->cl_ops;
p                 338 net/sched/sch_api.c 	cl = cops->find(p, classid);
p                 342 net/sched/sch_api.c 	return cops->leaf(p, cl);
p                1150 net/sched/sch_api.c 				  struct Qdisc *p, u32 parent, u32 handle,
p                1261 net/sched/sch_api.c 		    (!p || !(p->flags & TCQ_F_MQROOT)))
p                1354 net/sched/sch_api.c 	struct Qdisc		*p;
p                1361 net/sched/sch_api.c static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
p                1371 net/sched/sch_api.c 	arg.p = p;
p                1385 net/sched/sch_api.c 		if (leaf == arg->p || arg->depth > 7)
p                1387 net/sched/sch_api.c 		return check_loop(leaf, arg->p, arg->depth + 1);
p                1416 net/sched/sch_api.c 	struct Qdisc *p = NULL;
p                1436 net/sched/sch_api.c 				p = qdisc_lookup(dev, TC_H_MAJ(clid));
p                1437 net/sched/sch_api.c 				if (!p) {
p                1441 net/sched/sch_api.c 				q = qdisc_leaf(p, clid);
p                1479 net/sched/sch_api.c 		err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
p                1500 net/sched/sch_api.c 	struct Qdisc *q, *p;
p                1515 net/sched/sch_api.c 	q = p = NULL;
p                1525 net/sched/sch_api.c 				p = qdisc_lookup(dev, TC_H_MAJ(clid));
p                1526 net/sched/sch_api.c 				if (!p) {
p                1530 net/sched/sch_api.c 				q = qdisc_leaf(p, clid);
p                1564 net/sched/sch_api.c 				if (q == p ||
p                1565 net/sched/sch_api.c 				    (p && check_loop(q, p, 0))) {
p                1635 net/sched/sch_api.c 			q = qdisc_create(dev, dev_ingress_queue(dev), p,
p                1645 net/sched/sch_api.c 		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
p                1646 net/sched/sch_api.c 			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
p                1647 net/sched/sch_api.c 		else if (p)
p                1648 net/sched/sch_api.c 			dev_queue = p->dev_queue;
p                1652 net/sched/sch_api.c 		q = qdisc_create(dev, dev_queue, p,
p                1663 net/sched/sch_api.c 	err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
p                  75 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
p                  78 net/sched/sch_atm.c 	list_for_each_entry(flow, &p->flows, list) {
p                  89 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
p                  93 net/sched/sch_atm.c 		sch, p, flow, new, old);
p                 115 net/sched/sch_atm.c 	struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
p                 118 net/sched/sch_atm.c 	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
p                 127 net/sched/sch_atm.c 	struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
p                 130 net/sched/sch_atm.c 	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
p                 145 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
p                 148 net/sched/sch_atm.c 	pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
p                 164 net/sched/sch_atm.c 	if (flow != &p->link)
p                 174 net/sched/sch_atm.c 	struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent;
p                 176 net/sched/sch_atm.c 	pr_debug("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p);
p                 178 net/sched/sch_atm.c 	tasklet_schedule(&p->task);
p                 199 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
p                 209 net/sched/sch_atm.c 		"flow %p,opt %p)\n", sch, p, classid, parent, flow, opt);
p                 305 net/sched/sch_atm.c 	flow->parent = p;
p                 310 net/sched/sch_atm.c 	list_add(&flow->list, &p->link.list);
p                 325 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
p                 328 net/sched/sch_atm.c 	pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
p                 331 net/sched/sch_atm.c 	if (rcu_access_pointer(flow->filter_list) || flow == &p->link)
p                 349 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
p                 352 net/sched/sch_atm.c 	pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
p                 355 net/sched/sch_atm.c 	list_for_each_entry(flow, &p->flows, list) {
p                 368 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
p                 371 net/sched/sch_atm.c 	pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
p                 372 net/sched/sch_atm.c 	return flow ? flow->block : p->link.block;
p                 380 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
p                 386 net/sched/sch_atm.c 	pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
p                 393 net/sched/sch_atm.c 		list_for_each_entry(flow, &p->flows, list) {
p                 410 net/sched/sch_atm.c 		flow = &p->link;
p                 454 net/sched/sch_atm.c 	if (flow == &p->link) {
p                 458 net/sched/sch_atm.c 	tasklet_schedule(&p->task);
p                 472 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
p                 476 net/sched/sch_atm.c 	pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
p                 477 net/sched/sch_atm.c 	list_for_each_entry(flow, &p->flows, list) {
p                 478 net/sched/sch_atm.c 		if (flow == &p->link)
p                 521 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
p                 524 net/sched/sch_atm.c 	pr_debug("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
p                 525 net/sched/sch_atm.c 	tasklet_schedule(&p->task);
p                 526 net/sched/sch_atm.c 	skb = qdisc_dequeue_peeked(p->link.q);
p                 534 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
p                 536 net/sched/sch_atm.c 	pr_debug("atm_tc_peek(sch %p,[qdisc %p])\n", sch, p);
p                 538 net/sched/sch_atm.c 	return p->link.q->ops->peek(p->link.q);
p                 544 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
p                 547 net/sched/sch_atm.c 	pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
p                 548 net/sched/sch_atm.c 	INIT_LIST_HEAD(&p->flows);
p                 549 net/sched/sch_atm.c 	INIT_LIST_HEAD(&p->link.list);
p                 550 net/sched/sch_atm.c 	list_add(&p->link.list, &p->flows);
p                 551 net/sched/sch_atm.c 	p->link.q = qdisc_create_dflt(sch->dev_queue,
p                 553 net/sched/sch_atm.c 	if (!p->link.q)
p                 554 net/sched/sch_atm.c 		p->link.q = &noop_qdisc;
p                 555 net/sched/sch_atm.c 	pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
p                 557 net/sched/sch_atm.c 	err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
p                 562 net/sched/sch_atm.c 	p->link.vcc = NULL;
p                 563 net/sched/sch_atm.c 	p->link.sock = NULL;
p                 564 net/sched/sch_atm.c 	p->link.common.classid = sch->handle;
p                 565 net/sched/sch_atm.c 	p->link.ref = 1;
p                 566 net/sched/sch_atm.c 	tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
p                 572 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
p                 575 net/sched/sch_atm.c 	pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
p                 576 net/sched/sch_atm.c 	list_for_each_entry(flow, &p->flows, list)
p                 583 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
p                 586 net/sched/sch_atm.c 	pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
p                 587 net/sched/sch_atm.c 	list_for_each_entry(flow, &p->flows, list) {
p                 592 net/sched/sch_atm.c 	list_for_each_entry_safe(flow, tmp, &p->flows, list) {
p                 597 net/sched/sch_atm.c 	tasklet_kill(&p->task);
p                 603 net/sched/sch_atm.c 	struct atm_qdisc_data *p = qdisc_priv(sch);
p                 608 net/sched/sch_atm.c 		sch, p, flow, skb, tcm);
p                 450 net/sched/sch_cake.c 			      struct cobalt_params *p,
p                 455 net/sched/sch_cake.c 	if (ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
p                 457 net/sched/sch_cake.c 		vars->p_drop += p->p_inc;
p                 458 net/sched/sch_cake.c 		if (vars->p_drop < p->p_inc)
p                 474 net/sched/sch_cake.c 			       struct cobalt_params *p,
p                 480 net/sched/sch_cake.c 	    ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
p                 481 net/sched/sch_cake.c 		if (vars->p_drop < p->p_dec)
p                 484 net/sched/sch_cake.c 			vars->p_drop -= p->p_dec;
p                 494 net/sched/sch_cake.c 						 p->interval,
p                 505 net/sched/sch_cake.c 			       struct cobalt_params *p,
p                 531 net/sched/sch_cake.c 	over_target = sojourn > p->target &&
p                 532 net/sched/sch_cake.c 		      sojourn > p->mtu_time * bulk_flows * 2 &&
p                 533 net/sched/sch_cake.c 		      sojourn > p->mtu_time * 4;
p                 542 net/sched/sch_cake.c 							 p->interval,
p                 560 net/sched/sch_cake.c 						 p->interval,
p                 568 net/sched/sch_cake.c 							 p->interval,
p                 581 net/sched/sch_cake.c 		vars->drop_next = ktime_add_ns(now, p->interval);
p                1415 net/sched/sch_cake.c 		u16 p = (i - 1) >> 1;
p                1417 net/sched/sch_cake.c 		u32 pb = cake_heap_get_backlog(q, p);
p                1420 net/sched/sch_cake.c 			cake_heap_swap(q, i, p);
p                1421 net/sched/sch_cake.c 			i = p;
p                1732 net/sched/sch_cbq.c 	struct cbq_class *p = (struct cbq_class *)parent;
p                1736 net/sched/sch_cbq.c 		if (p && p->level <= cl->level)
p                 223 net/sched/sch_choke.c 	const struct red_parms *p = &q->parms;
p                 227 net/sched/sch_choke.c 	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
p                 232 net/sched/sch_choke.c 	if (q->vars.qavg <= p->qth_min)
p                 245 net/sched/sch_choke.c 		if (q->vars.qavg > p->qth_max) {
p                 257 net/sched/sch_choke.c 			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
p                 259 net/sched/sch_choke.c 				q->vars.qR = red_random(p);
p                 270 net/sched/sch_choke.c 			q->vars.qR = red_random(p);
p                  57 net/sched/sch_dsmark.c static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
p                  59 net/sched/sch_dsmark.c 	return index <= p->indices && index > 0;
p                  68 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
p                  71 net/sched/sch_dsmark.c 		 __func__, sch, p, new, old);
p                  80 net/sched/sch_dsmark.c 	*old = qdisc_replace(sch, new, &p->q);
p                  86 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
p                  87 net/sched/sch_dsmark.c 	return p->q;
p                 120 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
p                 126 net/sched/sch_dsmark.c 		 __func__, sch, p, classid, parent, *arg);
p                 128 net/sched/sch_dsmark.c 	if (!dsmark_valid_index(p, *arg)) {
p                 142 net/sched/sch_dsmark.c 		p->mv[*arg - 1].value = nla_get_u8(tb[TCA_DSMARK_VALUE]);
p                 145 net/sched/sch_dsmark.c 		p->mv[*arg - 1].mask = nla_get_u8(tb[TCA_DSMARK_MASK]);
p                 155 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
p                 157 net/sched/sch_dsmark.c 	if (!dsmark_valid_index(p, arg))
p                 160 net/sched/sch_dsmark.c 	p->mv[arg - 1].mask = 0xff;
p                 161 net/sched/sch_dsmark.c 	p->mv[arg - 1].value = 0;
p                 168 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
p                 172 net/sched/sch_dsmark.c 		 __func__, sch, p, walker);
p                 177 net/sched/sch_dsmark.c 	for (i = 0; i < p->indices; i++) {
p                 178 net/sched/sch_dsmark.c 		if (p->mv[i].mask == 0xff && !p->mv[i].value)
p                 194 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
p                 196 net/sched/sch_dsmark.c 	return p->block;
p                 205 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
p                 208 net/sched/sch_dsmark.c 	pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
p                 210 net/sched/sch_dsmark.c 	if (p->set_tc_index) {
p                 243 net/sched/sch_dsmark.c 		struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
p                 264 net/sched/sch_dsmark.c 			if (p->default_index != NO_DEFAULT_INDEX)
p                 265 net/sched/sch_dsmark.c 				skb->tc_index = p->default_index;
p                 270 net/sched/sch_dsmark.c 	err = qdisc_enqueue(skb, p->q, to_free);
p                 289 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
p                 293 net/sched/sch_dsmark.c 	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
p                 295 net/sched/sch_dsmark.c 	skb = qdisc_dequeue_peeked(p->q);
p                 303 net/sched/sch_dsmark.c 	index = skb->tc_index & (p->indices - 1);
p                 308 net/sched/sch_dsmark.c 		ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
p                 309 net/sched/sch_dsmark.c 				    p->mv[index].value);
p                 312 net/sched/sch_dsmark.c 		ipv6_change_dsfield(ipv6_hdr(skb), p->mv[index].mask,
p                 313 net/sched/sch_dsmark.c 				    p->mv[index].value);
p                 321 net/sched/sch_dsmark.c 		if (p->mv[index].mask != 0xff || p->mv[index].value)
p                 332 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
p                 334 net/sched/sch_dsmark.c 	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
p                 336 net/sched/sch_dsmark.c 	return p->q->ops->peek(p->q);
p                 342 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
p                 349 net/sched/sch_dsmark.c 	pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt);
p                 354 net/sched/sch_dsmark.c 	err = tcf_block_get(&p->block, &p->filter_list, sch, extack);
p                 375 net/sched/sch_dsmark.c 		p->mv = p->embedded;
p                 377 net/sched/sch_dsmark.c 		p->mv = kmalloc_array(indices, sizeof(*p->mv), GFP_KERNEL);
p                 378 net/sched/sch_dsmark.c 	if (!p->mv) {
p                 383 net/sched/sch_dsmark.c 		p->mv[i].mask = 0xff;
p                 384 net/sched/sch_dsmark.c 		p->mv[i].value = 0;
p                 386 net/sched/sch_dsmark.c 	p->indices = indices;
p                 387 net/sched/sch_dsmark.c 	p->default_index = default_index;
p                 388 net/sched/sch_dsmark.c 	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
p                 390 net/sched/sch_dsmark.c 	p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle,
p                 392 net/sched/sch_dsmark.c 	if (p->q == NULL)
p                 393 net/sched/sch_dsmark.c 		p->q = &noop_qdisc;
p                 395 net/sched/sch_dsmark.c 		qdisc_hash_add(p->q, true);
p                 397 net/sched/sch_dsmark.c 	pr_debug("%s: qdisc %p\n", __func__, p->q);
p                 406 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
p                 408 net/sched/sch_dsmark.c 	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
p                 409 net/sched/sch_dsmark.c 	qdisc_reset(p->q);
p                 416 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
p                 418 net/sched/sch_dsmark.c 	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
p                 420 net/sched/sch_dsmark.c 	tcf_block_put(p->block);
p                 421 net/sched/sch_dsmark.c 	qdisc_put(p->q);
p                 422 net/sched/sch_dsmark.c 	if (p->mv != p->embedded)
p                 423 net/sched/sch_dsmark.c 		kfree(p->mv);
p                 429 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
p                 432 net/sched/sch_dsmark.c 	pr_debug("%s(sch %p,[qdisc %p],class %ld\n", __func__, sch, p, cl);
p                 434 net/sched/sch_dsmark.c 	if (!dsmark_valid_index(p, cl))
p                 438 net/sched/sch_dsmark.c 	tcm->tcm_info = p->q->handle;
p                 443 net/sched/sch_dsmark.c 	if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mv[cl - 1].mask) ||
p                 444 net/sched/sch_dsmark.c 	    nla_put_u8(skb, TCA_DSMARK_VALUE, p->mv[cl - 1].value))
p                 456 net/sched/sch_dsmark.c 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
p                 462 net/sched/sch_dsmark.c 	if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
p                 465 net/sched/sch_dsmark.c 	if (p->default_index != NO_DEFAULT_INDEX &&
p                 466 net/sched/sch_dsmark.c 	    nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
p                 469 net/sched/sch_dsmark.c 	if (p->set_tc_index &&
p                 111 net/sched/sch_etf.c 	struct rb_node *p;
p                 113 net/sched/sch_etf.c 	p = rb_first_cached(&q->head);
p                 114 net/sched/sch_etf.c 	if (!p)
p                 117 net/sched/sch_etf.c 	return rb_to_skb(p);
p                 166 net/sched/sch_etf.c 	struct rb_node **p = &q->head.rb_root.rb_node, *parent = NULL;
p                 176 net/sched/sch_etf.c 	while (*p) {
p                 179 net/sched/sch_etf.c 		parent = *p;
p                 182 net/sched/sch_etf.c 			p = &parent->rb_right;
p                 185 net/sched/sch_etf.c 			p = &parent->rb_left;
p                 188 net/sched/sch_etf.c 	rb_link_node(&nskb->rbnode, parent, p);
p                 423 net/sched/sch_etf.c 	struct rb_node *p = rb_first_cached(&q->head);
p                 425 net/sched/sch_etf.c 	while (p) {
p                 426 net/sched/sch_etf.c 		struct sk_buff *skb = rb_to_skb(p);
p                 428 net/sched/sch_etf.c 		p = rb_next(p);
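The sch_etf lines above (and the sch_fq, sch_hfsc, sch_htb and sch_netem ones that follow) all use the same open-coded rbtree insertion idiom: walk down from the root remembering the parent and the child link slot, then rb_link_node() and rebalance. A generic sketch of that idiom for a timestamp-ordered cached tree (the node type and key below are invented for the example, not any scheduler's real structures):

	struct tstamp_node {
		struct rb_node	rbnode;
		u64		tstamp;
	};

	static void tstamp_insert(struct rb_root_cached *root, struct tstamp_node *new)
	{
		struct rb_node **p = &root->rb_root.rb_node, *parent = NULL;
		bool leftmost = true;

		while (*p) {
			struct tstamp_node *cur;

			parent = *p;
			cur = rb_entry(parent, struct tstamp_node, rbnode);
			if (new->tstamp >= cur->tstamp) {
				p = &parent->rb_right;
				leftmost = false;	/* went right at least once */
			} else {
				p = &parent->rb_left;
			}
		}

		rb_link_node(&new->rbnode, parent, p);
		rb_insert_color_cached(&new->rbnode, root, leftmost);
	}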
p                 165 net/sched/sch_fq.c 	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
p                 167 net/sched/sch_fq.c 	while (*p) {
p                 170 net/sched/sch_fq.c 		parent = *p;
p                 173 net/sched/sch_fq.c 			p = &parent->rb_right;
p                 175 net/sched/sch_fq.c 			p = &parent->rb_left;
p                 177 net/sched/sch_fq.c 	rb_link_node(&f->rate_node, parent, p);
p                 206 net/sched/sch_fq.c 	struct rb_node **p, *parent;
p                 209 net/sched/sch_fq.c 	p = &root->rb_node;
p                 211 net/sched/sch_fq.c 	while (*p) {
p                 212 net/sched/sch_fq.c 		parent = *p;
p                 225 net/sched/sch_fq.c 			p = &parent->rb_right;
p                 227 net/sched/sch_fq.c 			p = &parent->rb_left;
p                 243 net/sched/sch_fq.c 	struct rb_node **p, *parent;
p                 288 net/sched/sch_fq.c 	p = &root->rb_node;
p                 290 net/sched/sch_fq.c 	while (*p) {
p                 291 net/sched/sch_fq.c 		parent = *p;
p                 314 net/sched/sch_fq.c 			p = &parent->rb_right;
p                 316 net/sched/sch_fq.c 			p = &parent->rb_left;
p                 336 net/sched/sch_fq.c 	rb_link_node(&f->fq_node, parent, p);
p                 388 net/sched/sch_fq.c 	struct rb_node **p, *parent;
p                 405 net/sched/sch_fq.c 	p = &flow->t_root.rb_node;
p                 408 net/sched/sch_fq.c 	while (*p) {
p                 409 net/sched/sch_fq.c 		parent = *p;
p                 412 net/sched/sch_fq.c 			p = &parent->rb_right;
p                 414 net/sched/sch_fq.c 			p = &parent->rb_left;
p                 416 net/sched/sch_fq.c 	rb_link_node(&skb->rbnode, parent, p);
p                 458 net/sched/sch_fq.c 	struct rb_node *p;
p                 471 net/sched/sch_fq.c 	while ((p = rb_first(&q->delayed)) != NULL) {
p                 472 net/sched/sch_fq.c 		struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);
p                 604 net/sched/sch_fq.c 	struct rb_node *p = rb_first(&flow->t_root);
p                 606 net/sched/sch_fq.c 	while (p) {
p                 607 net/sched/sch_fq.c 		struct sk_buff *skb = rb_to_skb(p);
p                 609 net/sched/sch_fq.c 		p = rb_next(p);
p                 622 net/sched/sch_fq.c 	struct rb_node *p;
p                 636 net/sched/sch_fq.c 		while ((p = rb_first(root)) != NULL) {
p                 637 net/sched/sch_fq.c 			f = rb_entry(p, struct fq_flow, fq_node);
p                 638 net/sched/sch_fq.c 			rb_erase(p, root);
p                 806 net/sched/sch_generic.c 	void *p;
p                 819 net/sched/sch_generic.c 	p = kzalloc_node(size, GFP_KERNEL,
p                 822 net/sched/sch_generic.c 	if (!p)
p                 824 net/sched/sch_generic.c 	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
p                 826 net/sched/sch_generic.c 	if (sch != p) {
p                 827 net/sched/sch_generic.c 		kfree(p);
p                 828 net/sched/sch_generic.c 		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
p                 830 net/sched/sch_generic.c 		if (!p)
p                 832 net/sched/sch_generic.c 		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
p                 833 net/sched/sch_generic.c 		sch->padded = (char *) sch - (char *) p;
p                 875 net/sched/sch_generic.c 	kfree(p);
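The sch_generic lines above allocate the Qdisc plus its private area and, if the zeroed block is not already QDISC_ALIGN-aligned, re-allocate with slack, round the pointer up, and record the offset so the original allocation can still be freed. A simplified, self-contained version of that trick (always over-allocating instead of retrying; the constants and names here are illustrative, not the kernel API):

	#include <stdint.h>
	#include <stdlib.h>

	#define ALIGNTO		64UL			/* stand-in for QDISC_ALIGNTO */
	#define ALIGN_UP(x)	(((x) + ALIGNTO - 1) & ~(ALIGNTO - 1))

	struct obj {
		unsigned int	padded;			/* offset back to the raw allocation */
		/* ... payload ... */
	};

	static struct obj *obj_alloc(size_t size)
	{
		void *raw = calloc(1, size + ALIGNTO - 1);
		struct obj *o;

		if (!raw)
			return NULL;
		o = (struct obj *)ALIGN_UP((uintptr_t)raw);
		o->padded = (unsigned int)((char *)o - (char *)raw);
		return o;
	}

	static void obj_free(struct obj *o)
	{
		free((char *)o - o->padded);		/* undo the alignment offset */
	}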
p                 188 net/sched/sch_hfsc.c 	struct rb_node **p = &cl->sched->eligible.rb_node;
p                 192 net/sched/sch_hfsc.c 	while (*p != NULL) {
p                 193 net/sched/sch_hfsc.c 		parent = *p;
p                 196 net/sched/sch_hfsc.c 			p = &parent->rb_right;
p                 198 net/sched/sch_hfsc.c 			p = &parent->rb_left;
p                 200 net/sched/sch_hfsc.c 	rb_link_node(&cl->el_node, parent, p);
p                 221 net/sched/sch_hfsc.c 	struct hfsc_class *p, *cl = NULL;
p                 225 net/sched/sch_hfsc.c 		p = rb_entry(n, struct hfsc_class, el_node);
p                 226 net/sched/sch_hfsc.c 		if (p->cl_e > cur_time)
p                 228 net/sched/sch_hfsc.c 		if (cl == NULL || p->cl_d < cl->cl_d)
p                 229 net/sched/sch_hfsc.c 			cl = p;
p                 253 net/sched/sch_hfsc.c 	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
p                 257 net/sched/sch_hfsc.c 	while (*p != NULL) {
p                 258 net/sched/sch_hfsc.c 		parent = *p;
p                 261 net/sched/sch_hfsc.c 			p = &parent->rb_right;
p                 263 net/sched/sch_hfsc.c 			p = &parent->rb_left;
p                 265 net/sched/sch_hfsc.c 	rb_link_node(&cl->vt_node, parent, p);
p                 285 net/sched/sch_hfsc.c 	struct hfsc_class *p;
p                 289 net/sched/sch_hfsc.c 		p = rb_entry(n, struct hfsc_class, vt_node);
p                 290 net/sched/sch_hfsc.c 		if (p->cl_f <= cur_time)
p                 291 net/sched/sch_hfsc.c 			return p;
p                 322 net/sched/sch_hfsc.c 	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
p                 326 net/sched/sch_hfsc.c 	while (*p != NULL) {
p                 327 net/sched/sch_hfsc.c 		parent = *p;
p                 330 net/sched/sch_hfsc.c 			p = &parent->rb_right;
p                 332 net/sched/sch_hfsc.c 			p = &parent->rb_left;
p                 334 net/sched/sch_hfsc.c 	rb_link_node(&cl->cf_node, parent, p);
p                 654 net/sched/sch_hfsc.c 	struct hfsc_class *p;
p                 660 net/sched/sch_hfsc.c 	p = rb_entry(n, struct hfsc_class, cf_node);
p                 661 net/sched/sch_hfsc.c 	cl->cl_cfmin = p->cl_f;
p                 850 net/sched/sch_hfsc.c 	struct hfsc_class *p;
p                 855 net/sched/sch_hfsc.c 		list_for_each_entry(p, &cl->children, siblings) {
p                 856 net/sched/sch_hfsc.c 			if (p->level >= level)
p                 857 net/sched/sch_hfsc.c 				level = p->level + 1;
p                1221 net/sched/sch_hfsc.c 	struct hfsc_class *p = (struct hfsc_class *)parent;
p                1225 net/sched/sch_hfsc.c 		if (p != NULL && p->level <= cl->level)
p                 277 net/sched/sch_htb.c 	struct rb_node **p = &root->rb_node, *parent = NULL;
p                 279 net/sched/sch_htb.c 	while (*p) {
p                 281 net/sched/sch_htb.c 		parent = *p;
p                 285 net/sched/sch_htb.c 			p = &parent->rb_right;
p                 287 net/sched/sch_htb.c 			p = &parent->rb_left;
p                 289 net/sched/sch_htb.c 	rb_link_node(&cl->node[prio], parent, p);
p                 303 net/sched/sch_htb.c 	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;
p                 313 net/sched/sch_htb.c 	while (*p) {
p                 315 net/sched/sch_htb.c 		parent = *p;
p                 318 net/sched/sch_htb.c 			p = &parent->rb_right;
p                 320 net/sched/sch_htb.c 			p = &parent->rb_left;
p                 322 net/sched/sch_htb.c 	rb_link_node(&cl->pq_node, parent, p);
p                 402 net/sched/sch_htb.c 	struct htb_class *p = cl->parent;
p                 405 net/sched/sch_htb.c 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
p                 411 net/sched/sch_htb.c 			if (p->inner.clprio[prio].feed.rb_node)
p                 417 net/sched/sch_htb.c 			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
p                 419 net/sched/sch_htb.c 		p->prio_activity |= mask;
p                 420 net/sched/sch_htb.c 		cl = p;
p                 421 net/sched/sch_htb.c 		p = cl->parent;
p                 437 net/sched/sch_htb.c 	struct htb_class *p = cl->parent;
p                 440 net/sched/sch_htb.c 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
p                 447 net/sched/sch_htb.c 			if (p->inner.clprio[prio].ptr == cl->node + prio) {
p                 452 net/sched/sch_htb.c 				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
p                 453 net/sched/sch_htb.c 				p->inner.clprio[prio].ptr = NULL;
p                 457 net/sched/sch_htb.c 					  &p->inner.clprio[prio].feed);
p                 459 net/sched/sch_htb.c 			if (!p->inner.clprio[prio].feed.rb_node)
p                 463 net/sched/sch_htb.c 		p->prio_activity &= ~mask;
p                 464 net/sched/sch_htb.c 		cl = p;
p                 465 net/sched/sch_htb.c 		p = cl->parent;
p                 713 net/sched/sch_htb.c 		struct rb_node *p = rb_first(wait_pq);
p                 715 net/sched/sch_htb.c 		if (!p)
p                 718 net/sched/sch_htb.c 		cl = rb_entry(p, struct htb_class, pq_node);
p                 722 net/sched/sch_htb.c 		htb_safe_rb_erase(p, wait_pq);
p                 363 net/sched/sch_netem.c 	struct rb_node *p = rb_first(&q->t_root);
p                 365 net/sched/sch_netem.c 	while (p) {
p                 366 net/sched/sch_netem.c 		struct sk_buff *skb = rb_to_skb(p);
p                 368 net/sched/sch_netem.c 		p = rb_next(p);
p                 390 net/sched/sch_netem.c 		struct rb_node **p = &q->t_root.rb_node, *parent = NULL;
p                 392 net/sched/sch_netem.c 		while (*p) {
p                 395 net/sched/sch_netem.c 			parent = *p;
p                 398 net/sched/sch_netem.c 				p = &parent->rb_right;
p                 400 net/sched/sch_netem.c 				p = &parent->rb_left;
p                 402 net/sched/sch_netem.c 		rb_link_node(&nskb->rbnode, parent, p);
p                 901 net/sched/sch_netem.c 			q->clg.a1 = ge->p;
p                1111 net/sched/sch_netem.c 			.p = q->clg.a1,
p                 205 net/sched/sch_sfq.c 	sfq_index p, n;
p                 209 net/sched/sch_sfq.c 	p = qlen + SFQ_MAX_FLOWS;
p                 213 net/sched/sch_sfq.c 	slot->dep.prev = p;
p                 219 net/sched/sch_sfq.c #define sfq_unlink(q, x, n, p)			\
p                 222 net/sched/sch_sfq.c 		p = q->slots[x].dep.prev;	\
p                 223 net/sched/sch_sfq.c 		sfq_dep_head(q, p)->next = n;	\
p                 224 net/sched/sch_sfq.c 		sfq_dep_head(q, n)->prev = p;	\
p                 230 net/sched/sch_sfq.c 	sfq_index p, n;
p                 233 net/sched/sch_sfq.c 	sfq_unlink(q, x, n, p);
p                 236 net/sched/sch_sfq.c 	if (n == p && q->cur_depth == d)
p                 243 net/sched/sch_sfq.c 	sfq_index p, n;
p                 246 net/sched/sch_sfq.c 	sfq_unlink(q, x, n, p);
p                 629 net/sched/sch_sfq.c 	struct red_parms *p = NULL;
p                 653 net/sched/sch_sfq.c 		p = kmalloc(sizeof(*p), GFP_KERNEL);
p                 654 net/sched/sch_sfq.c 		if (!p)
p                 672 net/sched/sch_sfq.c 		if (p) {
p                 673 net/sched/sch_sfq.c 			swap(q->red_parms, p);
p                 705 net/sched/sch_sfq.c 	kfree(p);
p                 793 net/sched/sch_sfq.c 	struct red_parms *p = q->red_parms;
p                 804 net/sched/sch_sfq.c 	if (p) {
p                 805 net/sched/sch_sfq.c 		opt.qth_min	= p->qth_min >> p->Wlog;
p                 806 net/sched/sch_sfq.c 		opt.qth_max	= p->qth_max >> p->Wlog;
p                 807 net/sched/sch_sfq.c 		opt.Wlog	= p->Wlog;
p                 808 net/sched/sch_sfq.c 		opt.Plog	= p->Plog;
p                 809 net/sched/sch_sfq.c 		opt.Scell_log	= p->Scell_log;
p                 810 net/sched/sch_sfq.c 		opt.max_P	= p->max_P;
p                  58 net/sctp/associola.c 	struct sctp_paramhdr *p;
p                 267 net/sctp/associola.c 	p = (struct sctp_paramhdr *)asoc->c.auth_random;
p                 268 net/sctp/associola.c 	p->type = SCTP_PARAM_RANDOM;
p                 269 net/sctp/associola.c 	p->length = htons(sizeof(*p) + SCTP_AUTH_RANDOM_LENGTH);
p                 270 net/sctp/associola.c 	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);
p                 763 net/sctp/auth.c 	struct sctp_chunks_param *p = ep->auth_chunk_list;
p                 768 net/sctp/auth.c 	if (__sctp_auth_cid(chunk_id, p))
p                 772 net/sctp/auth.c 	param_len = ntohs(p->param_hdr.length);
p                 777 net/sctp/auth.c 	p->chunks[nchunks] = chunk_id;
p                 778 net/sctp/auth.c 	p->param_hdr.length = htons(param_len + 1);
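The associola.c and auth.c lines above fill SCTP parameters as plain TLVs: a 4-byte header whose 16-bit type and 16-bit length are big-endian, with the length covering the header itself and the body starting immediately after it (hence the "p + 1" pointer arithmetic). A self-contained sketch of that fill pattern (the helper name and buffer handling are invented for the example):

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <string.h>

	struct paramhdr {
		uint16_t type;
		uint16_t length;	/* includes the header itself */
	} __attribute__((packed));

	static size_t put_param(uint8_t *buf, uint16_t type,
				const void *body, uint16_t body_len)
	{
		struct paramhdr *p = (struct paramhdr *)buf;

		p->type = htons(type);
		p->length = htons(sizeof(*p) + body_len);
		memcpy(p + 1, body, body_len);	/* body starts right after the header */
		return sizeof(*p) + body_len;
	}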
p                 248 net/sctp/diag.c static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
p                 252 net/sctp/diag.c 	struct sctp_comm_param *commp = p;
p                 295 net/sctp/diag.c static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
p                 298 net/sctp/diag.c 	struct sctp_comm_param *commp = p;
p                 349 net/sctp/diag.c static int sctp_sock_filter(struct sctp_transport *tsp, void *p)
p                 352 net/sctp/diag.c 	struct sctp_comm_param *commp = p;
p                 368 net/sctp/diag.c static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
p                 370 net/sctp/diag.c 	struct sctp_comm_param *commp = p;
p                1130 net/sctp/input.c 		af = sctp_get_af_specific(param_type2af(params.p->type));
p                1173 net/sctp/input.c 	af = sctp_get_af_specific(param_type2af(param->p.type));
p                 201 net/sctp/proc.c 	struct seq_net_private p;
p                1646 net/sctp/sm_make_chunk.c 	retval->p.type = SCTP_PARAM_STATE_COOKIE;
p                1647 net/sctp/sm_make_chunk.c 	retval->p.length = htons(*cookie_len);
p                1956 net/sctp/sm_make_chunk.c 	__u16 len = ntohs(param.p->length);
p                1977 net/sctp/sm_make_chunk.c 	__u16 num_ext = ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
p                2011 net/sctp/sm_make_chunk.c 	__u16 num_ext = ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
p                2079 net/sctp/sm_make_chunk.c 	switch (param.p->type & SCTP_PARAM_ACTION_MASK) {
p                2106 net/sctp/sm_make_chunk.c 				     ntohs(param.p->length)))
p                2107 net/sctp/sm_make_chunk.c 			sctp_addto_chunk(*errp, ntohs(param.p->length),
p                2142 net/sctp/sm_make_chunk.c 	switch (param.p->type) {
p                2184 net/sctp/sm_make_chunk.c 		if (SCTP_AUTH_RANDOM_LENGTH != ntohs(param.p->length) -
p                2186 net/sctp/sm_make_chunk.c 			sctp_process_inv_paramlength(asoc, param.p,
p                2201 net/sctp/sm_make_chunk.c 		if (260 < ntohs(param.p->length)) {
p                2202 net/sctp/sm_make_chunk.c 			sctp_process_inv_paramlength(asoc, param.p,
p                2212 net/sctp/sm_make_chunk.c 		hmacs = (struct sctp_hmac_algo_param *)param.p;
p                2213 net/sctp/sm_make_chunk.c 		n_elt = (ntohs(param.p->length) -
p                2228 net/sctp/sm_make_chunk.c 			sctp_process_inv_paramlength(asoc, param.p, chunk,
p                2236 net/sctp/sm_make_chunk.c 			 __func__, ntohs(param.p->type), cid);
p                2265 net/sctp/sm_make_chunk.c 		if (param.p->type == SCTP_PARAM_STATE_COOKIE)
p                2277 net/sctp/sm_make_chunk.c 		return sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
p                2340 net/sctp/sm_make_chunk.c 		if (!src_match && (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
p                2341 net/sctp/sm_make_chunk.c 		    param.p->type == SCTP_PARAM_IPV6_ADDRESS)) {
p                2342 net/sctp/sm_make_chunk.c 			af = sctp_get_af_specific(param_type2af(param.p->type));
p                2513 net/sctp/sm_make_chunk.c 	switch (param.p->type) {
p                2524 net/sctp/sm_make_chunk.c 		af = sctp_get_af_specific(param_type2af(param.p->type));
p                2564 net/sctp/sm_make_chunk.c 		sat = ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
p                2591 net/sctp/sm_make_chunk.c 			ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
p                2625 net/sctp/sm_make_chunk.c 		af = sctp_get_af_specific(param_type2af(addr_param->p.type));
p                2663 net/sctp/sm_make_chunk.c 		asoc->peer.peer_random = kmemdup(param.p,
p                2664 net/sctp/sm_make_chunk.c 					    ntohs(param.p->length), gfp);
p                2677 net/sctp/sm_make_chunk.c 		asoc->peer.peer_hmacs = kmemdup(param.p,
p                2678 net/sctp/sm_make_chunk.c 					    ntohs(param.p->length), gfp);
p                2693 net/sctp/sm_make_chunk.c 		asoc->peer.peer_chunks = kmemdup(param.p,
p                2694 net/sctp/sm_make_chunk.c 					    ntohs(param.p->length), gfp);
p                2706 net/sctp/sm_make_chunk.c 			 __func__, ntohs(param.p->type), asoc);
p                3031 net/sctp/sm_make_chunk.c 	switch (addr_param->p.type) {
p                3044 net/sctp/sm_make_chunk.c 	af = sctp_get_af_specific(param_type2af(addr_param->p.type));
p                3167 net/sctp/sm_make_chunk.c 		size_t length = ntohs(param.p->length);
p                3169 net/sctp/sm_make_chunk.c 		*errp = param.p;
p                3170 net/sctp/sm_make_chunk.c 		switch (param.p->type) {
p                3253 net/sctp/sm_make_chunk.c 	length = ntohs(addr_param->p.length);
p                3268 net/sctp/sm_make_chunk.c 		if (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
p                3269 net/sctp/sm_make_chunk.c 		    param.p->type == SCTP_PARAM_IPV6_ADDRESS)
p                3324 net/sctp/sm_make_chunk.c 	af = sctp_get_af_specific(param_type2af(addr_param->p.type));
p                3448 net/sctp/sm_make_chunk.c 	length = ntohs(addr_param->p.length);
p                3842 net/sctp/sm_make_chunk.c 		__u16 length = ntohs(param.p->length);
p                3844 net/sctp/sm_make_chunk.c 		*errp = param.p;
p                3847 net/sctp/sm_make_chunk.c 		switch (param.p->type) {
p                3885 net/sctp/sm_make_chunk.c 		last = param.p->type;
p                4003 net/sctp/sm_statefuns.c 		if (param.p->type == SCTP_PARAM_RESET_OUT_REQUEST)
p                4006 net/sctp/sm_statefuns.c 		else if (param.p->type == SCTP_PARAM_RESET_IN_REQUEST)
p                4009 net/sctp/sm_statefuns.c 		else if (param.p->type == SCTP_PARAM_RESET_TSN_REQUEST)
p                4012 net/sctp/sm_statefuns.c 		else if (param.p->type == SCTP_PARAM_RESET_ADD_OUT_STREAMS)
p                4015 net/sctp/sm_statefuns.c 		else if (param.p->type == SCTP_PARAM_RESET_ADD_IN_STREAMS)
p                4018 net/sctp/sm_statefuns.c 		else if (param.p->type == SCTP_PARAM_RESET_RESPONSE)
p                 553 net/sctp/socket.c 	struct list_head		*p;
p                 602 net/sctp/socket.c 		p = bp->address_list.next;
p                 603 net/sctp/socket.c 		laddr = list_entry(p, struct sctp_sockaddr_entry, list);
p                5355 net/sctp/socket.c 			   void *p) {
p                5365 net/sctp/socket.c 			err = cb(sctp_ep(epb), p);
p                5379 net/sctp/socket.c 				  const union sctp_addr *paddr, void *p)
p                5390 net/sctp/socket.c 	err = cb(transport, p);
p                5399 net/sctp/socket.c 			    struct net *net, int *pos, void *p) {
p                5410 net/sctp/socket.c 		ret = cb(tsp, p);
p                5419 net/sctp/socket.c 		if (cb_done && !cb_done(tsp, p)) {
p                6914 net/sctp/socket.c 	struct sctp_hmacalgo  __user *p = (void __user *)optval;
p                6935 net/sctp/socket.c 	if (put_user(num_idents, &p->shmac_num_idents))
p                6940 net/sctp/socket.c 		if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
p                6985 net/sctp/socket.c 	struct sctp_authchunks __user *p = (void __user *)optval;
p                6998 net/sctp/socket.c 	to = p->gauth_chunks;
p                7021 net/sctp/socket.c 	if (put_user(num_chunks, &p->gauth_number_of_chunks))
p                7030 net/sctp/socket.c 	struct sctp_authchunks __user *p = (void __user *)optval;
p                7043 net/sctp/socket.c 	to = p->gauth_chunks;
p                7071 net/sctp/socket.c 	if (put_user(num_chunks, &p->gauth_number_of_chunks))
p                 548 net/sctp/stream.c 	nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
p                 634 net/sctp/stream.c 	nums = (ntohs(param.p->length) - sizeof(*inreq)) / sizeof(__u16);
p                 990 net/sctp/stream.c 		if (ntohs(param.p->length) != sizeof(*resptsn))
p                 149 net/sctp/stream_sched.c 			void *p = SCTP_SO(&asoc->stream, i)->ext;
p                 151 net/sctp/stream_sched.c 			if (!p)
p                 154 net/sctp/stream_sched.c 			p += offsetofend(struct sctp_stream_out_ext, outq);
p                 155 net/sctp/stream_sched.c 			memset(p, 0, sizeof(struct sctp_stream_out_ext) -
p                  31 net/sctp/stream_sched_prio.c 	struct sctp_stream_priorities *p;
p                  33 net/sctp/stream_sched_prio.c 	p = kmalloc(sizeof(*p), gfp);
p                  34 net/sctp/stream_sched_prio.c 	if (!p)
p                  37 net/sctp/stream_sched_prio.c 	INIT_LIST_HEAD(&p->prio_sched);
p                  38 net/sctp/stream_sched_prio.c 	INIT_LIST_HEAD(&p->active);
p                  39 net/sctp/stream_sched_prio.c 	p->next = NULL;
p                  40 net/sctp/stream_sched_prio.c 	p->prio = prio;
p                  42 net/sctp/stream_sched_prio.c 	return p;
p                  48 net/sctp/stream_sched_prio.c 	struct sctp_stream_priorities *p;
p                  54 net/sctp/stream_sched_prio.c 	list_for_each_entry(p, &stream->prio_list, prio_sched) {
p                  55 net/sctp/stream_sched_prio.c 		if (p->prio == prio)
p                  56 net/sctp/stream_sched_prio.c 			return p;
p                  57 net/sctp/stream_sched_prio.c 		if (p->prio > prio)
p                  66 net/sctp/stream_sched_prio.c 		p = SCTP_SO(stream, i)->ext->prio_head;
p                  67 net/sctp/stream_sched_prio.c 		if (!p)
p                  72 net/sctp/stream_sched_prio.c 		if (p->prio == prio)
p                  73 net/sctp/stream_sched_prio.c 			return p;
p                  80 net/sctp/stream_sched_prio.c static void sctp_sched_prio_next_stream(struct sctp_stream_priorities *p)
p                  84 net/sctp/stream_sched_prio.c 	pos = p->next->prio_list.next;
p                  85 net/sctp/stream_sched_prio.c 	if (pos == &p->active)
p                  87 net/sctp/stream_sched_prio.c 	p->next = list_entry(pos, struct sctp_stream_out_ext, prio_list);
p                 313 net/sctp/stream_sched_prio.c 	struct sctp_stream_priorities *p, *tmp;
p                 316 net/sctp/stream_sched_prio.c 	list_for_each_entry_safe(p, tmp, &stream->prio_list, prio_sched)
p                 317 net/sctp/stream_sched_prio.c 		list_for_each_entry_safe(soute, souttmp, &p->active, prio_list)
p                 165 net/sunrpc/addr.c 	char *p;
p                 178 net/sunrpc/addr.c 	p = kstrndup(delim + 1, len, GFP_KERNEL);
p                 179 net/sunrpc/addr.c 	if (p) {
p                 183 net/sunrpc/addr.c 		dev = dev_get_by_name(net, p);
p                 188 net/sunrpc/addr.c 			if (kstrtou32(p, 10, &scope_id) == 0) {
p                 189 net/sunrpc/addr.c 				kfree(p);
p                 194 net/sunrpc/addr.c 		kfree(p);
p                  87 net/sunrpc/auth.c #define param_check_hashtbl_sz(name, p) __param_check(name, p, unsigned int);
p                 129 net/sunrpc/auth_gss/auth_gss.c simple_get_bytes(const void *p, const void *end, void *res, size_t len)
p                 131 net/sunrpc/auth_gss/auth_gss.c 	const void *q = (const void *)((const char *)p + len);
p                 132 net/sunrpc/auth_gss/auth_gss.c 	if (unlikely(q > end || q < p))
p                 134 net/sunrpc/auth_gss/auth_gss.c 	memcpy(res, p, len);
p                 139 net/sunrpc/auth_gss/auth_gss.c simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
p                 144 net/sunrpc/auth_gss/auth_gss.c 	p = simple_get_bytes(p, end, &len, sizeof(len));
p                 145 net/sunrpc/auth_gss/auth_gss.c 	if (IS_ERR(p))
p                 146 net/sunrpc/auth_gss/auth_gss.c 		return p;
p                 147 net/sunrpc/auth_gss/auth_gss.c 	q = (const void *)((const char *)p + len);
p                 148 net/sunrpc/auth_gss/auth_gss.c 	if (unlikely(q > end || q < p))
p                 150 net/sunrpc/auth_gss/auth_gss.c 	dest->data = kmemdup(p, len, GFP_NOFS);
p                 188 net/sunrpc/auth_gss/auth_gss.c gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
p                 201 net/sunrpc/auth_gss/auth_gss.c 	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
p                 202 net/sunrpc/auth_gss/auth_gss.c 	if (IS_ERR(p))
p                 210 net/sunrpc/auth_gss/auth_gss.c 	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
p                 211 net/sunrpc/auth_gss/auth_gss.c 	if (IS_ERR(p))
p                 220 net/sunrpc/auth_gss/auth_gss.c 		p = simple_get_bytes(p, end, &ret, sizeof(ret));
p                 221 net/sunrpc/auth_gss/auth_gss.c 		if (!IS_ERR(p))
p                 222 net/sunrpc/auth_gss/auth_gss.c 			p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
p                 227 net/sunrpc/auth_gss/auth_gss.c 	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
p                 228 net/sunrpc/auth_gss/auth_gss.c 	if (IS_ERR(p))
p                 231 net/sunrpc/auth_gss/auth_gss.c 	p  = simple_get_bytes(p, end, &seclen, sizeof(seclen));
p                 232 net/sunrpc/auth_gss/auth_gss.c 	if (IS_ERR(p))
p                 234 net/sunrpc/auth_gss/auth_gss.c 	q = (const void *)((const char *)p + seclen);
p                 235 net/sunrpc/auth_gss/auth_gss.c 	if (unlikely(q > end || q < p)) {
p                 236 net/sunrpc/auth_gss/auth_gss.c 		p = ERR_PTR(-EFAULT);
p                 239 net/sunrpc/auth_gss/auth_gss.c 	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
p                 242 net/sunrpc/auth_gss/auth_gss.c 		p = ERR_PTR(ret);
p                 248 net/sunrpc/auth_gss/auth_gss.c 		p = q;
p                 253 net/sunrpc/auth_gss/auth_gss.c 	p = simple_get_netobj(q, end, &ctx->gc_acceptor);
p                 254 net/sunrpc/auth_gss/auth_gss.c 	if (IS_ERR(p))
p                 260 net/sunrpc/auth_gss/auth_gss.c 	return p;
p                 448 net/sunrpc/auth_gss/auth_gss.c 	char *p = gss_msg->databuf;
p                 452 net/sunrpc/auth_gss/auth_gss.c 	len = scnprintf(p, buflen, "mech=%s uid=%d", mech->gm_name,
p                 455 net/sunrpc/auth_gss/auth_gss.c 	p += len;
p                 463 net/sunrpc/auth_gss/auth_gss.c 		len = scnprintf(p, buflen, " target=%s", target_name);
p                 465 net/sunrpc/auth_gss/auth_gss.c 		p += len;
p                 483 net/sunrpc/auth_gss/auth_gss.c 			len = scnprintf(p, buflen, " service=%s",
p                 486 net/sunrpc/auth_gss/auth_gss.c 			len = scnprintf(p, buflen,
p                 491 net/sunrpc/auth_gss/auth_gss.c 		p += len;
p                 496 net/sunrpc/auth_gss/auth_gss.c 		len = scnprintf(p, buflen, " enctypes=%s",
p                 499 net/sunrpc/auth_gss/auth_gss.c 		p += len;
p                 503 net/sunrpc/auth_gss/auth_gss.c 	len = scnprintf(p, buflen, "\n");
p                 719 net/sunrpc/auth_gss/auth_gss.c 	const void *p, *end;
p                 740 net/sunrpc/auth_gss/auth_gss.c 	p = simple_get_bytes(buf, end, &id, sizeof(id));
p                 741 net/sunrpc/auth_gss/auth_gss.c 	if (IS_ERR(p)) {
p                 742 net/sunrpc/auth_gss/auth_gss.c 		err = PTR_ERR(p);
p                 768 net/sunrpc/auth_gss/auth_gss.c 	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
p                 769 net/sunrpc/auth_gss/auth_gss.c 	if (IS_ERR(p)) {
p                 770 net/sunrpc/auth_gss/auth_gss.c 		err = PTR_ERR(p);
p                 895 net/sunrpc/auth_gss/auth_gss.c 	struct gss_pipe *p = pdo->pdo_data;
p                 898 net/sunrpc/auth_gss/auth_gss.c 	dentry = rpc_mkpipe_dentry(dir, p->name, p->clnt, p->pipe);
p                 901 net/sunrpc/auth_gss/auth_gss.c 	p->pipe->dentry = dentry;
p                 914 net/sunrpc/auth_gss/auth_gss.c 	struct gss_pipe *p;
p                 917 net/sunrpc/auth_gss/auth_gss.c 	p = kmalloc(sizeof(*p), GFP_KERNEL);
p                 918 net/sunrpc/auth_gss/auth_gss.c 	if (p == NULL)
p                 920 net/sunrpc/auth_gss/auth_gss.c 	p->pipe = rpc_mkpipe_data(upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
p                 921 net/sunrpc/auth_gss/auth_gss.c 	if (IS_ERR(p->pipe)) {
p                 922 net/sunrpc/auth_gss/auth_gss.c 		err = PTR_ERR(p->pipe);
p                 925 net/sunrpc/auth_gss/auth_gss.c 	p->name = name;
p                 926 net/sunrpc/auth_gss/auth_gss.c 	p->clnt = clnt;
p                 927 net/sunrpc/auth_gss/auth_gss.c 	kref_init(&p->kref);
p                 928 net/sunrpc/auth_gss/auth_gss.c 	rpc_init_pipe_dir_object(&p->pdo,
p                 930 net/sunrpc/auth_gss/auth_gss.c 			p);
p                 931 net/sunrpc/auth_gss/auth_gss.c 	return p;
p                 933 net/sunrpc/auth_gss/auth_gss.c 	kfree(p);
p                 992 net/sunrpc/auth_gss/auth_gss.c static void __gss_pipe_free(struct gss_pipe *p)
p                 994 net/sunrpc/auth_gss/auth_gss.c 	struct rpc_clnt *clnt = p->clnt;
p                 999 net/sunrpc/auth_gss/auth_gss.c 			&p->pdo);
p                1000 net/sunrpc/auth_gss/auth_gss.c 	rpc_destroy_pipe_data(p->pipe);
p                1001 net/sunrpc/auth_gss/auth_gss.c 	kfree(p);
p                1006 net/sunrpc/auth_gss/auth_gss.c 	struct gss_pipe *p = container_of(kref, struct gss_pipe, kref);
p                1008 net/sunrpc/auth_gss/auth_gss.c 	__gss_pipe_free(p);
p                1011 net/sunrpc/auth_gss/auth_gss.c static void gss_pipe_free(struct gss_pipe *p)
p                1013 net/sunrpc/auth_gss/auth_gss.c 	if (p != NULL)
p                1014 net/sunrpc/auth_gss/auth_gss.c 		kref_put(&p->kref, __gss_pipe_release);
p                1533 net/sunrpc/auth_gss/auth_gss.c 	__be32		*p, *cred_len;
p                1542 net/sunrpc/auth_gss/auth_gss.c 	p = xdr_reserve_space(xdr, 7 * sizeof(*p) +
p                1544 net/sunrpc/auth_gss/auth_gss.c 	if (!p)
p                1546 net/sunrpc/auth_gss/auth_gss.c 	*p++ = rpc_auth_gss;
p                1547 net/sunrpc/auth_gss/auth_gss.c 	cred_len = p++;
p                1556 net/sunrpc/auth_gss/auth_gss.c 	*p++ = cpu_to_be32(RPC_GSS_VERSION);
p                1557 net/sunrpc/auth_gss/auth_gss.c 	*p++ = cpu_to_be32(ctx->gc_proc);
p                1558 net/sunrpc/auth_gss/auth_gss.c 	*p++ = cpu_to_be32(req->rq_seqno);
p                1559 net/sunrpc/auth_gss/auth_gss.c 	*p++ = cpu_to_be32(gss_cred->gc_service);
p                1560 net/sunrpc/auth_gss/auth_gss.c 	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
p                1561 net/sunrpc/auth_gss/auth_gss.c 	*cred_len = cpu_to_be32((p - (cred_len + 1)) << 2);
p                1568 net/sunrpc/auth_gss/auth_gss.c 	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
p                1571 net/sunrpc/auth_gss/auth_gss.c 	p = xdr_reserve_space(xdr, sizeof(*p));
p                1572 net/sunrpc/auth_gss/auth_gss.c 	if (!p)
p                1574 net/sunrpc/auth_gss/auth_gss.c 	*p++ = rpc_auth_gss;
p                1575 net/sunrpc/auth_gss/auth_gss.c 	mic.data = (u8 *)(p + 1);
p                1581 net/sunrpc/auth_gss/auth_gss.c 	if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
p                1676 net/sunrpc/auth_gss/auth_gss.c 	__be32		*p, *seq = NULL;
p                1683 net/sunrpc/auth_gss/auth_gss.c 	p = xdr_inline_decode(xdr, 2 * sizeof(*p));
p                1684 net/sunrpc/auth_gss/auth_gss.c 	if (!p)
p                1686 net/sunrpc/auth_gss/auth_gss.c 	if (*p++ != rpc_auth_gss)
p                1688 net/sunrpc/auth_gss/auth_gss.c 	len = be32_to_cpup(p);
p                1691 net/sunrpc/auth_gss/auth_gss.c 	p = xdr_inline_decode(xdr, len);
p                1692 net/sunrpc/auth_gss/auth_gss.c 	if (!p)
p                1702 net/sunrpc/auth_gss/auth_gss.c 	mic.data = (u8 *)p;
p                1734 net/sunrpc/auth_gss/auth_gss.c 	__be32 *p, *integ_len;
p                1737 net/sunrpc/auth_gss/auth_gss.c 	p = xdr_reserve_space(xdr, 2 * sizeof(*p));
p                1738 net/sunrpc/auth_gss/auth_gss.c 	if (!p)
p                1740 net/sunrpc/auth_gss/auth_gss.c 	integ_len = p++;
p                1741 net/sunrpc/auth_gss/auth_gss.c 	*p = cpu_to_be32(rqstp->rq_seqno);
p                1746 net/sunrpc/auth_gss/auth_gss.c 	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
p                1752 net/sunrpc/auth_gss/auth_gss.c 	p = xdr_reserve_space(xdr, 0);
p                1753 net/sunrpc/auth_gss/auth_gss.c 	if (!p)
p                1755 net/sunrpc/auth_gss/auth_gss.c 	mic.data = (u8 *)(p + 1);
p                1762 net/sunrpc/auth_gss/auth_gss.c 	if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
p                1827 net/sunrpc/auth_gss/auth_gss.c 	__be32		*p, *opaque_len;
p                1833 net/sunrpc/auth_gss/auth_gss.c 	p = xdr_reserve_space(xdr, 2 * sizeof(*p));
p                1834 net/sunrpc/auth_gss/auth_gss.c 	if (!p)
p                1836 net/sunrpc/auth_gss/auth_gss.c 	opaque_len = p++;
p                1837 net/sunrpc/auth_gss/auth_gss.c 	*p = cpu_to_be32(rqstp->rq_seqno);
p                1862 net/sunrpc/auth_gss/auth_gss.c 	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
p                1880 net/sunrpc/auth_gss/auth_gss.c 	p = iov->iov_base + iov->iov_len;
p                1882 net/sunrpc/auth_gss/auth_gss.c 	memset(p, 0, pad);
p                2034 net/sunrpc/auth_gss/auth_gss.c 	__be32 *p;
p                2036 net/sunrpc/auth_gss/auth_gss.c 	p = xdr_inline_decode(xdr, 2 * sizeof(*p));
p                2037 net/sunrpc/auth_gss/auth_gss.c 	if (unlikely(!p))
p                2039 net/sunrpc/auth_gss/auth_gss.c 	opaque_len = be32_to_cpup(p++);
p                2040 net/sunrpc/auth_gss/auth_gss.c 	offset = (u8 *)(p) - (u8 *)head->iov_base;
p                2051 net/sunrpc/auth_gss/auth_gss.c 	if (be32_to_cpup(p++) != rqstp->rq_seqno)
p                2057 net/sunrpc/auth_gss/auth_gss.c 	xdr_init_decode(xdr, rcv_buf, p, rqstp);
p                2067 net/sunrpc/auth_gss/auth_gss.c 	trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(--p));
p                 658 net/sunrpc/auth_gss/gss_krb5_crypto.c 	u8 *p;
p                 666 net/sunrpc/auth_gss/gss_krb5_crypto.c 	p = buf->head[0].iov_base + base;
p                 668 net/sunrpc/auth_gss/gss_krb5_crypto.c 	memmove(p + shiftlen, p, buf->head[0].iov_len - base);
p                 168 net/sunrpc/auth_gss/gss_krb5_mech.c simple_get_bytes(const void *p, const void *end, void *res, int len)
p                 170 net/sunrpc/auth_gss/gss_krb5_mech.c 	const void *q = (const void *)((const char *)p + len);
p                 171 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (unlikely(q > end || q < p))
p                 173 net/sunrpc/auth_gss/gss_krb5_mech.c 	memcpy(res, p, len);
p                 178 net/sunrpc/auth_gss/gss_krb5_mech.c simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
p                 183 net/sunrpc/auth_gss/gss_krb5_mech.c 	p = simple_get_bytes(p, end, &len, sizeof(len));
p                 184 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (IS_ERR(p))
p                 185 net/sunrpc/auth_gss/gss_krb5_mech.c 		return p;
p                 186 net/sunrpc/auth_gss/gss_krb5_mech.c 	q = (const void *)((const char *)p + len);
p                 187 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (unlikely(q > end || q < p))
p                 189 net/sunrpc/auth_gss/gss_krb5_mech.c 	res->data = kmemdup(p, len, GFP_NOFS);
p                 197 net/sunrpc/auth_gss/gss_krb5_mech.c get_key(const void *p, const void *end,
p                 203 net/sunrpc/auth_gss/gss_krb5_mech.c 	p = simple_get_bytes(p, end, &alg, sizeof(alg));
p                 204 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (IS_ERR(p))
p                 219 net/sunrpc/auth_gss/gss_krb5_mech.c 		p = ERR_PTR(-EINVAL);
p                 222 net/sunrpc/auth_gss/gss_krb5_mech.c 	p = simple_get_netobj(p, end, &key);
p                 223 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (IS_ERR(p))
p                 240 net/sunrpc/auth_gss/gss_krb5_mech.c 	return p;
p                 246 net/sunrpc/auth_gss/gss_krb5_mech.c 	p = ERR_PTR(-EINVAL);
p                 248 net/sunrpc/auth_gss/gss_krb5_mech.c 	return p;
p                 252 net/sunrpc/auth_gss/gss_krb5_mech.c gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
p                 257 net/sunrpc/auth_gss/gss_krb5_mech.c 	p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
p                 258 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (IS_ERR(p))
p                 266 net/sunrpc/auth_gss/gss_krb5_mech.c 		p = ERR_PTR(-EINVAL);
p                 274 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (unlikely(p + 20 > end || p + 20 < p)) {
p                 275 net/sunrpc/auth_gss/gss_krb5_mech.c 		p = ERR_PTR(-EFAULT);
p                 278 net/sunrpc/auth_gss/gss_krb5_mech.c 	p += 20;
p                 279 net/sunrpc/auth_gss/gss_krb5_mech.c 	p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
p                 280 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (IS_ERR(p))
p                 283 net/sunrpc/auth_gss/gss_krb5_mech.c 		p = ERR_PTR(-ENOSYS);
p                 286 net/sunrpc/auth_gss/gss_krb5_mech.c 	p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
p                 287 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (IS_ERR(p))
p                 290 net/sunrpc/auth_gss/gss_krb5_mech.c 		p = ERR_PTR(-ENOSYS);
p                 293 net/sunrpc/auth_gss/gss_krb5_mech.c 	p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
p                 294 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (IS_ERR(p))
p                 296 net/sunrpc/auth_gss/gss_krb5_mech.c 	p = simple_get_bytes(p, end, &seq_send, sizeof(seq_send));
p                 297 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (IS_ERR(p))
p                 300 net/sunrpc/auth_gss/gss_krb5_mech.c 	p = simple_get_netobj(p, end, &ctx->mech_used);
p                 301 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (IS_ERR(p))
p                 303 net/sunrpc/auth_gss/gss_krb5_mech.c 	p = get_key(p, end, ctx, &ctx->enc);
p                 304 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (IS_ERR(p))
p                 306 net/sunrpc/auth_gss/gss_krb5_mech.c 	p = get_key(p, end, ctx, &ctx->seq);
p                 307 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (IS_ERR(p))
p                 309 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (p != end) {
p                 310 net/sunrpc/auth_gss/gss_krb5_mech.c 		p = ERR_PTR(-EFAULT);
p                 323 net/sunrpc/auth_gss/gss_krb5_mech.c 	return PTR_ERR(p);
p                 585 net/sunrpc/auth_gss/gss_krb5_mech.c gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
p                 591 net/sunrpc/auth_gss/gss_krb5_mech.c 	p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags));
p                 592 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (IS_ERR(p))
p                 596 net/sunrpc/auth_gss/gss_krb5_mech.c 	p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
p                 597 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (IS_ERR(p))
p                 599 net/sunrpc/auth_gss/gss_krb5_mech.c 	p = simple_get_bytes(p, end, &seq_send64, sizeof(seq_send64));
p                 600 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (IS_ERR(p))
p                 608 net/sunrpc/auth_gss/gss_krb5_mech.c 		p = ERR_PTR(-EINVAL);
p                 611 net/sunrpc/auth_gss/gss_krb5_mech.c 	p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype));
p                 612 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (IS_ERR(p))
p                 621 net/sunrpc/auth_gss/gss_krb5_mech.c 		p = ERR_PTR(-EINVAL);
p                 626 net/sunrpc/auth_gss/gss_krb5_mech.c 	p = simple_get_bytes(p, end, ctx->Ksess, keylen);
p                 627 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (IS_ERR(p))
p                 630 net/sunrpc/auth_gss/gss_krb5_mech.c 	if (p != end) {
p                 631 net/sunrpc/auth_gss/gss_krb5_mech.c 		p = ERR_PTR(-EINVAL);
p                 638 net/sunrpc/auth_gss/gss_krb5_mech.c 		p = ERR_PTR(-ENOMEM);
p                 656 net/sunrpc/auth_gss/gss_krb5_mech.c 	return PTR_ERR(p);
p                 660 net/sunrpc/auth_gss/gss_krb5_mech.c gss_import_sec_context_kerberos(const void *p, size_t len,
p                 665 net/sunrpc/auth_gss/gss_krb5_mech.c 	const void *end = (const void *)((const char *)p + len);
p                 674 net/sunrpc/auth_gss/gss_krb5_mech.c 		ret = gss_import_v1_context(p, end, ctx);
p                 676 net/sunrpc/auth_gss/gss_krb5_mech.c 		ret = gss_import_v2_context(p, end, ctx, gfp_mask);
p                 103 net/sunrpc/auth_gss/gss_krb5_seal.c 	u8 *p, flags = 0x00;
p                 115 net/sunrpc/auth_gss/gss_krb5_seal.c 	p = (u8 *)ptr;
p                 116 net/sunrpc/auth_gss/gss_krb5_seal.c 	*p++ = flags;
p                 117 net/sunrpc/auth_gss/gss_krb5_seal.c 	*p++ = 0xff;
p                 118 net/sunrpc/auth_gss/gss_krb5_seal.c 	ptr = (u16 *)p;
p                  52 net/sunrpc/auth_gss/gss_krb5_wrap.c 	char *p;
p                  59 net/sunrpc/auth_gss/gss_krb5_wrap.c 	p = iov->iov_base + iov->iov_len;
p                  62 net/sunrpc/auth_gss/gss_krb5_wrap.c 	memset(p, padding, padding);
p                 117 net/sunrpc/auth_gss/gss_krb5_wrap.c gss_krb5_make_confounder(char *p, u32 conflen)
p                 120 net/sunrpc/auth_gss/gss_krb5_wrap.c 	u64 *q = (u64 *)p;
p                  13 net/sunrpc/auth_gss/gss_rpc_xdr.c 	__be32 *p;
p                  15 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_reserve_space(xdr, 4);
p                  16 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (unlikely(p == NULL))
p                  18 net/sunrpc/auth_gss/gss_rpc_xdr.c 	*p = v ? xdr_one : xdr_zero;
p                  24 net/sunrpc/auth_gss/gss_rpc_xdr.c 	__be32 *p;
p                  26 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_inline_decode(xdr, 4);
p                  27 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (unlikely(p == NULL))
p                  29 net/sunrpc/auth_gss/gss_rpc_xdr.c 	*v = be32_to_cpu(*p);
p                  36 net/sunrpc/auth_gss/gss_rpc_xdr.c 	__be32 *p;
p                  38 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_reserve_space(xdr, sizeof(u32) + buf->len);
p                  39 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (!p)
p                  41 net/sunrpc/auth_gss/gss_rpc_xdr.c 	xdr_encode_opaque(p, buf->data, buf->len);
p                  48 net/sunrpc/auth_gss/gss_rpc_xdr.c 	__be32 *p;
p                  50 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_reserve_space(xdr, 4);
p                  51 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (!p)
p                  53 net/sunrpc/auth_gss/gss_rpc_xdr.c 	*p = cpu_to_be32(in->page_len);
p                  66 net/sunrpc/auth_gss/gss_rpc_xdr.c 	__be32 *p;
p                  68 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_inline_decode(xdr, 4);
p                  69 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (unlikely(p == NULL))
p                  72 net/sunrpc/auth_gss/gss_rpc_xdr.c 	length = be32_to_cpup(p);
p                  73 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_inline_decode(xdr, length);
p                  74 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (unlikely(p == NULL))
p                  85 net/sunrpc/auth_gss/gss_rpc_xdr.c 		buf->data = kmemdup(p, length, GFP_KERNEL);
p                  89 net/sunrpc/auth_gss/gss_rpc_xdr.c 		memcpy(buf->data, p, length);
p                 122 net/sunrpc/auth_gss/gss_rpc_xdr.c 	__be32 *p;
p                 127 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_reserve_space(xdr, 4);
p                 128 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (!p)
p                 130 net/sunrpc/auth_gss/gss_rpc_xdr.c 	*p = 0;
p                 140 net/sunrpc/auth_gss/gss_rpc_xdr.c 	__be32 *p;
p                 142 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 143 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (unlikely(p == NULL))
p                 145 net/sunrpc/auth_gss/gss_rpc_xdr.c 	count = be32_to_cpup(p++);
p                 158 net/sunrpc/auth_gss/gss_rpc_xdr.c 	__be32 *p;
p                 160 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 161 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (!p)
p                 164 net/sunrpc/auth_gss/gss_rpc_xdr.c 	memcpy(res, p, sizeof(u32));
p                 172 net/sunrpc/auth_gss/gss_rpc_xdr.c 	__be32 *p;
p                 177 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 178 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (unlikely(p == NULL))
p                 181 net/sunrpc/auth_gss/gss_rpc_xdr.c 	length = be32_to_cpup(p);
p                 234 net/sunrpc/auth_gss/gss_rpc_xdr.c 	__be32 *p;
p                 237 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 238 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (unlikely(p == NULL))
p                 240 net/sunrpc/auth_gss/gss_rpc_xdr.c 	count = be32_to_cpup(p++);
p                 267 net/sunrpc/auth_gss/gss_rpc_xdr.c 		p = xdr_inline_decode(xdr, 4);
p                 268 net/sunrpc/auth_gss/gss_rpc_xdr.c 		if (unlikely(p == NULL))
p                 271 net/sunrpc/auth_gss/gss_rpc_xdr.c 		length = be32_to_cpup(p);
p                 272 net/sunrpc/auth_gss/gss_rpc_xdr.c 		p = xdr_inline_decode(xdr, length);
p                 273 net/sunrpc/auth_gss/gss_rpc_xdr.c 		if (unlikely(p == NULL))
p                 277 net/sunrpc/auth_gss/gss_rpc_xdr.c 		    memcmp(p, CREDS_VALUE, sizeof(CREDS_VALUE)) == 0) {
p                 296 net/sunrpc/auth_gss/gss_rpc_xdr.c 	__be32 *p;
p                 300 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_inline_decode(xdr, 8);
p                 301 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (unlikely(p == NULL))
p                 303 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_decode_hyper(p, &status->major_status);
p                 311 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_inline_decode(xdr, 8);
p                 312 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (unlikely(p == NULL))
p                 314 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_decode_hyper(p, &status->minor_status);
p                 342 net/sunrpc/auth_gss/gss_rpc_xdr.c 	__be32 *p;
p                 357 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_reserve_space(xdr, 4);
p                 358 net/sunrpc/auth_gss/gss_rpc_xdr.c 	*p = cpu_to_be32(2);
p                 401 net/sunrpc/auth_gss/gss_rpc_xdr.c 	__be32 *p;
p                 406 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_reserve_space(xdr, 4);
p                 407 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (!p)
p                 409 net/sunrpc/auth_gss/gss_rpc_xdr.c 	*p = 0;
p                 419 net/sunrpc/auth_gss/gss_rpc_xdr.c 	__be32 *p;
p                 421 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_inline_decode(xdr, 4);
p                 422 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (unlikely(p == NULL))
p                 424 net/sunrpc/auth_gss/gss_rpc_xdr.c 	count = be32_to_cpup(p++);
p                 525 net/sunrpc/auth_gss/gss_rpc_xdr.c 	__be32 *p;
p                 530 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_reserve_space(xdr, 4);
p                 531 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (!p)
p                 533 net/sunrpc/auth_gss/gss_rpc_xdr.c 	*p = 0;
p                 567 net/sunrpc/auth_gss/gss_rpc_xdr.c 	__be32 *p;
p                 601 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_reserve_space(xdr, 8+8);
p                 602 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (!p)
p                 604 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_encode_hyper(p, ctx->lifetime);
p                 607 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_encode_hyper(p, ctx->ctx_flags);
p                 630 net/sunrpc/auth_gss/gss_rpc_xdr.c 	__be32 *p;
p                 664 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_inline_decode(xdr, 8+8);
p                 665 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (unlikely(p == NULL))
p                 667 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_decode_hyper(p, &ctx->lifetime);
p                 670 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_decode_hyper(p, &ctx->ctx_flags);
p                 691 net/sunrpc/auth_gss/gss_rpc_xdr.c 	__be32 *p;
p                 695 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_reserve_space(xdr, 8);
p                 696 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (!p)
p                 698 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_encode_hyper(p, cb->initiator_addrtype);
p                 706 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_reserve_space(xdr, 8);
p                 707 net/sunrpc/auth_gss/gss_rpc_xdr.c 	if (!p)
p                 709 net/sunrpc/auth_gss/gss_rpc_xdr.c 	p = xdr_encode_hyper(p, cb->acceptor_addrtype);
p                 661 net/sunrpc/auth_gss/svcauth_gss.c 	u8 *p;
p                 666 net/sunrpc/auth_gss/svcauth_gss.c 	p = resv->iov_base + resv->iov_len;
p                 670 net/sunrpc/auth_gss/svcauth_gss.c 	memcpy(p, o->data, o->len);
p                 671 net/sunrpc/auth_gss/svcauth_gss.c 	memset(p + o->len, 0, round_up_to_quad(o->len) - o->len);
p                 729 net/sunrpc/auth_gss/svcauth_gss.c 	__be32     *p;
p                 732 net/sunrpc/auth_gss/svcauth_gss.c 	p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
p                 734 net/sunrpc/auth_gss/svcauth_gss.c 	*p++ = 0;
p                 735 net/sunrpc/auth_gss/svcauth_gss.c 	if (!xdr_ressize_check(rqstp, p))
p                 747 net/sunrpc/auth_gss/svcauth_gss.c 	__be32			*p;
p                 760 net/sunrpc/auth_gss/svcauth_gss.c 	p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
p                 761 net/sunrpc/auth_gss/svcauth_gss.c 	mic.data = (u8 *)(p + 1);
p                 765 net/sunrpc/auth_gss/svcauth_gss.c 	*p++ = htonl(mic.len);
p                 766 net/sunrpc/auth_gss/svcauth_gss.c 	memset((u8 *)p + mic.len, 0, round_up_to_quad(mic.len) - mic.len);
p                 767 net/sunrpc/auth_gss/svcauth_gss.c 	p += XDR_QUADLEN(mic.len);
p                 768 net/sunrpc/auth_gss/svcauth_gss.c 	if (!xdr_ressize_check(rqstp, p))
p                1412 net/sunrpc/auth_gss/svcauth_gss.c 	unsigned long p = *ppos;
p                1418 net/sunrpc/auth_gss/svcauth_gss.c 	if (p >= len)
p                1420 net/sunrpc/auth_gss/svcauth_gss.c 	len -= p;
p                1423 net/sunrpc/auth_gss/svcauth_gss.c 	if (copy_to_user(buf, (void *)(tbuf+p), len))
p                1438 net/sunrpc/auth_gss/svcauth_gss.c 	struct proc_dir_entry **p = &sn->use_gssp_proc;
p                1441 net/sunrpc/auth_gss/svcauth_gss.c 	*p = proc_create_data("use-gss-proxy", S_IFREG | 0600,
p                1444 net/sunrpc/auth_gss/svcauth_gss.c 	if (!*p)
p                1635 net/sunrpc/auth_gss/svcauth_gss.c 	__be32 *p;
p                1638 net/sunrpc/auth_gss/svcauth_gss.c 	p = gsd->verf_start;
p                1642 net/sunrpc/auth_gss/svcauth_gss.c 	if (*(p-1) != rpc_success)
p                1645 net/sunrpc/auth_gss/svcauth_gss.c 	p += 1;
p                1646 net/sunrpc/auth_gss/svcauth_gss.c 	verf_len = ntohl(*p++);
p                1647 net/sunrpc/auth_gss/svcauth_gss.c 	p += XDR_QUADLEN(verf_len);
p                1649 net/sunrpc/auth_gss/svcauth_gss.c 	memcpy(p, p + 2, 4);
p                1651 net/sunrpc/auth_gss/svcauth_gss.c 	if (*p != rpc_success) {
p                1655 net/sunrpc/auth_gss/svcauth_gss.c 	p++;
p                1656 net/sunrpc/auth_gss/svcauth_gss.c 	return p;
p                1668 net/sunrpc/auth_gss/svcauth_gss.c 	__be32 *p;
p                1672 net/sunrpc/auth_gss/svcauth_gss.c 	p = svcauth_gss_prepare_to_wrap(resbuf, gsd);
p                1673 net/sunrpc/auth_gss/svcauth_gss.c 	if (p == NULL)
p                1675 net/sunrpc/auth_gss/svcauth_gss.c 	integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
p                1678 net/sunrpc/auth_gss/svcauth_gss.c 	*p++ = htonl(integ_len);
p                1679 net/sunrpc/auth_gss/svcauth_gss.c 	*p++ = htonl(gc->gc_seq);
p                1715 net/sunrpc/auth_gss/svcauth_gss.c 	__be32 *p, *len;
p                1719 net/sunrpc/auth_gss/svcauth_gss.c 	p = svcauth_gss_prepare_to_wrap(resbuf, gsd);
p                1720 net/sunrpc/auth_gss/svcauth_gss.c 	if (p == NULL)
p                1722 net/sunrpc/auth_gss/svcauth_gss.c 	len = p++;
p                1723 net/sunrpc/auth_gss/svcauth_gss.c 	offset = (u8 *)p - (u8 *)resbuf->head[0].iov_base;
p                1724 net/sunrpc/auth_gss/svcauth_gss.c 	*p++ = htonl(gc->gc_seq);
p                1766 net/sunrpc/auth_gss/svcauth_gss.c 	p = (__be32 *)(resbuf->tail[0].iov_base + resbuf->tail[0].iov_len);
p                1767 net/sunrpc/auth_gss/svcauth_gss.c 	memset(p, 0, pad);
p                  65 net/sunrpc/auth_null.c 	__be32 *p;
p                  67 net/sunrpc/auth_null.c 	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
p                  68 net/sunrpc/auth_null.c 	if (!p)
p                  71 net/sunrpc/auth_null.c 	*p++ = rpc_auth_null;
p                  72 net/sunrpc/auth_null.c 	*p++ = xdr_zero;
p                  74 net/sunrpc/auth_null.c 	*p++ = rpc_auth_null;
p                  75 net/sunrpc/auth_null.c 	*p   = xdr_zero;
p                  92 net/sunrpc/auth_null.c 	__be32 *p;
p                  94 net/sunrpc/auth_null.c 	p = xdr_inline_decode(xdr, 2 * sizeof(*p));
p                  95 net/sunrpc/auth_null.c 	if (!p)
p                  97 net/sunrpc/auth_null.c 	if (*p++ != rpc_auth_null)
p                  99 net/sunrpc/auth_null.c 	if (*p != xdr_zero)
p                 107 net/sunrpc/auth_unix.c 	__be32		*p, *cred_len, *gidarr_len;
p                 115 net/sunrpc/auth_unix.c 	p = xdr_reserve_space(xdr, 3 * sizeof(*p));
p                 116 net/sunrpc/auth_unix.c 	if (!p)
p                 118 net/sunrpc/auth_unix.c 	*p++ = rpc_auth_unix;
p                 119 net/sunrpc/auth_unix.c 	cred_len = p++;
p                 120 net/sunrpc/auth_unix.c 	*p++ = xdr_zero;	/* stamp */
p                 124 net/sunrpc/auth_unix.c 	p = xdr_reserve_space(xdr, 3 * sizeof(*p));
p                 125 net/sunrpc/auth_unix.c 	if (!p)
p                 127 net/sunrpc/auth_unix.c 	*p++ = cpu_to_be32(from_kuid_munged(userns, cred->cr_cred->fsuid));
p                 128 net/sunrpc/auth_unix.c 	*p++ = cpu_to_be32(from_kgid_munged(userns, cred->cr_cred->fsgid));
p                 130 net/sunrpc/auth_unix.c 	gidarr_len = p++;
p                 133 net/sunrpc/auth_unix.c 			*p++ = cpu_to_be32(from_kgid_munged(userns, gi->gid[i]));
p                 134 net/sunrpc/auth_unix.c 	*gidarr_len = cpu_to_be32(p - gidarr_len - 1);
p                 135 net/sunrpc/auth_unix.c 	*cred_len = cpu_to_be32((p - cred_len - 1) << 2);
p                 136 net/sunrpc/auth_unix.c 	p = xdr_reserve_space(xdr, (p - gidarr_len - 1) << 2);
p                 137 net/sunrpc/auth_unix.c 	if (!p)
p                 142 net/sunrpc/auth_unix.c 	p = xdr_reserve_space(xdr, 2 * sizeof(*p));
p                 143 net/sunrpc/auth_unix.c 	if (!p)
p                 145 net/sunrpc/auth_unix.c 	*p++ = rpc_auth_null;
p                 146 net/sunrpc/auth_unix.c 	*p   = xdr_zero;
p                 168 net/sunrpc/auth_unix.c 	__be32 *p;
p                 171 net/sunrpc/auth_unix.c 	p = xdr_inline_decode(xdr, 2 * sizeof(*p));
p                 172 net/sunrpc/auth_unix.c 	if (!p)
p                 174 net/sunrpc/auth_unix.c 	switch (*p++) {
p                 182 net/sunrpc/auth_unix.c 	size = be32_to_cpup(p);
p                 185 net/sunrpc/auth_unix.c 	p = xdr_inline_decode(xdr, size);
p                 186 net/sunrpc/auth_unix.c 	if (!p)
p                1346 net/sunrpc/cache.c static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
p                1348 net/sunrpc/cache.c 	struct cache_head *ch = p;
p                1352 net/sunrpc/cache.c 	if (p == SEQ_START_TOKEN)
p                1385 net/sunrpc/cache.c void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
p                1387 net/sunrpc/cache.c 	return cache_seq_next(file, p, pos);
p                1391 net/sunrpc/cache.c void cache_seq_stop_rcu(struct seq_file *m, void *p)
p                1398 net/sunrpc/cache.c static int c_show(struct seq_file *m, void *p)
p                1400 net/sunrpc/cache.c 	struct cache_head *cp = p;
p                1403 net/sunrpc/cache.c 	if (p == SEQ_START_TOKEN)
p                1656 net/sunrpc/cache.c 	struct proc_dir_entry *p;
p                1664 net/sunrpc/cache.c 	p = proc_create_data("flush", S_IFREG | 0600,
p                1666 net/sunrpc/cache.c 	if (p == NULL)
p                1670 net/sunrpc/cache.c 		p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
p                1672 net/sunrpc/cache.c 		if (p == NULL)
p                1676 net/sunrpc/cache.c 		p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
p                1678 net/sunrpc/cache.c 		if (p == NULL)
p                2561 net/sunrpc/clnt.c 	__be32 *p;
p                2565 net/sunrpc/clnt.c 	p = xdr_reserve_space(xdr, RPC_CALLHDRSIZE << 2);
p                2566 net/sunrpc/clnt.c 	if (!p)
p                2568 net/sunrpc/clnt.c 	*p++ = req->rq_xid;
p                2569 net/sunrpc/clnt.c 	*p++ = rpc_call;
p                2570 net/sunrpc/clnt.c 	*p++ = cpu_to_be32(RPC_VERSION);
p                2571 net/sunrpc/clnt.c 	*p++ = cpu_to_be32(clnt->cl_prog);
p                2572 net/sunrpc/clnt.c 	*p++ = cpu_to_be32(clnt->cl_vers);
p                2573 net/sunrpc/clnt.c 	*p   = cpu_to_be32(task->tk_msg.rpc_proc->p_proc);
p                2590 net/sunrpc/clnt.c 	__be32 *p;
p                2600 net/sunrpc/clnt.c 	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
p                2601 net/sunrpc/clnt.c 	if (!p)
p                2603 net/sunrpc/clnt.c 	p++;	/* skip XID */
p                2604 net/sunrpc/clnt.c 	if (*p++ != rpc_reply)
p                2606 net/sunrpc/clnt.c 	if (*p++ != rpc_msg_accepted)
p                2613 net/sunrpc/clnt.c 	p = xdr_inline_decode(xdr, sizeof(*p));
p                2614 net/sunrpc/clnt.c 	if (!p)
p                2616 net/sunrpc/clnt.c 	switch (*p) {
p                2662 net/sunrpc/clnt.c 	p = xdr_inline_decode(xdr, sizeof(*p));
p                2663 net/sunrpc/clnt.c 	if (!p)
p                2665 net/sunrpc/clnt.c 	switch (*p++) {
p                2676 net/sunrpc/clnt.c 	p = xdr_inline_decode(xdr, sizeof(*p));
p                2677 net/sunrpc/clnt.c 	if (!p)
p                2679 net/sunrpc/clnt.c 	switch (*p++) {
p                 865 net/sunrpc/rpcb_clnt.c 	__be32 *p;
p                 872 net/sunrpc/rpcb_clnt.c 	p = xdr_reserve_space(xdr, RPCB_mappingargs_sz << 2);
p                 873 net/sunrpc/rpcb_clnt.c 	*p++ = cpu_to_be32(rpcb->r_prog);
p                 874 net/sunrpc/rpcb_clnt.c 	*p++ = cpu_to_be32(rpcb->r_vers);
p                 875 net/sunrpc/rpcb_clnt.c 	*p++ = cpu_to_be32(rpcb->r_prot);
p                 876 net/sunrpc/rpcb_clnt.c 	*p   = cpu_to_be32(rpcb->r_port);
p                 884 net/sunrpc/rpcb_clnt.c 	__be32 *p;
p                 888 net/sunrpc/rpcb_clnt.c 	p = xdr_inline_decode(xdr, 4);
p                 889 net/sunrpc/rpcb_clnt.c 	if (unlikely(p == NULL))
p                 892 net/sunrpc/rpcb_clnt.c 	port = be32_to_cpup(p);
p                 906 net/sunrpc/rpcb_clnt.c 	__be32 *p;
p                 908 net/sunrpc/rpcb_clnt.c 	p = xdr_inline_decode(xdr, 4);
p                 909 net/sunrpc/rpcb_clnt.c 	if (unlikely(p == NULL))
p                 913 net/sunrpc/rpcb_clnt.c 	if (*p != xdr_zero)
p                 926 net/sunrpc/rpcb_clnt.c 	__be32 *p;
p                 934 net/sunrpc/rpcb_clnt.c 	p = xdr_reserve_space(xdr, 4 + len);
p                 935 net/sunrpc/rpcb_clnt.c 	xdr_encode_opaque(p, string, len);
p                 942 net/sunrpc/rpcb_clnt.c 	__be32 *p;
p                 950 net/sunrpc/rpcb_clnt.c 	p = xdr_reserve_space(xdr, (RPCB_program_sz + RPCB_version_sz) << 2);
p                 951 net/sunrpc/rpcb_clnt.c 	*p++ = cpu_to_be32(rpcb->r_prog);
p                 952 net/sunrpc/rpcb_clnt.c 	*p = cpu_to_be32(rpcb->r_vers);
p                 965 net/sunrpc/rpcb_clnt.c 	__be32 *p;
p                 970 net/sunrpc/rpcb_clnt.c 	p = xdr_inline_decode(xdr, 4);
p                 971 net/sunrpc/rpcb_clnt.c 	if (unlikely(p == NULL))
p                 973 net/sunrpc/rpcb_clnt.c 	len = be32_to_cpup(p);
p                 988 net/sunrpc/rpcb_clnt.c 	p = xdr_inline_decode(xdr, len);
p                 989 net/sunrpc/rpcb_clnt.c 	if (unlikely(p == NULL))
p                 992 net/sunrpc/rpcb_clnt.c 			req->rq_task->tk_msg.rpc_proc->p_name, (char *)p);
p                 994 net/sunrpc/rpcb_clnt.c 	if (rpc_uaddr2sockaddr(req->rq_xprt->xprt_net, (char *)p, len,
p                 581 net/sunrpc/svc.c 		struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
p                 582 net/sunrpc/svc.c 		if (!p)
p                 584 net/sunrpc/svc.c 		rqstp->rq_pages[arghi++] = p;
p                1688 net/sunrpc/svc.c 				void *p, size_t total)
p                1709 net/sunrpc/svc.c 		memcpy(dst, p, len);
p                 648 net/sunrpc/svc_xprt.c 			struct page *p = alloc_page(GFP_KERNEL);
p                 649 net/sunrpc/svc_xprt.c 			if (!p) {
p                 657 net/sunrpc/svc_xprt.c 			rqstp->rq_pages[i] = p;
p                1375 net/sunrpc/svc_xprt.c static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
p                1377 net/sunrpc/svc_xprt.c 	struct svc_pool *pool = p;
p                1382 net/sunrpc/svc_xprt.c 	if (p == SEQ_START_TOKEN) {
p                1395 net/sunrpc/svc_xprt.c static void svc_pool_stats_stop(struct seq_file *m, void *p)
p                1399 net/sunrpc/svc_xprt.c static int svc_pool_stats_show(struct seq_file *m, void *p)
p                1401 net/sunrpc/svc_xprt.c 	struct svc_pool *pool = p;
p                1403 net/sunrpc/svc_xprt.c 	if (p == SEQ_START_TOKEN) {
p                 965 net/sunrpc/svcsock.c 	__be32 *p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
p                 969 net/sunrpc/svcsock.c 	xid = *p++;
p                 970 net/sunrpc/svcsock.c 	calldir = *p;
p                1040 net/sunrpc/svcsock.c 	__be32 *p;
p                1102 net/sunrpc/svcsock.c 	p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
p                1103 net/sunrpc/svcsock.c 	calldir = p[1];
p                  81 net/sunrpc/sysctl.c 	char __user *p;
p                  95 net/sunrpc/sysctl.c 		p = buffer;
p                  96 net/sunrpc/sysctl.c 		while (left && __get_user(c, p) >= 0 && isspace(c))
p                  97 net/sunrpc/sysctl.c 			left--, p++;
p                 103 net/sunrpc/sysctl.c 		if (copy_from_user(tmpbuf, p, left))
p                  26 net/sunrpc/xdr.c xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
p                  30 net/sunrpc/xdr.c 	p[quadlen] = 0;		/* zero trailing bytes */
p                  31 net/sunrpc/xdr.c 	*p++ = cpu_to_be32(obj->len);
p                  32 net/sunrpc/xdr.c 	memcpy(p, obj->data, obj->len);
p                  33 net/sunrpc/xdr.c 	return p + XDR_QUADLEN(obj->len);
p                  38 net/sunrpc/xdr.c xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
p                  42 net/sunrpc/xdr.c 	if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
p                  45 net/sunrpc/xdr.c 	obj->data = (u8 *) p;
p                  46 net/sunrpc/xdr.c 	return p + XDR_QUADLEN(len);
p                  64 net/sunrpc/xdr.c __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
p                  71 net/sunrpc/xdr.c 			memcpy(p, ptr, nbytes);
p                  73 net/sunrpc/xdr.c 			memset((char *)p + nbytes, 0, padding);
p                  74 net/sunrpc/xdr.c 		p += quadlen;
p                  76 net/sunrpc/xdr.c 	return p;
p                  88 net/sunrpc/xdr.c __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
p                  90 net/sunrpc/xdr.c 	*p++ = cpu_to_be32(nbytes);
p                  91 net/sunrpc/xdr.c 	return xdr_encode_opaque_fixed(p, ptr, nbytes);
p                  96 net/sunrpc/xdr.c xdr_encode_string(__be32 *p, const char *string)
p                  98 net/sunrpc/xdr.c 	return xdr_encode_array(p, string, strlen(string));
p                 103 net/sunrpc/xdr.c xdr_decode_string_inplace(__be32 *p, char **sp,
p                 108 net/sunrpc/xdr.c 	len = be32_to_cpu(*p++);
p                 112 net/sunrpc/xdr.c 	*sp = (char *) p;
p                 113 net/sunrpc/xdr.c 	return p + XDR_QUADLEN(len);
p                 280 net/sunrpc/xdr.c _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
p                 295 net/sunrpc/xdr.c 		memcpy(vto + pgbase, p, copy);
p                 308 net/sunrpc/xdr.c 		p += copy;
p                 324 net/sunrpc/xdr.c _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
p                 339 net/sunrpc/xdr.c 		memcpy(p, vfrom + pgbase, copy);
p                 347 net/sunrpc/xdr.c 		p += copy;
p                 471 net/sunrpc/xdr.c 			char *p = (char *)tail->iov_base + len;
p                 472 net/sunrpc/xdr.c 			memmove(p, tail->iov_base, tail->iov_len - len);
p                 522 net/sunrpc/xdr.c void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
p                 532 net/sunrpc/xdr.c 	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
p                 536 net/sunrpc/xdr.c 	if (p != xdr->p && p != NULL) {
p                 539 net/sunrpc/xdr.c 		BUG_ON(p < xdr->p || p > xdr->end);
p                 540 net/sunrpc/xdr.c 		len = (char *)p - (char *)xdr->p;
p                 541 net/sunrpc/xdr.c 		xdr->p = p;
p                 571 net/sunrpc/xdr.c 	memmove(page, page + shift, (void *)xdr->p - page);
p                 579 net/sunrpc/xdr.c 	__be32 *p;
p                 587 net/sunrpc/xdr.c 	frag1bytes = (xdr->end - xdr->p) << 2;
p                 602 net/sunrpc/xdr.c 	xdr->scratch.iov_base = xdr->p;
p                 604 net/sunrpc/xdr.c 	p = page_address(*xdr->page_ptr);
p                 609 net/sunrpc/xdr.c 	xdr->p = (void *)p + frag2bytes;
p                 611 net/sunrpc/xdr.c 	xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
p                 614 net/sunrpc/xdr.c 	return p;
p                 631 net/sunrpc/xdr.c 	__be32 *p = xdr->p;
p                 638 net/sunrpc/xdr.c 	q = p + (nbytes >> 2);
p                 639 net/sunrpc/xdr.c 	if (unlikely(q > xdr->end || q < p))
p                 641 net/sunrpc/xdr.c 	xdr->p = q;
p                 647 net/sunrpc/xdr.c 	return p;
p                 688 net/sunrpc/xdr.c 		xdr->p = tail->iov_base + tail->iov_len;
p                 703 net/sunrpc/xdr.c 		xdr->p = page_address(*xdr->page_ptr);
p                 704 net/sunrpc/xdr.c 		xdr->end = (void *)xdr->p + PAGE_SIZE;
p                 705 net/sunrpc/xdr.c 		xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
p                 715 net/sunrpc/xdr.c 	xdr->p = head->iov_base + head->iov_len;
p                 735 net/sunrpc/xdr.c 	int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
p                 766 net/sunrpc/xdr.c 	iov->iov_base = (char *)xdr->p;
p                 773 net/sunrpc/xdr.c 		BUG_ON(xdr->p >= xdr->end);
p                 774 net/sunrpc/xdr.c 		iov->iov_base = (char *)xdr->p + (len & 3);
p                 777 net/sunrpc/xdr.c 		*xdr->p++ = 0;
p                 789 net/sunrpc/xdr.c 	xdr->p = (__be32*)iov->iov_base;
p                 818 net/sunrpc/xdr.c 	xdr->p = (__be32*)(kaddr + pgoff);
p                 847 net/sunrpc/xdr.c 	return xdr->p != xdr->end;
p                 857 net/sunrpc/xdr.c void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
p                 870 net/sunrpc/xdr.c 	if (p != NULL && p > xdr->p && xdr->end >= p) {
p                 871 net/sunrpc/xdr.c 		xdr->nwords -= p - xdr->p;
p                 872 net/sunrpc/xdr.c 		xdr->p = p;
p                 900 net/sunrpc/xdr.c 	__be32 *p = xdr->p;
p                 901 net/sunrpc/xdr.c 	__be32 *q = p + nwords;
p                 903 net/sunrpc/xdr.c 	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
p                 905 net/sunrpc/xdr.c 	xdr->p = q;
p                 907 net/sunrpc/xdr.c 	return p;
p                 930 net/sunrpc/xdr.c 	__be32 *p;
p                 932 net/sunrpc/xdr.c 	size_t cplen = (char *)xdr->end - (char *)xdr->p;
p                 936 net/sunrpc/xdr.c 	p = __xdr_inline_decode(xdr, cplen);
p                 937 net/sunrpc/xdr.c 	if (p == NULL)
p                 939 net/sunrpc/xdr.c 	memcpy(cpdest, p, cplen);
p                 944 net/sunrpc/xdr.c 	p = __xdr_inline_decode(xdr, nbytes);
p                 945 net/sunrpc/xdr.c 	if (p == NULL)
p                 947 net/sunrpc/xdr.c 	memcpy(cpdest, p, nbytes);
p                 966 net/sunrpc/xdr.c 	__be32 *p;
p                 969 net/sunrpc/xdr.c 		return xdr->p;
p                 970 net/sunrpc/xdr.c 	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
p                 972 net/sunrpc/xdr.c 	p = __xdr_inline_decode(xdr, nbytes);
p                 973 net/sunrpc/xdr.c 	if (p != NULL)
p                 974 net/sunrpc/xdr.c 		return p;
p                1052 net/sunrpc/xdr.c 	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
p                1633 net/sunrpc/xdr.c 	void *p;
p                1635 net/sunrpc/xdr.c 	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
p                1638 net/sunrpc/xdr.c 	memcpy(ptr, p, ret);
p                1660 net/sunrpc/xdr.c 	void *p;
p                1662 net/sunrpc/xdr.c 	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
p                1664 net/sunrpc/xdr.c 		*ptr = kmemdup(p, ret, gfp_flags);
p                1688 net/sunrpc/xdr.c 	void *p;
p                1690 net/sunrpc/xdr.c 	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
p                1692 net/sunrpc/xdr.c 		memcpy(str, p, ret);
p                1717 net/sunrpc/xdr.c 	void *p;
p                1720 net/sunrpc/xdr.c 	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
p                1724 net/sunrpc/xdr.c 			memcpy(s, p, ret);
p                 928 net/sunrpc/xprt.c 	struct rb_node **p = &xprt->recv_queue.rb_node;
p                 932 net/sunrpc/xprt.c 	while (*p != NULL) {
p                 933 net/sunrpc/xprt.c 		n = *p;
p                 937 net/sunrpc/xprt.c 			p = &n->rb_left;
p                 940 net/sunrpc/xprt.c 			p = &n->rb_right;
p                 947 net/sunrpc/xprt.c 	rb_link_node(&new->rq_recv, n, p);
p                  64 net/sunrpc/xprtrdma/backchannel.c 	__be32 *p;
p                  70 net/sunrpc/xprtrdma/backchannel.c 	p = xdr_reserve_space(&req->rl_stream, 28);
p                  71 net/sunrpc/xprtrdma/backchannel.c 	if (unlikely(!p))
p                  73 net/sunrpc/xprtrdma/backchannel.c 	*p++ = rqst->rq_xid;
p                  74 net/sunrpc/xprtrdma/backchannel.c 	*p++ = rpcrdma_version;
p                  75 net/sunrpc/xprtrdma/backchannel.c 	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
p                  76 net/sunrpc/xprtrdma/backchannel.c 	*p++ = rdma_msg;
p                  77 net/sunrpc/xprtrdma/backchannel.c 	*p++ = xdr_zero;
p                  78 net/sunrpc/xprtrdma/backchannel.c 	*p++ = xdr_zero;
p                  79 net/sunrpc/xprtrdma/backchannel.c 	*p = xdr_zero;
p                 226 net/sunrpc/xprtrdma/backchannel.c 	__be32 *p;
p                 228 net/sunrpc/xprtrdma/backchannel.c 	p = xdr_inline_decode(&rep->rr_stream, 0);
p                 233 net/sunrpc/xprtrdma/backchannel.c 		__func__, be32_to_cpup(p), size);
p                 234 net/sunrpc/xprtrdma/backchannel.c 	pr_info("RPC:       %s: %*ph\n", __func__, size, p);
p                 242 net/sunrpc/xprtrdma/backchannel.c 	rqst->rq_xid = *p;
p                 248 net/sunrpc/xprtrdma/backchannel.c 	buf->head[0].iov_base = p;
p                 285 net/sunrpc/xprtrdma/rpc_rdma.c 	__be32 *p;
p                 287 net/sunrpc/xprtrdma/rpc_rdma.c 	p = xdr_reserve_space(xdr, sizeof(*p));
p                 288 net/sunrpc/xprtrdma/rpc_rdma.c 	if (unlikely(!p))
p                 291 net/sunrpc/xprtrdma/rpc_rdma.c 	*p = xdr_one;
p                 298 net/sunrpc/xprtrdma/rpc_rdma.c 	__be32 *p;
p                 300 net/sunrpc/xprtrdma/rpc_rdma.c 	p = xdr_reserve_space(xdr, sizeof(*p));
p                 301 net/sunrpc/xprtrdma/rpc_rdma.c 	if (unlikely(!p))
p                 304 net/sunrpc/xprtrdma/rpc_rdma.c 	*p = xdr_zero;
p                 319 net/sunrpc/xprtrdma/rpc_rdma.c 	__be32 *p;
p                 321 net/sunrpc/xprtrdma/rpc_rdma.c 	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
p                 322 net/sunrpc/xprtrdma/rpc_rdma.c 	if (unlikely(!p))
p                 325 net/sunrpc/xprtrdma/rpc_rdma.c 	xdr_encode_rdma_segment(p, mr);
p                 333 net/sunrpc/xprtrdma/rpc_rdma.c 	__be32 *p;
p                 335 net/sunrpc/xprtrdma/rpc_rdma.c 	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
p                 336 net/sunrpc/xprtrdma/rpc_rdma.c 	if (unlikely(!p))
p                 339 net/sunrpc/xprtrdma/rpc_rdma.c 	*p++ = xdr_one;			/* Item present */
p                 340 net/sunrpc/xprtrdma/rpc_rdma.c 	*p++ = cpu_to_be32(position);
p                 341 net/sunrpc/xprtrdma/rpc_rdma.c 	xdr_encode_rdma_segment(p, mr);
p                 800 net/sunrpc/xprtrdma/rpc_rdma.c 	__be32 *p;
p                 809 net/sunrpc/xprtrdma/rpc_rdma.c 	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
p                 810 net/sunrpc/xprtrdma/rpc_rdma.c 	if (!p)
p                 812 net/sunrpc/xprtrdma/rpc_rdma.c 	*p++ = rqst->rq_xid;
p                 813 net/sunrpc/xprtrdma/rpc_rdma.c 	*p++ = rpcrdma_version;
p                 814 net/sunrpc/xprtrdma/rpc_rdma.c 	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
p                 855 net/sunrpc/xprtrdma/rpc_rdma.c 		*p++ = rdma_msg;
p                 858 net/sunrpc/xprtrdma/rpc_rdma.c 		*p++ = rdma_msg;
p                 862 net/sunrpc/xprtrdma/rpc_rdma.c 		*p++ = rdma_nomsg;
p                1024 net/sunrpc/xprtrdma/rpc_rdma.c 	__be32 *p;
p                1030 net/sunrpc/xprtrdma/rpc_rdma.c 	p = xdr_inline_decode(xdr, 0);
p                1033 net/sunrpc/xprtrdma/rpc_rdma.c 	if (*p++ != xdr_zero)
p                1035 net/sunrpc/xprtrdma/rpc_rdma.c 	if (*p++ != xdr_zero)
p                1037 net/sunrpc/xprtrdma/rpc_rdma.c 	if (*p++ != xdr_zero)
p                1041 net/sunrpc/xprtrdma/rpc_rdma.c 	if (*p++ != rep->rr_xid)
p                1043 net/sunrpc/xprtrdma/rpc_rdma.c 	if (*p != cpu_to_be32(RPC_CALL))
p                1049 net/sunrpc/xprtrdma/rpc_rdma.c 	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
p                1050 net/sunrpc/xprtrdma/rpc_rdma.c 	if (unlikely(!p))
p                1070 net/sunrpc/xprtrdma/rpc_rdma.c 	__be32 *p;
p                1072 net/sunrpc/xprtrdma/rpc_rdma.c 	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
p                1073 net/sunrpc/xprtrdma/rpc_rdma.c 	if (unlikely(!p))
p                1076 net/sunrpc/xprtrdma/rpc_rdma.c 	handle = be32_to_cpup(p++);
p                1077 net/sunrpc/xprtrdma/rpc_rdma.c 	*length = be32_to_cpup(p++);
p                1078 net/sunrpc/xprtrdma/rpc_rdma.c 	xdr_decode_hyper(p, &offset);
p                1087 net/sunrpc/xprtrdma/rpc_rdma.c 	__be32 *p;
p                1089 net/sunrpc/xprtrdma/rpc_rdma.c 	p = xdr_inline_decode(xdr, sizeof(*p));
p                1090 net/sunrpc/xprtrdma/rpc_rdma.c 	if (unlikely(!p))
p                1094 net/sunrpc/xprtrdma/rpc_rdma.c 	segcount = be32_to_cpup(p);
p                1110 net/sunrpc/xprtrdma/rpc_rdma.c 	__be32 *p;
p                1112 net/sunrpc/xprtrdma/rpc_rdma.c 	p = xdr_inline_decode(xdr, sizeof(*p));
p                1113 net/sunrpc/xprtrdma/rpc_rdma.c 	if (unlikely(!p))
p                1115 net/sunrpc/xprtrdma/rpc_rdma.c 	if (unlikely(*p != xdr_zero))
p                1126 net/sunrpc/xprtrdma/rpc_rdma.c 	__be32 *p;
p                1131 net/sunrpc/xprtrdma/rpc_rdma.c 		p = xdr_inline_decode(xdr, sizeof(*p));
p                1132 net/sunrpc/xprtrdma/rpc_rdma.c 		if (unlikely(!p))
p                1134 net/sunrpc/xprtrdma/rpc_rdma.c 		if (*p == xdr_zero)
p                1149 net/sunrpc/xprtrdma/rpc_rdma.c 	__be32 *p;
p                1151 net/sunrpc/xprtrdma/rpc_rdma.c 	p = xdr_inline_decode(xdr, sizeof(*p));
p                1152 net/sunrpc/xprtrdma/rpc_rdma.c 	if (unlikely(!p))
p                1156 net/sunrpc/xprtrdma/rpc_rdma.c 	if (*p != xdr_zero)
p                1222 net/sunrpc/xprtrdma/rpc_rdma.c 	__be32 *p;
p                1224 net/sunrpc/xprtrdma/rpc_rdma.c 	p = xdr_inline_decode(xdr, sizeof(*p));
p                1225 net/sunrpc/xprtrdma/rpc_rdma.c 	if (unlikely(!p))
p                1228 net/sunrpc/xprtrdma/rpc_rdma.c 	switch (*p) {
p                1230 net/sunrpc/xprtrdma/rpc_rdma.c 		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
p                1231 net/sunrpc/xprtrdma/rpc_rdma.c 		if (!p)
p                1235 net/sunrpc/xprtrdma/rpc_rdma.c 			be32_to_cpup(p), be32_to_cpu(*(p + 1)),
p                1246 net/sunrpc/xprtrdma/rpc_rdma.c 			be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
p                1320 net/sunrpc/xprtrdma/rpc_rdma.c 	__be32 *p;
p                1331 net/sunrpc/xprtrdma/rpc_rdma.c 	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
p                1332 net/sunrpc/xprtrdma/rpc_rdma.c 	if (unlikely(!p))
p                1334 net/sunrpc/xprtrdma/rpc_rdma.c 	rep->rr_xid = *p++;
p                1335 net/sunrpc/xprtrdma/rpc_rdma.c 	rep->rr_vers = *p++;
p                1336 net/sunrpc/xprtrdma/rpc_rdma.c 	credits = be32_to_cpu(*p++);
p                1337 net/sunrpc/xprtrdma/rpc_rdma.c 	rep->rr_proc = *p++;
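
The final rpc_rdma.c cluster pulls the four fixed transport-header words off the receive stream: xid and version are kept in wire order for later matching, while the credit count is converted to host order for arithmetic. A minimal sketch with an invented result struct:

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <linux/sunrpc/xdr.h>

	struct demo_rdma_hdr {		/* illustrative only */
		__be32	xid;
		__be32	vers;
		u32	credits;
		__be32	proc;
	};

	/* Decode the four fixed RPC-over-RDMA header words. */
	static int demo_decode_rdma_header(struct xdr_stream *xdr,
					   struct demo_rdma_hdr *hdr)
	{
		__be32 *p;

		p = xdr_inline_decode(xdr, 4 * sizeof(*p));
		if (!p)
			return -EBADMSG;
		hdr->xid     = *p++;	/* wire order, compared against rq_xid */
		hdr->vers    = *p++;
		hdr->credits = be32_to_cpu(*p++);
		hdr->proc    = *p;
		return 0;
	}
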
p                  36 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	__be32 *p;
p                  39 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	p = (__be32 *)src->iov_base;
p                  49 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 		__func__, (int)len, p);
p                  65 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	memcpy(dst->iov_base, p, len);
p                 177 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	__be32 *p;
p                 184 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	p = ctxt->sc_xprt_buf;
p                 185 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	*p++ = rqst->rq_xid;
p                 186 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	*p++ = rpcrdma_version;
p                 187 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_max_requests);
p                 188 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	*p++ = rdma_msg;
p                 189 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	*p++ = xdr_zero;
p                 190 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	*p++ = xdr_zero;
p                 191 net/sunrpc/xprtrdma/svc_rdma_backchannel.c 	*p   = xdr_zero;
p                 406 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c static __be32 *xdr_check_read_list(__be32 *p, const __be32 *end)
p                 412 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	while (*p++ != xdr_zero) {
p                 414 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 			position = be32_to_cpup(p++);
p                 416 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		} else if (be32_to_cpup(p++) != position) {
p                 419 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		p++;	/* handle */
p                 420 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		if (be32_to_cpup(p++) > MAX_BYTES_SPECIAL_SEG)
p                 422 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		p += 2;	/* offset */
p                 424 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		if (p > end)
p                 427 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	return p;
p                 435 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c static __be32 *xdr_check_write_chunk(__be32 *p, const __be32 *end,
p                 440 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	segcount = be32_to_cpup(p++);
p                 442 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		p++;	/* handle */
p                 443 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		if (be32_to_cpup(p++) > maxlen)
p                 445 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		p += 2;	/* offset */
p                 447 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		if (p > end)
p                 451 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	return p;
p                 465 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c static __be32 *xdr_check_write_list(__be32 *p, const __be32 *end)
p                 470 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	while (*p++ != xdr_zero) {
p                 471 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		p = xdr_check_write_chunk(p, end, MAX_BYTES_WRITE_SEG);
p                 472 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		if (!p)
p                 477 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	return p;
p                 488 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c static __be32 *xdr_check_reply_chunk(__be32 *p, const __be32 *end)
p                 490 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	if (*p++ != xdr_zero) {
p                 491 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		p = xdr_check_write_chunk(p, end, MAX_BYTES_SPECIAL_SEG);
p                 492 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		if (!p)
p                 495 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	return p;
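
The xdr_check_*() entries above walk chunk lists purely as sanity checks: read a count or discriminator word, step over fixed-size segments, and make sure the cursor never runs past the end of the received header. A generic sketch of that bounded walk, with a made-up segment width:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	#define DEMO_SEG_WORDS	4	/* handle, length, 64-bit offset (illustrative) */

	/*
	 * Walk a counted list of fixed-size segments and return the position
	 * just past it, or NULL if the list would run beyond @end.
	 */
	static __be32 *demo_check_segments(__be32 *p, const __be32 *end)
	{
		u32 segcount = be32_to_cpup(p++);

		while (segcount--) {
			p += DEMO_SEG_WORDS;
			if (p > end)
				return NULL;
		}
		return p;
	}
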
p                 511 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	__be32 inv_rkey, *p;
p                 520 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	p = ctxt->rc_recv_buf;
p                 521 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	p += rpcrdma_fixed_maxsz;
p                 524 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	while (*p++ != xdr_zero) {
p                 525 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		p++;	/* position */
p                 527 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 			inv_rkey = *p;
p                 528 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		else if (inv_rkey != *p)
p                 530 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		p += 4;
p                 534 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	while (*p++ != xdr_zero) {
p                 535 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		segcount = be32_to_cpup(p++);
p                 538 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 				inv_rkey = *p;
p                 539 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 			else if (inv_rkey != *p)
p                 541 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 			p += 4;
p                 546 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	if (*p++ != xdr_zero) {
p                 547 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		segcount = be32_to_cpup(p++);
p                 550 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 				inv_rkey = *p;
p                 551 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 			else if (inv_rkey != *p)
p                 553 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 			p += 4;
p                 572 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	__be32 *p, *end, *rdma_argp;
p                 600 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	p = xdr_check_read_list(rdma_argp + 4, end);
p                 601 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	if (!p)
p                 603 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	p = xdr_check_write_list(p, end);
p                 604 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	if (!p)
p                 606 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	p = xdr_check_reply_chunk(p, end);
p                 607 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	if (!p)
p                 609 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	if (p > end)
p                 612 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	rq_arg->head[0].iov_base = p;
p                 613 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	hdr_len = (unsigned long)p - (unsigned long)rdma_argp;
p                 674 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	__be32 *p;
p                 681 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	p = ctxt->sc_xprt_buf;
p                 682 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	*p++ = *rdma_argp;
p                 683 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	*p++ = *(rdma_argp + 1);
p                 684 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	*p++ = xprt->sc_fc_credits;
p                 685 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	*p++ = rdma_error;
p                 688 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		*p++ = err_vers;
p                 689 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		*p++ = rpcrdma_version;
p                 690 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		*p++ = rpcrdma_version;
p                 694 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		*p++ = err_chunk;
p                 697 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	length = (unsigned long)p - (unsigned long)ctxt->sc_xprt_buf;
p                 714 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	__be32 *p;
p                 719 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	p = rdma_resp + 3;
p                 720 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	if (*p++ != rdma_msg)
p                 723 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	if (*p++ != xdr_zero)
p                 725 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	if (*p++ != xdr_zero)
p                 727 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	if (*p++ != xdr_zero)
p                 731 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	if (*p++ != *rdma_resp)
p                 734 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	if (*p == cpu_to_be32(RPC_CALL))
p                 776 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	__be32 *p;
p                 809 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
p                 817 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	if (svc_rdma_is_backchannel_reply(xprt, p)) {
p                 818 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
p                 825 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	p += rpcrdma_fixed_maxsz;
p                 826 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	if (*p != xdr_zero)
p                 836 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	ret = svc_rdma_recv_read_chunk(rdma_xprt, rqstp, ctxt, p);
p                 842 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	svc_rdma_send_error(rdma_xprt, p, ret);
p                 848 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 		svc_rdma_send_error(rdma_xprt, p, ret);
p                 672 net/sunrpc/xprtrdma/svc_rdma_rw.c 				     __be32 *p)
p                 679 net/sunrpc/xprtrdma/svc_rdma_rw.c 	while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) {
p                 683 net/sunrpc/xprtrdma/svc_rdma_rw.c 		rs_handle = be32_to_cpup(p++);
p                 684 net/sunrpc/xprtrdma/svc_rdma_rw.c 		rs_length = be32_to_cpup(p++);
p                 685 net/sunrpc/xprtrdma/svc_rdma_rw.c 		p = xdr_decode_hyper(p, &rs_offset);
p                 715 net/sunrpc/xprtrdma/svc_rdma_rw.c 					    __be32 *p)
p                 720 net/sunrpc/xprtrdma/svc_rdma_rw.c 	ret = svc_rdma_build_read_chunk(rqstp, info, p);
p                 771 net/sunrpc/xprtrdma/svc_rdma_rw.c 					__be32 *p)
p                 776 net/sunrpc/xprtrdma/svc_rdma_rw.c 	ret = svc_rdma_build_read_chunk(rqstp, info, p);
p                 815 net/sunrpc/xprtrdma/svc_rdma_rw.c 			     struct svc_rdma_recv_ctxt *head, __be32 *p)
p                 839 net/sunrpc/xprtrdma/svc_rdma_rw.c 	info->ri_position = be32_to_cpup(p + 1);
p                 841 net/sunrpc/xprtrdma/svc_rdma_rw.c 		ret = svc_rdma_build_normal_read_chunk(rqstp, info, p);
p                 843 net/sunrpc/xprtrdma/svc_rdma_rw.c 		ret = svc_rdma_build_pz_read_chunk(rqstp, info, p);
p                 333 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	__be32 *p;
p                 335 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	p = rdma_resp;
p                 338 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	p += rpcrdma_fixed_maxsz + 1;
p                 341 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	while (*p++ != xdr_zero) {
p                 342 net/sunrpc/xprtrdma/svc_rdma_sendto.c 		nsegs = be32_to_cpup(p++);
p                 343 net/sunrpc/xprtrdma/svc_rdma_sendto.c 		p += nsegs * rpcrdma_segment_maxsz;
p                 347 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	if (*p++ != xdr_zero) {
p                 348 net/sunrpc/xprtrdma/svc_rdma_sendto.c 		nsegs = be32_to_cpup(p++);
p                 349 net/sunrpc/xprtrdma/svc_rdma_sendto.c 		p += nsegs * rpcrdma_segment_maxsz;
p                 352 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	return (unsigned long)p - (unsigned long)rdma_resp;
p                 411 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	__be32 *p, *q;
p                 414 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	p = rdma_resp + rpcrdma_fixed_maxsz + 1;
p                 418 net/sunrpc/xprtrdma/svc_rdma_sendto.c 		nsegs = xdr_encode_write_chunk(p, q, consumed);
p                 420 net/sunrpc/xprtrdma/svc_rdma_sendto.c 		p += 2 + nsegs * rpcrdma_segment_maxsz;
p                 425 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	*p++ = xdr_zero;
p                 428 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	*p = xdr_zero;
p                 441 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	__be32 *p;
p                 446 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	p = rdma_resp + rpcrdma_fixed_maxsz + 1;
p                 449 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	while (*p++ != xdr_zero)
p                 450 net/sunrpc/xprtrdma/svc_rdma_sendto.c 		p += 1 + be32_to_cpup(p) * rpcrdma_segment_maxsz;
p                 452 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	xdr_encode_write_chunk(p, rp_ch, consumed);
p                 460 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	__be32 *p;
p                 462 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	p = rdma_argp + rpcrdma_fixed_maxsz;
p                 465 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	while (*p++ != xdr_zero)
p                 466 net/sunrpc/xprtrdma/svc_rdma_sendto.c 		p += 5;
p                 469 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	if (*p != xdr_zero) {
p                 470 net/sunrpc/xprtrdma/svc_rdma_sendto.c 		*write = p;
p                 471 net/sunrpc/xprtrdma/svc_rdma_sendto.c 		while (*p++ != xdr_zero)
p                 472 net/sunrpc/xprtrdma/svc_rdma_sendto.c 			p += 1 + be32_to_cpu(*p) * 4;
p                 475 net/sunrpc/xprtrdma/svc_rdma_sendto.c 		p++;
p                 479 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	if (*p != xdr_zero)
p                 480 net/sunrpc/xprtrdma/svc_rdma_sendto.c 		*reply = p;
p                 786 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	__be32 *p;
p                 789 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	p = ctxt->sc_xprt_buf;
p                 790 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	trace_svcrdma_err_chunk(*p);
p                 791 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	p += 3;
p                 792 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	*p++ = rdma_error;
p                 793 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	*p   = err_chunk;
p                 826 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	__be32 *p, *rdma_argp, *rdma_resp, *wr_lst, *rp_ch;
p                 846 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	p = rdma_resp;
p                 847 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	*p++ = *rdma_argp;
p                 848 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	*p++ = *(rdma_argp + 1);
p                 849 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	*p++ = rdma->sc_fc_credits;
p                 850 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	*p++ = rp_ch ? rdma_nomsg : rdma_msg;
p                 853 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	*p++ = xdr_zero;
p                 854 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	*p++ = xdr_zero;
p                 855 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	*p   = xdr_zero;
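
The svc_rdma_sendto.c entries re-walk the header at send time, hopping over each Write chunk by reading its segment count and advancing a fixed number of words per segment. A sketch of that skip loop with illustrative word counts standing in for the protocol's maxsz constants:

	#include <linux/types.h>
	#include <linux/sunrpc/xdr.h>

	#define DEMO_FIXED_WORDS	4	/* xid, vers, credits, proc (illustrative) */
	#define DEMO_SEG_WORDS		4	/* handle, length, 64-bit offset */

	/* Return a pointer just past the Write list in an already-built header. */
	static __be32 *demo_skip_write_list(__be32 *rdma_resp)
	{
		__be32 *p = rdma_resp + DEMO_FIXED_WORDS + 1;	/* +1 skips the empty Read list */

		while (*p++ != xdr_zero)			/* chunk-present discriminator */
			p += 1 + be32_to_cpup(p) * DEMO_SEG_WORDS;
		return p;
	}
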
p                3297 net/sunrpc/xprtsock.c #define param_check_portnr(name, p) \
p                3298 net/sunrpc/xprtsock.c 	__param_check(name, p, unsigned int);
p                3316 net/sunrpc/xprtsock.c #define param_check_slot_table_size(name, p) \
p                3317 net/sunrpc/xprtsock.c 	__param_check(name, p, unsigned int);
p                3332 net/sunrpc/xprtsock.c #define param_check_max_slot_table_size(name, p) \
p                3333 net/sunrpc/xprtsock.c 	__param_check(name, p, unsigned int);
p                 360 net/tipc/monitor.c 	struct tipc_peer *cur, *prev, *p;
p                 362 net/tipc/monitor.c 	p = kzalloc(sizeof(*p), GFP_ATOMIC);
p                 363 net/tipc/monitor.c 	*peer = p;
p                 364 net/tipc/monitor.c 	if (!p)
p                 366 net/tipc/monitor.c 	p->addr = addr;
p                 369 net/tipc/monitor.c 	INIT_LIST_HEAD(&p->list);
p                 370 net/tipc/monitor.c 	hlist_add_head(&p->hash, &mon->peers[tipc_hashfn(addr)]);
p                 382 net/tipc/monitor.c 	list_add_tail(&p->list, &cur->list);
p                 384 net/tipc/monitor.c 	mon_update_neighbors(mon, p);
p                 495 net/tipc/msg.h static inline void msg_set_origport(struct tipc_msg *m, u32 p)
p                 497 net/tipc/msg.h 	msg_set_word(m, 4, p);
p                 505 net/tipc/msg.h static inline void msg_set_destport(struct tipc_msg *m, u32 p)
p                 507 net/tipc/msg.h 	msg_set_word(m, 5, p);
p                 515 net/tipc/msg.h static inline void msg_set_mc_netid(struct tipc_msg *m, u32 p)
p                 517 net/tipc/msg.h 	msg_set_word(m, 5, p);
p                  54 net/tipc/name_distr.c static void publ_to_item(struct distr_item *i, struct publication *p)
p                  56 net/tipc/name_distr.c 	i->type = htonl(p->type);
p                  57 net/tipc/name_distr.c 	i->lower = htonl(p->lower);
p                  58 net/tipc/name_distr.c 	i->upper = htonl(p->upper);
p                  59 net/tipc/name_distr.c 	i->port = htonl(p->port);
p                  60 net/tipc/name_distr.c 	i->key = htonl(p->key);
p                 210 net/tipc/name_distr.c 	struct publication *p;
p                 213 net/tipc/name_distr.c 	p = tipc_nametbl_remove_publ(net, publ->type, publ->lower, publ->upper,
p                 215 net/tipc/name_distr.c 	if (p)
p                 216 net/tipc/name_distr.c 		tipc_node_unsubscribe(net, &p->binding_node, addr);
p                 219 net/tipc/name_distr.c 	if (p != publ) {
p                 226 net/tipc/name_distr.c 	if (p)
p                 227 net/tipc/name_distr.c 		kfree_rcu(p, rcu);
p                 267 net/tipc/name_distr.c 	struct publication *p = NULL;
p                 275 net/tipc/name_distr.c 		p = tipc_nametbl_insert_publ(net, type, lower, upper,
p                 278 net/tipc/name_distr.c 		if (p) {
p                 279 net/tipc/name_distr.c 			tipc_node_subscribe(net, &p->binding_node, node);
p                 283 net/tipc/name_distr.c 		p = tipc_nametbl_remove_publ(net, type, lower,
p                 285 net/tipc/name_distr.c 		if (p) {
p                 286 net/tipc/name_distr.c 			tipc_node_unsubscribe(net, &p->binding_node, node);
p                 287 net/tipc/name_distr.c 			kfree_rcu(p, rcu);
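
The net/tipc/name_distr.c entries free publications with kfree_rcu() after unlinking them, so readers traversing the name tables under rcu_read_lock() never see freed memory. A minimal sketch of that unlink-then-defer pattern, assuming a made-up object with an embedded rcu_head:

	#include <linux/rculist.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct demo_publ {		/* illustrative */
		struct list_head	list;
		struct rcu_head		rcu;
	};

	/* Unlink under the writer's lock, then defer the free past a grace period. */
	static void demo_remove_publ(struct demo_publ *p)
	{
		list_del_rcu(&p->list);
		kfree_rcu(p, rcu);
	}
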
p                 228 net/tipc/name_table.c 	struct publication *p;
p                 238 net/tipc/name_table.c 	list_for_each_entry(p, &sr->all_publ, all_publ) {
p                 239 net/tipc/name_table.c 		if (p->key == key && (!p->node || p->node == node))
p                 244 net/tipc/name_table.c 	p = tipc_publ_create(type, lower, upper, scope, node, port, key);
p                 245 net/tipc/name_table.c 	if (!p)
p                 248 net/tipc/name_table.c 		list_add(&p->local_publ, &sr->local_publ);
p                 249 net/tipc/name_table.c 	list_add(&p->all_publ, &sr->all_publ);
p                 253 net/tipc/name_table.c 		tipc_sub_report_overlap(sub, p->lower, p->upper, TIPC_PUBLISHED,
p                 254 net/tipc/name_table.c 					p->port, p->node, p->scope, first);
p                 256 net/tipc/name_table.c 	return p;
p                 268 net/tipc/name_table.c 	struct publication *p;
p                 270 net/tipc/name_table.c 	list_for_each_entry(p, &sr->all_publ, all_publ) {
p                 271 net/tipc/name_table.c 		if (p->key != key || (node && node != p->node))
p                 273 net/tipc/name_table.c 		list_del(&p->all_publ);
p                 274 net/tipc/name_table.c 		list_del(&p->local_publ);
p                 275 net/tipc/name_table.c 		return p;
p                 291 net/tipc/name_table.c 	struct publication *p;
p                 313 net/tipc/name_table.c 		list_for_each_entry(p, &sr->all_publ, all_publ) {
p                 315 net/tipc/name_table.c 						TIPC_PUBLISHED,	p->port,
p                 316 net/tipc/name_table.c 						p->node, p->scope, first);
p                 343 net/tipc/name_table.c 	struct publication *p;
p                 357 net/tipc/name_table.c 	p = tipc_service_insert_publ(net, sc, type, lower, upper,
p                 360 net/tipc/name_table.c 	return p;
p                 370 net/tipc/name_table.c 	struct publication *p = NULL;
p                 380 net/tipc/name_table.c 	p = tipc_service_remove_publ(sr, node, key);
p                 381 net/tipc/name_table.c 	if (!p)
p                 388 net/tipc/name_table.c 					p->port, node, p->scope, last);
p                 404 net/tipc/name_table.c 	return p;
p                 431 net/tipc/name_table.c 	struct publication *p;
p                 453 net/tipc/name_table.c 		p = list_first_entry(list, struct publication, local_publ);
p                 454 net/tipc/name_table.c 		list_move_tail(&p->local_publ, &sr->local_publ);
p                 457 net/tipc/name_table.c 		p = list_first_entry(list, struct publication, local_publ);
p                 458 net/tipc/name_table.c 		list_move_tail(&p->local_publ, &sr->local_publ);
p                 461 net/tipc/name_table.c 		p = list_first_entry(list, struct publication, all_publ);
p                 462 net/tipc/name_table.c 		list_move_tail(&p->all_publ, &sr->all_publ);
p                 464 net/tipc/name_table.c 	port = p->port;
p                 465 net/tipc/name_table.c 	node = p->node;
p                 481 net/tipc/name_table.c 	struct publication *p;
p                 495 net/tipc/name_table.c 	list_for_each_entry(p, &sr->all_publ, all_publ) {
p                 496 net/tipc/name_table.c 		if (p->scope != scope)
p                 498 net/tipc/name_table.c 		if (p->port == exclude && p->node == self)
p                 500 net/tipc/name_table.c 		tipc_dest_push(dsts, p->node, p->port);
p                 504 net/tipc/name_table.c 		list_move_tail(&p->all_publ, &sr->all_publ);
p                 519 net/tipc/name_table.c 	struct publication *p;
p                 535 net/tipc/name_table.c 		list_for_each_entry(p, &sr->local_publ, local_publ) {
p                 536 net/tipc/name_table.c 			if (p->scope == scope || (!exact && p->scope < scope))
p                 537 net/tipc/name_table.c 				tipc_dest_push(dports, 0, p->port);
p                 554 net/tipc/name_table.c 	struct publication *p;
p                 570 net/tipc/name_table.c 		list_for_each_entry(p, &sr->all_publ, all_publ) {
p                 571 net/tipc/name_table.c 			tipc_nlist_add(nodes, p->node);
p                 586 net/tipc/name_table.c 	struct publication *p;
p                 597 net/tipc/name_table.c 		list_for_each_entry(p, &sr->all_publ, all_publ) {
p                 598 net/tipc/name_table.c 			if (p->scope != scope)
p                 600 net/tipc/name_table.c 			tipc_group_add_member(grp, p->node, p->port, p->lower);
p                 616 net/tipc/name_table.c 	struct publication *p = NULL;
p                 626 net/tipc/name_table.c 	p = tipc_nametbl_insert_publ(net, type, lower, upper, scope,
p                 628 net/tipc/name_table.c 	if (p) {
p                 630 net/tipc/name_table.c 		skb = tipc_named_publish(net, p);
p                 637 net/tipc/name_table.c 	return p;
p                 650 net/tipc/name_table.c 	struct publication *p;
p                 654 net/tipc/name_table.c 	p = tipc_nametbl_remove_publ(net, type, lower, upper, self, key);
p                 655 net/tipc/name_table.c 	if (p) {
p                 657 net/tipc/name_table.c 		skb = tipc_named_withdraw(net, p);
p                 658 net/tipc/name_table.c 		list_del_init(&p->binding_sock);
p                 659 net/tipc/name_table.c 		kfree_rcu(p, rcu);
p                 759 net/tipc/name_table.c 	struct publication *p, *tmp;
p                 763 net/tipc/name_table.c 		list_for_each_entry_safe(p, tmp, &sr->all_publ, all_publ) {
p                 764 net/tipc/name_table.c 			tipc_service_remove_publ(sr, p->node, p->key);
p                 765 net/tipc/name_table.c 			kfree_rcu(p, rcu);
p                 806 net/tipc/name_table.c 	struct publication *p;
p                 812 net/tipc/name_table.c 		list_for_each_entry(p, &sr->all_publ, all_publ)
p                 813 net/tipc/name_table.c 			if (p->key == *last_key)
p                 815 net/tipc/name_table.c 		if (p->key != *last_key)
p                 818 net/tipc/name_table.c 		p = list_first_entry(&sr->all_publ,
p                 823 net/tipc/name_table.c 	list_for_each_entry_from(p, &sr->all_publ, all_publ) {
p                 824 net/tipc/name_table.c 		*last_key = p->key;
p                 846 net/tipc/name_table.c 		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_SCOPE, p->scope))
p                 848 net/tipc/name_table.c 		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_NODE, p->node))
p                 850 net/tipc/name_table.c 		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_REF, p->port))
p                 852 net/tipc/name_table.c 		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_KEY, p->key))
p                3553 net/tipc/socket.c 	struct publication *p;
p                3556 net/tipc/socket.c 		list_for_each_entry(p, &tsk->publications, binding_sock) {
p                3557 net/tipc/socket.c 			if (p->key == *last_publ)
p                3560 net/tipc/socket.c 		if (p->key != *last_publ) {
p                3572 net/tipc/socket.c 		p = list_first_entry(&tsk->publications, struct publication,
p                3576 net/tipc/socket.c 	list_for_each_entry_from(p, &tsk->publications, binding_sock) {
p                3577 net/tipc/socket.c 		err = __tipc_nl_add_sk_publ(skb, cb, p);
p                3579 net/tipc/socket.c 			*last_publ = p->key;
p                3654 net/tipc/socket.c 	struct publication *p;
p                3679 net/tipc/socket.c 		p = list_first_entry_or_null(&tsk->publications,
p                3681 net/tipc/socket.c 		if (p) {
p                3682 net/tipc/socket.c 			type = p->type;
p                3683 net/tipc/socket.c 			lower = p->lower;
p                3684 net/tipc/socket.c 			upper = p->upper;
p                3757 net/tipc/socket.c 	struct publication *p;
p                3781 net/tipc/socket.c 		p = list_first_entry_or_null(&tsk->publications,
p                3783 net/tipc/socket.c 		i += scnprintf(buf + i, sz - i, " %u", (p) ? p->type : 0);
p                3784 net/tipc/socket.c 		i += scnprintf(buf + i, sz - i, " %u", (p) ? p->lower : 0);
p                3785 net/tipc/socket.c 		i += scnprintf(buf + i, sz - i, " %u", (p) ? p->upper : 0);
p                 107 net/tls/tls_main.c 	struct page *p;
p                 121 net/tls/tls_main.c 		p = sg_page(sg);
p                 123 net/tls/tls_main.c 		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);
p                 139 net/tls/tls_main.c 		put_page(p);
p                 801 net/tls/tls_main.c static void tls_update(struct sock *sk, struct proto *p,
p                 809 net/tls/tls_main.c 		ctx->sk_proto = p;
p                 811 net/tls/tls_main.c 		sk->sk_prot = p;
p                  94 net/unix/scm.c static inline bool too_many_unix_fds(struct task_struct *p)
p                  98 net/unix/scm.c 	if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
p                1880 net/vmw_vsock/af_vsock.c 	u32 __user *p = ptr;
p                1885 net/vmw_vsock/af_vsock.c 		if (put_user(transport->get_local_cid(), p) != 0)
p                1028 net/wireless/nl80211.c 	struct key_params p;
p                1059 net/wireless/nl80211.c 		k->p.key = nla_data(tb[NL80211_KEY_DATA]);
p                1060 net/wireless/nl80211.c 		k->p.key_len = nla_len(tb[NL80211_KEY_DATA]);
p                1064 net/wireless/nl80211.c 		k->p.seq = nla_data(tb[NL80211_KEY_SEQ]);
p                1065 net/wireless/nl80211.c 		k->p.seq_len = nla_len(tb[NL80211_KEY_SEQ]);
p                1069 net/wireless/nl80211.c 		k->p.cipher = nla_get_u32(tb[NL80211_KEY_CIPHER]);
p                1090 net/wireless/nl80211.c 		k->p.mode = nla_get_u8(tb[NL80211_KEY_MODE]);
p                1098 net/wireless/nl80211.c 		k->p.key = nla_data(info->attrs[NL80211_ATTR_KEY_DATA]);
p                1099 net/wireless/nl80211.c 		k->p.key_len = nla_len(info->attrs[NL80211_ATTR_KEY_DATA]);
p                1103 net/wireless/nl80211.c 		k->p.seq = nla_data(info->attrs[NL80211_ATTR_KEY_SEQ]);
p                1104 net/wireless/nl80211.c 		k->p.seq_len = nla_len(info->attrs[NL80211_ATTR_KEY_SEQ]);
p                1111 net/wireless/nl80211.c 		k->p.cipher = nla_get_u32(info->attrs[NL80211_ATTR_KEY_CIPHER]);
p                1227 net/wireless/nl80211.c 		if (!parse.p.key)
p                1245 net/wireless/nl80211.c 		err = cfg80211_validate_key_settings(rdev, &parse.p,
p                1249 net/wireless/nl80211.c 		if (parse.p.cipher != WLAN_CIPHER_SUITE_WEP40 &&
p                1250 net/wireless/nl80211.c 		    parse.p.cipher != WLAN_CIPHER_SUITE_WEP104) {
p                1255 net/wireless/nl80211.c 		result->params[parse.idx].cipher = parse.p.cipher;
p                1256 net/wireless/nl80211.c 		result->params[parse.idx].key_len = parse.p.key_len;
p                1258 net/wireless/nl80211.c 		memcpy(result->data[parse.idx], parse.p.key, parse.p.key_len);
p                3846 net/wireless/nl80211.c 	    !(key.p.mode == NL80211_KEY_SET_TX))
p                3892 net/wireless/nl80211.c 	} else if (key.p.mode == NL80211_KEY_SET_TX &&
p                3907 net/wireless/nl80211.c 				   mac_addr, &key.p);
p                3929 net/wireless/nl80211.c 	if (!key.p.key)
p                3950 net/wireless/nl80211.c 	if (cfg80211_validate_key_settings(rdev, &key.p, key.idx,
p                3960 net/wireless/nl80211.c 				    mac_addr, &key.p);
p                8966 net/wireless/nl80211.c 		if (!key.p.key || !key.p.key_len)
p                8968 net/wireless/nl80211.c 		if ((key.p.cipher != WLAN_CIPHER_SUITE_WEP40 ||
p                8969 net/wireless/nl80211.c 		     key.p.key_len != WLAN_KEY_LEN_WEP40) &&
p                8970 net/wireless/nl80211.c 		    (key.p.cipher != WLAN_CIPHER_SUITE_WEP104 ||
p                8971 net/wireless/nl80211.c 		     key.p.key_len != WLAN_KEY_LEN_WEP104))
p                8976 net/wireless/nl80211.c 		key.p.key_len = 0;
p                8977 net/wireless/nl80211.c 		key.p.key = NULL;
p                8985 net/wireless/nl80211.c 			if (key.p.cipher == rdev->wiphy.cipher_suites[i]) {
p                9051 net/wireless/nl80211.c 				 key.p.key, key.p.key_len, key.idx,
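
The nl80211.c entries copy key material out of already-parsed netlink attributes: nla_data()/nla_len() for variable-length fields and nla_get_u32() for fixed-width ones. A generic sketch of that extraction; the attribute numbers and result struct are invented for illustration:

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <net/netlink.h>

	enum {				/* illustrative attribute numbers */
		DEMO_ATTR_UNSPEC,
		DEMO_ATTR_KEY_DATA,
		DEMO_ATTR_CIPHER,
		__DEMO_ATTR_MAX,
	};

	struct demo_key {
		const u8	*key;
		unsigned int	key_len;
		u32		cipher;
	};

	/* Pull pointers, lengths and scalars out of a parsed attribute table. */
	static int demo_parse_key(struct nlattr **tb, struct demo_key *k)
	{
		if (!tb[DEMO_ATTR_KEY_DATA] || !tb[DEMO_ATTR_CIPHER])
			return -EINVAL;
		k->key = nla_data(tb[DEMO_ATTR_KEY_DATA]);
		k->key_len = nla_len(tb[DEMO_ATTR_KEY_DATA]);
		k->cipher = nla_get_u32(tb[DEMO_ATTR_CIPHER]);
		return 0;
	}
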
p                  81 net/wireless/of.c 	const __be32 *p;
p                 107 net/wireless/of.c 	p = NULL;
p                 111 net/wireless/of.c 		p = of_prop_next_u32(prop, p, &limit->start_freq_khz);
p                 112 net/wireless/of.c 		if (!p) {
p                 117 net/wireless/of.c 		p = of_prop_next_u32(prop, p, &limit->end_freq_khz);
p                 118 net/wireless/of.c 		if (!p) {
p                 722 net/wireless/reg.c static void __init load_keys_from_buffer(const u8 *p, unsigned int buflen)
p                 724 net/wireless/reg.c 	const u8 *end = p + buflen;
p                 728 net/wireless/reg.c 	while (p < end) {
p                 732 net/wireless/reg.c 		if (end - p < 4)
p                 734 net/wireless/reg.c 		if (p[0] != 0x30 &&
p                 735 net/wireless/reg.c 		    p[1] != 0x82)
p                 737 net/wireless/reg.c 		plen = (p[2] << 8) | p[3];
p                 739 net/wireless/reg.c 		if (plen > end - p)
p                 743 net/wireless/reg.c 					   "asymmetric", NULL, p, plen,
p                 757 net/wireless/reg.c 		p += plen;
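
The net/wireless/reg.c loop scans a buffer of concatenated DER blobs: check that a 4-byte header remains, read the 16-bit length from bytes 2-3, verify the blob fits, hand it off, and advance. A self-contained sketch of the same length-prefixed scan; the callback stands in for the keyring insertion and is not part of the original code:

	#include <linux/types.h>

	/* Called for each well-formed blob; stands in for the keyring insertion. */
	typedef void (*demo_blob_fn)(const u8 *blob, size_t len);

	/* Scan concatenated DER SEQUENCEs: 0x30 0x82 <len_hi> <len_lo> <body...>. */
	static void demo_scan_blobs(const u8 *p, size_t buflen, demo_blob_fn fn)
	{
		const u8 *end = p + buflen;

		while (p < end) {
			size_t plen;

			if (end - p < 4)
				break;				/* truncated header */
			if (p[0] != 0x30 || p[1] != 0x82)
				break;				/* not a long-form SEQUENCE */
			plen = ((size_t)p[2] << 8 | p[3]) + 4;	/* body plus tag/length bytes */
			if (plen > (size_t)(end - p))
				break;				/* body overruns the buffer */
			fn(p, plen);
			p += plen;
		}
	}
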
p                 965 net/wireless/scan.c 	struct rb_node **p = &rdev->bss_tree.rb_node;
p                 970 net/wireless/scan.c 	while (*p) {
p                 971 net/wireless/scan.c 		parent = *p;
p                 982 net/wireless/scan.c 			p = &(*p)->rb_left;
p                 984 net/wireless/scan.c 			p = &(*p)->rb_right;
p                 987 net/wireless/scan.c 	rb_link_node(&bss->rbn, parent, p);
p                2307 net/wireless/scan.c 	u8 *cfg, *p, *tmp;
p                2489 net/wireless/scan.c 			p = current_ev + iwe_stream_lcp_len(info);
p                2499 net/wireless/scan.c 				tmp = p;
p                2500 net/wireless/scan.c 				p = iwe_stream_add_value(info, current_ev, p,
p                2503 net/wireless/scan.c 				if (p == tmp) {
p                2508 net/wireless/scan.c 			current_ev = p;
p                 117 net/x25/af_x25.c int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr,
p                 124 net/x25/af_x25.c 	called_len  = (*p >> 0) & 0x0F;
p                 125 net/x25/af_x25.c 	calling_len = (*p >> 4) & 0x0F;
p                 129 net/x25/af_x25.c 	p++;
p                 134 net/x25/af_x25.c 				*called++ = ((*p >> 0) & 0x0F) + '0';
p                 135 net/x25/af_x25.c 				p++;
p                 137 net/x25/af_x25.c 				*called++ = ((*p >> 4) & 0x0F) + '0';
p                 141 net/x25/af_x25.c 				*calling++ = ((*p >> 0) & 0x0F) + '0';
p                 142 net/x25/af_x25.c 				p++;
p                 144 net/x25/af_x25.c 				*calling++ = ((*p >> 4) & 0x0F) + '0';
p                 154 net/x25/af_x25.c int x25_addr_aton(unsigned char *p, struct x25_address *called_addr,
p                 167 net/x25/af_x25.c 	*p++ = (calling_len << 4) | (called_len << 0);
p                 172 net/x25/af_x25.c 				*p |= (*called++ - '0') << 0;
p                 173 net/x25/af_x25.c 				p++;
p                 175 net/x25/af_x25.c 				*p = 0x00;
p                 176 net/x25/af_x25.c 				*p |= (*called++ - '0') << 4;
p                 180 net/x25/af_x25.c 				*p |= (*calling++ - '0') << 0;
p                 181 net/x25/af_x25.c 				p++;
p                 183 net/x25/af_x25.c 				*p = 0x00;
p                 184 net/x25/af_x25.c 				*p |= (*calling++ - '0') << 4;
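
The x25_addr_ntoa()/x25_addr_aton() entries pack two decimal address digits per byte: the first byte carries the two length nibbles, then digits alternate between the high and low nibble of each following byte. A small self-contained sketch of the packing idea only; the real code additionally interleaves the called and calling addresses, which is omitted here:

	#include <stddef.h>

	/*
	 * Pack ASCII decimal digits into nibbles, two per byte, high nibble
	 * first.  Returns the number of bytes written.
	 */
	static size_t demo_pack_digits(unsigned char *out, const char *digits,
				       size_t ndigits)
	{
		size_t i;

		for (i = 0; i < ndigits; i++) {
			if (i % 2 == 0)
				out[i / 2] = (digits[i] - '0') << 4;
			else
				out[i / 2] |= (digits[i] - '0');
		}
		return (ndigits + 1) / 2;
	}
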
p                  44 net/x25/x25_facilities.c 	unsigned char *p;
p                  68 net/x25/x25_facilities.c 	p = skb->data + 1;
p                  71 net/x25/x25_facilities.c 		switch (*p & X25_FAC_CLASS_MASK) {
p                  75 net/x25/x25_facilities.c 			switch (*p) {
p                  77 net/x25/x25_facilities.c 				if((p[1] & 0x81) == 0x81) {
p                  78 net/x25/x25_facilities.c 					facilities->reverse = p[1] & 0x81;
p                  83 net/x25/x25_facilities.c 				if((p[1] & 0x01) == 0x01) {
p                  84 net/x25/x25_facilities.c 					facilities->reverse = p[1] & 0x01;
p                  89 net/x25/x25_facilities.c 				if((p[1] & 0x80) == 0x80) {
p                  90 net/x25/x25_facilities.c 					facilities->reverse = p[1] & 0x80;
p                  95 net/x25/x25_facilities.c 				if(p[1] == 0x00) {
p                 103 net/x25/x25_facilities.c 				facilities->throughput = p[1];
p                 111 net/x25/x25_facilities.c 				       p[0], p[1]);
p                 114 net/x25/x25_facilities.c 			p   += 2;
p                 120 net/x25/x25_facilities.c 			switch (*p) {
p                 122 net/x25/x25_facilities.c 				facilities->pacsize_in  = p[1];
p                 123 net/x25/x25_facilities.c 				facilities->pacsize_out = p[2];
p                 127 net/x25/x25_facilities.c 				facilities->winsize_in  = p[1];
p                 128 net/x25/x25_facilities.c 				facilities->winsize_out = p[2];
p                 134 net/x25/x25_facilities.c 				       p[0], p[1], p[2]);
p                 137 net/x25/x25_facilities.c 			p   += 3;
p                 145 net/x25/x25_facilities.c 			       p[0], p[1], p[2], p[3]);
p                 146 net/x25/x25_facilities.c 			p   += 4;
p                 150 net/x25/x25_facilities.c 			if (len < p[1] + 2)
p                 152 net/x25/x25_facilities.c 			switch (*p) {
p                 154 net/x25/x25_facilities.c 				if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
p                 156 net/x25/x25_facilities.c 				if (p[2] > X25_MAX_AE_LEN)
p                 158 net/x25/x25_facilities.c 				dte_facs->calling_len = p[2];
p                 159 net/x25/x25_facilities.c 				memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
p                 163 net/x25/x25_facilities.c 				if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
p                 165 net/x25/x25_facilities.c 				if (p[2] > X25_MAX_AE_LEN)
p                 167 net/x25/x25_facilities.c 				dte_facs->called_len = p[2];
p                 168 net/x25/x25_facilities.c 				memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
p                 173 net/x25/x25_facilities.c 					"length %d\n", p[0], p[1]);
p                 176 net/x25/x25_facilities.c 			len -= p[1] + 2;
p                 177 net/x25/x25_facilities.c 			p += p[1] + 2;
p                 182 net/x25/x25_facilities.c 	return p - skb->data;
p                 192 net/x25/x25_facilities.c 	unsigned char *p = buffer + 1;
p                 206 net/x25/x25_facilities.c 		*p++ = X25_FAC_REVERSE;
p                 207 net/x25/x25_facilities.c 		*p++ = facilities->reverse;
p                 211 net/x25/x25_facilities.c 		*p++ = X25_FAC_THROUGHPUT;
p                 212 net/x25/x25_facilities.c 		*p++ = facilities->throughput;
p                 217 net/x25/x25_facilities.c 		*p++ = X25_FAC_PACKET_SIZE;
p                 218 net/x25/x25_facilities.c 		*p++ = facilities->pacsize_in ? : facilities->pacsize_out;
p                 219 net/x25/x25_facilities.c 		*p++ = facilities->pacsize_out ? : facilities->pacsize_in;
p                 224 net/x25/x25_facilities.c 		*p++ = X25_FAC_WINDOW_SIZE;
p                 225 net/x25/x25_facilities.c 		*p++ = facilities->winsize_in ? : facilities->winsize_out;
p                 226 net/x25/x25_facilities.c 		*p++ = facilities->winsize_out ? : facilities->winsize_in;
p                 230 net/x25/x25_facilities.c 		*p++ = X25_MARKER;
p                 231 net/x25/x25_facilities.c 		*p++ = X25_DTE_SERVICES;
p                 236 net/x25/x25_facilities.c 		*p++ = X25_FAC_CALLING_AE;
p                 237 net/x25/x25_facilities.c 		*p++ = 1 + bytecount;
p                 238 net/x25/x25_facilities.c 		*p++ = dte_facs->calling_len;
p                 239 net/x25/x25_facilities.c 		memcpy(p, dte_facs->calling_ae, bytecount);
p                 240 net/x25/x25_facilities.c 		p += bytecount;
p                 247 net/x25/x25_facilities.c 		*p++ = X25_FAC_CALLED_AE;
p                 248 net/x25/x25_facilities.c 		*p++ = 1 + bytecount;
p                 249 net/x25/x25_facilities.c 		*p++ = dte_facs->called_len;
p                 250 net/x25/x25_facilities.c 		memcpy(p, dte_facs->called_ae, bytecount);
p                 251 net/x25/x25_facilities.c 		p+=bytecount;
p                 254 net/x25/x25_facilities.c 	len       = p - buffer;
p                  65 net/xfrm/xfrm_interface.c 		if (x->if_id == xi->p.if_id &&
p                 157 net/xfrm/xfrm_interface.c static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p)
p                 166 net/xfrm/xfrm_interface.c 		if (xi->p.if_id == p->if_id)
p                 272 net/xfrm/xfrm_interface.c 	dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, xi->p.if_id);
p                 283 net/xfrm/xfrm_interface.c 	if (x->if_id != xi->p.if_id)
p                 385 net/xfrm/xfrm_interface.c 	fl.flowi_oif = xi->p.link;
p                 514 net/xfrm/xfrm_interface.c static int xfrmi_change(struct xfrm_if *xi, const struct xfrm_if_parms *p)
p                 516 net/xfrm/xfrm_interface.c 	if (xi->p.link != p->link)
p                 519 net/xfrm/xfrm_interface.c 	xi->p.if_id = p->if_id;
p                 524 net/xfrm/xfrm_interface.c static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
p                 532 net/xfrm/xfrm_interface.c 	err = xfrmi_change(xi, p);
p                 571 net/xfrm/xfrm_interface.c 	return xi->p.link;
p                 601 net/xfrm/xfrm_interface.c 	struct net_device *phydev = __dev_get_by_index(xi->net, xi->p.link);
p                 659 net/xfrm/xfrm_interface.c 	struct xfrm_if_parms p;
p                 663 net/xfrm/xfrm_interface.c 	xfrmi_netlink_parms(data, &p);
p                 664 net/xfrm/xfrm_interface.c 	xi = xfrmi_locate(net, &p);
p                 669 net/xfrm/xfrm_interface.c 	xi->p = p;
p                 688 net/xfrm/xfrm_interface.c 	struct xfrm_if_parms p;
p                 690 net/xfrm/xfrm_interface.c 	xfrmi_netlink_parms(data, &p);
p                 691 net/xfrm/xfrm_interface.c 	xi = xfrmi_locate(net, &p);
p                 699 net/xfrm/xfrm_interface.c 	return xfrmi_update(xi, &p);
p                 715 net/xfrm/xfrm_interface.c 	struct xfrm_if_parms *parm = &xi->p;
p                 830 net/xfrm/xfrm_policy.c 	struct xfrm_policy *policy, *p;
p                 845 net/xfrm/xfrm_policy.c 		hlist_for_each_entry(p, &n->hhead, bydst) {
p                 846 net/xfrm/xfrm_policy.c 			if (policy->priority > p->priority)
p                 847 net/xfrm/xfrm_policy.c 				newpos = &p->bydst;
p                 848 net/xfrm/xfrm_policy.c 			else if (policy->priority == p->priority &&
p                 849 net/xfrm/xfrm_policy.c 				 policy->pos > p->pos)
p                 850 net/xfrm/xfrm_policy.c 				newpos = &p->bydst;
p                 894 net/xfrm/xfrm_policy.c 	struct rb_node **p, *parent;
p                 900 net/xfrm/xfrm_policy.c 	p = &new->rb_node;
p                 901 net/xfrm/xfrm_policy.c 	while (*p) {
p                 905 net/xfrm/xfrm_policy.c 		parent = *p;
p                 906 net/xfrm/xfrm_policy.c 		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
p                 913 net/xfrm/xfrm_policy.c 			p = &parent->rb_left;
p                 915 net/xfrm/xfrm_policy.c 			p = &parent->rb_right;
p                 934 net/xfrm/xfrm_policy.c 			rb_erase(*p, new);
p                 941 net/xfrm/xfrm_policy.c 	rb_link_node_rcu(&n->node, parent, p);
p                 981 net/xfrm/xfrm_policy.c 	struct rb_node **p, *parent = NULL;
p                 984 net/xfrm/xfrm_policy.c 	p = &root->rb_node;
p                 985 net/xfrm/xfrm_policy.c 	while (*p) {
p                 988 net/xfrm/xfrm_policy.c 		parent = *p;
p                 989 net/xfrm/xfrm_policy.c 		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
p                1000 net/xfrm/xfrm_policy.c 			p = &parent->rb_left;
p                1002 net/xfrm/xfrm_policy.c 			p = &parent->rb_right;
p                1033 net/xfrm/xfrm_policy.c 			p = &root->rb_node;
p                1045 net/xfrm/xfrm_policy.c 	rb_link_node_rcu(&node->node, parent, p);
p                1324 net/xfrm/xfrm_policy.c 			void *p = xfrm_policy_inexact_insert(policy, dir, 0);
p                1326 net/xfrm/xfrm_policy.c 			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
p                1364 net/xfrm/xfrm_policy.c 		struct xfrm_policy *p;
p                1380 net/xfrm/xfrm_policy.c 		hlist_for_each_entry(p, list, byidx) {
p                1381 net/xfrm/xfrm_policy.c 			if (p->index == idx) {
p                2321 net/xfrm/xfrm_policy.c 	const struct xfrm_policy *p;
p                2327 net/xfrm/xfrm_policy.c 		p = rcu_dereference(osk->sk_policy[i]);
p                2328 net/xfrm/xfrm_policy.c 		if (p) {
p                2329 net/xfrm/xfrm_policy.c 			np = clone_policy(p, i);
p                3525 net/xfrm/xfrm_policy.c 			if_id = xi->p.if_id;
p                1774 net/xfrm/xfrm_state.c 	     int (*cmp)(const void *p), int maxclass)
p                1804 net/xfrm/xfrm_state.c static int __xfrm6_state_sort_cmp(const void *p)
p                1806 net/xfrm/xfrm_state.c 	const struct xfrm_state *v = p;
p                1833 net/xfrm/xfrm_state.c static int __xfrm6_tmpl_sort_cmp(const void *p)
p                1835 net/xfrm/xfrm_state.c 	const struct xfrm_tmpl *v = p;
p                1852 net/xfrm/xfrm_state.c static inline int __xfrm6_state_sort_cmp(const void *p) { return 5; }
p                1853 net/xfrm/xfrm_state.c static inline int __xfrm6_tmpl_sort_cmp(const void *p) { return 4; }
p                1857 net/xfrm/xfrm_state.c 	     int (*cmp)(const void *p), int maxclass)
p                 120 net/xfrm/xfrm_user.c static inline int verify_replay(struct xfrm_usersa_info *p,
p                 127 net/xfrm/xfrm_user.c 		return (p->flags & XFRM_STATE_ESN) ? -EINVAL : 0;
p                 139 net/xfrm/xfrm_user.c 	if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))
p                 142 net/xfrm/xfrm_user.c 	if (p->replay_window != 0)
p                 148 net/xfrm/xfrm_user.c static int verify_newsa_info(struct xfrm_usersa_info *p,
p                 154 net/xfrm/xfrm_user.c 	switch (p->family) {
p                 170 net/xfrm/xfrm_user.c 	switch (p->sel.family) {
p                 175 net/xfrm/xfrm_user.c 		if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
p                 182 net/xfrm/xfrm_user.c 		if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
p                 196 net/xfrm/xfrm_user.c 	switch (p->id.proto) {
p                 221 net/xfrm/xfrm_user.c 		    p->mode != XFRM_MODE_TUNNEL)
p                 232 net/xfrm/xfrm_user.c 		    (ntohl(p->id.spi) >= 0x10000))
p                 268 net/xfrm/xfrm_user.c 	if ((err = verify_replay(p, attrs)))
p                 272 net/xfrm/xfrm_user.c 	switch (p->mode) {
p                 293 net/xfrm/xfrm_user.c 	struct xfrm_algo *p, *ualg;
p                 306 net/xfrm/xfrm_user.c 	p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
p                 307 net/xfrm/xfrm_user.c 	if (!p)
p                 310 net/xfrm/xfrm_user.c 	strcpy(p->alg_name, algo->name);
p                 311 net/xfrm/xfrm_user.c 	*algpp = p;
p                 317 net/xfrm/xfrm_user.c 	struct xfrm_algo *p, *ualg;
p                 330 net/xfrm/xfrm_user.c 	p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
p                 331 net/xfrm/xfrm_user.c 	if (!p)
p                 334 net/xfrm/xfrm_user.c 	strcpy(p->alg_name, algo->name);
p                 335 net/xfrm/xfrm_user.c 	x->ealg = p;
p                 344 net/xfrm/xfrm_user.c 	struct xfrm_algo_auth *p;
p                 357 net/xfrm/xfrm_user.c 	p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
p                 358 net/xfrm/xfrm_user.c 	if (!p)
p                 361 net/xfrm/xfrm_user.c 	strcpy(p->alg_name, algo->name);
p                 362 net/xfrm/xfrm_user.c 	p->alg_key_len = ualg->alg_key_len;
p                 363 net/xfrm/xfrm_user.c 	p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
p                 364 net/xfrm/xfrm_user.c 	memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);
p                 366 net/xfrm/xfrm_user.c 	*algpp = p;
p                 373 net/xfrm/xfrm_user.c 	struct xfrm_algo_auth *p, *ualg;
p                 388 net/xfrm/xfrm_user.c 	p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
p                 389 net/xfrm/xfrm_user.c 	if (!p)
p                 392 net/xfrm/xfrm_user.c 	strcpy(p->alg_name, algo->name);
p                 393 net/xfrm/xfrm_user.c 	if (!p->alg_trunc_len)
p                 394 net/xfrm/xfrm_user.c 		p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
p                 396 net/xfrm/xfrm_user.c 	*algpp = p;
p                 402 net/xfrm/xfrm_user.c 	struct xfrm_algo_aead *p, *ualg;
p                 415 net/xfrm/xfrm_user.c 	p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
p                 416 net/xfrm/xfrm_user.c 	if (!p)
p                 419 net/xfrm/xfrm_user.c 	strcpy(p->alg_name, algo->name);
p                 420 net/xfrm/xfrm_user.c 	x->aead = p;
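
The xfrm_user.c attach_*() entries share one idiom: look up the algorithm descriptor, duplicate the user-supplied blob with kmemdup(), then overwrite the name field with the canonical name from the descriptor so later lookups cannot be confused by a user-supplied alias. A reduced kernel-style sketch with an invented blob type standing in for struct xfrm_algo:

	#include <linux/slab.h>
	#include <linux/string.h>

	struct demo_algo {		/* illustrative stand-in */
		char		alg_name[64];
		unsigned int	alg_key_len;	/* in bits */
		char		alg_key[];
	};

	/* Duplicate a user-supplied algorithm blob and canonicalise its name. */
	static struct demo_algo *demo_attach_algo(const struct demo_algo *ualg,
						  const char *canonical_name)
	{
		size_t len = sizeof(*ualg) + (ualg->alg_key_len + 7) / 8;
		struct demo_algo *p;

		p = kmemdup(ualg, len, GFP_KERNEL);
		if (!p)
			return NULL;
		strscpy(p->alg_name, canonical_name, sizeof(p->alg_name));
		return p;
	}
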
p                 454 net/xfrm/xfrm_user.c 	struct xfrm_replay_state_esn *p, *pp, *up;
p                 464 net/xfrm/xfrm_user.c 	p = kzalloc(klen, GFP_KERNEL);
p                 465 net/xfrm/xfrm_user.c 	if (!p)
p                 470 net/xfrm/xfrm_user.c 		kfree(p);
p                 474 net/xfrm/xfrm_user.c 	memcpy(p, up, ulen);
p                 477 net/xfrm/xfrm_user.c 	*replay_esn = p;
p                 494 net/xfrm/xfrm_user.c static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
p                 496 net/xfrm/xfrm_user.c 	memcpy(&x->id, &p->id, sizeof(x->id));
p                 497 net/xfrm/xfrm_user.c 	memcpy(&x->sel, &p->sel, sizeof(x->sel));
p                 498 net/xfrm/xfrm_user.c 	memcpy(&x->lft, &p->lft, sizeof(x->lft));
p                 499 net/xfrm/xfrm_user.c 	x->props.mode = p->mode;
p                 500 net/xfrm/xfrm_user.c 	x->props.replay_window = min_t(unsigned int, p->replay_window,
p                 502 net/xfrm/xfrm_user.c 	x->props.reqid = p->reqid;
p                 503 net/xfrm/xfrm_user.c 	x->props.family = p->family;
p                 504 net/xfrm/xfrm_user.c 	memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
p                 505 net/xfrm/xfrm_user.c 	x->props.flags = p->flags;
p                 507 net/xfrm/xfrm_user.c 	if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
p                 508 net/xfrm/xfrm_user.c 		x->sel.family = p->family;
p                 571 net/xfrm/xfrm_user.c 					       struct xfrm_usersa_info *p,
p                 581 net/xfrm/xfrm_user.c 	copy_from_user_state(x, p);
p                 642 net/xfrm/xfrm_user.c 	x->km.seq = p->seq;
p                 675 net/xfrm/xfrm_user.c 	struct xfrm_usersa_info *p = nlmsg_data(nlh);
p                 680 net/xfrm/xfrm_user.c 	err = verify_newsa_info(p, attrs);
p                 684 net/xfrm/xfrm_user.c 	x = xfrm_state_construct(net, p, attrs, &err);
p                 717 net/xfrm/xfrm_user.c 						 struct xfrm_usersa_id *p,
p                 726 net/xfrm/xfrm_user.c 	if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
p                 728 net/xfrm/xfrm_user.c 		x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
p                 740 net/xfrm/xfrm_user.c 					     &p->daddr, saddr,
p                 741 net/xfrm/xfrm_user.c 					     p->proto, p->family);
p                 757 net/xfrm/xfrm_user.c 	struct xfrm_usersa_id *p = nlmsg_data(nlh);
p                 759 net/xfrm/xfrm_user.c 	x = xfrm_user_state_lookup(net, p, attrs, &err);
p                 787 net/xfrm/xfrm_user.c static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
p                 789 net/xfrm/xfrm_user.c 	memset(p, 0, sizeof(*p));
p                 790 net/xfrm/xfrm_user.c 	memcpy(&p->id, &x->id, sizeof(p->id));
p                 791 net/xfrm/xfrm_user.c 	memcpy(&p->sel, &x->sel, sizeof(p->sel));
p                 792 net/xfrm/xfrm_user.c 	memcpy(&p->lft, &x->lft, sizeof(p->lft));
p                 793 net/xfrm/xfrm_user.c 	memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
p                 794 net/xfrm/xfrm_user.c 	put_unaligned(x->stats.replay_window, &p->stats.replay_window);
p                 795 net/xfrm/xfrm_user.c 	put_unaligned(x->stats.replay, &p->stats.replay);
p                 796 net/xfrm/xfrm_user.c 	put_unaligned(x->stats.integrity_failed, &p->stats.integrity_failed);
p                 797 net/xfrm/xfrm_user.c 	memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
p                 798 net/xfrm/xfrm_user.c 	p->mode = x->props.mode;
p                 799 net/xfrm/xfrm_user.c 	p->replay_window = x->props.replay_window;
p                 800 net/xfrm/xfrm_user.c 	p->reqid = x->props.reqid;
p                 801 net/xfrm/xfrm_user.c 	p->family = x->props.family;
p                 802 net/xfrm/xfrm_user.c 	p->flags = x->props.flags;
p                 803 net/xfrm/xfrm_user.c 	p->seq = x->km.seq;
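
The copy_to_user_state() entries begin with memset(p, 0, sizeof(*p)) before filling the userspace-visible structure field by field, so struct padding never leaks kernel memory to userspace. A minimal sketch of that mirrored copy with invented, much smaller structures:

	#include <linux/string.h>
	#include <linux/types.h>

	struct demo_uapi_state {	/* illustrative uAPI mirror */
		u32	reqid;
		u8	mode;
		u8	flags;
	};

	struct demo_state {		/* illustrative kernel-side state */
		u32	reqid;
		u8	mode;
		u8	flags;
	};

	/* Zero the uAPI struct first so padding never leaks kernel memory. */
	static void demo_copy_to_user_state(const struct demo_state *x,
					    struct demo_uapi_state *p)
	{
		memset(p, 0, sizeof(*p));
		p->reqid = x->reqid;
		p->mode  = x->mode;
		p->flags = x->flags;
	}
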
p                 883 net/xfrm/xfrm_user.c 				    struct xfrm_usersa_info *p,
p                 888 net/xfrm/xfrm_user.c 	copy_to_user_state(x, p);
p                 978 net/xfrm/xfrm_user.c 	struct xfrm_usersa_info *p;
p                 983 net/xfrm/xfrm_user.c 			XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
p                 987 net/xfrm/xfrm_user.c 	p = nlmsg_data(nlh);
p                 989 net/xfrm/xfrm_user.c 	err = copy_to_user_state_extra(x, p, skb);
p                1285 net/xfrm/xfrm_user.c 	struct xfrm_usersa_id *p = nlmsg_data(nlh);
p                1290 net/xfrm/xfrm_user.c 	x = xfrm_user_state_lookup(net, p, attrs, &err);
p                1310 net/xfrm/xfrm_user.c 	struct xfrm_userspi_info *p;
p                1319 net/xfrm/xfrm_user.c 	p = nlmsg_data(nlh);
p                1320 net/xfrm/xfrm_user.c 	err = verify_spi_info(p->info.id.proto, p->min, p->max);
p                1324 net/xfrm/xfrm_user.c 	family = p->info.family;
p                1325 net/xfrm/xfrm_user.c 	daddr = &p->info.id.daddr;
p                1334 net/xfrm/xfrm_user.c 	if (p->info.seq) {
p                1335 net/xfrm/xfrm_user.c 		x = xfrm_find_acq_byseq(net, mark, p->info.seq);
p                1343 net/xfrm/xfrm_user.c 		x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
p                1344 net/xfrm/xfrm_user.c 				  if_id, p->info.id.proto, daddr,
p                1345 net/xfrm/xfrm_user.c 				  &p->info.saddr, 1,
p                1351 net/xfrm/xfrm_user.c 	err = xfrm_alloc_spi(x, p->min, p->max);
p                1400 net/xfrm/xfrm_user.c static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
p                1404 net/xfrm/xfrm_user.c 	switch (p->share) {
p                1415 net/xfrm/xfrm_user.c 	switch (p->action) {
p                1424 net/xfrm/xfrm_user.c 	switch (p->sel.family) {
p                1426 net/xfrm/xfrm_user.c 		if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
p                1433 net/xfrm/xfrm_user.c 		if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
p                1445 net/xfrm/xfrm_user.c 	ret = verify_policy_dir(p->dir);
p                1448 net/xfrm/xfrm_user.c 	if (p->index && (xfrm_policy_id2dir(p->index) != p->dir))
p                1584 net/xfrm/xfrm_user.c static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
p                1586 net/xfrm/xfrm_user.c 	xp->priority = p->priority;
p                1587 net/xfrm/xfrm_user.c 	xp->index = p->index;
p                1588 net/xfrm/xfrm_user.c 	memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
p                1589 net/xfrm/xfrm_user.c 	memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
p                1590 net/xfrm/xfrm_user.c 	xp->action = p->action;
p                1591 net/xfrm/xfrm_user.c 	xp->flags = p->flags;
p                1592 net/xfrm/xfrm_user.c 	xp->family = p->sel.family;
p                1596 net/xfrm/xfrm_user.c static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
p                1598 net/xfrm/xfrm_user.c 	memset(p, 0, sizeof(*p));
p                1599 net/xfrm/xfrm_user.c 	memcpy(&p->sel, &xp->selector, sizeof(p->sel));
p                1600 net/xfrm/xfrm_user.c 	memcpy(&p->lft, &xp->lft, sizeof(p->lft));
p                1601 net/xfrm/xfrm_user.c 	memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
p                1602 net/xfrm/xfrm_user.c 	p->priority = xp->priority;
p                1603 net/xfrm/xfrm_user.c 	p->index = xp->index;
p                1604 net/xfrm/xfrm_user.c 	p->sel.family = xp->family;
p                1605 net/xfrm/xfrm_user.c 	p->dir = dir;
p                1606 net/xfrm/xfrm_user.c 	p->action = xp->action;
p                1607 net/xfrm/xfrm_user.c 	p->flags = xp->flags;
p                1608 net/xfrm/xfrm_user.c 	p->share = XFRM_SHARE_ANY; /* XXX xp->share */
p                1611 net/xfrm/xfrm_user.c static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
p                1621 net/xfrm/xfrm_user.c 	copy_from_user_policy(xp, p);
p                1649 net/xfrm/xfrm_user.c 	struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
p                1655 net/xfrm/xfrm_user.c 	err = verify_newpolicy_info(p);
p                1662 net/xfrm/xfrm_user.c 	xp = xfrm_policy_construct(net, p, attrs, &err);
p                1671 net/xfrm/xfrm_user.c 	err = xfrm_policy_insert(p->dir, xp, excl);
p                1683 net/xfrm/xfrm_user.c 	km_policy_notify(xp, p->dir, &c);
p                1764 net/xfrm/xfrm_user.c 	struct xfrm_userpolicy_info *p;
p                1771 net/xfrm/xfrm_user.c 			XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
p                1775 net/xfrm/xfrm_user.c 	p = nlmsg_data(nlh);
p                1776 net/xfrm/xfrm_user.c 	copy_to_user_policy(xp, p, dir);
p                1860 net/xfrm/xfrm_user.c 	struct xfrm_userpolicy_id *p;
p                1869 net/xfrm/xfrm_user.c 	p = nlmsg_data(nlh);
p                1876 net/xfrm/xfrm_user.c 	err = verify_policy_dir(p->dir);
p                1883 net/xfrm/xfrm_user.c 	if (p->index)
p                1884 net/xfrm/xfrm_user.c 		xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, delete, &err);
p                1901 net/xfrm/xfrm_user.c 		xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, &p->sel,
p                1911 net/xfrm/xfrm_user.c 		resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
p                1924 net/xfrm/xfrm_user.c 		c.data.byid = p->index;
p                1928 net/xfrm/xfrm_user.c 		km_policy_notify(xp, p->dir, &c);
p                1941 net/xfrm/xfrm_user.c 	struct xfrm_usersa_flush *p = nlmsg_data(nlh);
p                1944 net/xfrm/xfrm_user.c 	err = xfrm_state_flush(net, p->proto, true, false);
p                1950 net/xfrm/xfrm_user.c 	c.data.proto = p->proto;
p                2046 net/xfrm/xfrm_user.c 	struct xfrm_aevent_id *p = nlmsg_data(nlh);
p                2047 net/xfrm/xfrm_user.c 	struct xfrm_usersa_id *id = &p->sa_id;
p                2067 net/xfrm/xfrm_user.c 	c.data.aevent = p->flags;
p                2089 net/xfrm/xfrm_user.c 	struct xfrm_aevent_id *p = nlmsg_data(nlh);
p                2105 net/xfrm/xfrm_user.c 	x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
p                2165 net/xfrm/xfrm_user.c 	struct xfrm_userpolicy_info *p = &up->pol;
p                2176 net/xfrm/xfrm_user.c 	err = verify_policy_dir(p->dir);
p                2183 net/xfrm/xfrm_user.c 	if (p->index)
p                2184 net/xfrm/xfrm_user.c 		xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, 0, &err);
p                2201 net/xfrm/xfrm_user.c 		xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir,
p                2202 net/xfrm/xfrm_user.c 					   &p->sel, ctx, 0, &err);
p                2213 net/xfrm/xfrm_user.c 		xfrm_policy_delete(xp, p->dir);
p                2216 net/xfrm/xfrm_user.c 	km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
p                2230 net/xfrm/xfrm_user.c 	struct xfrm_usersa_info *p = &ue->state;
p                2234 net/xfrm/xfrm_user.c 	x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
p                2762 net/xfrm/xfrm_user.c 	struct xfrm_usersa_flush *p;
p                2771 net/xfrm/xfrm_user.c 	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
p                2777 net/xfrm/xfrm_user.c 	p = nlmsg_data(nlh);
p                2778 net/xfrm/xfrm_user.c 	p->proto = c->data.proto;
p                2832 net/xfrm/xfrm_user.c 	struct xfrm_usersa_info *p;
p                2840 net/xfrm/xfrm_user.c 	headlen = sizeof(*p);
p                2857 net/xfrm/xfrm_user.c 	p = nlmsg_data(nlh);
p                2868 net/xfrm/xfrm_user.c 		attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
p                2873 net/xfrm/xfrm_user.c 		p = nla_data(attr);
p                2875 net/xfrm/xfrm_user.c 	err = copy_to_user_state_extra(x, p, skb);
p                2986 net/xfrm/xfrm_user.c 	struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
p                2987 net/xfrm/xfrm_user.c 	struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
p                3013 net/xfrm/xfrm_user.c 	if (len < sizeof(*p) ||
p                3014 net/xfrm/xfrm_user.c 	    verify_newpolicy_info(p))
p                3017 net/xfrm/xfrm_user.c 	nr = ((len - sizeof(*p)) / sizeof(*ut));
p                3018 net/xfrm/xfrm_user.c 	if (validate_tmpl(nr, ut, p->sel.family))
p                3021 net/xfrm/xfrm_user.c 	if (p->dir > XFRM_POLICY_OUT)
p                3030 net/xfrm/xfrm_user.c 	copy_from_user_policy(xp, p);
p                3034 net/xfrm/xfrm_user.c 	*dir = p->dir;
p                3101 net/xfrm/xfrm_user.c 	struct xfrm_userpolicy_info *p;
p                3108 net/xfrm/xfrm_user.c 	headlen = sizeof(*p);
p                3126 net/xfrm/xfrm_user.c 	p = nlmsg_data(nlh);
p                3138 net/xfrm/xfrm_user.c 		attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
p                3143 net/xfrm/xfrm_user.c 		p = nla_data(attr);
p                3146 net/xfrm/xfrm_user.c 	copy_to_user_policy(xp, p, dir);
p                  63 samples/bpf/offwaketime_kern.c 	struct task_struct *p = (void *) PT_REGS_PARM1(ctx);
p                  67 samples/bpf/offwaketime_kern.c 	pid = _(p->pid);
p                 126 samples/bpf/offwaketime_kern.c 	struct task_struct *p = (void *) PT_REGS_PARM1(ctx);
p                 128 samples/bpf/offwaketime_kern.c 	u32 pid = _(p->pid);
p                  76 samples/bpf/test_cgrp2_sock.c 	void *p;
p                  89 samples/bpf/test_cgrp2_sock.c 	p = prog = malloc(insns_cnt);
p                  95 samples/bpf/test_cgrp2_sock.c 	memcpy(p, prog_start, sizeof(prog_start));
p                  96 samples/bpf/test_cgrp2_sock.c 	p += sizeof(prog_start);
p                  99 samples/bpf/test_cgrp2_sock.c 		memcpy(p, prog_dev, sizeof(prog_dev));
p                 100 samples/bpf/test_cgrp2_sock.c 		p += sizeof(prog_dev);
p                 104 samples/bpf/test_cgrp2_sock.c 		memcpy(p, prog_mark, sizeof(prog_mark));
p                 105 samples/bpf/test_cgrp2_sock.c 		p += sizeof(prog_mark);
p                 109 samples/bpf/test_cgrp2_sock.c 		memcpy(p, prog_prio, sizeof(prog_prio));
p                 110 samples/bpf/test_cgrp2_sock.c 		p += sizeof(prog_prio);
p                 113 samples/bpf/test_cgrp2_sock.c 	memcpy(p, prog_end, sizeof(prog_end));
p                 114 samples/bpf/test_cgrp2_sock.c 	p += sizeof(prog_end);
p                  26 samples/bpf/xdp2_kern.c 	unsigned short *p = data;
p                  29 samples/bpf/xdp2_kern.c 	dst[0] = p[0];
p                  30 samples/bpf/xdp2_kern.c 	dst[1] = p[1];
p                  31 samples/bpf/xdp2_kern.c 	dst[2] = p[2];
p                  32 samples/bpf/xdp2_kern.c 	p[0] = p[3];
p                  33 samples/bpf/xdp2_kern.c 	p[1] = p[4];
p                  34 samples/bpf/xdp2_kern.c 	p[2] = p[5];
p                  35 samples/bpf/xdp2_kern.c 	p[3] = dst[0];
p                  36 samples/bpf/xdp2_kern.c 	p[4] = dst[1];
p                  37 samples/bpf/xdp2_kern.c 	p[5] = dst[2];
p                 210 samples/bpf/xdp_monitor_user.c static double calc_period(struct record *r, struct record *p)
p                 215 samples/bpf/xdp_monitor_user.c 	period = r->timestamp - p->timestamp;
p                 222 samples/bpf/xdp_monitor_user.c static double calc_period_u64(struct record_u64 *r, struct record_u64 *p)
p                 227 samples/bpf/xdp_monitor_user.c 	period = r->timestamp - p->timestamp;
p                 234 samples/bpf/xdp_monitor_user.c static double calc_pps(struct datarec *r, struct datarec *p, double period)
p                 240 samples/bpf/xdp_monitor_user.c 		packets = r->processed - p->processed;
p                 246 samples/bpf/xdp_monitor_user.c static double calc_pps_u64(struct u64rec *r, struct u64rec *p, double period)
p                 252 samples/bpf/xdp_monitor_user.c 		packets = r->processed - p->processed;
p                 258 samples/bpf/xdp_monitor_user.c static double calc_drop(struct datarec *r, struct datarec *p, double period)
p                 264 samples/bpf/xdp_monitor_user.c 		packets = r->dropped - p->dropped;
p                 270 samples/bpf/xdp_monitor_user.c static double calc_info(struct datarec *r, struct datarec *p, double period)
p                 276 samples/bpf/xdp_monitor_user.c 		packets = r->info - p->info;
p                 282 samples/bpf/xdp_monitor_user.c static double calc_err(struct datarec *r, struct datarec *p, double period)
p                 288 samples/bpf/xdp_monitor_user.c 		packets = r->err - p->err;
p                 321 samples/bpf/xdp_monitor_user.c 			struct u64rec *p = &prev->cpu[i];
p                 323 samples/bpf/xdp_monitor_user.c 			pps = calc_pps_u64(r, p, t);
p                 346 samples/bpf/xdp_monitor_user.c 			struct u64rec *p = &prev->cpu[i];
p                 348 samples/bpf/xdp_monitor_user.c 			pps = calc_pps_u64(r, p, t);
p                 372 samples/bpf/xdp_monitor_user.c 			struct datarec *p = &prev->cpu[i];
p                 374 samples/bpf/xdp_monitor_user.c 			pps  = calc_pps(r, p, t);
p                 375 samples/bpf/xdp_monitor_user.c 			drop = calc_drop(r, p, t);
p                 376 samples/bpf/xdp_monitor_user.c 			info = calc_info(r, p, t);
p                 411 samples/bpf/xdp_monitor_user.c 			struct datarec *p = &prev->cpu[i];
p                 413 samples/bpf/xdp_monitor_user.c 			pps  = calc_pps(r, p, t);
p                 414 samples/bpf/xdp_monitor_user.c 			drop = calc_drop(r, p, t);
p                 415 samples/bpf/xdp_monitor_user.c 			info = calc_info(r, p, t);
p                 444 samples/bpf/xdp_monitor_user.c 			struct datarec *p = &prev->cpu[i];
p                 446 samples/bpf/xdp_monitor_user.c 			pps  = calc_pps(r, p, t);
p                 447 samples/bpf/xdp_monitor_user.c 			drop = calc_drop(r, p, t);
p                 448 samples/bpf/xdp_monitor_user.c 			info = calc_info(r, p, t);
p                 449 samples/bpf/xdp_monitor_user.c 			err  = calc_err(r, p, t);
p                 259 samples/bpf/xdp_redirect_cpu_user.c static double calc_period(struct record *r, struct record *p)
p                 264 samples/bpf/xdp_redirect_cpu_user.c 	period = r->timestamp - p->timestamp;
p                 271 samples/bpf/xdp_redirect_cpu_user.c static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_)
p                 277 samples/bpf/xdp_redirect_cpu_user.c 		packets = r->processed - p->processed;
p                 283 samples/bpf/xdp_redirect_cpu_user.c static __u64 calc_drop_pps(struct datarec *r, struct datarec *p, double period_)
p                 289 samples/bpf/xdp_redirect_cpu_user.c 		packets = r->dropped - p->dropped;
p                 296 samples/bpf/xdp_redirect_cpu_user.c 			    struct datarec *p, double period_)
p                 302 samples/bpf/xdp_redirect_cpu_user.c 		packets = r->issue - p->issue;
p                 335 samples/bpf/xdp_redirect_cpu_user.c 			struct datarec *p = &prev->cpu[i];
p                 337 samples/bpf/xdp_redirect_cpu_user.c 			pps = calc_pps(r, p, t);
p                 338 samples/bpf/xdp_redirect_cpu_user.c 			drop = calc_drop_pps(r, p, t);
p                 339 samples/bpf/xdp_redirect_cpu_user.c 			err  = calc_errs_pps(r, p, t);
p                 363 samples/bpf/xdp_redirect_cpu_user.c 			struct datarec *p = &prev->cpu[i];
p                 365 samples/bpf/xdp_redirect_cpu_user.c 			pps  = calc_pps(r, p, t);
p                 366 samples/bpf/xdp_redirect_cpu_user.c 			drop = calc_drop_pps(r, p, t);
p                 367 samples/bpf/xdp_redirect_cpu_user.c 			err  = calc_errs_pps(r, p, t);
p                 400 samples/bpf/xdp_redirect_cpu_user.c 			struct datarec *p = &prev->cpu[i];
p                 402 samples/bpf/xdp_redirect_cpu_user.c 			pps  = calc_pps(r, p, t);
p                 403 samples/bpf/xdp_redirect_cpu_user.c 			drop = calc_drop_pps(r, p, t);
p                 404 samples/bpf/xdp_redirect_cpu_user.c 			err  = calc_errs_pps(r, p, t);
p                 429 samples/bpf/xdp_redirect_cpu_user.c 			struct datarec *p = &prev->cpu[i];
p                 431 samples/bpf/xdp_redirect_cpu_user.c 			pps  = calc_pps(r, p, t);
p                 432 samples/bpf/xdp_redirect_cpu_user.c 			drop = calc_drop_pps(r, p, t);
p                 451 samples/bpf/xdp_redirect_cpu_user.c 			struct datarec *p = &prev->cpu[i];
p                 453 samples/bpf/xdp_redirect_cpu_user.c 			pps  = calc_pps(r, p, t);
p                 454 samples/bpf/xdp_redirect_cpu_user.c 			drop = calc_drop_pps(r, p, t);
p                  41 samples/bpf/xdp_redirect_kern.c 	unsigned short *p = data;
p                  44 samples/bpf/xdp_redirect_kern.c 	dst[0] = p[0];
p                  45 samples/bpf/xdp_redirect_kern.c 	dst[1] = p[1];
p                  46 samples/bpf/xdp_redirect_kern.c 	dst[2] = p[2];
p                  47 samples/bpf/xdp_redirect_kern.c 	p[0] = p[3];
p                  48 samples/bpf/xdp_redirect_kern.c 	p[1] = p[4];
p                  49 samples/bpf/xdp_redirect_kern.c 	p[2] = p[5];
p                  50 samples/bpf/xdp_redirect_kern.c 	p[3] = dst[0];
p                  51 samples/bpf/xdp_redirect_kern.c 	p[4] = dst[1];
p                  52 samples/bpf/xdp_redirect_kern.c 	p[5] = dst[2];
p                  41 samples/bpf/xdp_redirect_map_kern.c 	unsigned short *p = data;
p                  44 samples/bpf/xdp_redirect_map_kern.c 	dst[0] = p[0];
p                  45 samples/bpf/xdp_redirect_map_kern.c 	dst[1] = p[1];
p                  46 samples/bpf/xdp_redirect_map_kern.c 	dst[2] = p[2];
p                  47 samples/bpf/xdp_redirect_map_kern.c 	p[0] = p[3];
p                  48 samples/bpf/xdp_redirect_map_kern.c 	p[1] = p[4];
p                  49 samples/bpf/xdp_redirect_map_kern.c 	p[2] = p[5];
p                  50 samples/bpf/xdp_redirect_map_kern.c 	p[3] = dst[0];
p                  51 samples/bpf/xdp_redirect_map_kern.c 	p[4] = dst[1];
p                  52 samples/bpf/xdp_redirect_map_kern.c 	p[5] = dst[2];
p                  89 samples/bpf/xdp_router_ipv4_kern.c 	unsigned short *p = data;
p                  91 samples/bpf/xdp_router_ipv4_kern.c 	__builtin_memcpy(p, dest, 6);
p                  92 samples/bpf/xdp_router_ipv4_kern.c 	__builtin_memcpy(p + 3, source, 6);
p                  60 samples/bpf/xdp_rxq_info_kern.c 	unsigned short *p = data;
p                  63 samples/bpf/xdp_rxq_info_kern.c 	dst[0] = p[0];
p                  64 samples/bpf/xdp_rxq_info_kern.c 	dst[1] = p[1];
p                  65 samples/bpf/xdp_rxq_info_kern.c 	dst[2] = p[2];
p                  66 samples/bpf/xdp_rxq_info_kern.c 	p[0] = p[3];
p                  67 samples/bpf/xdp_rxq_info_kern.c 	p[1] = p[4];
p                  68 samples/bpf/xdp_rxq_info_kern.c 	p[2] = p[5];
p                  69 samples/bpf/xdp_rxq_info_kern.c 	p[3] = dst[0];
p                  70 samples/bpf/xdp_rxq_info_kern.c 	p[4] = dst[1];
p                  71 samples/bpf/xdp_rxq_info_kern.c 	p[5] = dst[2];
p                 304 samples/bpf/xdp_rxq_info_user.c static double calc_period(struct record *r, struct record *p)
p                 309 samples/bpf/xdp_rxq_info_user.c 	period = r->timestamp - p->timestamp;
p                 316 samples/bpf/xdp_rxq_info_user.c static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_)
p                 322 samples/bpf/xdp_rxq_info_user.c 		packets = r->processed - p->processed;
p                 329 samples/bpf/xdp_rxq_info_user.c 			    struct datarec *p, double period_)
p                 335 samples/bpf/xdp_rxq_info_user.c 		packets = r->issue - p->issue;
p                 371 samples/bpf/xdp_rxq_info_user.c 			struct datarec *p = &prev->cpu[i];
p                 373 samples/bpf/xdp_rxq_info_user.c 			pps = calc_pps     (r, p, t);
p                 374 samples/bpf/xdp_rxq_info_user.c 			err = calc_errs_pps(r, p, t);
p                 405 samples/bpf/xdp_rxq_info_user.c 			struct datarec *p = &prev->cpu[i];
p                 407 samples/bpf/xdp_rxq_info_user.c 			pps = calc_pps     (r, p, t);
p                 408 samples/bpf/xdp_rxq_info_user.c 			err = calc_errs_pps(r, p, t);
p                  68 samples/configfs/configfs_sample.c 	char *p = (char *) page;
p                  70 samples/configfs/configfs_sample.c 	tmp = simple_strtoul(p, &p, 10);
p                  71 samples/configfs/configfs_sample.c 	if (!p || (*p && (*p != '\n')))
p                 152 samples/configfs/configfs_sample.c 	char *p = (char *) page;
p                 154 samples/configfs/configfs_sample.c 	tmp = simple_strtoul(p, &p, 10);
p                 155 samples/configfs/configfs_sample.c 	if (!p || (*p && (*p != '\n')))
p                  28 samples/kprobes/kprobe_example.c static int handler_pre(struct kprobe *p, struct pt_regs *regs)
p                  32 samples/kprobes/kprobe_example.c 		p->symbol_name, p->addr, regs->ip, regs->flags);
p                  36 samples/kprobes/kprobe_example.c 		p->symbol_name, p->addr, regs->nip, regs->msr);
p                  40 samples/kprobes/kprobe_example.c 		p->symbol_name, p->addr, regs->cp0_epc, regs->cp0_status);
p                  45 samples/kprobes/kprobe_example.c 		p->symbol_name, p->addr, (long)regs->pc, (long)regs->pstate);
p                  49 samples/kprobes/kprobe_example.c 		p->symbol_name, p->addr, regs->psw.addr, regs->flags);
p                  57 samples/kprobes/kprobe_example.c static void handler_post(struct kprobe *p, struct pt_regs *regs,
p                  62 samples/kprobes/kprobe_example.c 		p->symbol_name, p->addr, regs->flags);
p                  66 samples/kprobes/kprobe_example.c 		p->symbol_name, p->addr, regs->msr);
p                  70 samples/kprobes/kprobe_example.c 		p->symbol_name, p->addr, regs->cp0_status);
p                  74 samples/kprobes/kprobe_example.c 		p->symbol_name, p->addr, (long)regs->pstate);
p                  78 samples/kprobes/kprobe_example.c 		p->symbol_name, p->addr, regs->flags);
p                  87 samples/kprobes/kprobe_example.c static int handler_fault(struct kprobe *p, struct pt_regs *regs, int trapnr)
p                  89 samples/kprobes/kprobe_example.c 	pr_info("fault_handler: p->addr = 0x%p, trap #%d\n", p->addr, trapnr);
p                 951 samples/mic/mpssd/mpssd.c 	char buff[PATH_MAX], *line, *evv, *p;
p                 963 samples/mic/mpssd/mpssd.c 		p = strchr(line, '\n');
p                 964 samples/mic/mpssd/mpssd.c 		if (p)
p                 965 samples/mic/mpssd/mpssd.c 			*p = '\0';
p                1595 samples/mic/mpssd/mpssd.c change_virtblk_backend(int x, siginfo_t *siginfo, void *p)
p                 349 scripts/asn1_compiler.c 	char *line, *nl, *start, *p, *q;
p                 376 scripts/asn1_compiler.c 		p = line;
p                 378 scripts/asn1_compiler.c 		while ((p = memchr(p, '-', nl - p))) {
p                 379 scripts/asn1_compiler.c 			if (p[1] == '-') {
p                 381 scripts/asn1_compiler.c 				q = p + 2;
p                 386 scripts/asn1_compiler.c 						memmove(p, q, nl - q);
p                 391 scripts/asn1_compiler.c 				*p = '\0';
p                 392 scripts/asn1_compiler.c 				nl = p;
p                 395 scripts/asn1_compiler.c 				p++;
p                 399 scripts/asn1_compiler.c 		p = line;
p                 400 scripts/asn1_compiler.c 		while (p < nl) {
p                 402 scripts/asn1_compiler.c 			while (p < nl && isspace(*p))
p                 403 scripts/asn1_compiler.c 				*(p++) = 0;
p                 404 scripts/asn1_compiler.c 			if (p >= nl)
p                 408 scripts/asn1_compiler.c 			start = p;
p                 411 scripts/asn1_compiler.c 			if (isalpha(*p)) {
p                 417 scripts/asn1_compiler.c 				q = p + 1;
p                 420 scripts/asn1_compiler.c 				tokens[tix].size = q - p;
p                 421 scripts/asn1_compiler.c 				p = q;
p                 456 scripts/asn1_compiler.c 			if (isdigit(*p)) {
p                 458 scripts/asn1_compiler.c 				q = p + 1;
p                 461 scripts/asn1_compiler.c 				tokens[tix].size = q - p;
p                 462 scripts/asn1_compiler.c 				p = q;
p                 474 scripts/asn1_compiler.c 			if (nl - p >= 3) {
p                 475 scripts/asn1_compiler.c 				if (memcmp(p, "::=", 3) == 0) {
p                 476 scripts/asn1_compiler.c 					p += 3;
p                 484 scripts/asn1_compiler.c 			if (nl - p >= 2) {
p                 485 scripts/asn1_compiler.c 				if (memcmp(p, "({", 2) == 0) {
p                 486 scripts/asn1_compiler.c 					p += 2;
p                 492 scripts/asn1_compiler.c 				if (memcmp(p, "})", 2) == 0) {
p                 493 scripts/asn1_compiler.c 					p += 2;
p                 501 scripts/asn1_compiler.c 			if (nl - p >= 1) {
p                 503 scripts/asn1_compiler.c 				switch (*p) {
p                 505 scripts/asn1_compiler.c 					p += 1;
p                 510 scripts/asn1_compiler.c 					p += 1;
p                 515 scripts/asn1_compiler.c 					p += 1;
p                 520 scripts/asn1_compiler.c 					p += 1;
p                 525 scripts/asn1_compiler.c 					p += 1;
p                 535 scripts/asn1_compiler.c 				filename, lineno, *p);
p                 565 scripts/asn1_compiler.c 	char *buffer, *p;
p                 625 scripts/asn1_compiler.c 	p = strrchr(argv[1], '/');
p                 626 scripts/asn1_compiler.c 	p = p ? p + 1 : argv[1];
p                 627 scripts/asn1_compiler.c 	grammar_name = strdup(p);
p                 628 scripts/asn1_compiler.c 	if (!p) {
p                 632 scripts/asn1_compiler.c 	p = strchr(grammar_name, '.');
p                 633 scripts/asn1_compiler.c 	if (p)
p                 634 scripts/asn1_compiler.c 		*p = '\0';
p                 860 scripts/asn1_compiler.c 	char *p;
p                 905 scripts/asn1_compiler.c 		element->tag |= strtoul(cursor->content, &p, 10);
p                 907 scripts/asn1_compiler.c 		if (p - cursor->content != cursor->size)
p                 242 scripts/basic/fixdep.c static void parse_config_file(const char *p)
p                 245 scripts/basic/fixdep.c 	const char *start = p;
p                 247 scripts/basic/fixdep.c 	while ((p = strstr(p, "CONFIG_"))) {
p                 248 scripts/basic/fixdep.c 		if (p > start && (isalnum(p[-1]) || p[-1] == '_')) {
p                 249 scripts/basic/fixdep.c 			p += 7;
p                 252 scripts/basic/fixdep.c 		p += 7;
p                 253 scripts/basic/fixdep.c 		q = p;
p                 256 scripts/basic/fixdep.c 		if (str_ends_with(p, q - p, "_MODULE"))
p                 260 scripts/basic/fixdep.c 		if (r > p)
p                 261 scripts/basic/fixdep.c 			use_config(p, r - p);
p                 262 scripts/basic/fixdep.c 		p = q;
p                 313 scripts/basic/fixdep.c 	char *p;
p                 328 scripts/basic/fixdep.c 		p = m;
p                 329 scripts/basic/fixdep.c 		while (*p && *p != ' ' && *p != '\\' && *p != '\n')
p                 330 scripts/basic/fixdep.c 			p++;
p                 331 scripts/basic/fixdep.c 		is_last = (*p == '\0');
p                 333 scripts/basic/fixdep.c 		is_target = (*(p-1) == ':');
p                 338 scripts/basic/fixdep.c 		} else if (!is_ignored_file(m, p - m)) {
p                 339 scripts/basic/fixdep.c 			*p = '\0';
p                 379 scripts/basic/fixdep.c 		m = p + 1;
p                  33 scripts/conmakehash.c   char *p = *p0;
p                  35 scripts/conmakehash.c   while (*p == ' ' || *p == '\t')
p                  36 scripts/conmakehash.c     p++;
p                  37 scripts/conmakehash.c   if (*p != 'U' || p[1] != '+' ||
p                  38 scripts/conmakehash.c       !isxdigit(p[2]) || !isxdigit(p[3]) || !isxdigit(p[4]) ||
p                  39 scripts/conmakehash.c       !isxdigit(p[5]) || isxdigit(p[6]))
p                  41 scripts/conmakehash.c   *p0 = p+6;
p                  42 scripts/conmakehash.c   return strtol(p+2,0,16);
p                  84 scripts/conmakehash.c   char *p, *p1;
p                 116 scripts/conmakehash.c       if ( (p = strchr(buffer, '\n')) != NULL )
p                 117 scripts/conmakehash.c 	*p = '\0';
p                 121 scripts/conmakehash.c       p = buffer;
p                 134 scripts/conmakehash.c       while (*p == ' ' || *p == '\t')
p                 135 scripts/conmakehash.c 	p++;
p                 136 scripts/conmakehash.c       if (!*p || *p == '#')
p                 139 scripts/conmakehash.c       fp0 = strtol(p, &p1, 0);
p                 140 scripts/conmakehash.c       if (p1 == p)
p                 145 scripts/conmakehash.c       p = p1;
p                 147 scripts/conmakehash.c       while (*p == ' ' || *p == '\t')
p                 148 scripts/conmakehash.c 	p++;
p                 149 scripts/conmakehash.c       if (*p == '-')
p                 151 scripts/conmakehash.c 	  p++;
p                 152 scripts/conmakehash.c 	  fp1 = strtol(p, &p1, 0);
p                 153 scripts/conmakehash.c 	  if (p1 == p)
p                 158 scripts/conmakehash.c 	  p = p1;
p                 182 scripts/conmakehash.c 	  while (*p == ' ' || *p == '\t')
p                 183 scripts/conmakehash.c 	    p++;
p                 184 scripts/conmakehash.c 	  if (!strncmp(p, "idem", 4))
p                 188 scripts/conmakehash.c 	      p += 4;
p                 192 scripts/conmakehash.c 	      un0 = getunicode(&p);
p                 193 scripts/conmakehash.c 	      while (*p == ' ' || *p == '\t')
p                 194 scripts/conmakehash.c 		p++;
p                 195 scripts/conmakehash.c 	      if (*p != '-')
p                 202 scripts/conmakehash.c 	      p++;
p                 203 scripts/conmakehash.c 	      un1 = getunicode(&p);
p                 226 scripts/conmakehash.c 	    while ( (un0 = getunicode(&p)) >= 0 )
p                 229 scripts/conmakehash.c       while (*p == ' ' || *p == '\t')
p                 230 scripts/conmakehash.c 	p++;
p                 231 scripts/conmakehash.c       if (*p && *p != '#')
p                 232 scripts/conmakehash.c 	fprintf(stderr, "%s: trailing junk (%s) ignored\n", tblname, p);
p                 107 scripts/dtc/data.c struct data data_append_data(struct data d, const void *p, int len)
p                 110 scripts/dtc/data.c 	memcpy(d.val + d.len, p, len);
p                 116 scripts/dtc/data.c 				  const void *p, int len)
p                 120 scripts/dtc/data.c 	memcpy(d.val + m->offset, p, len);
p                 107 scripts/dtc/dtc.h struct data data_append_data(struct data d, const void *p, int len);
p                 109 scripts/dtc/dtc.h 				  const void *p, int len);
p                 179 scripts/dtc/dtc.h #define for_each_property_withdel(n, p) \
p                 180 scripts/dtc/dtc.h 	for ((p) = (n)->proplist; (p); (p) = (p)->next)
p                 182 scripts/dtc/dtc.h #define for_each_property(n, p) \
p                 183 scripts/dtc/dtc.h 	for_each_property_withdel(n, p) \
p                 184 scripts/dtc/dtc.h 		if (!(p)->deleted)
p                  18 scripts/dtc/fdtdump.c #define PALIGN(p, a)	((void *)(ALIGN((unsigned long)(p), (a))))
p                  19 scripts/dtc/fdtdump.c #define GET_CELL(p)	(p += 4, *((const uint32_t *)(p-4)))
p                  24 scripts/dtc/fdtdump.c 	const char *p = data;
p                  35 scripts/dtc/fdtdump.c 			printf("0x%08x%s", fdt32_to_cpu(GET_CELL(p)),
p                  41 scripts/dtc/fdtdump.c 			printf("%02x%s", *p++, i < len - 1 ? " " : "");
p                  59 scripts/dtc/fdtdump.c 	const char *p, *s, *t;
p                  98 scripts/dtc/fdtdump.c 	p = p_struct;
p                  99 scripts/dtc/fdtdump.c 	while ((tag = fdt32_to_cpu(GET_CELL(p))) != FDT_END) {
p                 104 scripts/dtc/fdtdump.c 			s = p;
p                 105 scripts/dtc/fdtdump.c 			p = PALIGN(p + strlen(s) + 1, 4);
p                 132 scripts/dtc/fdtdump.c 		sz = fdt32_to_cpu(GET_CELL(p));
p                 133 scripts/dtc/fdtdump.c 		s = p_strings + fdt32_to_cpu(GET_CELL(p));
p                 135 scripts/dtc/fdtdump.c 			p = PALIGN(p, 8);
p                 136 scripts/dtc/fdtdump.c 		t = p;
p                 138 scripts/dtc/fdtdump.c 		p = PALIGN(p + sz, 4);
p                  56 scripts/dtc/fdtget.c 	const uint8_t *p = (const uint8_t *)data;
p                  91 scripts/dtc/fdtget.c 	for (i = 0; i < len; i += size, p += size) {
p                  94 scripts/dtc/fdtget.c 		value = size == 4 ? fdt32_to_cpu(*(const uint32_t *)p) :
p                  95 scripts/dtc/fdtget.c 			size == 2 ? (*p << 8) | p[1] : *p;
p                 195 scripts/dtc/fdtput.c 	char *p;
p                 197 scripts/dtc/fdtput.c 	p = strrchr(node_name, '/');
p                 198 scripts/dtc/fdtput.c 	if (!p) {
p                 202 scripts/dtc/fdtput.c 	*p = '\0';
p                 204 scripts/dtc/fdtput.c 	if (p > node_name) {
p                 212 scripts/dtc/fdtput.c 	node = fdt_add_subnode(blob, node, p + 1);
p                 214 scripts/dtc/fdtput.c 		report_error(p + 1, -1, node);
p                 434 scripts/dtc/flattree.c 	const char *p;
p                 437 scripts/dtc/flattree.c 	p = strbuf.val;
p                 439 scripts/dtc/flattree.c 	while (p < (strbuf.val + strbuf.len)) {
p                 440 scripts/dtc/flattree.c 		len = strlen(p);
p                 441 scripts/dtc/flattree.c 		fprintf(f, "\t.string \"%s\"\n", p);
p                 442 scripts/dtc/flattree.c 		p += len+1;
p                 576 scripts/dtc/flattree.c static void flat_read_chunk(struct inbuf *inb, void *p, int len)
p                 581 scripts/dtc/flattree.c 	memcpy(p, inb->ptr, len);
p                 609 scripts/dtc/flattree.c 	const char *p = inb->ptr;
p                 613 scripts/dtc/flattree.c 		if (p >= inb->limit)
p                 616 scripts/dtc/flattree.c 	} while ((*p++) != '\0');
p                 646 scripts/dtc/flattree.c 	const char *p;
p                 648 scripts/dtc/flattree.c 	p = inb->base + offset;
p                 650 scripts/dtc/flattree.c 		if (p >= inb->limit || p < inb->base)
p                 654 scripts/dtc/flattree.c 		if (*p == '\0')
p                 657 scripts/dtc/flattree.c 		p++;
p                 804 scripts/dtc/flattree.c 	char *p;
p                 851 scripts/dtc/flattree.c 	p = blob + sizeof(magic)  + sizeof(totalsize);
p                 858 scripts/dtc/flattree.c 		rc = fread(p, 1, sizeleft, f);
p                 864 scripts/dtc/flattree.c 		p += rc;
p                  15 scripts/dtc/include-prefixes/dt-bindings/pinctrl/r7s72100-pinctrl.h #define RZA1_PINMUX(b, p, f)	((b) * RZA1_PINS_PER_PORT + (p) | (f << 16))
p                  40 scripts/dtc/include-prefixes/dt-bindings/pinctrl/r7s9210-pinctrl.h #define RZA2_PINMUX(b, p, f)	((b) * RZA2_PINS_PER_PORT + (p) | (f << 16))
p                 131 scripts/dtc/libfdt/fdt.c 	const char *p;
p                 145 scripts/dtc/libfdt/fdt.c 			p = fdt_offset_ptr(fdt, offset++, 1);
p                 146 scripts/dtc/libfdt/fdt.c 		} while (p && (*p != '\0'));
p                 147 scripts/dtc/libfdt/fdt.c 		if (!p)
p                 269 scripts/dtc/libfdt/fdt.c 	const char *p;
p                 271 scripts/dtc/libfdt/fdt.c 	for (p = strtab; p <= last; p++)
p                 272 scripts/dtc/libfdt/fdt.c 		if (memcmp(p, s, len) == 0)
p                 273 scripts/dtc/libfdt/fdt.c 			return p;
p                 705 scripts/dtc/libfdt/fdt_overlay.c 	void *p;
p                 787 scripts/dtc/libfdt/fdt_overlay.c 				len + (len > 1) + rel_path_len + 1, &p);
p                 799 scripts/dtc/libfdt/fdt_overlay.c 		buf = p;
p                  17 scripts/dtc/libfdt/fdt_ro.c 	const char *p = fdt_get_name(fdt, offset, &olen);
p                  19 scripts/dtc/libfdt/fdt_ro.c 	if (!p || olen < len)
p                  23 scripts/dtc/libfdt/fdt_ro.c 	if (memcmp(p, s, len) != 0)
p                  26 scripts/dtc/libfdt/fdt_ro.c 	if (p[len] == '\0')
p                  28 scripts/dtc/libfdt/fdt_ro.c 	else if (!memchr(s, '@', len) && (p[len] == '@'))
p                  97 scripts/dtc/libfdt/fdt_ro.c 	const char *p = fdt_get_string(fdt, stroffset, &slen);
p                  99 scripts/dtc/libfdt/fdt_ro.c 	return p && (slen == len) && (memcmp(p, s, len) == 0);
p                 238 scripts/dtc/libfdt/fdt_ro.c 	const char *p = path;
p                 245 scripts/dtc/libfdt/fdt_ro.c 		const char *q = memchr(path, '/', end - p);
p                 250 scripts/dtc/libfdt/fdt_ro.c 		p = fdt_get_alias_namelen(fdt, p, q - p);
p                 251 scripts/dtc/libfdt/fdt_ro.c 		if (!p)
p                 253 scripts/dtc/libfdt/fdt_ro.c 		offset = fdt_path_offset(fdt, p);
p                 255 scripts/dtc/libfdt/fdt_ro.c 		p = q;
p                 258 scripts/dtc/libfdt/fdt_ro.c 	while (p < end) {
p                 261 scripts/dtc/libfdt/fdt_ro.c 		while (*p == '/') {
p                 262 scripts/dtc/libfdt/fdt_ro.c 			p++;
p                 263 scripts/dtc/libfdt/fdt_ro.c 			if (p == end)
p                 266 scripts/dtc/libfdt/fdt_ro.c 		q = memchr(p, '/', end - p);
p                 270 scripts/dtc/libfdt/fdt_ro.c 		offset = fdt_subnode_offset_namelen(fdt, offset, p, q-p);
p                 274 scripts/dtc/libfdt/fdt_ro.c 		p = q;
p                 522 scripts/dtc/libfdt/fdt_ro.c 	int pdepth = 0, p = 0;
p                 536 scripts/dtc/libfdt/fdt_ro.c 				p--;
p                 537 scripts/dtc/libfdt/fdt_ro.c 			} while (buf[p-1] != '/');
p                 545 scripts/dtc/libfdt/fdt_ro.c 			if ((p + namelen + 1) <= buflen) {
p                 546 scripts/dtc/libfdt/fdt_ro.c 				memcpy(buf + p, name, namelen);
p                 547 scripts/dtc/libfdt/fdt_ro.c 				p += namelen;
p                 548 scripts/dtc/libfdt/fdt_ro.c 				buf[p++] = '/';
p                 557 scripts/dtc/libfdt/fdt_ro.c 			if (p > 1) /* special case so that root path is "/", not "" */
p                 558 scripts/dtc/libfdt/fdt_ro.c 				p--;
p                 559 scripts/dtc/libfdt/fdt_ro.c 			buf[p] = '\0';
p                 684 scripts/dtc/libfdt/fdt_ro.c 	const char *p;
p                 689 scripts/dtc/libfdt/fdt_ro.c 		p = memchr(strlist, '\0', listlen);
p                 690 scripts/dtc/libfdt/fdt_ro.c 		if (!p)
p                 692 scripts/dtc/libfdt/fdt_ro.c 		listlen -= (p-strlist) + 1;
p                 693 scripts/dtc/libfdt/fdt_ro.c 		strlist = p + 1;
p                  54 scripts/dtc/libfdt/fdt_rw.c 	char *p = splicepoint;
p                  57 scripts/dtc/libfdt/fdt_rw.c 	if (((p + oldlen) < p) || ((p + oldlen) > end))
p                  59 scripts/dtc/libfdt/fdt_rw.c 	if ((p < (char *)fdt) || ((end - oldlen + newlen) < (char *)fdt))
p                  63 scripts/dtc/libfdt/fdt_rw.c 	memmove(p + newlen, p + oldlen, end - p - oldlen);
p                  67 scripts/dtc/libfdt/fdt_rw.c static int fdt_splice_mem_rsv_(void *fdt, struct fdt_reserve_entry *p,
p                  70 scripts/dtc/libfdt/fdt_rw.c 	int delta = (newn - oldn) * sizeof(*p);
p                  72 scripts/dtc/libfdt/fdt_rw.c 	err = fdt_splice_(fdt, p, oldn * sizeof(*p), newn * sizeof(*p));
p                  80 scripts/dtc/libfdt/fdt_rw.c static int fdt_splice_struct_(void *fdt, void *p,
p                  86 scripts/dtc/libfdt/fdt_rw.c 	if ((err = fdt_splice_(fdt, p, oldlen, newlen)))
p                 104 scripts/dtc/libfdt/fdt_rw.c 	void *p = (char *)fdt
p                 108 scripts/dtc/libfdt/fdt_rw.c 	if ((err = fdt_splice_(fdt, p, 0, newlen)))
p                 118 scripts/dtc/libfdt/fdt_rw.c 	const char *p;
p                 125 scripts/dtc/libfdt/fdt_rw.c 	p = fdt_find_string_(strtab, fdt_size_dt_strings(fdt), s);
p                 126 scripts/dtc/libfdt/fdt_rw.c 	if (p)
p                 128 scripts/dtc/libfdt/fdt_rw.c 		return (p - strtab);
p                 272 scripts/dtc/libfdt/fdt_sw.c 	const char *p;
p                 276 scripts/dtc/libfdt/fdt_sw.c 	p = fdt_find_string_(strtab - strtabsize, strtabsize, s);
p                 277 scripts/dtc/libfdt/fdt_sw.c 	if (p)
p                 278 scripts/dtc/libfdt/fdt_sw.c 		return p - strtab;
p                 331 scripts/dtc/libfdt/fdt_sw.c 	char *p = (char *)fdt;
p                 348 scripts/dtc/libfdt/fdt_sw.c 	memmove(p + newstroffset, p + oldstroffset, fdt_size_dt_strings(fdt));
p                  53 scripts/dtc/libfdt/fdt_wip.c 	fdt32_t *p;
p                  55 scripts/dtc/libfdt/fdt_wip.c 	for (p = start; (char *)p < ((char *)start + len); p++)
p                  56 scripts/dtc/libfdt/fdt_wip.c 		*p = cpu_to_fdt32(FDT_NOP);
p                 127 scripts/dtc/libfdt/libfdt.h static inline uint32_t fdt32_ld(const fdt32_t *p)
p                 129 scripts/dtc/libfdt/libfdt.h 	const uint8_t *bp = (const uint8_t *)p;
p                 147 scripts/dtc/libfdt/libfdt.h static inline uint64_t fdt64_ld(const fdt64_t *p)
p                 149 scripts/dtc/libfdt/libfdt.h 	const uint8_t *bp = (const uint8_t *)p;
p                  87 scripts/dtc/libfdt/libfdt_env.h     const char *p = memchr(string, 0, max_count);
p                  88 scripts/dtc/libfdt/libfdt_env.h     return p ? p - string : max_count;
p                  75 scripts/dtc/livetree.c 	struct property *p = first;
p                  79 scripts/dtc/livetree.c 	while (p) {
p                  80 scripts/dtc/livetree.c 		next = p->next;
p                  81 scripts/dtc/livetree.c 		p->next = head;
p                  82 scripts/dtc/livetree.c 		head = p;
p                  83 scripts/dtc/livetree.c 		p = next;
p                 232 scripts/dtc/livetree.c 	struct property *p;
p                 240 scripts/dtc/livetree.c 		p = build_property("target-path", d, NULL);
p                 245 scripts/dtc/livetree.c 		p = build_property("target", d, NULL);
p                 251 scripts/dtc/livetree.c 	node = build_node(p, new_node, NULL);
p                 268 scripts/dtc/livetree.c 	struct property **p;
p                 272 scripts/dtc/livetree.c 	p = &node->proplist;
p                 273 scripts/dtc/livetree.c 	while (*p)
p                 274 scripts/dtc/livetree.c 		p = &((*p)->next);
p                 276 scripts/dtc/livetree.c 	*p = prop;
p                 300 scripts/dtc/livetree.c 	struct node **p;
p                 305 scripts/dtc/livetree.c 	p = &parent->children;
p                 306 scripts/dtc/livetree.c 	while (*p)
p                 307 scripts/dtc/livetree.c 		p = &((*p)->next_sibling);
p                 309 scripts/dtc/livetree.c 	*p = child;
p                 343 scripts/dtc/livetree.c 	struct property *p;
p                 345 scripts/dtc/livetree.c 	p = get_property(node, name);
p                 346 scripts/dtc/livetree.c 	if (p) {
p                 347 scripts/dtc/livetree.c 		d = data_add_marker(p->val, type, name);
p                 349 scripts/dtc/livetree.c 		p->val = d;
p                 353 scripts/dtc/livetree.c 		p = build_property(name, d, NULL);
p                 354 scripts/dtc/livetree.c 		add_property(node, p);
p                 477 scripts/dtc/livetree.c 	struct property *p;
p                 482 scripts/dtc/livetree.c 	for_each_property(tree, p) {
p                 483 scripts/dtc/livetree.c 		*prop = p;
p                 484 scripts/dtc/livetree.c 		m = p->val.markers;
p                 514 scripts/dtc/livetree.c 	const char *p;
p                 526 scripts/dtc/livetree.c 	p = strchr(path, '/');
p                 529 scripts/dtc/livetree.c 		if (p && (strlen(child->name) == p-path) &&
p                 530 scripts/dtc/livetree.c 		    strprefixeq(path, p - path, child->name))
p                 531 scripts/dtc/livetree.c 			return get_node_by_path(child, p+1);
p                 532 scripts/dtc/livetree.c 		else if (!p && streq(path, child->name))
p                 830 scripts/dtc/livetree.c 	struct property *p;
p                 840 scripts/dtc/livetree.c 			p = get_property(an, l->label);
p                 841 scripts/dtc/livetree.c 			if (p) {
p                 849 scripts/dtc/livetree.c 			p = build_property(l->label,
p                 853 scripts/dtc/livetree.c 			add_property(an, p);
p                 276 scripts/dtc/srcpos.c 	struct srcpos *p;
p                 281 scripts/dtc/srcpos.c 	for (p = pos; p->next != NULL; p = p->next);
p                 282 scripts/dtc/srcpos.c 	p->next = newtail;
p                 102 scripts/dtc/treesource.c static void write_propval_int(FILE *f, const char *p, size_t len, size_t width)
p                 104 scripts/dtc/treesource.c 	const char *end = p + len;
p                 107 scripts/dtc/treesource.c 	for (; p < end; p += width) {
p                 110 scripts/dtc/treesource.c 			fprintf(f, "%02"PRIx8, *(const uint8_t*)p);
p                 113 scripts/dtc/treesource.c 			fprintf(f, "0x%02"PRIx16, fdt16_to_cpu(*(const fdt16_t*)p));
p                 116 scripts/dtc/treesource.c 			fprintf(f, "0x%02"PRIx32, fdt32_to_cpu(*(const fdt32_t*)p));
p                 119 scripts/dtc/treesource.c 			fprintf(f, "0x%02"PRIx64, fdt64_to_cpu(*(const fdt64_t*)p));
p                 122 scripts/dtc/treesource.c 		if (p + width < end)
p                 166 scripts/dtc/treesource.c 	const char *p = prop->val.val;
p                 173 scripts/dtc/treesource.c 		if (! isstring(p[i]))
p                 175 scripts/dtc/treesource.c 		if (p[i] == '\0')
p                 186 scripts/dtc/treesource.c 	if ((p[len-1] == '\0') && (nnotstring == 0) && (nnul < (len-nnul))
p                 231 scripts/dtc/treesource.c 		const char *p = &prop->val.val[m->offset];
p                 248 scripts/dtc/treesource.c 			write_propval_int(f, p, chunk_len, 2);
p                 251 scripts/dtc/treesource.c 			write_propval_int(f, p, chunk_len, 4);
p                 254 scripts/dtc/treesource.c 			write_propval_int(f, p, chunk_len, 8);
p                 257 scripts/dtc/treesource.c 			write_propval_string(f, p, chunk_len);
p                 260 scripts/dtc/treesource.c 			write_propval_int(f, p, chunk_len, 1);
p                  38 scripts/dtc/util.c 	char *p;
p                  41 scripts/dtc/util.c 	p = *strp;
p                  42 scripts/dtc/util.c 	if (p)
p                  43 scripts/dtc/util.c 		size = strlen(p);
p                  49 scripts/dtc/util.c 	p = xrealloc(p, size + n);
p                  51 scripts/dtc/util.c 	n = vsnprintf(p + size, n, fmt, ap);
p                  53 scripts/dtc/util.c 	*strp = p;
p                  54 scripts/dtc/util.c 	return strlen(p);
p                 400 scripts/dtc/util.c 		const unsigned char *p = (const unsigned char *)data;
p                 403 scripts/dtc/util.c 			printf("%02x%s", *p++, i < len - 1 ? " " : "");
p                  48 scripts/dtc/util.h static inline void *xrealloc(void *p, size_t len)
p                  50 scripts/dtc/util.h 	void *new = realloc(p, len);
p                  27 scripts/genksyms/parse.y remove_node(struct string_list **p)
p                  29 scripts/genksyms/parse.y   struct string_list *node = *p;
p                  30 scripts/genksyms/parse.y   *p = node->next;
p                  97 scripts/insert-sys-cert.c 	char *w, *p, *n;
p                 107 scripts/insert-sys-cert.c 		p = strchr(l, '\n');
p                 108 scripts/insert-sys-cert.c 		if (!p) {
p                  58 scripts/kconfig/conf.c 	char *p = str;
p                  61 scripts/kconfig/conf.c 	while ((isspace(*p)))
p                  62 scripts/kconfig/conf.c 		p++;
p                  63 scripts/kconfig/conf.c 	l = strlen(p);
p                  64 scripts/kconfig/conf.c 	if (p != str)
p                  65 scripts/kconfig/conf.c 		memmove(str, p, l + 1);
p                  68 scripts/kconfig/conf.c 	p = str + l - 1;
p                  69 scripts/kconfig/conf.c 	while ((isspace(*p)))
p                  70 scripts/kconfig/conf.c 		*p-- = 0;
p                  94 scripts/kconfig/confdata.c 	char *p;
p                 100 scripts/kconfig/confdata.c 	p = strrchr(tmp, '/');
p                 101 scripts/kconfig/confdata.c 	if (!p)
p                 103 scripts/kconfig/confdata.c 	*(p + 1) = 0;
p                 106 scripts/kconfig/confdata.c 	p = tmp;
p                 107 scripts/kconfig/confdata.c 	while (*p == '/')
p                 108 scripts/kconfig/confdata.c 		p++;
p                 110 scripts/kconfig/confdata.c 	while ((p = strchr(p, '/'))) {
p                 111 scripts/kconfig/confdata.c 		*p = 0;
p                 117 scripts/kconfig/confdata.c 		*p = '/';
p                 118 scripts/kconfig/confdata.c 		while (*p == '/')
p                 119 scripts/kconfig/confdata.c 			p++;
p                 234 scripts/kconfig/confdata.c static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p)
p                 240 scripts/kconfig/confdata.c 		if (p[0] == 'm') {
p                 247 scripts/kconfig/confdata.c 		if (p[0] == 'y') {
p                 252 scripts/kconfig/confdata.c 		if (p[0] == 'n') {
p                 259 scripts/kconfig/confdata.c 				     p, sym->name);
p                 262 scripts/kconfig/confdata.c 		if (*p++ != '"')
p                 264 scripts/kconfig/confdata.c 		for (p2 = p; (p2 = strpbrk(p2, "\"\\")); p2++) {
p                 279 scripts/kconfig/confdata.c 		if (sym_string_valid(sym, p)) {
p                 280 scripts/kconfig/confdata.c 			sym->def[def].val = xstrdup(p);
p                 285 scripts/kconfig/confdata.c 					     p, sym->name);
p                 355 scripts/kconfig/confdata.c 	char *p, *p2;
p                 419 scripts/kconfig/confdata.c 			p = strchr(line + 2 + strlen(CONFIG_), ' ');
p                 420 scripts/kconfig/confdata.c 			if (!p)
p                 422 scripts/kconfig/confdata.c 			*p++ = 0;
p                 423 scripts/kconfig/confdata.c 			if (strncmp(p, "is not set", 10))
p                 449 scripts/kconfig/confdata.c 			p = strchr(line + strlen(CONFIG_), '=');
p                 450 scripts/kconfig/confdata.c 			if (!p)
p                 452 scripts/kconfig/confdata.c 			*p++ = 0;
p                 453 scripts/kconfig/confdata.c 			p2 = strchr(p, '\n');
p                 478 scripts/kconfig/confdata.c 			if (conf_set_sym_val(sym, def, def_flags, p))
p                 616 scripts/kconfig/confdata.c 	const char *p = value;
p                 620 scripts/kconfig/confdata.c 		l = strcspn(p, "\n");
p                 624 scripts/kconfig/confdata.c 			xfwrite(p, l, 1, fp);
p                 625 scripts/kconfig/confdata.c 			p += l;
p                 628 scripts/kconfig/confdata.c 		if (*p++ == '\0')
p                 688 scripts/kconfig/confdata.c 	const char *p = value;
p                 693 scripts/kconfig/confdata.c 		l = strcspn(p, "\n");
p                 697 scripts/kconfig/confdata.c 			xfwrite(p, l, 1, fp);
p                 698 scripts/kconfig/confdata.c 			p += l;
p                 701 scripts/kconfig/confdata.c 		if (*p++ == '\0')
p                1250 scripts/kconfig/confdata.c 		int n, p[3];
p                1257 scripts/kconfig/confdata.c 				p[n++] = tmp;
p                1270 scripts/kconfig/confdata.c 			pby = p[0]; ptm = pby/2; pty = pby-ptm;
p                1273 scripts/kconfig/confdata.c 			pty = p[0]; ptm = p[1]; pby = pty + ptm;
p                1276 scripts/kconfig/confdata.c 			pby = p[0]; pty = p[1]; ptm = p[2];
p                  87 scripts/kconfig/lkc.h void *xrealloc(void *p, size_t size);
p                  32 scripts/kconfig/lxdialog/dialog.h #define wbkgdset(w,p)		/*nothing */
p                 145 scripts/kconfig/lxdialog/dialog.h void item_set_data(void *p);
p                 583 scripts/kconfig/lxdialog/util.c 	struct dialog_list *p, *next;
p                 585 scripts/kconfig/lxdialog/util.c 	for (p = item_head; p; p = next) {
p                 586 scripts/kconfig/lxdialog/util.c 		next = p->next;
p                 587 scripts/kconfig/lxdialog/util.c 		free(p);
p                 596 scripts/kconfig/lxdialog/util.c 	struct dialog_list *p = malloc(sizeof(*p));
p                 599 scripts/kconfig/lxdialog/util.c 		item_cur->next = p;
p                 601 scripts/kconfig/lxdialog/util.c 		item_head = p;
p                 602 scripts/kconfig/lxdialog/util.c 	item_cur = p;
p                 603 scripts/kconfig/lxdialog/util.c 	memset(p, 0, sizeof(*p));
p                 659 scripts/kconfig/lxdialog/util.c 	struct dialog_list *p;
p                 661 scripts/kconfig/lxdialog/util.c 	for (p = item_head; p; p = p->next)
p                 677 scripts/kconfig/lxdialog/util.c 	struct dialog_list *p;
p                 679 scripts/kconfig/lxdialog/util.c 	for (p = item_head; p; p = p->next) {
p                 680 scripts/kconfig/lxdialog/util.c 		if (p == item_cur)
p                 577 scripts/kconfig/parser.y 	const char *p;
p                 581 scripts/kconfig/parser.y 	while ((p = strchr(str, '"'))) {
p                 582 scripts/kconfig/parser.y 		len = p - str;
p                 586 scripts/kconfig/parser.y 		str = p + 1;
p                 143 scripts/kconfig/preprocess.c 	FILE *p;
p                 151 scripts/kconfig/preprocess.c 	p = popen(cmd, "r");
p                 152 scripts/kconfig/preprocess.c 	if (!p) {
p                 157 scripts/kconfig/preprocess.c 	nread = fread(buf, 1, sizeof(buf), p);
p                 173 scripts/kconfig/preprocess.c 	if (pclose(p) == -1) {
p                 348 scripts/kconfig/preprocess.c 	char *tmp, *name, *res, *endptr, *prev, *p;
p                 368 scripts/kconfig/preprocess.c 	prev = p = tmp;
p                 384 scripts/kconfig/preprocess.c 	while (*p) {
p                 385 scripts/kconfig/preprocess.c 		if (nest == 0 && *p == ',') {
p                 386 scripts/kconfig/preprocess.c 			*p = 0;
p                 390 scripts/kconfig/preprocess.c 			prev = p + 1;
p                 391 scripts/kconfig/preprocess.c 		} else if (*p == '(') {
p                 393 scripts/kconfig/preprocess.c 		} else if (*p == ')') {
p                 397 scripts/kconfig/preprocess.c 		p++;
p                 455 scripts/kconfig/preprocess.c 	const char *p = *str;
p                 464 scripts/kconfig/preprocess.c 	if (*p != '(') {
p                 465 scripts/kconfig/preprocess.c 		*str = p;
p                 469 scripts/kconfig/preprocess.c 	p++;
p                 470 scripts/kconfig/preprocess.c 	q = p;
p                 482 scripts/kconfig/preprocess.c 		pperror("unterminated reference to '%s': missing ')'", p);
p                 487 scripts/kconfig/preprocess.c 	return eval_clause(p, q - p, argc, argv);
p                 498 scripts/kconfig/preprocess.c 	const char *in, *p;
p                 506 scripts/kconfig/preprocess.c 	p = in = *str;
p                 509 scripts/kconfig/preprocess.c 		if (*p == '$') {
p                 510 scripts/kconfig/preprocess.c 			in_len = p - in;
p                 511 scripts/kconfig/preprocess.c 			p++;
p                 512 scripts/kconfig/preprocess.c 			expansion = expand_dollar_with_args(&p, argc, argv);
p                 518 scripts/kconfig/preprocess.c 			in = p;
p                 522 scripts/kconfig/preprocess.c 		if (is_end(*p))
p                 525 scripts/kconfig/preprocess.c 		p++;
p                 528 scripts/kconfig/preprocess.c 	in_len = p - in;
p                 534 scripts/kconfig/preprocess.c 	*str = p;
p                 303 scripts/kconfig/qconf.cc ConfigList::ConfigList(ConfigView* p, const char *name)
p                 304 scripts/kconfig/qconf.cc 	: Parent(p),
p                 770 scripts/kconfig/qconf.cc 	QPoint p = e->pos();
p                 771 scripts/kconfig/qconf.cc 	ConfigItem* item = (ConfigItem*)itemAt(p);
p                 781 scripts/kconfig/qconf.cc 	x = header()->offset() + p.x();
p                 787 scripts/kconfig/qconf.cc 			int off = header()->sectionPosition(0) + visualRect(indexAt(p)).x() + 4; // 4 is Hardcoded image offset. There might be a way to do it properly.
p                 831 scripts/kconfig/qconf.cc 	QPoint p = e->pos(); // TODO: Check if this works(was contentsToViewport).
p                 832 scripts/kconfig/qconf.cc 	ConfigItem* item = (ConfigItem*)itemAt(p);
p                1313 scripts/kconfig/qconf.cc 	struct symbol **p;
p                1324 scripts/kconfig/qconf.cc 	for (p = result; *p; p++) {
p                1325 scripts/kconfig/qconf.cc 		for_all_prompts((*p), prop)
p                  46 scripts/kconfig/qconf.h 	ConfigList(ConfigView* p, const char *name = 0);
p                 872 scripts/kconfig/symbol.c 	const char *p;
p                 879 scripts/kconfig/symbol.c 	p = in;
p                 881 scripts/kconfig/symbol.c 		l = strcspn(p, "\"\\");
p                 882 scripts/kconfig/symbol.c 		p += l;
p                 884 scripts/kconfig/symbol.c 		if (p[0] == '\0')
p                 888 scripts/kconfig/symbol.c 		p++;
p                 896 scripts/kconfig/symbol.c 	p = in;
p                 898 scripts/kconfig/symbol.c 		l = strcspn(p, "\"\\");
p                 899 scripts/kconfig/symbol.c 		strncat(res, p, l);
p                 900 scripts/kconfig/symbol.c 		p += l;
p                 902 scripts/kconfig/symbol.c 		if (p[0] == '\0')
p                 906 scripts/kconfig/symbol.c 		strncat(res, p++, 1);
p                  84 scripts/kconfig/util.c 	void *p = malloc(size);
p                  85 scripts/kconfig/util.c 	if (p)
p                  86 scripts/kconfig/util.c 		return p;
p                  93 scripts/kconfig/util.c 	void *p = calloc(nmemb, size);
p                  94 scripts/kconfig/util.c 	if (p)
p                  95 scripts/kconfig/util.c 		return p;
p                 100 scripts/kconfig/util.c void *xrealloc(void *p, size_t size)
p                 102 scripts/kconfig/util.c 	p = realloc(p, size);
p                 103 scripts/kconfig/util.c 	if (p)
p                 104 scripts/kconfig/util.c 		return p;
p                 111 scripts/kconfig/util.c 	char *p;
p                 113 scripts/kconfig/util.c 	p = strdup(s);
p                 114 scripts/kconfig/util.c 	if (p)
p                 115 scripts/kconfig/util.c 		return p;
p                 122 scripts/kconfig/util.c 	char *p;
p                 124 scripts/kconfig/util.c 	p = strndup(s, n);
p                 125 scripts/kconfig/util.c 	if (p)
p                 126 scripts/kconfig/util.c 		return p;
p                1067 scripts/mod/file2alias.c 	char *p = *outp;
p                1072 scripts/mod/file2alias.c 		*p++ = '?';
p                1076 scripts/mod/file2alias.c 		p += sprintf(p, "%X",  nibble);
p                1086 scripts/mod/file2alias.c 		*p++ = '[';
p                1089 scripts/mod/file2alias.c 				p += sprintf(p, "%X", i);
p                1090 scripts/mod/file2alias.c 		*p++ = ']';
p                1094 scripts/mod/file2alias.c 	*p = '\0';
p                1097 scripts/mod/file2alias.c 	*outp = p;
p                1110 scripts/mod/file2alias.c 	char *p = alias;
p                1119 scripts/mod/file2alias.c 	p += sprintf(alias, "amba:d");
p                1121 scripts/mod/file2alias.c 		append_nibble_mask(&p,
p                1468 scripts/mod/file2alias.c 			const struct devtable *p = &devtable[i];
p                1470 scripts/mod/file2alias.c 			if (sym_is(name, namelen, p->device_id)) {
p                1471 scripts/mod/file2alias.c 				do_table(symval, sym->st_size, p->id_size,
p                1472 scripts/mod/file2alias.c 					 p->device_id, p->do_entry, mod);
p                1484 scripts/mod/file2alias.c 	buf_write(buf, mod->dev_table_buf.p, mod->dev_table_buf.pos);
p                1485 scripts/mod/file2alias.c 	free(mod->dev_table_buf.p);
p                 138 scripts/mod/modpost.c 	char *p;
p                 142 scripts/mod/modpost.c 	p = NOFAIL(strdup(modname));
p                 145 scripts/mod/modpost.c 	if (strends(p, ".o")) {
p                 146 scripts/mod/modpost.c 		p[strlen(p) - 2] = '\0';
p                 151 scripts/mod/modpost.c 	mod->name = p;
p                 451 scripts/mod/modpost.c 	signed char *p = (signed char *)file + *pos;
p                 455 scripts/mod/modpost.c 		if (skip && isspace(*p)) {
p                 456 scripts/mod/modpost.c 			p++;
p                 460 scripts/mod/modpost.c 		if (*p != '\n' && (*pos < size)) {
p                 462 scripts/mod/modpost.c 			*s++ = *p++;
p                 634 scripts/mod/modpost.c 		Elf32_Word *p;
p                 640 scripts/mod/modpost.c 		for (p = info->symtab_shndx_start; p < info->symtab_shndx_stop;
p                 641 scripts/mod/modpost.c 		     p++)
p                 642 scripts/mod/modpost.c 			*p = TO_NATIVE(*p);
p                 795 scripts/mod/modpost.c 	char *p;
p                 805 scripts/mod/modpost.c 	for (p = modinfo; p; p = next_string(p, &size)) {
p                 806 scripts/mod/modpost.c 		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
p                 807 scripts/mod/modpost.c 			return p + taglen + 1;
p                 857 scripts/mod/modpost.c 	const char *p;
p                 859 scripts/mod/modpost.c 		p = *pat++;
p                 860 scripts/mod/modpost.c 		const char *endp = p + strlen(p) - 1;
p                 863 scripts/mod/modpost.c 		if (*p == '*' && *endp == '*') {
p                 864 scripts/mod/modpost.c 			char *bare = NOFAIL(strndup(p + 1, strlen(p) - 2));
p                 872 scripts/mod/modpost.c 		else if (*p == '*') {
p                 873 scripts/mod/modpost.c 			if (strrcmp(sym, p + 1) == 0)
p                 878 scripts/mod/modpost.c 			if (strncmp(sym, p, strlen(p) - 1) == 0)
p                 883 scripts/mod/modpost.c 			if (strcmp(p, sym) == 0)
p                1400 scripts/mod/modpost.c 		char *p = NOFAIL(malloc(20));
p                1401 scripts/mod/modpost.c 		char *r = p;
p                1403 scripts/mod/modpost.c 		*p++ = '_';
p                1404 scripts/mod/modpost.c 		*p++ = '_';
p                1408 scripts/mod/modpost.c 			*p++ = *s++;
p                1409 scripts/mod/modpost.c 		*p = '\0';
p                1413 scripts/mod/modpost.c 			strcat(p, "const ");
p                1415 scripts/mod/modpost.c 			strcat(p, "data ");
p                1417 scripts/mod/modpost.c 			strcat(p, " ");
p                2146 scripts/mod/modpost.c 		buf->p = NOFAIL(realloc(buf->p, buf->size));
p                2148 scripts/mod/modpost.c 	strncpy(buf->p + buf->pos, s, len);
p                2364 scripts/mod/modpost.c 		const char *p;
p                2372 scripts/mod/modpost.c 		p = strrchr(s->module->name, '/');
p                2373 scripts/mod/modpost.c 		if (p)
p                2374 scripts/mod/modpost.c 			p++;
p                2376 scripts/mod/modpost.c 			p = s->module->name;
p                2377 scripts/mod/modpost.c 		buf_printf(b, "%s%s", first ? "" : ",", p);
p                2412 scripts/mod/modpost.c 	if (memcmp(tmp, b->p, b->pos) != 0)
p                2429 scripts/mod/modpost.c 	if (fwrite(b->p, 1, b->pos, file) != b->pos) {
p                2527 scripts/mod/modpost.c 	free(buf.p);
p                2694 scripts/mod/modpost.c 	free(buf.p);
p                 101 scripts/mod/modpost.h 	char *p;
p                 202 scripts/mod/sumversion.c 	char *p = (char *)mctx->block + offset;
p                 205 scripts/mod/sumversion.c 	*p++ = 0x80;
p                 207 scripts/mod/sumversion.c 		memset(p, 0x00, padding + sizeof (uint64_t));
p                 209 scripts/mod/sumversion.c 		p = (char *)mctx->block;
p                 213 scripts/mod/sumversion.c 	memset(p, 0, padding);
p                 335 scripts/mod/sumversion.c 		char* p = line;
p                 338 scripts/mod/sumversion.c 			p = strrchr(line, ' ');
p                 339 scripts/mod/sumversion.c 			if (!p) {
p                 343 scripts/mod/sumversion.c 			p++;
p                 344 scripts/mod/sumversion.c 			if (!parse_file(p, md)) {
p                 346 scripts/mod/sumversion.c 				     p, strerror(errno));
p                 359 scripts/mod/sumversion.c 		if ( *(p + strlen(p)-1) != '\\')
p                 362 scripts/mod/sumversion.c 		while (*p) {
p                 363 scripts/mod/sumversion.c 			if (isspace(*p)) {
p                 364 scripts/mod/sumversion.c 				*p = '\0';
p                 367 scripts/mod/sumversion.c 			p++;
p                  32 scripts/selinux/genheaders/genheaders.c 	char *p;
p                  39 scripts/selinux/genheaders/genheaders.c 	for (p = s2; *p; p++)
p                  40 scripts/selinux/genheaders/genheaders.c 		*p = toupper(*p);
p                 746 scripts/unifdef.c static Linetype op_strict(int *p, int v, Linetype at, Linetype bt) {
p                 748 scripts/unifdef.c 	return (*p = v, v ? LT_TRUE : LT_FALSE);
p                 750 scripts/unifdef.c static Linetype op_lt(int *p, Linetype at, int a, Linetype bt, int b) {
p                 751 scripts/unifdef.c 	return op_strict(p, a < b, at, bt);
p                 753 scripts/unifdef.c static Linetype op_gt(int *p, Linetype at, int a, Linetype bt, int b) {
p                 754 scripts/unifdef.c 	return op_strict(p, a > b, at, bt);
p                 756 scripts/unifdef.c static Linetype op_le(int *p, Linetype at, int a, Linetype bt, int b) {
p                 757 scripts/unifdef.c 	return op_strict(p, a <= b, at, bt);
p                 759 scripts/unifdef.c static Linetype op_ge(int *p, Linetype at, int a, Linetype bt, int b) {
p                 760 scripts/unifdef.c 	return op_strict(p, a >= b, at, bt);
p                 762 scripts/unifdef.c static Linetype op_eq(int *p, Linetype at, int a, Linetype bt, int b) {
p                 763 scripts/unifdef.c 	return op_strict(p, a == b, at, bt);
p                 765 scripts/unifdef.c static Linetype op_ne(int *p, Linetype at, int a, Linetype bt, int b) {
p                 766 scripts/unifdef.c 	return op_strict(p, a != b, at, bt);
p                 768 scripts/unifdef.c static Linetype op_or(int *p, Linetype at, int a, Linetype bt, int b) {
p                 770 scripts/unifdef.c 		return (*p = 1, LT_TRUE);
p                 771 scripts/unifdef.c 	return op_strict(p, a || b, at, bt);
p                 773 scripts/unifdef.c static Linetype op_and(int *p, Linetype at, int a, Linetype bt, int b) {
p                 775 scripts/unifdef.c 		return (*p = 0, LT_FALSE);
p                 776 scripts/unifdef.c 	return op_strict(p, a && b, at, bt);
p                 707 security/apparmor/apparmorfs.c 					      profile->data->p);
p                1591 security/apparmor/apparmorfs.c 		struct aa_profile *p;
p                1592 security/apparmor/apparmorfs.c 		p = aa_deref_parent(profile);
p                1593 security/apparmor/apparmorfs.c 		dent = prof_dir(p);
p                1598 security/apparmor/apparmorfs.c 		prof_child_dir(p) = parent = dent;
p                2040 security/apparmor/apparmorfs.c static struct aa_profile *__next_profile(struct aa_profile *p)
p                2043 security/apparmor/apparmorfs.c 	struct aa_ns *ns = p->ns;
p                2045 security/apparmor/apparmorfs.c 	AA_BUG(!mutex_is_locked(&profiles_ns(p)->lock));
p                2048 security/apparmor/apparmorfs.c 	if (!list_empty(&p->base.profiles))
p                2049 security/apparmor/apparmorfs.c 		return list_first_entry(&p->base.profiles, typeof(*p),
p                2053 security/apparmor/apparmorfs.c 	parent = rcu_dereference_protected(p->parent,
p                2054 security/apparmor/apparmorfs.c 					   mutex_is_locked(&p->ns->lock));
p                2056 security/apparmor/apparmorfs.c 		p = list_next_entry(p, base.list);
p                2057 security/apparmor/apparmorfs.c 		if (!list_entry_is_head(p, &parent->base.profiles, base.list))
p                2058 security/apparmor/apparmorfs.c 			return p;
p                2059 security/apparmor/apparmorfs.c 		p = parent;
p                2065 security/apparmor/apparmorfs.c 	p = list_next_entry(p, base.list);
p                2066 security/apparmor/apparmorfs.c 	if (!list_entry_is_head(p, &ns->base.profiles, base.list))
p                2067 security/apparmor/apparmorfs.c 		return p;
p                2127 security/apparmor/apparmorfs.c static void *p_next(struct seq_file *f, void *p, loff_t *pos)
p                2129 security/apparmor/apparmorfs.c 	struct aa_profile *profile = p;
p                2143 security/apparmor/apparmorfs.c static void p_stop(struct seq_file *f, void *p)
p                2145 security/apparmor/apparmorfs.c 	struct aa_profile *profile = p;
p                2163 security/apparmor/apparmorfs.c static int seq_show_profile(struct seq_file *f, void *p)
p                2165 security/apparmor/apparmorfs.c 	struct aa_profile *profile = (struct aa_profile *)p;
p                 668 security/apparmor/file.c static int match_file(const void *p, struct file *file, unsigned int fd)
p                 670 security/apparmor/file.c 	struct aa_label *label = (struct aa_label *)p;
p                 211 security/apparmor/include/policy.h static inline struct aa_profile *aa_get_newest_profile(struct aa_profile *p)
p                 213 security/apparmor/include/policy.h 	return labels_profile(aa_get_newest_label(&p->label));
p                 243 security/apparmor/include/policy.h static inline struct aa_profile *aa_get_profile(struct aa_profile *p)
p                 245 security/apparmor/include/policy.h 	if (p)
p                 246 security/apparmor/include/policy.h 		kref_get(&(p->label.count));
p                 248 security/apparmor/include/policy.h 	return p;
p                 258 security/apparmor/include/policy.h static inline struct aa_profile *aa_get_profile_not0(struct aa_profile *p)
p                 260 security/apparmor/include/policy.h 	if (p && kref_get_unless_zero(&p->label.count))
p                 261 security/apparmor/include/policy.h 		return p;
p                 273 security/apparmor/include/policy.h static inline struct aa_profile *aa_get_profile_rcu(struct aa_profile __rcu **p)
p                 279 security/apparmor/include/policy.h 		c = rcu_dereference(*p);
p                 290 security/apparmor/include/policy.h static inline void aa_put_profile(struct aa_profile *p)
p                 292 security/apparmor/include/policy.h 	if (p)
p                 293 security/apparmor/include/policy.h 		kref_put(&p->label.count, aa_label_kref);
p                  99 security/apparmor/include/policy_ns.h static inline struct aa_profile *aa_deref_parent(struct aa_profile *p)
p                 101 security/apparmor/include/policy_ns.h 	return rcu_dereference_protected(p->parent,
p                 102 security/apparmor/include/policy_ns.h 					 mutex_is_locked(&p->ns->lock));
p                 978 security/apparmor/label.c 	struct aa_profile *p = NULL;
p                 987 security/apparmor/label.c 	     k < z->size && (p = aa_label_next_in_merge(&i, a, b));
p                 989 security/apparmor/label.c 		int res = profile_cmp(p, z->vec[k]);
p                 995 security/apparmor/label.c 	if (p)
p                 709 security/apparmor/lsm.c static void apparmor_task_getsecid(struct task_struct *p, u32 *secid)
p                 711 security/apparmor/lsm.c 	struct aa_label *label = aa_get_task_label(p);
p                 505 security/apparmor/policy.c 	struct aa_profile *p, *profile;
p                 551 security/apparmor/policy.c 	p = __find_child(&parent->base.profiles, bname);
p                 552 security/apparmor/policy.c 	if (p) {
p                 554 security/apparmor/policy.c 		profile = aa_get_profile(p);
p                 749 security/apparmor/policy.c 			struct aa_profile *p;
p                 752 security/apparmor/policy.c 			p = __find_child(&new->base.profiles, child->base.name);
p                 753 security/apparmor/policy.c 			if (p) {
p                 755 security/apparmor/policy.c 				__replace_profile(child, p);
p                 762 security/apparmor/policy.c 			p = aa_deref_parent(child);
p                 765 security/apparmor/policy.c 			aa_put_profile(p);
p                 797 security/apparmor/policy.c 			    bool noreplace, struct aa_profile **p,
p                 800 security/apparmor/policy.c 	*p = aa_get_profile(__lookup_profile(&ns->base, hname));
p                 801 security/apparmor/policy.c 	if (*p) {
p                 802 security/apparmor/policy.c 		int error = replacement_allowed(*p, noreplace, info);
p                 955 security/apparmor/policy.c 			struct aa_profile *p;
p                 956 security/apparmor/policy.c 			p = __list_lookup_parent(&lh, ent->new);
p                 957 security/apparmor/policy.c 			if (!p) {
p                 962 security/apparmor/policy.c 			rcu_assign_pointer(ent->new->parent, aa_get_profile(p));
p                 965 security/apparmor/policy.c 			struct aa_profile *p = (struct aa_profile *) policy;
p                 966 security/apparmor/policy.c 			rcu_assign_pointer(ent->new->parent, aa_get_profile(p));
p                 983 security/apparmor/policy.c 				struct aa_profile *p;
p                 984 security/apparmor/policy.c 				p = aa_deref_parent(ent->new);
p                 985 security/apparmor/policy.c 				parent = prof_child_dir(p);
p                 205 security/apparmor/policy_unpack.c 	void *p = kvmalloc(len, GFP_KERNEL);
p                 207 security/apparmor/policy_unpack.c 	if (p)
p                 208 security/apparmor/policy_unpack.c 		memcpy(p, src, len);
p                 209 security/apparmor/policy_unpack.c 	return p;
p                 904 security/apparmor/policy_unpack.c 					       profile->data->p);
p                  26 security/apparmor/task.c 	struct aa_label *p;
p                  29 security/apparmor/task.c 	p = aa_get_newest_label(__aa_task_raw_label(task));
p                  32 security/apparmor/task.c 	return p;
p                1084 security/commoncap.c static int cap_safe_nice(struct task_struct *p)
p                1089 security/commoncap.c 	is_subset = cap_issubset(__task_cred(p)->cap_permitted,
p                1091 security/commoncap.c 	if (!is_subset && !ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
p                1105 security/commoncap.c int cap_task_setscheduler(struct task_struct *p)
p                1107 security/commoncap.c 	return cap_safe_nice(p);
p                1118 security/commoncap.c int cap_task_setioprio(struct task_struct *p, int ioprio)
p                1120 security/commoncap.c 	return cap_safe_nice(p);
p                1131 security/commoncap.c int cap_task_setnice(struct task_struct *p, int nice)
p                1133 security/commoncap.c 	return cap_safe_nice(p);
p                  97 security/integrity/iint.c 	struct rb_node **p;
p                 111 security/integrity/iint.c 	p = &integrity_iint_tree.rb_node;
p                 112 security/integrity/iint.c 	while (*p) {
p                 113 security/integrity/iint.c 		parent = *p;
p                 117 security/integrity/iint.c 			p = &(*p)->rb_left;
p                 119 security/integrity/iint.c 			p = &(*p)->rb_right;
p                 125 security/integrity/iint.c 	rb_link_node(node, parent, p);
p                  54 security/integrity/ima/ima_crypto.c #define param_check_bufsize(name, p) __param_check(name, p, unsigned int)
p                 282 security/integrity/ima/ima_fs.c 	char *p;
p                 295 security/integrity/ima/ima_fs.c 	while (size > 0 && (p = strsep(&datap, "\n"))) {
p                 296 security/integrity/ima/ima_fs.c 		pr_debug("rule: %s\n", p);
p                 297 security/integrity/ima/ima_fs.c 		rc = ima_parse_add_rule(p);
p                  67 security/integrity/ima/ima_modsig.c 	const void *p;
p                  73 security/integrity/ima/ima_modsig.c 	p = buf + buf_len - marker_len;
p                  74 security/integrity/ima/ima_modsig.c 	if (memcmp(p, MODULE_SIG_STRING, marker_len))
p                  78 security/integrity/ima/ima_modsig.c 	sig = (const struct module_signature *)(p - sizeof(*sig));
p                 226 security/integrity/ima/ima_policy.c 	char *p;
p                 228 security/integrity/ima/ima_policy.c 	while ((p = strsep(&str, " |\n")) != NULL) {
p                 229 security/integrity/ima/ima_policy.c 		if (*p == ' ')
p                 231 security/integrity/ima/ima_policy.c 		if ((strcmp(p, "tcb") == 0) && !ima_policy)
p                 233 security/integrity/ima/ima_policy.c 		else if (strcmp(p, "appraise_tcb") == 0)
p                 235 security/integrity/ima/ima_policy.c 		else if (strcmp(p, "secure_boot") == 0)
p                 237 security/integrity/ima/ima_policy.c 		else if (strcmp(p, "fail_securely") == 0)
p                 895 security/integrity/ima/ima_policy.c 	char *p;
p                 908 security/integrity/ima/ima_policy.c 	while ((p = strsep(&rule, " \t")) != NULL) {
p                 915 security/integrity/ima/ima_policy.c 		if ((*p == '\0') || (*p == ' ') || (*p == '\t'))
p                 917 security/integrity/ima/ima_policy.c 		token = match_token(p, policy_tokens, args);
p                1223 security/integrity/ima/ima_policy.c 			ima_log_string(ab, "UNKNOWN", p);
p                1254 security/integrity/ima/ima_policy.c 	char *p;
p                1259 security/integrity/ima/ima_policy.c 	p = strsep(&rule, "\n");
p                1260 security/integrity/ima/ima_policy.c 	len = strlen(p) + 1;
p                1261 security/integrity/ima/ima_policy.c 	p += strspn(p, " \t");
p                1263 security/integrity/ima/ima_policy.c 	if (*p == '#' || *p == '\0')
p                1275 security/integrity/ima/ima_policy.c 	result = ima_parse_rule(p, entry);
p                  79 security/integrity/platform_certs/load_uefi.c 	char *hash, *p;
p                  84 security/integrity/platform_certs/load_uefi.c 	p = memcpy(hash, type, type_len);
p                  85 security/integrity/platform_certs/load_uefi.c 	p += type_len;
p                  86 security/integrity/platform_certs/load_uefi.c 	bin2hex(p, data, len);
p                  87 security/integrity/platform_certs/load_uefi.c 	p += len * 2;
p                  88 security/integrity/platform_certs/load_uefi.c 	*p = 0;
p                  62 security/keys/dh.c 	kzfree(dh->p);
p                 284 security/keys/dh.c 	dlen = dh_data_from_key(pcopy.prime, &dh_inputs.p);
p                 179 security/keys/encrypted-keys/encrypted.c 	char *p, *keyword;
p                 189 security/keys/encrypted-keys/encrypted.c 	p = strsep(&datablob, " \t");
p                 190 security/keys/encrypted-keys/encrypted.c 	if (!p) {
p                 195 security/keys/encrypted-keys/encrypted.c 	key_format = match_token(p, key_format_tokens, args);
p                 200 security/keys/encrypted-keys/encrypted.c 		*format = p;
p                 204 security/keys/encrypted-keys/encrypted.c 		*master_desc = p;
p                 529 security/keys/encrypted-keys/encrypted.c 	char *p;
p                 538 security/keys/encrypted-keys/encrypted.c 		p = epayload->master_desc;
p                 541 security/keys/encrypted-keys/encrypted.c 		p = epayload->format;
p                 543 security/keys/encrypted-keys/encrypted.c 	ret = calc_hmac(digest, derived_key, sizeof derived_key, p, len);
p                  53 security/keys/key.c 	struct rb_node *parent, **p;
p                  57 security/keys/key.c 	p = &key_user_tree.rb_node;
p                  61 security/keys/key.c 	while (*p) {
p                  62 security/keys/key.c 		parent = *p;
p                  66 security/keys/key.c 			p = &(*p)->rb_left;
p                  68 security/keys/key.c 			p = &(*p)->rb_right;
p                 101 security/keys/key.c 	rb_link_node(&candidate->node, parent, p);
p                 135 security/keys/key.c 	struct rb_node *parent, **p;
p                 150 security/keys/key.c 	p = &key_serial_tree.rb_node;
p                 152 security/keys/key.c 	while (*p) {
p                 153 security/keys/key.c 		parent = *p;
p                 157 security/keys/key.c 			p = &(*p)->rb_left;
p                 159 security/keys/key.c 			p = &(*p)->rb_right;
p                 165 security/keys/key.c 	rb_link_node(&key->serial_node, parent, p);
p                1129 security/keys/key.c 	struct key_type *p;
p                1138 security/keys/key.c 	list_for_each_entry(p, &key_types_list, link) {
p                1139 security/keys/key.c 		if (strcmp(p->name, ktype->name) == 0)
p                  42 security/keys/keyctl_pkey.c 	char *c = params->info, *p, *q;
p                  45 security/keys/keyctl_pkey.c 	while ((p = strsep(&c, " \t"))) {
p                  46 security/keys/keyctl_pkey.c 		if (*p == '\0' || *p == ' ' || *p == '\t')
p                  48 security/keys/keyctl_pkey.c 		token = match_token(p, param_keys, args);
p                  83 security/keys/keyctl_pkey.c 	void *p;
p                  89 security/keys/keyctl_pkey.c 	p = strndup_user(_info, PAGE_SIZE);
p                  90 security/keys/keyctl_pkey.c 	if (IS_ERR(p))
p                  91 security/keys/keyctl_pkey.c 		return PTR_ERR(p);
p                  92 security/keys/keyctl_pkey.c 	params->info = p;
p                  16 security/keys/proc.c static void *proc_keys_start(struct seq_file *p, loff_t *_pos);
p                  17 security/keys/proc.c static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos);
p                  18 security/keys/proc.c static void proc_keys_stop(struct seq_file *p, void *v);
p                  28 security/keys/proc.c static void *proc_key_users_start(struct seq_file *p, loff_t *_pos);
p                  29 security/keys/proc.c static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos);
p                  30 security/keys/proc.c static void proc_key_users_stop(struct seq_file *p, void *v);
p                  45 security/keys/proc.c 	struct proc_dir_entry *p;
p                  47 security/keys/proc.c 	p = proc_create_seq("keys", 0, NULL, &proc_keys_ops);
p                  48 security/keys/proc.c 	if (!p)
p                  51 security/keys/proc.c 	p = proc_create_seq("key-users", 0, NULL, &proc_key_users_ops);
p                  52 security/keys/proc.c 	if (!p)
p                  64 security/keys/proc.c static struct rb_node *key_serial_next(struct seq_file *p, struct rb_node *n)
p                  66 security/keys/proc.c 	struct user_namespace *user_ns = seq_user_ns(p);
p                  78 security/keys/proc.c static struct key *find_ge_key(struct seq_file *p, key_serial_t id)
p                  80 security/keys/proc.c 	struct user_namespace *user_ns = seq_user_ns(p);
p                 112 security/keys/proc.c static void *proc_keys_start(struct seq_file *p, loff_t *_pos)
p                 122 security/keys/proc.c 	key = find_ge_key(p, pos);
p                 135 security/keys/proc.c static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos)
p                 139 security/keys/proc.c 	n = key_serial_next(p, v);
p                 147 security/keys/proc.c static void proc_keys_stop(struct seq_file *p, void *v)
p                 274 security/keys/proc.c static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
p                 282 security/keys/proc.c 	_p = key_user_first(seq_user_ns(p), &key_user_tree);
p                 285 security/keys/proc.c 		_p = key_user_next(seq_user_ns(p), _p);
p                 291 security/keys/proc.c static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos)
p                 294 security/keys/proc.c 	return key_user_next(seq_user_ns(p), (struct rb_node *)v);
p                 297 security/keys/proc.c static void proc_key_users_stop(struct seq_file *p, void *v)
p                 670 security/keys/trusted.c static int key_seal(struct trusted_key_payload *p,
p                 681 security/keys/trusted.c 	p->key[p->key_len] = p->migratable;
p                 684 security/keys/trusted.c 		       p->key, p->key_len + 1, p->blob, &p->blob_len,
p                 696 security/keys/trusted.c static int key_unseal(struct trusted_key_payload *p,
p                 706 security/keys/trusted.c 	ret = tpm_unseal(tb, o->keyhandle, o->keyauth, p->blob, p->blob_len,
p                 707 security/keys/trusted.c 			 o->blobauth, p->key, &p->key_len);
p                 712 security/keys/trusted.c 		p->migratable = p->key[--p->key_len];
p                 749 security/keys/trusted.c 	char *p = c;
p                 765 security/keys/trusted.c 	while ((p = strsep(&c, " \t"))) {
p                 766 security/keys/trusted.c 		if (*p == '\0' || *p == ' ' || *p == '\t')
p                 768 security/keys/trusted.c 		token = match_token(p, key_tokens, args);
p                 864 security/keys/trusted.c static int datablob_parse(char *datablob, struct trusted_key_payload *p,
p                 887 security/keys/trusted.c 		p->key_len = keylen;
p                 888 security/keys/trusted.c 		ret = getoptions(datablob, p, o);
p                 898 security/keys/trusted.c 		p->blob_len = strlen(c) / 2;
p                 899 security/keys/trusted.c 		if (p->blob_len > MAX_BLOB_SIZE)
p                 901 security/keys/trusted.c 		ret = hex2bin(p->blob, c, p->blob_len);
p                 904 security/keys/trusted.c 		ret = getoptions(datablob, p, o);
p                 911 security/keys/trusted.c 		ret = getoptions(datablob, p, o);
p                 945 security/keys/trusted.c 	struct trusted_key_payload *p = NULL;
p                 948 security/keys/trusted.c 	ret = key_payload_reserve(key, sizeof *p);
p                 950 security/keys/trusted.c 		return p;
p                 951 security/keys/trusted.c 	p = kzalloc(sizeof *p, GFP_KERNEL);
p                 952 security/keys/trusted.c 	if (p)
p                 953 security/keys/trusted.c 		p->migratable = 1; /* migratable by default */
p                 954 security/keys/trusted.c 	return p;
p                1059 security/keys/trusted.c 	struct trusted_key_payload *p;
p                1061 security/keys/trusted.c 	p = container_of(rcu, struct trusted_key_payload, rcu);
p                1062 security/keys/trusted.c 	kzfree(p);
p                1070 security/keys/trusted.c 	struct trusted_key_payload *p;
p                1079 security/keys/trusted.c 	p = key->payload.data[0];
p                1080 security/keys/trusted.c 	if (!p->migratable)
p                1115 security/keys/trusted.c 	new_p->migratable = p->migratable;
p                1116 security/keys/trusted.c 	new_p->key_len = p->key_len;
p                1117 security/keys/trusted.c 	memcpy(new_p->key, p->key, p->key_len);
p                1118 security/keys/trusted.c 	dump_payload(p);
p                1136 security/keys/trusted.c 	call_rcu(&p->rcu, trusted_rcu_free);
p                1150 security/keys/trusted.c 	const struct trusted_key_payload *p;
p                1154 security/keys/trusted.c 	p = dereference_key_locked(key);
p                1155 security/keys/trusted.c 	if (!p)
p                1158 security/keys/trusted.c 	if (buffer && buflen >= 2 * p->blob_len) {
p                1160 security/keys/trusted.c 		for (i = 0; i < p->blob_len; i++)
p                1161 security/keys/trusted.c 			bufp = hex_byte_pack(bufp, p->blob[i]);
p                1163 security/keys/trusted.c 	return 2 * p->blob_len;
p                 195 security/keys/user_defined.c 	char *p;
p                 198 security/keys/user_defined.c 	p = strchr(desc, ':');
p                 199 security/keys/user_defined.c 	if (!p)
p                 203 security/keys/user_defined.c 	if (p == desc)
p                 323 security/lsm_audit.c 			char *p = NULL;
p                 360 security/lsm_audit.c 				p = &addr->name->sun_path[0];
p                 362 security/lsm_audit.c 				if (*p)
p                 363 security/lsm_audit.c 					audit_log_untrustedstring(ab, p);
p                 365 security/lsm_audit.c 					audit_log_n_hex(ab, p, len);
p                 114 security/safesetid/securityfs.c 	char *buf, *p, *end;
p                 123 security/safesetid/securityfs.c 	p = buf = memdup_user_nul(ubuf, len);
p                 135 security/safesetid/securityfs.c 	while (*p != '\0') {
p                 138 security/safesetid/securityfs.c 		end = strchr(p, '\n');
p                 151 security/safesetid/securityfs.c 		err = parse_policy_line(file, p, rule);
p                 163 security/safesetid/securityfs.c 		p = end + 1;
p                1637 security/security.c int security_task_setpgid(struct task_struct *p, pid_t pgid)
p                1639 security/security.c 	return call_int_hook(task_setpgid, 0, p, pgid);
p                1642 security/security.c int security_task_getpgid(struct task_struct *p)
p                1644 security/security.c 	return call_int_hook(task_getpgid, 0, p);
p                1647 security/security.c int security_task_getsid(struct task_struct *p)
p                1649 security/security.c 	return call_int_hook(task_getsid, 0, p);
p                1652 security/security.c void security_task_getsecid(struct task_struct *p, u32 *secid)
p                1655 security/security.c 	call_void_hook(task_getsecid, p, secid);
p                1659 security/security.c int security_task_setnice(struct task_struct *p, int nice)
p                1661 security/security.c 	return call_int_hook(task_setnice, 0, p, nice);
p                1664 security/security.c int security_task_setioprio(struct task_struct *p, int ioprio)
p                1666 security/security.c 	return call_int_hook(task_setioprio, 0, p, ioprio);
p                1669 security/security.c int security_task_getioprio(struct task_struct *p)
p                1671 security/security.c 	return call_int_hook(task_getioprio, 0, p);
p                1680 security/security.c int security_task_setrlimit(struct task_struct *p, unsigned int resource,
p                1683 security/security.c 	return call_int_hook(task_setrlimit, 0, p, resource, new_rlim);
p                1686 security/security.c int security_task_setscheduler(struct task_struct *p)
p                1688 security/security.c 	return call_int_hook(task_setscheduler, 0, p);
p                1691 security/security.c int security_task_getscheduler(struct task_struct *p)
p                1693 security/security.c 	return call_int_hook(task_getscheduler, 0, p);
p                1696 security/security.c int security_task_movememory(struct task_struct *p)
p                1698 security/security.c 	return call_int_hook(task_movememory, 0, p);
p                1701 security/security.c int security_task_kill(struct task_struct *p, struct kernel_siginfo *info,
p                1704 security/security.c 	return call_int_hook(task_kill, 0, p, info, sig, cred);
p                1725 security/security.c void security_task_to_inode(struct task_struct *p, struct inode *inode)
p                1727 security/security.c 	call_void_hook(task_to_inode, p, inode);
p                1878 security/security.c int security_getprocattr(struct task_struct *p, const char *lsm, char *name,
p                1886 security/security.c 		return hp->hook.getprocattr(p, name, value);
p                 204 security/selinux/avc.c 		rc = security_xperm_test(xpd->allowed->p, perm);
p                 207 security/selinux/avc.c 		rc = security_xperm_test(xpd->auditallow->p, perm);
p                 210 security/selinux/avc.c 		rc = security_xperm_test(xpd->dontaudit->p, perm);
p                 218 security/selinux/avc.c 	security_xperm_set(xp_node->xp.drivers.p, driver);
p                 221 security/selinux/avc.c 		security_xperm_set(xpd->allowed->p, perm);
p                 258 security/selinux/avc.c 		memcpy(dest->allowed->p, src->allowed->p,
p                 259 security/selinux/avc.c 				sizeof(src->allowed->p));
p                 261 security/selinux/avc.c 		memcpy(dest->auditallow->p, src->auditallow->p,
p                 262 security/selinux/avc.c 				sizeof(src->auditallow->p));
p                 264 security/selinux/avc.c 		memcpy(dest->dontaudit->p, src->dontaudit->p,
p                 265 security/selinux/avc.c 				sizeof(src->dontaudit->p));
p                 284 security/selinux/avc.c 		dest->allowed->p[i] = src->allowed->p[i];
p                 286 security/selinux/avc.c 		dest->auditallow->p[i] = src->auditallow->p[i];
p                 288 security/selinux/avc.c 		dest->dontaudit->p[i] = src->dontaudit->p[i];
p                 364 security/selinux/avc.c 	memcpy(dest->xp.drivers.p, src->xp.drivers.p, sizeof(dest->xp.drivers.p));
p                1075 security/selinux/avc.c 		if (!security_xperm_test(xp_node->xp.drivers.p, driver)) {
p                2445 security/selinux/hooks.c static int match_file(const void *p, struct file *file, unsigned fd)
p                2447 security/selinux/hooks.c 	return file_has_perm(p, file, file_to_av(file)) ? fd + 1 : 0;
p                2637 security/selinux/hooks.c 			char *p, *q;
p                2641 security/selinux/hooks.c 				for (p = q = arg; p < from + len; p++) {
p                2642 security/selinux/hooks.c 					char c = *p;
p                4042 security/selinux/hooks.c static int selinux_task_setpgid(struct task_struct *p, pid_t pgid)
p                4045 security/selinux/hooks.c 			    current_sid(), task_sid(p), SECCLASS_PROCESS,
p                4049 security/selinux/hooks.c static int selinux_task_getpgid(struct task_struct *p)
p                4052 security/selinux/hooks.c 			    current_sid(), task_sid(p), SECCLASS_PROCESS,
p                4056 security/selinux/hooks.c static int selinux_task_getsid(struct task_struct *p)
p                4059 security/selinux/hooks.c 			    current_sid(), task_sid(p), SECCLASS_PROCESS,
p                4063 security/selinux/hooks.c static void selinux_task_getsecid(struct task_struct *p, u32 *secid)
p                4065 security/selinux/hooks.c 	*secid = task_sid(p);
p                4068 security/selinux/hooks.c static int selinux_task_setnice(struct task_struct *p, int nice)
p                4071 security/selinux/hooks.c 			    current_sid(), task_sid(p), SECCLASS_PROCESS,
p                4075 security/selinux/hooks.c static int selinux_task_setioprio(struct task_struct *p, int ioprio)
p                4078 security/selinux/hooks.c 			    current_sid(), task_sid(p), SECCLASS_PROCESS,
p                4082 security/selinux/hooks.c static int selinux_task_getioprio(struct task_struct *p)
p                4085 security/selinux/hooks.c 			    current_sid(), task_sid(p), SECCLASS_PROCESS,
p                4105 security/selinux/hooks.c static int selinux_task_setrlimit(struct task_struct *p, unsigned int resource,
p                4108 security/selinux/hooks.c 	struct rlimit *old_rlim = p->signal->rlim + resource;
p                4116 security/selinux/hooks.c 				    current_sid(), task_sid(p),
p                4122 security/selinux/hooks.c static int selinux_task_setscheduler(struct task_struct *p)
p                4125 security/selinux/hooks.c 			    current_sid(), task_sid(p), SECCLASS_PROCESS,
p                4129 security/selinux/hooks.c static int selinux_task_getscheduler(struct task_struct *p)
p                4132 security/selinux/hooks.c 			    current_sid(), task_sid(p), SECCLASS_PROCESS,
p                4136 security/selinux/hooks.c static int selinux_task_movememory(struct task_struct *p)
p                4139 security/selinux/hooks.c 			    current_sid(), task_sid(p), SECCLASS_PROCESS,
p                4143 security/selinux/hooks.c static int selinux_task_kill(struct task_struct *p, struct kernel_siginfo *info,
p                4158 security/selinux/hooks.c 			    secid, task_sid(p), SECCLASS_PROCESS, perm, NULL);
p                4161 security/selinux/hooks.c static void selinux_task_to_inode(struct task_struct *p,
p                4165 security/selinux/hooks.c 	u32 sid = task_sid(p);
p                6294 security/selinux/hooks.c static int selinux_getprocattr(struct task_struct *p,
p                6303 security/selinux/hooks.c 	__tsec = selinux_cred(__task_cred(p));
p                6305 security/selinux/hooks.c 	if (current != p) {
p                 206 security/selinux/include/security.h 	u32 p[8];
p                 389 security/selinux/ss/avtab.c 				   struct avtab_datum *d, void *p),
p                 390 security/selinux/ss/avtab.c 		    void *p)
p                 398 security/selinux/ss/avtab.c 	__le32 buf32[ARRAY_SIZE(xperms.perms.p)];
p                 464 security/selinux/ss/avtab.c 				rc = insertf(a, &key, &datum, p);
p                 525 security/selinux/ss/avtab.c 		rc = next_entry(buf32, fp, sizeof(u32)*ARRAY_SIZE(xperms.perms.p));
p                 530 security/selinux/ss/avtab.c 		for (i = 0; i < ARRAY_SIZE(xperms.perms.p); i++)
p                 531 security/selinux/ss/avtab.c 			xperms.perms.p[i] = le32_to_cpu(buf32[i]);
p                 546 security/selinux/ss/avtab.c 	return insertf(a, &key, &datum, p);
p                 550 security/selinux/ss/avtab.c 			 struct avtab_datum *d, void *p)
p                 599 security/selinux/ss/avtab.c int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp)
p                 602 security/selinux/ss/avtab.c 	__le32 buf32[ARRAY_SIZE(cur->datum.u.xperms->perms.p)];
p                 621 security/selinux/ss/avtab.c 		for (i = 0; i < ARRAY_SIZE(cur->datum.u.xperms->perms.p); i++)
p                 622 security/selinux/ss/avtab.c 			buf32[i] = cpu_to_le32(cur->datum.u.xperms->perms.p[i]);
p                 624 security/selinux/ss/avtab.c 				ARRAY_SIZE(cur->datum.u.xperms->perms.p), fp);
p                 634 security/selinux/ss/avtab.c int avtab_write(struct policydb *p, struct avtab *a, void *fp)
p                 649 security/selinux/ss/avtab.c 			rc = avtab_write_item(p, cur, fp);
p                  99 security/selinux/ss/avtab.h 				  struct avtab_datum *d, void *p),
p                 100 security/selinux/ss/avtab.h 		    void *p);
p                 103 security/selinux/ss/avtab.h int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp);
p                 104 security/selinux/ss/avtab.h int avtab_write(struct policydb *p, struct avtab *a, void *fp);
p                  24 security/selinux/ss/conditional.c static int cond_evaluate_expr(struct policydb *p, struct cond_expr *expr)
p                  37 security/selinux/ss/conditional.c 			s[sp] = p->bool_val_to_struct[cur->bool - 1]->state;
p                  88 security/selinux/ss/conditional.c int evaluate_cond_node(struct policydb *p, struct cond_node *node)
p                  93 security/selinux/ss/conditional.c 	new_state = cond_evaluate_expr(p, node->expr);
p                 117 security/selinux/ss/conditional.c int cond_policydb_init(struct policydb *p)
p                 121 security/selinux/ss/conditional.c 	p->bool_val_to_struct = NULL;
p                 122 security/selinux/ss/conditional.c 	p->cond_list = NULL;
p                 124 security/selinux/ss/conditional.c 	rc = avtab_init(&p->te_cond_avtab);
p                 167 security/selinux/ss/conditional.c void cond_policydb_destroy(struct policydb *p)
p                 169 security/selinux/ss/conditional.c 	kfree(p->bool_val_to_struct);
p                 170 security/selinux/ss/conditional.c 	avtab_destroy(&p->te_cond_avtab);
p                 171 security/selinux/ss/conditional.c 	cond_list_destroy(p->cond_list);
p                 174 security/selinux/ss/conditional.c int cond_init_bool_indexes(struct policydb *p)
p                 176 security/selinux/ss/conditional.c 	kfree(p->bool_val_to_struct);
p                 177 security/selinux/ss/conditional.c 	p->bool_val_to_struct = kmalloc_array(p->p_bools.nprim,
p                 178 security/selinux/ss/conditional.c 					      sizeof(*p->bool_val_to_struct),
p                 180 security/selinux/ss/conditional.c 	if (!p->bool_val_to_struct)
p                 185 security/selinux/ss/conditional.c int cond_destroy_bool(void *key, void *datum, void *p)
p                 194 security/selinux/ss/conditional.c 	struct policydb *p;
p                 198 security/selinux/ss/conditional.c 	p = datap;
p                 200 security/selinux/ss/conditional.c 	if (!booldatum->value || booldatum->value > p->p_bools.nprim)
p                 203 security/selinux/ss/conditional.c 	p->sym_val_to_name[SYM_BOOLS][booldatum->value - 1] = key;
p                 204 security/selinux/ss/conditional.c 	p->bool_val_to_struct[booldatum->value - 1] = booldatum;
p                 216 security/selinux/ss/conditional.c int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp)
p                 262 security/selinux/ss/conditional.c 	struct policydb *p;
p                 271 security/selinux/ss/conditional.c 	struct policydb *p = data->p;
p                 283 security/selinux/ss/conditional.c 		if (avtab_search(&p->te_avtab, k)) {
p                 296 security/selinux/ss/conditional.c 			node_ptr = avtab_search_node(&p->te_cond_avtab, k);
p                 315 security/selinux/ss/conditional.c 			if (avtab_search(&p->te_cond_avtab, k)) {
p                 322 security/selinux/ss/conditional.c 	node_ptr = avtab_insert_nonunique(&p->te_cond_avtab, k, d);
p                 349 security/selinux/ss/conditional.c static int cond_read_av_list(struct policydb *p, void *fp, struct cond_av_list **ret_list, struct cond_av_list *other)
p                 366 security/selinux/ss/conditional.c 	data.p = p;
p                 371 security/selinux/ss/conditional.c 		rc = avtab_read_item(&p->te_cond_avtab, fp, p, cond_insertf,
p                 381 security/selinux/ss/conditional.c static int expr_isvalid(struct policydb *p, struct cond_expr *expr)
p                 388 security/selinux/ss/conditional.c 	if (expr->bool > p->p_bools.nprim) {
p                 395 security/selinux/ss/conditional.c static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp)
p                 424 security/selinux/ss/conditional.c 		if (!expr_isvalid(p, expr)) {
p                 437 security/selinux/ss/conditional.c 	rc = cond_read_av_list(p, fp, &node->true_list, NULL);
p                 440 security/selinux/ss/conditional.c 	rc = cond_read_av_list(p, fp, &node->false_list, node->true_list);
p                 449 security/selinux/ss/conditional.c int cond_read_list(struct policydb *p, void *fp)
p                 462 security/selinux/ss/conditional.c 	rc = avtab_alloc(&(p->te_cond_avtab), p->te_avtab.nel);
p                 472 security/selinux/ss/conditional.c 		rc = cond_read_node(p, node, fp);
p                 477 security/selinux/ss/conditional.c 			p->cond_list = node;
p                 484 security/selinux/ss/conditional.c 	cond_list_destroy(p->cond_list);
p                 485 security/selinux/ss/conditional.c 	p->cond_list = NULL;
p                 521 security/selinux/ss/conditional.c static int cond_write_av_list(struct policydb *p,
p                 542 security/selinux/ss/conditional.c 		rc = avtab_write_item(p, cur_list->node, fp);
p                 550 security/selinux/ss/conditional.c static int cond_write_node(struct policydb *p, struct cond_node *node,
p                 579 security/selinux/ss/conditional.c 	rc = cond_write_av_list(p, node->true_list, fp);
p                 582 security/selinux/ss/conditional.c 	rc = cond_write_av_list(p, node->false_list, fp);
p                 589 security/selinux/ss/conditional.c int cond_write_list(struct policydb *p, struct cond_node *list, void *fp)
p                 605 security/selinux/ss/conditional.c 		rc = cond_write_node(p, cur, fp);
p                  61 security/selinux/ss/conditional.h int cond_policydb_init(struct policydb *p);
p                  62 security/selinux/ss/conditional.h void cond_policydb_destroy(struct policydb *p);
p                  64 security/selinux/ss/conditional.h int cond_init_bool_indexes(struct policydb *p);
p                  65 security/selinux/ss/conditional.h int cond_destroy_bool(void *key, void *datum, void *p);
p                  69 security/selinux/ss/conditional.h int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp);
p                  70 security/selinux/ss/conditional.h int cond_read_list(struct policydb *p, void *fp);
p                  72 security/selinux/ss/conditional.h int cond_write_list(struct policydb *p, struct cond_node *list, void *fp);
p                  78 security/selinux/ss/conditional.h int evaluate_cond_node(struct policydb *p, struct cond_node *node);
p                  19 security/selinux/ss/hashtab.c 	struct hashtab *p;
p                  22 security/selinux/ss/hashtab.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                  23 security/selinux/ss/hashtab.c 	if (!p)
p                  24 security/selinux/ss/hashtab.c 		return p;
p                  26 security/selinux/ss/hashtab.c 	p->size = size;
p                  27 security/selinux/ss/hashtab.c 	p->nel = 0;
p                  28 security/selinux/ss/hashtab.c 	p->hash_value = hash_value;
p                  29 security/selinux/ss/hashtab.c 	p->keycmp = keycmp;
p                  30 security/selinux/ss/hashtab.c 	p->htable = kmalloc_array(size, sizeof(*p->htable), GFP_KERNEL);
p                  31 security/selinux/ss/hashtab.c 	if (!p->htable) {
p                  32 security/selinux/ss/hashtab.c 		kfree(p);
p                  37 security/selinux/ss/hashtab.c 		p->htable[i] = NULL;
p                  39 security/selinux/ss/hashtab.c 	return p;
p                  36 security/selinux/ss/mls.c int mls_compute_context_len(struct policydb *p, struct context *context)
p                  43 security/selinux/ss/mls.c 	if (!p->mls_enabled)
p                  49 security/selinux/ss/mls.c 		len += strlen(sym_name(p, SYM_LEVELS, index_sens - 1));
p                  59 security/selinux/ss/mls.c 					nm = sym_name(p, SYM_CATS, prev);
p                  62 security/selinux/ss/mls.c 				nm = sym_name(p, SYM_CATS, i);
p                  69 security/selinux/ss/mls.c 			nm = sym_name(p, SYM_CATS, prev);
p                  89 security/selinux/ss/mls.c void mls_sid_to_context(struct policydb *p,
p                  98 security/selinux/ss/mls.c 	if (!p->mls_enabled)
p                 107 security/selinux/ss/mls.c 		strcpy(scontextp, sym_name(p, SYM_LEVELS,
p                 123 security/selinux/ss/mls.c 					nm = sym_name(p, SYM_CATS, prev);
p                 131 security/selinux/ss/mls.c 				nm = sym_name(p, SYM_CATS, i);
p                 144 security/selinux/ss/mls.c 			nm = sym_name(p, SYM_CATS, prev);
p                 162 security/selinux/ss/mls.c int mls_level_isvalid(struct policydb *p, struct mls_level *l)
p                 166 security/selinux/ss/mls.c 	if (!l->sens || l->sens > p->p_levels.nprim)
p                 168 security/selinux/ss/mls.c 	levdatum = hashtab_search(p->p_levels.table,
p                 169 security/selinux/ss/mls.c 				  sym_name(p, SYM_LEVELS, l->sens - 1));
p                 179 security/selinux/ss/mls.c 				p->p_cats.nprim);
p                 182 security/selinux/ss/mls.c int mls_range_isvalid(struct policydb *p, struct mls_range *r)
p                 184 security/selinux/ss/mls.c 	return (mls_level_isvalid(p, &r->level[0]) &&
p                 185 security/selinux/ss/mls.c 		mls_level_isvalid(p, &r->level[1]) &&
p                 193 security/selinux/ss/mls.c int mls_context_isvalid(struct policydb *p, struct context *c)
p                 197 security/selinux/ss/mls.c 	if (!p->mls_enabled)
p                 200 security/selinux/ss/mls.c 	if (!mls_range_isvalid(p, &c->range))
p                 209 security/selinux/ss/mls.c 	if (!c->user || c->user > p->p_users.nprim)
p                 211 security/selinux/ss/mls.c 	usrdatum = p->user_val_to_struct[c->user - 1];
p                 361 security/selinux/ss/mls.c int mls_from_string(struct policydb *p, char *str, struct context *context,
p                 367 security/selinux/ss/mls.c 	if (!p->mls_enabled)
p                 374 security/selinux/ss/mls.c 		rc = mls_context_to_sid(p, ':', tmpstr, context,
p                 402 security/selinux/ss/mls.c int mls_setup_user_range(struct policydb *p,
p                 406 security/selinux/ss/mls.c 	if (p->mls_enabled) {
p                 487 security/selinux/ss/mls.c int mls_compute_sid(struct policydb *p,
p                 500 security/selinux/ss/mls.c 	if (!p->mls_enabled)
p                 509 security/selinux/ss/mls.c 		r = hashtab_search(p->range_tr, &rtr);
p                 513 security/selinux/ss/mls.c 		if (tclass && tclass <= p->p_classes.nprim) {
p                 514 security/selinux/ss/mls.c 			cladatum = p->class_val_to_struct[tclass - 1];
p                 536 security/selinux/ss/mls.c 		if ((tclass == p->process_class) || (sock == true))
p                 562 security/selinux/ss/mls.c void mls_export_netlbl_lvl(struct policydb *p,
p                 566 security/selinux/ss/mls.c 	if (!p->mls_enabled)
p                 583 security/selinux/ss/mls.c void mls_import_netlbl_lvl(struct policydb *p,
p                 587 security/selinux/ss/mls.c 	if (!p->mls_enabled)
p                 604 security/selinux/ss/mls.c int mls_export_netlbl_cat(struct policydb *p,
p                 610 security/selinux/ss/mls.c 	if (!p->mls_enabled)
p                 633 security/selinux/ss/mls.c int mls_import_netlbl_cat(struct policydb *p,
p                 639 security/selinux/ss/mls.c 	if (!p->mls_enabled)
p                  28 security/selinux/ss/mls.h int mls_compute_context_len(struct policydb *p, struct context *context);
p                  29 security/selinux/ss/mls.h void mls_sid_to_context(struct policydb *p, struct context *context,
p                  31 security/selinux/ss/mls.h int mls_context_isvalid(struct policydb *p, struct context *c);
p                  32 security/selinux/ss/mls.h int mls_range_isvalid(struct policydb *p, struct mls_range *r);
p                  33 security/selinux/ss/mls.h int mls_level_isvalid(struct policydb *p, struct mls_level *l);
p                  35 security/selinux/ss/mls.h int mls_context_to_sid(struct policydb *p,
p                  42 security/selinux/ss/mls.h int mls_from_string(struct policydb *p, char *str, struct context *context,
p                  52 security/selinux/ss/mls.h int mls_compute_sid(struct policydb *p,
p                  60 security/selinux/ss/mls.h int mls_setup_user_range(struct policydb *p,
p                  65 security/selinux/ss/mls.h void mls_export_netlbl_lvl(struct policydb *p,
p                  68 security/selinux/ss/mls.h void mls_import_netlbl_lvl(struct policydb *p,
p                  71 security/selinux/ss/mls.h int mls_export_netlbl_cat(struct policydb *p,
p                  74 security/selinux/ss/mls.h int mls_import_netlbl_cat(struct policydb *p,
p                  78 security/selinux/ss/mls.h static inline void mls_export_netlbl_lvl(struct policydb *p,
p                  84 security/selinux/ss/mls.h static inline void mls_import_netlbl_lvl(struct policydb *p,
p                  90 security/selinux/ss/mls.h static inline int mls_export_netlbl_cat(struct policydb *p,
p                  96 security/selinux/ss/mls.h static inline int mls_import_netlbl_cat(struct policydb *p,
p                 185 security/selinux/ss/policydb.c static int perm_destroy(void *key, void *datum, void *p)
p                 192 security/selinux/ss/policydb.c static int common_destroy(void *key, void *datum, void *p)
p                 219 security/selinux/ss/policydb.c static int cls_destroy(void *key, void *datum, void *p)
p                 261 security/selinux/ss/policydb.c static int role_destroy(void *key, void *datum, void *p)
p                 275 security/selinux/ss/policydb.c static int type_destroy(void *key, void *datum, void *p)
p                 282 security/selinux/ss/policydb.c static int user_destroy(void *key, void *datum, void *p)
p                 298 security/selinux/ss/policydb.c static int sens_destroy(void *key, void *datum, void *p)
p                 313 security/selinux/ss/policydb.c static int cat_destroy(void *key, void *datum, void *p)
p                 332 security/selinux/ss/policydb.c static int filenametr_destroy(void *key, void *datum, void *p)
p                 343 security/selinux/ss/policydb.c static int range_tr_destroy(void *key, void *datum, void *p)
p                 371 security/selinux/ss/policydb.c static int roles_init(struct policydb *p)
p                 382 security/selinux/ss/policydb.c 	role->value = ++p->p_roles.nprim;
p                 391 security/selinux/ss/policydb.c 	rc = hashtab_insert(p->p_roles.table, key, role);
p                 468 security/selinux/ss/policydb.c static int policydb_init(struct policydb *p)
p                 472 security/selinux/ss/policydb.c 	memset(p, 0, sizeof(*p));
p                 475 security/selinux/ss/policydb.c 		rc = symtab_init(&p->symtab[i], symtab_sizes[i]);
p                 480 security/selinux/ss/policydb.c 	rc = avtab_init(&p->te_avtab);
p                 484 security/selinux/ss/policydb.c 	rc = roles_init(p);
p                 488 security/selinux/ss/policydb.c 	rc = cond_policydb_init(p);
p                 492 security/selinux/ss/policydb.c 	p->filename_trans = hashtab_create(filenametr_hash, filenametr_cmp,
p                 494 security/selinux/ss/policydb.c 	if (!p->filename_trans) {
p                 499 security/selinux/ss/policydb.c 	p->range_tr = hashtab_create(rangetr_hash, rangetr_cmp, 256);
p                 500 security/selinux/ss/policydb.c 	if (!p->range_tr) {
p                 505 security/selinux/ss/policydb.c 	ebitmap_init(&p->filename_trans_ttypes);
p                 506 security/selinux/ss/policydb.c 	ebitmap_init(&p->policycaps);
p                 507 security/selinux/ss/policydb.c 	ebitmap_init(&p->permissive_map);
p                 511 security/selinux/ss/policydb.c 	hashtab_destroy(p->filename_trans);
p                 512 security/selinux/ss/policydb.c 	hashtab_destroy(p->range_tr);
p                 514 security/selinux/ss/policydb.c 		hashtab_map(p->symtab[i].table, destroy_f[i], NULL);
p                 515 security/selinux/ss/policydb.c 		hashtab_destroy(p->symtab[i].table);
p                 532 security/selinux/ss/policydb.c 	struct policydb *p;
p                 536 security/selinux/ss/policydb.c 	p = datap;
p                 537 security/selinux/ss/policydb.c 	if (!comdatum->value || comdatum->value > p->p_commons.nprim)
p                 540 security/selinux/ss/policydb.c 	p->sym_val_to_name[SYM_COMMONS][comdatum->value - 1] = key;
p                 547 security/selinux/ss/policydb.c 	struct policydb *p;
p                 551 security/selinux/ss/policydb.c 	p = datap;
p                 552 security/selinux/ss/policydb.c 	if (!cladatum->value || cladatum->value > p->p_classes.nprim)
p                 555 security/selinux/ss/policydb.c 	p->sym_val_to_name[SYM_CLASSES][cladatum->value - 1] = key;
p                 556 security/selinux/ss/policydb.c 	p->class_val_to_struct[cladatum->value - 1] = cladatum;
p                 562 security/selinux/ss/policydb.c 	struct policydb *p;
p                 566 security/selinux/ss/policydb.c 	p = datap;
p                 568 security/selinux/ss/policydb.c 	    || role->value > p->p_roles.nprim
p                 569 security/selinux/ss/policydb.c 	    || role->bounds > p->p_roles.nprim)
p                 572 security/selinux/ss/policydb.c 	p->sym_val_to_name[SYM_ROLES][role->value - 1] = key;
p                 573 security/selinux/ss/policydb.c 	p->role_val_to_struct[role->value - 1] = role;
p                 579 security/selinux/ss/policydb.c 	struct policydb *p;
p                 583 security/selinux/ss/policydb.c 	p = datap;
p                 587 security/selinux/ss/policydb.c 		    || typdatum->value > p->p_types.nprim
p                 588 security/selinux/ss/policydb.c 		    || typdatum->bounds > p->p_types.nprim)
p                 590 security/selinux/ss/policydb.c 		p->sym_val_to_name[SYM_TYPES][typdatum->value - 1] = key;
p                 591 security/selinux/ss/policydb.c 		p->type_val_to_struct[typdatum->value - 1] = typdatum;
p                 599 security/selinux/ss/policydb.c 	struct policydb *p;
p                 603 security/selinux/ss/policydb.c 	p = datap;
p                 605 security/selinux/ss/policydb.c 	    || usrdatum->value > p->p_users.nprim
p                 606 security/selinux/ss/policydb.c 	    || usrdatum->bounds > p->p_users.nprim)
p                 609 security/selinux/ss/policydb.c 	p->sym_val_to_name[SYM_USERS][usrdatum->value - 1] = key;
p                 610 security/selinux/ss/policydb.c 	p->user_val_to_struct[usrdatum->value - 1] = usrdatum;
p                 616 security/selinux/ss/policydb.c 	struct policydb *p;
p                 620 security/selinux/ss/policydb.c 	p = datap;
p                 624 security/selinux/ss/policydb.c 		    levdatum->level->sens > p->p_levels.nprim)
p                 627 security/selinux/ss/policydb.c 		p->sym_val_to_name[SYM_LEVELS][levdatum->level->sens - 1] = key;
p                 635 security/selinux/ss/policydb.c 	struct policydb *p;
p                 639 security/selinux/ss/policydb.c 	p = datap;
p                 642 security/selinux/ss/policydb.c 		if (!catdatum->value || catdatum->value > p->p_cats.nprim)
p                 645 security/selinux/ss/policydb.c 		p->sym_val_to_name[SYM_CATS][catdatum->value - 1] = key;
p                 694 security/selinux/ss/policydb.c static int policydb_index(struct policydb *p)
p                 698 security/selinux/ss/policydb.c 	if (p->mls_enabled)
p                 700 security/selinux/ss/policydb.c 			 p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim,
p                 701 security/selinux/ss/policydb.c 			 p->p_bools.nprim, p->p_levels.nprim, p->p_cats.nprim);
p                 704 security/selinux/ss/policydb.c 			 p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim,
p                 705 security/selinux/ss/policydb.c 			 p->p_bools.nprim);
p                 708 security/selinux/ss/policydb.c 		 p->p_classes.nprim, p->te_avtab.nel);
p                 711 security/selinux/ss/policydb.c 	avtab_hash_eval(&p->te_avtab, "rules");
p                 712 security/selinux/ss/policydb.c 	symtab_hash_eval(p->symtab);
p                 715 security/selinux/ss/policydb.c 	p->class_val_to_struct = kcalloc(p->p_classes.nprim,
p                 716 security/selinux/ss/policydb.c 					 sizeof(*p->class_val_to_struct),
p                 718 security/selinux/ss/policydb.c 	if (!p->class_val_to_struct)
p                 721 security/selinux/ss/policydb.c 	p->role_val_to_struct = kcalloc(p->p_roles.nprim,
p                 722 security/selinux/ss/policydb.c 					sizeof(*p->role_val_to_struct),
p                 724 security/selinux/ss/policydb.c 	if (!p->role_val_to_struct)
p                 727 security/selinux/ss/policydb.c 	p->user_val_to_struct = kcalloc(p->p_users.nprim,
p                 728 security/selinux/ss/policydb.c 					sizeof(*p->user_val_to_struct),
p                 730 security/selinux/ss/policydb.c 	if (!p->user_val_to_struct)
p                 733 security/selinux/ss/policydb.c 	p->type_val_to_struct = kvcalloc(p->p_types.nprim,
p                 734 security/selinux/ss/policydb.c 					 sizeof(*p->type_val_to_struct),
p                 736 security/selinux/ss/policydb.c 	if (!p->type_val_to_struct)
p                 739 security/selinux/ss/policydb.c 	rc = cond_init_bool_indexes(p);
p                 744 security/selinux/ss/policydb.c 		p->sym_val_to_name[i] = kvcalloc(p->symtab[i].nprim,
p                 747 security/selinux/ss/policydb.c 		if (!p->sym_val_to_name[i])
p                 750 security/selinux/ss/policydb.c 		rc = hashtab_map(p->symtab[i].table, index_f[i], p);
p                 762 security/selinux/ss/policydb.c void policydb_destroy(struct policydb *p)
p                 772 security/selinux/ss/policydb.c 		hashtab_map(p->symtab[i].table, destroy_f[i], NULL);
p                 773 security/selinux/ss/policydb.c 		hashtab_destroy(p->symtab[i].table);
p                 777 security/selinux/ss/policydb.c 		kvfree(p->sym_val_to_name[i]);
p                 779 security/selinux/ss/policydb.c 	kfree(p->class_val_to_struct);
p                 780 security/selinux/ss/policydb.c 	kfree(p->role_val_to_struct);
p                 781 security/selinux/ss/policydb.c 	kfree(p->user_val_to_struct);
p                 782 security/selinux/ss/policydb.c 	kvfree(p->type_val_to_struct);
p                 784 security/selinux/ss/policydb.c 	avtab_destroy(&p->te_avtab);
p                 788 security/selinux/ss/policydb.c 		c = p->ocontexts[i];
p                 794 security/selinux/ss/policydb.c 		p->ocontexts[i] = NULL;
p                 797 security/selinux/ss/policydb.c 	g = p->genfs;
p                 811 security/selinux/ss/policydb.c 	p->genfs = NULL;
p                 813 security/selinux/ss/policydb.c 	cond_policydb_destroy(p);
p                 815 security/selinux/ss/policydb.c 	for (tr = p->role_tr; tr; tr = tr->next) {
p                 822 security/selinux/ss/policydb.c 	for (ra = p->role_allow; ra; ra = ra->next) {
p                 829 security/selinux/ss/policydb.c 	hashtab_map(p->filename_trans, filenametr_destroy, NULL);
p                 830 security/selinux/ss/policydb.c 	hashtab_destroy(p->filename_trans);
p                 832 security/selinux/ss/policydb.c 	hashtab_map(p->range_tr, range_tr_destroy, NULL);
p                 833 security/selinux/ss/policydb.c 	hashtab_destroy(p->range_tr);
p                 835 security/selinux/ss/policydb.c 	if (p->type_attr_map_array) {
p                 836 security/selinux/ss/policydb.c 		for (i = 0; i < p->p_types.nprim; i++)
p                 837 security/selinux/ss/policydb.c 			ebitmap_destroy(&p->type_attr_map_array[i]);
p                 838 security/selinux/ss/policydb.c 		kvfree(p->type_attr_map_array);
p                 841 security/selinux/ss/policydb.c 	ebitmap_destroy(&p->filename_trans_ttypes);
p                 842 security/selinux/ss/policydb.c 	ebitmap_destroy(&p->policycaps);
p                 843 security/selinux/ss/policydb.c 	ebitmap_destroy(&p->permissive_map);
p                 850 security/selinux/ss/policydb.c int policydb_load_isids(struct policydb *p, struct sidtab *s)
p                 861 security/selinux/ss/policydb.c 	head = p->ocontexts[OCON_ISID];
p                 890 security/selinux/ss/policydb.c int policydb_class_isvalid(struct policydb *p, unsigned int class)
p                 892 security/selinux/ss/policydb.c 	if (!class || class > p->p_classes.nprim)
p                 897 security/selinux/ss/policydb.c int policydb_role_isvalid(struct policydb *p, unsigned int role)
p                 899 security/selinux/ss/policydb.c 	if (!role || role > p->p_roles.nprim)
p                 904 security/selinux/ss/policydb.c int policydb_type_isvalid(struct policydb *p, unsigned int type)
p                 906 security/selinux/ss/policydb.c 	if (!type || type > p->p_types.nprim)
p                 915 security/selinux/ss/policydb.c int policydb_context_isvalid(struct policydb *p, struct context *c)
p                 920 security/selinux/ss/policydb.c 	if (!c->role || c->role > p->p_roles.nprim)
p                 923 security/selinux/ss/policydb.c 	if (!c->user || c->user > p->p_users.nprim)
p                 926 security/selinux/ss/policydb.c 	if (!c->type || c->type > p->p_types.nprim)
p                 933 security/selinux/ss/policydb.c 		role = p->role_val_to_struct[c->role - 1];
p                 941 security/selinux/ss/policydb.c 		usrdatum = p->user_val_to_struct[c->user - 1];
p                 950 security/selinux/ss/policydb.c 	if (!mls_context_isvalid(p, c))
p                1020 security/selinux/ss/policydb.c 				     struct policydb *p,
p                1034 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_MLS) {
p                1043 security/selinux/ss/policydb.c 	if (!policydb_context_isvalid(p, c)) {
p                1082 security/selinux/ss/policydb.c static int perm_read(struct policydb *p, struct hashtab *h, void *fp)
p                1115 security/selinux/ss/policydb.c static int common_read(struct policydb *p, struct hashtab *h, void *fp)
p                1145 security/selinux/ss/policydb.c 		rc = perm_read(p, comdatum->permissions.table, fp);
p                1184 security/selinux/ss/policydb.c static int read_cons_helper(struct policydb *p,
p                1254 security/selinux/ss/policydb.c 				if (p->policyvers >=
p                1280 security/selinux/ss/policydb.c static int class_read(struct policydb *p, struct hashtab *h, void *fp)
p                1318 security/selinux/ss/policydb.c 		cladatum->comdatum = hashtab_search(p->p_commons.table, cladatum->comkey);
p                1326 security/selinux/ss/policydb.c 		rc = perm_read(p, cladatum->permissions.table, fp);
p                1331 security/selinux/ss/policydb.c 	rc = read_cons_helper(p, &cladatum->constraints, ncons, 0, fp);
p                1335 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_VALIDATETRANS) {
p                1341 security/selinux/ss/policydb.c 		rc = read_cons_helper(p, &cladatum->validatetrans,
p                1347 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_NEW_OBJECT_DEFAULTS) {
p                1357 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_DEFAULT_TYPE) {
p                1374 security/selinux/ss/policydb.c static int role_read(struct policydb *p, struct hashtab *h, void *fp)
p                1386 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
p                1395 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
p                1430 security/selinux/ss/policydb.c static int type_read(struct policydb *p, struct hashtab *h, void *fp)
p                1442 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
p                1451 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) {
p                1504 security/selinux/ss/policydb.c static int user_read(struct policydb *p, struct hashtab *h, void *fp)
p                1516 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
p                1525 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
p                1536 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_MLS) {
p                1554 security/selinux/ss/policydb.c static int sens_read(struct policydb *p, struct hashtab *h, void *fp)
p                1595 security/selinux/ss/policydb.c static int cat_read(struct policydb *p, struct hashtab *h, void *fp)
p                1628 security/selinux/ss/policydb.c static int (*read_f[SYM_NUM]) (struct policydb *p, struct hashtab *h, void *fp) =
p                1643 security/selinux/ss/policydb.c 	struct policydb *p = datap;
p                1658 security/selinux/ss/policydb.c 		upper = p->user_val_to_struct[upper->bounds - 1];
p                1665 security/selinux/ss/policydb.c 			       sym_name(p, SYM_USERS, user->value - 1),
p                1666 security/selinux/ss/policydb.c 			       sym_name(p, SYM_ROLES, bit),
p                1667 security/selinux/ss/policydb.c 			       sym_name(p, SYM_USERS, upper->value - 1));
p                1679 security/selinux/ss/policydb.c 	struct policydb *p = datap;
p                1694 security/selinux/ss/policydb.c 		upper = p->role_val_to_struct[upper->bounds - 1];
p                1701 security/selinux/ss/policydb.c 			       sym_name(p, SYM_ROLES, role->value - 1),
p                1702 security/selinux/ss/policydb.c 			       sym_name(p, SYM_TYPES, bit),
p                1703 security/selinux/ss/policydb.c 			       sym_name(p, SYM_ROLES, upper->value - 1));
p                1715 security/selinux/ss/policydb.c 	struct policydb *p = datap;
p                1727 security/selinux/ss/policydb.c 		upper = p->type_val_to_struct[upper->bounds - 1];
p                1734 security/selinux/ss/policydb.c 			       sym_name(p, SYM_TYPES, upper->value - 1));
p                1742 security/selinux/ss/policydb.c static int policydb_bounds_sanity_check(struct policydb *p)
p                1746 security/selinux/ss/policydb.c 	if (p->policyvers < POLICYDB_VERSION_BOUNDARY)
p                1749 security/selinux/ss/policydb.c 	rc = hashtab_map(p->p_users.table,
p                1750 security/selinux/ss/policydb.c 			 user_bounds_sanity_check, p);
p                1754 security/selinux/ss/policydb.c 	rc = hashtab_map(p->p_roles.table,
p                1755 security/selinux/ss/policydb.c 			 role_bounds_sanity_check, p);
p                1759 security/selinux/ss/policydb.c 	rc = hashtab_map(p->p_types.table,
p                1760 security/selinux/ss/policydb.c 			 type_bounds_sanity_check, p);
p                1767 security/selinux/ss/policydb.c u16 string_to_security_class(struct policydb *p, const char *name)
p                1771 security/selinux/ss/policydb.c 	cladatum = hashtab_search(p->p_classes.table, name);
p                1778 security/selinux/ss/policydb.c u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name)
p                1784 security/selinux/ss/policydb.c 	if (!tclass || tclass > p->p_classes.nprim)
p                1787 security/selinux/ss/policydb.c 	cladatum = p->class_val_to_struct[tclass-1];
p                1801 security/selinux/ss/policydb.c static int range_read(struct policydb *p, void *fp)
p                1809 security/selinux/ss/policydb.c 	if (p->policyvers < POLICYDB_VERSION_MLS)
p                1829 security/selinux/ss/policydb.c 		if (p->policyvers >= POLICYDB_VERSION_RANGETRANS) {
p                1835 security/selinux/ss/policydb.c 			rt->target_class = p->process_class;
p                1838 security/selinux/ss/policydb.c 		if (!policydb_type_isvalid(p, rt->source_type) ||
p                1839 security/selinux/ss/policydb.c 		    !policydb_type_isvalid(p, rt->target_type) ||
p                1840 security/selinux/ss/policydb.c 		    !policydb_class_isvalid(p, rt->target_class))
p                1853 security/selinux/ss/policydb.c 		if (!mls_range_isvalid(p, r)) {
p                1858 security/selinux/ss/policydb.c 		rc = hashtab_insert(p->range_tr, rt, r);
p                1865 security/selinux/ss/policydb.c 	hash_eval(p->range_tr, "rangetr");
p                1873 security/selinux/ss/policydb.c static int filename_trans_read(struct policydb *p, void *fp)
p                1882 security/selinux/ss/policydb.c 	if (p->policyvers < POLICYDB_VERSION_FILENAME_TRANS)
p                1927 security/selinux/ss/policydb.c 		rc = ebitmap_set_bit(&p->filename_trans_ttypes, ft->ttype, 1);
p                1931 security/selinux/ss/policydb.c 		rc = hashtab_insert(p->filename_trans, ft, otype);
p                1945 security/selinux/ss/policydb.c 	hash_eval(p->filename_trans, "filenametr");
p                1955 security/selinux/ss/policydb.c static int genfs_read(struct policydb *p, void *fp)
p                1985 security/selinux/ss/policydb.c 		for (genfs_p = NULL, genfs = p->genfs; genfs;
p                2000 security/selinux/ss/policydb.c 			p->genfs = newgenfs;
p                2029 security/selinux/ss/policydb.c 			rc = context_read_and_validate(&newc->context[0], p, fp);
p                2068 security/selinux/ss/policydb.c static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
p                2093 security/selinux/ss/policydb.c 				p->ocontexts[i] = c;
p                2103 security/selinux/ss/policydb.c 				rc = context_read_and_validate(&c->context[0], p, fp);
p                2118 security/selinux/ss/policydb.c 				rc = context_read_and_validate(&c->context[0], p, fp);
p                2121 security/selinux/ss/policydb.c 				rc = context_read_and_validate(&c->context[1], p, fp);
p                2132 security/selinux/ss/policydb.c 				rc = context_read_and_validate(&c->context[0], p, fp);
p                2142 security/selinux/ss/policydb.c 				rc = context_read_and_validate(&c->context[0], p, fp);
p                2164 security/selinux/ss/policydb.c 				rc = context_read_and_validate(&c->context[0], p, fp);
p                2178 security/selinux/ss/policydb.c 				rc = context_read_and_validate(&c->context[0], p, fp);
p                2209 security/selinux/ss/policydb.c 							       p,
p                2236 security/selinux/ss/policydb.c 							       p,
p                2254 security/selinux/ss/policydb.c int policydb_read(struct policydb *p, void *fp)
p                2265 security/selinux/ss/policydb.c 	rc = policydb_init(p);
p                2324 security/selinux/ss/policydb.c 	p->policyvers = le32_to_cpu(buf[0]);
p                2325 security/selinux/ss/policydb.c 	if (p->policyvers < POLICYDB_VERSION_MIN ||
p                2326 security/selinux/ss/policydb.c 	    p->policyvers > POLICYDB_VERSION_MAX) {
p                2334 security/selinux/ss/policydb.c 		p->mls_enabled = 1;
p                2337 security/selinux/ss/policydb.c 		if (p->policyvers < POLICYDB_VERSION_MLS) {
p                2340 security/selinux/ss/policydb.c 				p->policyvers);
p                2344 security/selinux/ss/policydb.c 	p->reject_unknown = !!(le32_to_cpu(buf[1]) & REJECT_UNKNOWN);
p                2345 security/selinux/ss/policydb.c 	p->allow_unknown = !!(le32_to_cpu(buf[1]) & ALLOW_UNKNOWN);
p                2347 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_POLCAP) {
p                2348 security/selinux/ss/policydb.c 		rc = ebitmap_read(&p->policycaps, fp);
p                2353 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE) {
p                2354 security/selinux/ss/policydb.c 		rc = ebitmap_read(&p->permissive_map, fp);
p                2360 security/selinux/ss/policydb.c 	info = policydb_lookup_compat(p->policyvers);
p                2363 security/selinux/ss/policydb.c 		       "for version %d\n", p->policyvers);
p                2384 security/selinux/ss/policydb.c 			rc = read_f[i](p, p->symtab[i].table, fp);
p                2389 security/selinux/ss/policydb.c 		p->symtab[i].nprim = nprim;
p                2393 security/selinux/ss/policydb.c 	p->process_class = string_to_security_class(p, "process");
p                2394 security/selinux/ss/policydb.c 	if (!p->process_class)
p                2397 security/selinux/ss/policydb.c 	rc = avtab_read(&p->te_avtab, fp, p);
p                2401 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_BOOL) {
p                2402 security/selinux/ss/policydb.c 		rc = cond_read_list(p, fp);
p                2420 security/selinux/ss/policydb.c 			p->role_tr = tr;
p                2429 security/selinux/ss/policydb.c 		if (p->policyvers >= POLICYDB_VERSION_ROLETRANS) {
p                2435 security/selinux/ss/policydb.c 			tr->tclass = p->process_class;
p                2438 security/selinux/ss/policydb.c 		if (!policydb_role_isvalid(p, tr->role) ||
p                2439 security/selinux/ss/policydb.c 		    !policydb_type_isvalid(p, tr->type) ||
p                2440 security/selinux/ss/policydb.c 		    !policydb_class_isvalid(p, tr->tclass) ||
p                2441 security/selinux/ss/policydb.c 		    !policydb_role_isvalid(p, tr->new_role))
p                2459 security/selinux/ss/policydb.c 			p->role_allow = ra;
p                2467 security/selinux/ss/policydb.c 		if (!policydb_role_isvalid(p, ra->role) ||
p                2468 security/selinux/ss/policydb.c 		    !policydb_role_isvalid(p, ra->new_role))
p                2473 security/selinux/ss/policydb.c 	rc = filename_trans_read(p, fp);
p                2477 security/selinux/ss/policydb.c 	rc = policydb_index(p);
p                2482 security/selinux/ss/policydb.c 	p->process_trans_perms = string_to_av_perm(p, p->process_class, "transition");
p                2483 security/selinux/ss/policydb.c 	p->process_trans_perms |= string_to_av_perm(p, p->process_class, "dyntransition");
p                2484 security/selinux/ss/policydb.c 	if (!p->process_trans_perms)
p                2487 security/selinux/ss/policydb.c 	rc = ocontext_read(p, info, fp);
p                2491 security/selinux/ss/policydb.c 	rc = genfs_read(p, fp);
p                2495 security/selinux/ss/policydb.c 	rc = range_read(p, fp);
p                2499 security/selinux/ss/policydb.c 	p->type_attr_map_array = kvcalloc(p->p_types.nprim,
p                2500 security/selinux/ss/policydb.c 					  sizeof(*p->type_attr_map_array),
p                2502 security/selinux/ss/policydb.c 	if (!p->type_attr_map_array)
p                2506 security/selinux/ss/policydb.c 	for (i = 0; i < p->p_types.nprim; i++)
p                2507 security/selinux/ss/policydb.c 		ebitmap_init(&p->type_attr_map_array[i]);
p                2509 security/selinux/ss/policydb.c 	for (i = 0; i < p->p_types.nprim; i++) {
p                2510 security/selinux/ss/policydb.c 		struct ebitmap *e = &p->type_attr_map_array[i];
p                2512 security/selinux/ss/policydb.c 		if (p->policyvers >= POLICYDB_VERSION_AVTAB) {
p                2523 security/selinux/ss/policydb.c 	rc = policydb_bounds_sanity_check(p);
p                2531 security/selinux/ss/policydb.c 	policydb_destroy(p);
p                2648 security/selinux/ss/policydb.c static int role_trans_write(struct policydb *p, void *fp)
p                2650 security/selinux/ss/policydb.c 	struct role_trans *r = p->role_tr;
p                2670 security/selinux/ss/policydb.c 		if (p->policyvers >= POLICYDB_VERSION_ROLETRANS) {
p                2709 security/selinux/ss/policydb.c static int context_write(struct policydb *p, struct context *c,
p                2806 security/selinux/ss/policydb.c static int write_cons_helper(struct policydb *p, struct constraint_node *node,
p                2837 security/selinux/ss/policydb.c 				if (p->policyvers >=
p                2859 security/selinux/ss/policydb.c 	struct policydb *p = pd->p;
p                2903 security/selinux/ss/policydb.c 	rc = write_cons_helper(p, cladatum->constraints, fp);
p                2917 security/selinux/ss/policydb.c 	rc = write_cons_helper(p, cladatum->validatetrans, fp);
p                2921 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_NEW_OBJECT_DEFAULTS) {
p                2931 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_DEFAULT_TYPE) {
p                2947 security/selinux/ss/policydb.c 	struct policydb *p = pd->p;
p                2956 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
p                2985 security/selinux/ss/policydb.c 	struct policydb *p = pd->p;
p                2995 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) {
p                3026 security/selinux/ss/policydb.c 	struct policydb *p = pd->p;
p                3036 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
p                3075 security/selinux/ss/policydb.c static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
p                3086 security/selinux/ss/policydb.c 		for (c = p->ocontexts[i]; c; c = c->next)
p                3092 security/selinux/ss/policydb.c 		for (c = p->ocontexts[i]; c; c = c->next) {
p                3099 security/selinux/ss/policydb.c 				rc = context_write(p, &c->context[0], fp);
p                3113 security/selinux/ss/policydb.c 				rc = context_write(p, &c->context[0], fp);
p                3116 security/selinux/ss/policydb.c 				rc = context_write(p, &c->context[1], fp);
p                3127 security/selinux/ss/policydb.c 				rc = context_write(p, &c->context[0], fp);
p                3137 security/selinux/ss/policydb.c 				rc = context_write(p, &c->context[0], fp);
p                3151 security/selinux/ss/policydb.c 				rc = context_write(p, &c->context[0], fp);
p                3163 security/selinux/ss/policydb.c 				rc = context_write(p, &c->context[0], fp);
p                3181 security/selinux/ss/policydb.c 				rc = context_write(p, &c->context[0], fp);
p                3195 security/selinux/ss/policydb.c 				rc = context_write(p, &c->context[0], fp);
p                3205 security/selinux/ss/policydb.c static int genfs_write(struct policydb *p, void *fp)
p                3214 security/selinux/ss/policydb.c 	for (genfs = p->genfs; genfs; genfs = genfs->next)
p                3220 security/selinux/ss/policydb.c 	for (genfs = p->genfs; genfs; genfs = genfs->next) {
p                3249 security/selinux/ss/policydb.c 			rc = context_write(p, &c->context[0], fp);
p                3272 security/selinux/ss/policydb.c 	struct policydb *p = pd->p;
p                3280 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_RANGETRANS) {
p                3293 security/selinux/ss/policydb.c static int range_write(struct policydb *p, void *fp)
p                3299 security/selinux/ss/policydb.c 	pd.p = p;
p                3304 security/selinux/ss/policydb.c 	rc = hashtab_map(p->range_tr, hashtab_cnt, &nel);
p                3314 security/selinux/ss/policydb.c 	rc = hashtab_map(p->range_tr, range_write_helper, &pd);
p                3352 security/selinux/ss/policydb.c static int filename_trans_write(struct policydb *p, void *fp)
p                3358 security/selinux/ss/policydb.c 	if (p->policyvers < POLICYDB_VERSION_FILENAME_TRANS)
p                3362 security/selinux/ss/policydb.c 	rc = hashtab_map(p->filename_trans, hashtab_cnt, &nel);
p                3371 security/selinux/ss/policydb.c 	rc = hashtab_map(p->filename_trans, filename_write_helper, fp);
p                3383 security/selinux/ss/policydb.c int policydb_write(struct policydb *p, void *fp)
p                3398 security/selinux/ss/policydb.c 	if (p->policyvers < POLICYDB_VERSION_AVTAB) {
p                3400 security/selinux/ss/policydb.c 		       "  Because it is less than version %d\n", p->policyvers,
p                3406 security/selinux/ss/policydb.c 	if (p->mls_enabled)
p                3409 security/selinux/ss/policydb.c 	if (p->reject_unknown)
p                3411 security/selinux/ss/policydb.c 	if (p->allow_unknown)
p                3426 security/selinux/ss/policydb.c 	info = policydb_lookup_compat(p->policyvers);
p                3429 security/selinux/ss/policydb.c 		    "version %d", p->policyvers);
p                3433 security/selinux/ss/policydb.c 	buf[0] = cpu_to_le32(p->policyvers);
p                3442 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_POLCAP) {
p                3443 security/selinux/ss/policydb.c 		rc = ebitmap_write(&p->policycaps, fp);
p                3448 security/selinux/ss/policydb.c 	if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE) {
p                3449 security/selinux/ss/policydb.c 		rc = ebitmap_write(&p->permissive_map, fp);
p                3459 security/selinux/ss/policydb.c 		pd.p = p;
p                3461 security/selinux/ss/policydb.c 		buf[0] = cpu_to_le32(p->symtab[i].nprim);
p                3462 security/selinux/ss/policydb.c 		buf[1] = cpu_to_le32(p->symtab[i].table->nel);
p                3467 security/selinux/ss/policydb.c 		rc = hashtab_map(p->symtab[i].table, write_f[i], &pd);
p                3472 security/selinux/ss/policydb.c 	rc = avtab_write(p, &p->te_avtab, fp);
p                3476 security/selinux/ss/policydb.c 	rc = cond_write_list(p, p->cond_list, fp);
p                3480 security/selinux/ss/policydb.c 	rc = role_trans_write(p, fp);
p                3484 security/selinux/ss/policydb.c 	rc = role_allow_write(p->role_allow, fp);
p                3488 security/selinux/ss/policydb.c 	rc = filename_trans_write(p, fp);
p                3492 security/selinux/ss/policydb.c 	rc = ocontext_write(p, info, fp);
p                3496 security/selinux/ss/policydb.c 	rc = genfs_write(p, fp);
p                3500 security/selinux/ss/policydb.c 	rc = range_write(p, fp);
p                3504 security/selinux/ss/policydb.c 	for (i = 0; i < p->p_types.nprim; i++) {
p                3505 security/selinux/ss/policydb.c 		struct ebitmap *e = &p->type_attr_map_array[i];
p                 311 security/selinux/ss/policydb.h extern void policydb_destroy(struct policydb *p);
p                 312 security/selinux/ss/policydb.h extern int policydb_load_isids(struct policydb *p, struct sidtab *s);
p                 313 security/selinux/ss/policydb.h extern int policydb_context_isvalid(struct policydb *p, struct context *c);
p                 314 security/selinux/ss/policydb.h extern int policydb_class_isvalid(struct policydb *p, unsigned int class);
p                 315 security/selinux/ss/policydb.h extern int policydb_type_isvalid(struct policydb *p, unsigned int type);
p                 316 security/selinux/ss/policydb.h extern int policydb_role_isvalid(struct policydb *p, unsigned int role);
p                 317 security/selinux/ss/policydb.h extern int policydb_read(struct policydb *p, void *fp);
p                 318 security/selinux/ss/policydb.h extern int policydb_write(struct policydb *p, void *fp);
p                 340 security/selinux/ss/policydb.h 	struct policydb *p;
p                 366 security/selinux/ss/policydb.h static inline char *sym_name(struct policydb *p, unsigned int sym_num, unsigned int element_nr)
p                 368 security/selinux/ss/policydb.h 	return p->sym_val_to_name[sym_num][element_nr];
p                 371 security/selinux/ss/policydb.h extern u16 string_to_security_class(struct policydb *p, const char *name);
p                 372 security/selinux/ss/policydb.h extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name);
p                 246 security/selinux/ss/services.c 	struct policydb *p = &state->ss->policydb;
p                 248 security/selinux/ss/services.c 	return p->mls_enabled;
p                 596 security/selinux/ss/services.c 		for (i = 0; i < ARRAY_SIZE(xperms->drivers.p); i++)
p                 597 security/selinux/ss/services.c 			xperms->drivers.p[i] |= node->datum.u.xperms->perms.p[i];
p                 600 security/selinux/ss/services.c 		security_xperm_set(xperms->drivers.p,
p                 724 security/selinux/ss/services.c 	struct policydb *p = &state->ss->policydb;
p                 728 security/selinux/ss/services.c 	if (context_struct_to_string(p, ocontext, &o, &olen))
p                 730 security/selinux/ss/services.c 	if (context_struct_to_string(p, ncontext, &n, &nlen))
p                 732 security/selinux/ss/services.c 	if (context_struct_to_string(p, tcontext, &t, &tlen))
p                 737 security/selinux/ss/services.c 		  o, n, t, sym_name(p, SYM_CLASSES, tclass-1));
p                 953 security/selinux/ss/services.c 		if (!security_xperm_test(node->datum.u.xperms->perms.p,
p                 963 security/selinux/ss/services.c 			memset(xpermd->allowed->p, 0xff,
p                 964 security/selinux/ss/services.c 					sizeof(xpermd->allowed->p));
p                 967 security/selinux/ss/services.c 			for (i = 0; i < ARRAY_SIZE(xpermd->allowed->p); i++)
p                 968 security/selinux/ss/services.c 				xpermd->allowed->p[i] |=
p                 969 security/selinux/ss/services.c 					node->datum.u.xperms->perms.p[i];
p                 974 security/selinux/ss/services.c 			memset(xpermd->auditallow->p, 0xff,
p                 975 security/selinux/ss/services.c 					sizeof(xpermd->auditallow->p));
p                 978 security/selinux/ss/services.c 			for (i = 0; i < ARRAY_SIZE(xpermd->auditallow->p); i++)
p                 979 security/selinux/ss/services.c 				xpermd->auditallow->p[i] |=
p                 980 security/selinux/ss/services.c 					node->datum.u.xperms->perms.p[i];
p                 985 security/selinux/ss/services.c 			memset(xpermd->dontaudit->p, 0xff,
p                 986 security/selinux/ss/services.c 					sizeof(xpermd->dontaudit->p));
p                 989 security/selinux/ss/services.c 			for (i = 0; i < ARRAY_SIZE(xpermd->dontaudit->p); i++)
p                 990 security/selinux/ss/services.c 				xpermd->dontaudit->p[i] |=
p                 991 security/selinux/ss/services.c 					node->datum.u.xperms->perms.p[i];
p                1017 security/selinux/ss/services.c 	memset(xpermd->allowed->p, 0, sizeof(xpermd->allowed->p));
p                1018 security/selinux/ss/services.c 	memset(xpermd->auditallow->p, 0, sizeof(xpermd->auditallow->p));
p                1019 security/selinux/ss/services.c 	memset(xpermd->dontaudit->p, 0, sizeof(xpermd->dontaudit->p));
p                1077 security/selinux/ss/services.c 	memset(xpermd->allowed->p, 0xff, sizeof(xpermd->allowed->p));
p                1208 security/selinux/ss/services.c static int context_struct_to_string(struct policydb *p,
p                1229 security/selinux/ss/services.c 	*scontext_len += strlen(sym_name(p, SYM_USERS, context->user - 1)) + 1;
p                1230 security/selinux/ss/services.c 	*scontext_len += strlen(sym_name(p, SYM_ROLES, context->role - 1)) + 1;
p                1231 security/selinux/ss/services.c 	*scontext_len += strlen(sym_name(p, SYM_TYPES, context->type - 1)) + 1;
p                1232 security/selinux/ss/services.c 	*scontext_len += mls_compute_context_len(p, context);
p                1247 security/selinux/ss/services.c 		sym_name(p, SYM_USERS, context->user - 1),
p                1248 security/selinux/ss/services.c 		sym_name(p, SYM_ROLES, context->role - 1),
p                1249 security/selinux/ss/services.c 		sym_name(p, SYM_TYPES, context->type - 1));
p                1251 security/selinux/ss/services.c 	mls_sid_to_context(p, context, &scontextp);
p                1383 security/selinux/ss/services.c 	char *scontextp, *p, oldc;
p                1394 security/selinux/ss/services.c 	p = scontextp;
p                1395 security/selinux/ss/services.c 	while (*p && *p != ':')
p                1396 security/selinux/ss/services.c 		p++;
p                1398 security/selinux/ss/services.c 	if (*p == 0)
p                1401 security/selinux/ss/services.c 	*p++ = 0;
p                1410 security/selinux/ss/services.c 	scontextp = p;
p                1411 security/selinux/ss/services.c 	while (*p && *p != ':')
p                1412 security/selinux/ss/services.c 		p++;
p                1414 security/selinux/ss/services.c 	if (*p == 0)
p                1417 security/selinux/ss/services.c 	*p++ = 0;
p                1425 security/selinux/ss/services.c 	scontextp = p;
p                1426 security/selinux/ss/services.c 	while (*p && *p != ':')
p                1427 security/selinux/ss/services.c 		p++;
p                1428 security/selinux/ss/services.c 	oldc = *p;
p                1429 security/selinux/ss/services.c 	*p++ = 0;
p                1437 security/selinux/ss/services.c 	rc = mls_context_to_sid(pol, oldc, p, ctx, sidtabp, def_sid);
p                1928 security/selinux/ss/services.c static int convert_context(struct context *oldc, struct context *newc, void *p)
p                1939 security/selinux/ss/services.c 	args = p;
p                2052 security/selinux/ss/services.c 	struct policydb *p = &state->ss->policydb;
p                2057 security/selinux/ss/services.c 		state->policycap[i] = ebitmap_get_bit(&p->policycaps, i);
p                2062 security/selinux/ss/services.c 			ebitmap_get_bit(&p->policycaps, i));
p                2064 security/selinux/ss/services.c 	ebitmap_for_each_positive_bit(&p->policycaps, node, i) {
p                2243 security/selinux/ss/services.c 	struct policydb *p = &state->ss->policydb;
p                2247 security/selinux/ss/services.c 	len = p->len;
p                  14 security/selinux/ss/symtab.c 	const char *p, *keyp;
p                  21 security/selinux/ss/symtab.c 	for (p = keyp; (p - keyp) < size; p++)
p                  22 security/selinux/ss/symtab.c 		val = (val << 4 | (val >> (8*sizeof(unsigned int)-4))) ^ (*p);
p                 478 security/smack/smack.h 					     struct path p)
p                 480 security/smack/smack.h 	a->a.u.path = p;
p                 511 security/smack/smack.h 					     struct path p)
p                2040 security/smack/smack_lsm.c static int smk_curacc_on_task(struct task_struct *p, int access,
p                2044 security/smack/smack_lsm.c 	struct smack_known *skp = smk_of_task_struct(p);
p                2048 security/smack/smack_lsm.c 	smk_ad_setfield_u_tsk(&ad, p);
p                2050 security/smack/smack_lsm.c 	rc = smk_bu_task(p, access, rc);
p                2061 security/smack/smack_lsm.c static int smack_task_setpgid(struct task_struct *p, pid_t pgid)
p                2063 security/smack/smack_lsm.c 	return smk_curacc_on_task(p, MAY_WRITE, __func__);
p                2072 security/smack/smack_lsm.c static int smack_task_getpgid(struct task_struct *p)
p                2074 security/smack/smack_lsm.c 	return smk_curacc_on_task(p, MAY_READ, __func__);
p                2083 security/smack/smack_lsm.c static int smack_task_getsid(struct task_struct *p)
p                2085 security/smack/smack_lsm.c 	return smk_curacc_on_task(p, MAY_READ, __func__);
p                2095 security/smack/smack_lsm.c static void smack_task_getsecid(struct task_struct *p, u32 *secid)
p                2097 security/smack/smack_lsm.c 	struct smack_known *skp = smk_of_task_struct(p);
p                2109 security/smack/smack_lsm.c static int smack_task_setnice(struct task_struct *p, int nice)
p                2111 security/smack/smack_lsm.c 	return smk_curacc_on_task(p, MAY_WRITE, __func__);
p                2121 security/smack/smack_lsm.c static int smack_task_setioprio(struct task_struct *p, int ioprio)
p                2123 security/smack/smack_lsm.c 	return smk_curacc_on_task(p, MAY_WRITE, __func__);
p                2132 security/smack/smack_lsm.c static int smack_task_getioprio(struct task_struct *p)
p                2134 security/smack/smack_lsm.c 	return smk_curacc_on_task(p, MAY_READ, __func__);
p                2143 security/smack/smack_lsm.c static int smack_task_setscheduler(struct task_struct *p)
p                2145 security/smack/smack_lsm.c 	return smk_curacc_on_task(p, MAY_WRITE, __func__);
p                2154 security/smack/smack_lsm.c static int smack_task_getscheduler(struct task_struct *p)
p                2156 security/smack/smack_lsm.c 	return smk_curacc_on_task(p, MAY_READ, __func__);
p                2165 security/smack/smack_lsm.c static int smack_task_movememory(struct task_struct *p)
p                2167 security/smack/smack_lsm.c 	return smk_curacc_on_task(p, MAY_WRITE, __func__);
p                2180 security/smack/smack_lsm.c static int smack_task_kill(struct task_struct *p, struct kernel_siginfo *info,
p                2185 security/smack/smack_lsm.c 	struct smack_known *tkp = smk_of_task_struct(p);
p                2192 security/smack/smack_lsm.c 	smk_ad_setfield_u_tsk(&ad, p);
p                2199 security/smack/smack_lsm.c 		rc = smk_bu_task(p, MAY_DELIVER, rc);
p                2220 security/smack/smack_lsm.c static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
p                2223 security/smack/smack_lsm.c 	struct smack_known *skp = smk_of_task_struct(p);
p                3480 security/smack/smack_lsm.c static int smack_getprocattr(struct task_struct *p, char *name, char **value)
p                3482 security/smack/smack_lsm.c 	struct smack_known *skp = smk_of_task_struct(p);
p                  30 security/tomoyo/audit.c 	unsigned long pos = bprm->p;
p                 335 security/tomoyo/audit.c 	struct tomoyo_profile *p;
p                 339 security/tomoyo/audit.c 	p = tomoyo_profile(ns, profile);
p                 340 security/tomoyo/audit.c 	if (tomoyo_log_count >= p->pref[TOMOYO_PREF_MAX_AUDIT_LOG])
p                 345 security/tomoyo/audit.c 	mode = p->config[index];
p                 347 security/tomoyo/audit.c 		mode = p->config[category];
p                 349 security/tomoyo/audit.c 		mode = p->default_config;
p                1002 security/tomoyo/common.c 		struct task_struct *p;
p                1006 security/tomoyo/common.c 			p = find_task_by_pid_ns(pid, &init_pid_ns);
p                1008 security/tomoyo/common.c 			p = find_task_by_vpid(pid);
p                1009 security/tomoyo/common.c 		if (p)
p                1010 security/tomoyo/common.c 			domain = tomoyo_task(p)->domain_info;
p                1701 security/tomoyo/common.c 	struct task_struct *p;
p                1718 security/tomoyo/common.c 		p = find_task_by_pid_ns(pid, &init_pid_ns);
p                1720 security/tomoyo/common.c 		p = find_task_by_vpid(pid);
p                1721 security/tomoyo/common.c 	if (p)
p                1722 security/tomoyo/common.c 		domain = tomoyo_task(p)->domain_info;
p                 113 security/tomoyo/condition.c 	unsigned long pos = bprm->p;
p                 623 security/tomoyo/domain.c 	unsigned long pos = bprm->p;
p                  26 security/tomoyo/realpath.c 	const char *p = str;
p                  30 security/tomoyo/realpath.c 	if (!p)
p                  33 security/tomoyo/realpath.c 		const unsigned char c = p[i];
p                  48 security/tomoyo/realpath.c 	p = str;
p                  50 security/tomoyo/realpath.c 		const unsigned char c = p[i];
p                 832 security/tomoyo/util.c static bool tomoyo_path_matches_pattern2(const char *f, const char *p)
p                 837 security/tomoyo/util.c 	while (*f && *p) {
p                 841 security/tomoyo/util.c 		p_delimiter = strchr(p, '/');
p                 843 security/tomoyo/util.c 			p_delimiter = p + strlen(p);
p                 844 security/tomoyo/util.c 		if (*p == '\\' && *(p + 1) == '{')
p                 846 security/tomoyo/util.c 		if (!tomoyo_file_matches_pattern(f, f_delimiter, p,
p                 852 security/tomoyo/util.c 		p = p_delimiter;
p                 853 security/tomoyo/util.c 		if (*p)
p                 854 security/tomoyo/util.c 			p++;
p                 857 security/tomoyo/util.c 	while (*p == '\\' &&
p                 858 security/tomoyo/util.c 	       (*(p + 1) == '*' || *(p + 1) == '@'))
p                 859 security/tomoyo/util.c 		p += 2;
p                 860 security/tomoyo/util.c 	return !*f && !*p;
p                 868 security/tomoyo/util.c 	if (*(p - 1) != '/' || p_delimiter <= p + 3 || *p_delimiter != '/' ||
p                 873 security/tomoyo/util.c 		if (!tomoyo_file_matches_pattern(f, f_delimiter, p + 2,
p                 919 security/tomoyo/util.c 	const char *p = pattern->name;
p                 929 security/tomoyo/util.c 	if (strncmp(f, p, len))
p                 932 security/tomoyo/util.c 	p += len;
p                 933 security/tomoyo/util.c 	return tomoyo_path_matches_pattern2(f, p);
p                 974 security/tomoyo/util.c 	struct tomoyo_profile *p;
p                 978 security/tomoyo/util.c 	p = tomoyo_profile(ns, profile);
p                 979 security/tomoyo/util.c 	mode = p->config[index];
p                 981 security/tomoyo/util.c 		mode = p->config[tomoyo_index2category[index]
p                 984 security/tomoyo/util.c 		mode = p->default_config;
p                 213 sound/aoa/core/gpio-pmf.c 	struct pmf_args args = { .count = 1, .u[0].p = &ret };
p                 864 sound/aoa/soundbus/i2sbus/pcm.c 	struct codec_info_item *p, *tmp;
p                 869 sound/aoa/soundbus/i2sbus/pcm.c 	list_for_each_entry_safe(p, tmp, &i2sdev->sound.codec_list, list) {
p                 871 sound/aoa/soundbus/i2sbus/pcm.c 		list_del(&p->list);
p                 872 sound/aoa/soundbus/i2sbus/pcm.c 		module_put(p->codec->owner);
p                 873 sound/aoa/soundbus/i2sbus/pcm.c 		kfree(p);
p                 380 sound/arm/aaci.c static int aaci_rule_channels(struct snd_pcm_hw_params *p,
p                 395 sound/arm/aaci.c 	return snd_interval_list(hw_param_interval(p, rule->var),
p                 272 sound/core/control.c 	kctl->tlv.p = ncontrol->tlv.p;
p                1162 sound/core/control.c 	char *names, *p;
p                1177 sound/core/control.c 	p = names;
p                1179 sound/core/control.c 		name_len = strnlen(p, buf_len);
p                1184 sound/core/control.c 		p += name_len + 1;
p                1450 sound/core/control.c 	if (kctl->tlv.p == NULL)
p                1453 sound/core/control.c 	len = sizeof(unsigned int) * 2 + kctl->tlv.p[1];
p                1457 sound/core/control.c 	if (copy_to_user(buf, kctl->tlv.p, len))
p                1514 sound/core/control.c 	struct snd_kctl_ioctl *p;
p                1573 sound/core/control.c 	list_for_each_entry(p, &snd_control_ioctls, list) {
p                1574 sound/core/control.c 		err = p->fioctl(card, ctl, cmd, arg);
p                1709 sound/core/control.c 	struct snd_kctl_ioctl *p;
p                1714 sound/core/control.c 	list_for_each_entry(p, lists, list) {
p                1715 sound/core/control.c 		if (p->fioctl == fcn) {
p                1716 sound/core/control.c 			list_del(&p->list);
p                1718 sound/core/control.c 			kfree(p);
p                 435 sound/core/control_compat.c 	struct snd_kctl_ioctl *p;
p                 477 sound/core/control_compat.c 	list_for_each_entry(p, &snd_control_compat_ioctls, list) {
p                 478 sound/core/control_compat.c 		if (p->fioctl) {
p                 479 sound/core/control_compat.c 			err = p->fioctl(ctl->card, ctl, cmd, arg);
p                  33 sound/core/device.c 	struct list_head *p;
p                  48 sound/core/device.c 	list_for_each_prev(p, &card->devices) {
p                  49 sound/core/device.c 		struct snd_device *pdev = list_entry(p, struct snd_device, list);
p                  54 sound/core/device.c 	list_add(&dev->list, p);
p                  79 sound/core/info.c 	if (!entry || !entry->p)
p                 354 sound/core/info.c static int snd_info_seq_show(struct seq_file *seq, void *p)
p                 460 sound/core/info.c 	snd_proc_root->p = proc_mkdir("asound", NULL);
p                 461 sound/core/info.c 	if (!snd_proc_root->p)
p                 528 sound/core/info.c 	struct proc_dir_entry *p;
p                 543 sound/core/info.c 	p = proc_symlink(card->id, snd_proc_root->p, card->proc_root->name);
p                 544 sound/core/info.c 	if (!p)
p                 546 sound/core/info.c 	card->proc_root_link = p;
p                 562 sound/core/info.c 						    snd_proc_root->p,
p                 751 sound/core/info.c 	struct snd_info_entry *p;
p                 753 sound/core/info.c 	if (!entry->p)
p                 755 sound/core/info.c 	list_for_each_entry(p, &entry->children, list)
p                 756 sound/core/info.c 		snd_info_disconnect(p);
p                 757 sound/core/info.c 	proc_remove(entry->p);
p                 758 sound/core/info.c 	entry->p = NULL;
p                 769 sound/core/info.c 	struct snd_info_entry *p, *n;
p                 773 sound/core/info.c 	if (entry->p) {
p                 780 sound/core/info.c 	list_for_each_entry_safe(p, n, &entry->children, list)
p                 781 sound/core/info.c 		snd_info_free_entry(p);
p                 783 sound/core/info.c 	p = entry->parent;
p                 784 sound/core/info.c 	if (p) {
p                 785 sound/core/info.c 		mutex_lock(&p->access);
p                 787 sound/core/info.c 		mutex_unlock(&p->access);
p                 798 sound/core/info.c 	struct proc_dir_entry *root, *p = NULL;
p                 802 sound/core/info.c 	root = entry->parent == NULL ? snd_proc_root->p : entry->parent->p;
p                 804 sound/core/info.c 	if (entry->p || !root)
p                 807 sound/core/info.c 		p = proc_mkdir_mode(entry->name, entry->mode, root);
p                 808 sound/core/info.c 		if (!p) {
p                 818 sound/core/info.c 		p = proc_create_data(entry->name, entry->mode, root,
p                 820 sound/core/info.c 		if (!p) {
p                 824 sound/core/info.c 		proc_set_size(p, entry->size);
p                 826 sound/core/info.c 	entry->p = p;
p                 843 sound/core/info.c 	struct snd_info_entry *p;
p                 846 sound/core/info.c 	if (!entry->p) {
p                 852 sound/core/info.c 	list_for_each_entry(p, &entry->children, list) {
p                 853 sound/core/info.c 		err = snd_info_register(p);
p                  46 sound/core/oss/linear.c 	unsigned char *p = (unsigned char *)&tmp;
p                  48 sound/core/oss/linear.c 	memcpy(p + data->copy_ofs, src + data->src_ofs, data->copy_bytes);
p                  52 sound/core/oss/linear.c 	memcpy(dst, p + data->dst_ofs, data->dst_bytes);
p                 300 sound/core/oss/mixer_oss.c 	int __user *p = argp;
p                 312 sound/core/oss/mixer_oss.c 			if (get_user(tmp, p))
p                 317 sound/core/oss/mixer_oss.c 			return put_user(tmp, p);
p                 319 sound/core/oss/mixer_oss.c 			return put_user(SNDRV_OSS_VERSION, p);
p                 321 sound/core/oss/mixer_oss.c 			return put_user(1, p);
p                 326 sound/core/oss/mixer_oss.c 			return put_user(tmp, p);
p                 331 sound/core/oss/mixer_oss.c 			return put_user(tmp, p);
p                 336 sound/core/oss/mixer_oss.c 			return put_user(tmp, p);
p                 341 sound/core/oss/mixer_oss.c 			return put_user(tmp, p);
p                 346 sound/core/oss/mixer_oss.c 			return put_user(tmp, p);
p                 350 sound/core/oss/mixer_oss.c 		if (get_user(tmp, p))
p                 355 sound/core/oss/mixer_oss.c 		return put_user(tmp, p);
p                 360 sound/core/oss/mixer_oss.c 		return put_user(tmp, p);
p                 934 sound/core/oss/mixer_oss.c 	struct slot *p = chn->private_data;
p                 935 sound/core/oss/mixer_oss.c 	if (p) {
p                 936 sound/core/oss/mixer_oss.c 		if (p->allocated && p->assigned) {
p                 937 sound/core/oss/mixer_oss.c 			kfree(p->assigned->name);
p                 938 sound/core/oss/mixer_oss.c 			kfree(p->assigned);
p                 940 sound/core/oss/mixer_oss.c 		kfree(p);
p                1150 sound/core/oss/mixer_oss.c 		struct slot *p;
p                1154 sound/core/oss/mixer_oss.c 		p = (struct slot *)mixer->slots[i].private_data;
p                1156 sound/core/oss/mixer_oss.c 		if (p && p->assigned)
p                1158 sound/core/oss/mixer_oss.c 				    p->assigned->name,
p                1159 sound/core/oss/mixer_oss.c 				    p->assigned->index);
p                2558 sound/core/oss/pcm_oss.c 	int __user *p = (int __user *)arg;
p                2563 sound/core/oss/pcm_oss.c 		return put_user(SNDRV_OSS_VERSION, p);
p                2565 sound/core/oss/pcm_oss.c 		return put_user(1, p);
p                2591 sound/core/oss/pcm_oss.c 		if (get_user(res, p))
p                2595 sound/core/oss/pcm_oss.c 		return put_user(res, p);
p                2600 sound/core/oss/pcm_oss.c 		return put_user(res, p);
p                2602 sound/core/oss/pcm_oss.c 		if (get_user(res, p))
p                2607 sound/core/oss/pcm_oss.c 		return put_user(--res, p);
p                2612 sound/core/oss/pcm_oss.c 		return put_user(res, p);
p                2614 sound/core/oss/pcm_oss.c 		if (get_user(res, p))
p                2619 sound/core/oss/pcm_oss.c 		return put_user(res, p);
p                2624 sound/core/oss/pcm_oss.c 		return put_user(res, p);
p                2626 sound/core/oss/pcm_oss.c 		if (get_user(res, p))
p                2631 sound/core/oss/pcm_oss.c 		return put_user(res, p);
p                2636 sound/core/oss/pcm_oss.c 		return put_user(res, p);
p                2643 sound/core/oss/pcm_oss.c 		if (get_user(res, p))
p                2648 sound/core/oss/pcm_oss.c 		return put_user(res, p);
p                2650 sound/core/oss/pcm_oss.c 		if (get_user(res, p))
p                2657 sound/core/oss/pcm_oss.c 		return put_user(res, p);
p                2670 sound/core/oss/pcm_oss.c 		return put_user(res, p);
p                2675 sound/core/oss/pcm_oss.c 		return put_user(res, p);
p                2677 sound/core/oss/pcm_oss.c 		if (get_user(res, p))
p                2703 sound/core/oss/pcm_oss.c 			put_user(0, p);
p                2706 sound/core/oss/pcm_oss.c 		return put_user(res, p);
p                 167 sound/core/pcm_compat.c #define snd_pcm_ioctl_channel_info_x32(s, p)	\
p                 168 sound/core/pcm_compat.c 	snd_pcm_channel_info_user(s, p)
p                  65 sound/core/seq/oss/seq_oss_ioctl.c 	int __user *p = arg;
p                  97 sound/core/seq/oss/seq_oss_ioctl.c 		if (get_user(dev, p))
p                 104 sound/core/seq/oss/seq_oss_ioctl.c 		return put_user(dp->readq->qlen, p) ? -EFAULT : 0;
p                 109 sound/core/seq/oss/seq_oss_ioctl.c 		return put_user(snd_seq_oss_writeq_get_free_size(dp->writeq), p) ? -EFAULT : 0;
p                 112 sound/core/seq/oss/seq_oss_ioctl.c 		return put_user(snd_seq_oss_timer_cur_tick(dp->timer), p) ? -EFAULT : 0;
p                 115 sound/core/seq/oss/seq_oss_ioctl.c 		if (get_user(dev, p))
p                 120 sound/core/seq/oss/seq_oss_ioctl.c 		return put_user(dp->max_synthdev, p) ? -EFAULT : 0;
p                 123 sound/core/seq/oss/seq_oss_ioctl.c 		return put_user(dp->max_mididev, p) ? -EFAULT : 0;
p                 126 sound/core/seq/oss/seq_oss_ioctl.c 		if (get_user(dev, p))
p                 129 sound/core/seq/oss/seq_oss_ioctl.c 		return put_user(val, p) ? -EFAULT : 0;
p                 132 sound/core/seq/oss/seq_oss_ioctl.c 		if (get_user(dev, p))
p                 150 sound/core/seq/oss/seq_oss_ioctl.c 		if (get_user(val, p))
p                 162 sound/core/seq/oss/seq_oss_ioctl.c 		if (get_user(val, p))
p                 169 sound/core/seq/oss/seq_oss_ioctl.c 		return put_user(val, p) ? -EFAULT : 0;
p                 442 sound/core/seq/oss/seq_oss_synth.c 			    const char __user *buf, int p, int c)
p                 460 sound/core/seq/oss/seq_oss_synth.c 		rc = rec->oper.load_patch(&info->arg, fmt, buf, p, c);
p                  26 sound/core/seq/oss/seq_oss_synth.h 				 const char __user *buf, int p, int c);
p                1956 sound/core/seq/seq_clientmgr.c 	struct list_head *p;
p                1980 sound/core/seq/seq_clientmgr.c 	list_for_each(p, &group->list_head) {
p                1985 sound/core/seq/seq_clientmgr.c 				s = list_entry(p, struct snd_seq_subscribers, src_list);
p                1988 sound/core/seq/seq_clientmgr.c 				s = list_entry(p, struct snd_seq_subscribers, dest_list);
p                2389 sound/core/seq/seq_clientmgr.c 	struct list_head *p;
p                2399 sound/core/seq/seq_clientmgr.c 	list_for_each(p, &group->list_head) {
p                2401 sound/core/seq/seq_clientmgr.c 			s = list_entry(p, struct snd_seq_subscribers, src_list);
p                2403 sound/core/seq/seq_clientmgr.c 			s = list_entry(p, struct snd_seq_subscribers, dest_list);
p                2427 sound/core/seq/seq_clientmgr.c 	struct snd_seq_client_port *p;
p                2430 sound/core/seq/seq_clientmgr.c 	list_for_each_entry(p, &client->ports_list_head, list) {
p                2432 sound/core/seq/seq_clientmgr.c 			    p->addr.port, p->name,
p                2433 sound/core/seq/seq_clientmgr.c 			    FLAG_PERM_RD(p->capability),
p                2434 sound/core/seq/seq_clientmgr.c 			    FLAG_PERM_WR(p->capability),
p                2435 sound/core/seq/seq_clientmgr.c 			    FLAG_PERM_EX(p->capability),
p                2436 sound/core/seq/seq_clientmgr.c 			    FLAG_PERM_DUPLEX(p->capability));
p                2437 sound/core/seq/seq_clientmgr.c 		snd_seq_info_dump_subscribers(buffer, &p->c_src, 1, "    Connecting To: ");
p                2438 sound/core/seq/seq_clientmgr.c 		snd_seq_info_dump_subscribers(buffer, &p->c_dest, 0, "    Connected From: ");
p                  77 sound/core/seq/seq_dummy.c 	struct snd_seq_dummy_port *p;
p                  80 sound/core/seq/seq_dummy.c 	p = private_data;
p                  85 sound/core/seq/seq_dummy.c 	if (p->duplex)
p                  86 sound/core/seq/seq_dummy.c 		tmpev.source.port = p->connect;
p                  88 sound/core/seq/seq_dummy.c 		tmpev.source.port = p->port;
p                  90 sound/core/seq/seq_dummy.c 	return snd_seq_kernel_client_dispatch(p->client, &tmpev, atomic, hop);
p                 267 sound/core/seq/seq_midi.c 	unsigned int p, ports;
p                 324 sound/core/seq/seq_midi.c 	for (p = 0; p < ports; p++) {
p                 325 sound/core/seq/seq_midi.c 		ms = &msynth[p];
p                 327 sound/core/seq/seq_midi.c 		if (snd_seq_midisynth_new(ms, card, device, p) < 0)
p                 333 sound/core/seq/seq_midi.c 		port->addr.port = device * (256 / SNDRV_RAWMIDI_DEVICES) + p;
p                 337 sound/core/seq/seq_midi.c 		if (p < output_count)
p                 341 sound/core/seq/seq_midi.c 		info->subdevice = p;
p                 347 sound/core/seq/seq_midi.c 					snprintf(port->name, sizeof(port->name), "%s-%u", info->name, p);
p                 353 sound/core/seq/seq_midi.c 					sprintf(port->name, "MIDI %d-%d-%u", card->number, device, p);
p                 358 sound/core/seq/seq_midi.c 		if ((info->flags & SNDRV_RAWMIDI_INFO_OUTPUT) && p < output_count)
p                 360 sound/core/seq/seq_midi.c 		if ((info->flags & SNDRV_RAWMIDI_INFO_INPUT) && p < input_count)
p                 379 sound/core/seq/seq_midi.c 			rmidi->ops->get_port_info(rmidi, p, port);
p                 397 sound/core/seq/seq_midi.c 	      	for (p = 0; p < ports; p++)
p                 398 sound/core/seq/seq_midi.c 	      		snd_seq_midisynth_delete(&msynth[p]);
p                 419 sound/core/seq/seq_midi.c 	int device = dev->device, p, ports;
p                 431 sound/core/seq/seq_midi.c 	for (p = 0; p < ports; p++)
p                 432 sound/core/seq/seq_midi.c 		snd_seq_midisynth_delete(&msynth[p]);
p                 460 sound/core/seq/seq_midi_emul.c 	int p = cmd & 0x0f;
p                 461 sound/core/seq/seq_midi_emul.c 	if (p == 0)
p                 462 sound/core/seq/seq_midi_emul.c 		p = 9;
p                 463 sound/core/seq/seq_midi_emul.c 	else if (p < 10)
p                 464 sound/core/seq/seq_midi_emul.c 		p--;
p                 465 sound/core/seq/seq_midi_emul.c 	return p;
p                 528 sound/core/seq/seq_midi_emul.c 			int p = get_channel(buf[5]);
p                 529 sound/core/seq/seq_midi_emul.c 			if (p < chset->max_channels) {
p                 532 sound/core/seq/seq_midi_emul.c 					chset->channels[p].drum_channel = 1;
p                 534 sound/core/seq/seq_midi_emul.c 					chset->channels[p].drum_channel = 0;
p                 539 sound/core/seq/seq_midi_emul.c 			int p = get_channel(buf[5]);
p                 540 sound/core/seq/seq_midi_emul.c 			if (p < chset->max_channels &&
p                 541 sound/core/seq/seq_midi_emul.c 			    ! chset->channels[p].drum_channel) {
p                 543 sound/core/seq/seq_midi_emul.c 				chset->channels[p].midi_program = buf[7];
p                 620 sound/core/seq/seq_midi_emul.c static void snd_midi_channel_init(struct snd_midi_channel *p, int n)
p                 622 sound/core/seq/seq_midi_emul.c 	if (p == NULL)
p                 625 sound/core/seq/seq_midi_emul.c 	memset(p, 0, sizeof(struct snd_midi_channel));
p                 626 sound/core/seq/seq_midi_emul.c 	p->private = NULL;
p                 627 sound/core/seq/seq_midi_emul.c 	p->number = n;
p                 629 sound/core/seq/seq_midi_emul.c 	snd_midi_reset_controllers(p);
p                 630 sound/core/seq/seq_midi_emul.c 	p->gm_rpn_pitch_bend_range = 256; /* 2 semitones */
p                 631 sound/core/seq/seq_midi_emul.c 	p->gm_rpn_fine_tuning = 0;
p                 632 sound/core/seq/seq_midi_emul.c 	p->gm_rpn_coarse_tuning = 0;
p                 635 sound/core/seq/seq_midi_emul.c 		p->drum_channel = 1;	/* Default ch 10 as drums */
p                 116 sound/core/seq/seq_ports.c 	struct snd_seq_client_port *new_port, *p;
p                 145 sound/core/seq/seq_ports.c 	list_for_each_entry(p, &client->ports_list_head, list) {
p                 146 sound/core/seq/seq_ports.c 		if (p->addr.port > num)
p                 149 sound/core/seq/seq_ports.c 			num = p->addr.port + 1;
p                 152 sound/core/seq/seq_ports.c 	list_add_tail(&new_port->list, &p->list);
p                 176 sound/core/seq/seq_ports.c 	struct snd_seq_client_port *p;
p                 179 sound/core/seq/seq_ports.c 		p = snd_seq_port_use_ptr(*cp, addr->port);
p                 180 sound/core/seq/seq_ports.c 		if (! p) {
p                 184 sound/core/seq/seq_ports.c 		return p;
p                 195 sound/core/seq/seq_ports.c get_subscriber(struct list_head *p, bool is_src)
p                 198 sound/core/seq/seq_ports.c 		return list_entry(p, struct snd_seq_subscribers, src_list);
p                 200 sound/core/seq/seq_ports.c 		return list_entry(p, struct snd_seq_subscribers, dest_list);
p                 212 sound/core/seq/seq_ports.c 	struct list_head *p, *n;
p                 214 sound/core/seq/seq_ports.c 	list_for_each_safe(p, n, &grp->list_head) {
p                 219 sound/core/seq/seq_ports.c 		subs = get_subscriber(p, is_src);
p                 270 sound/core/seq/seq_ports.c 	struct snd_seq_client_port *found = NULL, *p;
p                 274 sound/core/seq/seq_ports.c 	list_for_each_entry(p, &client->ports_list_head, list) {
p                 275 sound/core/seq/seq_ports.c 		if (p->addr.port == port) {
p                 277 sound/core/seq/seq_ports.c 			list_del(&p->list);
p                 279 sound/core/seq/seq_ports.c 			found = p;
p                 474 sound/core/seq/seq_ports.c 	struct list_head *p;
p                 488 sound/core/seq/seq_ports.c 		list_for_each(p, &grp->list_head) {
p                 489 sound/core/seq/seq_ports.c 			s = get_subscriber(p, is_src);
p                 965 sound/core/timer.c 		struct list_head *p, *n;
p                 968 sound/core/timer.c 		list_for_each_safe(p, n, &timer->open_list_head) {
p                 969 sound/core/timer.c 			list_del_init(p);
p                 970 sound/core/timer.c 			ti = list_entry(p, struct snd_timer_instance, open_list);
p                1519 sound/core/timer.c 	struct list_head *p;
p                1536 sound/core/timer.c 			list_for_each(p, &snd_timer_list) {
p                1537 sound/core/timer.c 				timer = list_entry(p, struct snd_timer, device_list);
p                1547 sound/core/timer.c 			if (p == &snd_timer_list)
p                1564 sound/core/timer.c 			list_for_each(p, &snd_timer_list) {
p                1565 sound/core/timer.c 				timer = list_entry(p, struct snd_timer, device_list);
p                1593 sound/core/timer.c 			if (p == &snd_timer_list)
p                1612 sound/core/timer.c 	struct list_head *p;
p                1635 sound/core/timer.c 		list_for_each(p, &t->open_list_head) {
p                1981 sound/core/timer.c 	int __user *p = argp;
p                1986 sound/core/timer.c 		return put_user(SNDRV_TIMER_VERSION, p) ? -EFAULT : 0;
p                1995 sound/core/timer.c 		if (get_user(xarg, p))
p                 427 sound/core/vmaster.c 			kctl->tlv.p = master->tlv;
p                 722 sound/drivers/dummy.c   .tlv = { .p = db_scale_dummy } }
p                 183 sound/drivers/mtpav.c 	int p;
p                 187 sound/drivers/mtpav.c 		p = hwport - 1;
p                 188 sound/drivers/mtpav.c 		if (p >= chip->num_ports)
p                 189 sound/drivers/mtpav.c 			p = 0;
p                 190 sound/drivers/mtpav.c 		return p;
p                 192 sound/drivers/mtpav.c 		p = hwport - 0x09 + chip->num_ports;
p                 193 sound/drivers/mtpav.c 		if (p >= chip->num_ports * 2)
p                 194 sound/drivers/mtpav.c 			p = chip->num_ports;
p                 195 sound/drivers/mtpav.c 		return p;
p                 325 sound/drivers/mtpav.c 	u8 p;
p                 327 sound/drivers/mtpav.c 	for (p = 0; p < 8; p++) {
p                 329 sound/drivers/mtpav.c 		snd_mtpav_send_byte(chip, p);
p                 397 sound/drivers/mtpav.c 	int p;
p                 403 sound/drivers/mtpav.c 	for (p = 0; p <= chip->num_ports * 2 + MTPAV_PIDX_BROADCAST; p++) {
p                 404 sound/drivers/mtpav.c 		struct mtpav_port *portp = &chip->ports[p];
p                 127 sound/drivers/mts64.c static void mts64_enable_readout(struct parport *p);
p                 128 sound/drivers/mts64.c static void mts64_disable_readout(struct parport *p);
p                 129 sound/drivers/mts64.c static int mts64_device_ready(struct parport *p);
p                 130 sound/drivers/mts64.c static int mts64_device_init(struct parport *p);
p                 134 sound/drivers/mts64.c static int mts64_probe(struct parport *p);
p                 135 sound/drivers/mts64.c static u16 mts64_read(struct parport *p);
p                 136 sound/drivers/mts64.c static u8 mts64_read_char(struct parport *p);
p                 137 sound/drivers/mts64.c static void mts64_smpte_start(struct parport *p,
p                 141 sound/drivers/mts64.c static void mts64_smpte_stop(struct parport *p);
p                 142 sound/drivers/mts64.c static void mts64_write_command(struct parport *p, u8 c);
p                 143 sound/drivers/mts64.c static void mts64_write_data(struct parport *p, u8 c);
p                 152 sound/drivers/mts64.c static void mts64_enable_readout(struct parport *p)
p                 156 sound/drivers/mts64.c 	c = parport_read_control(p);
p                 158 sound/drivers/mts64.c 	parport_write_control(p, c); 
p                 165 sound/drivers/mts64.c static void mts64_disable_readout(struct parport *p)
p                 169 sound/drivers/mts64.c 	c = parport_read_control(p);
p                 171 sound/drivers/mts64.c 	parport_write_control(p, c);
p                 180 sound/drivers/mts64.c static int mts64_device_ready(struct parport *p)
p                 186 sound/drivers/mts64.c 		c = parport_read_status(p);
p                 201 sound/drivers/mts64.c static int mts64_device_init(struct parport *p)
p                 205 sound/drivers/mts64.c 	mts64_write_command(p, MTS64_CMD_RESET);
p                 210 sound/drivers/mts64.c 		if (mts64_probe(p) == 0) {
p                 212 sound/drivers/mts64.c 			mts64_disable_readout(p);
p                 216 sound/drivers/mts64.c 	mts64_disable_readout(p);
p                 227 sound/drivers/mts64.c 	struct parport *p = mts->pardev->port;
p                 230 sound/drivers/mts64.c 		mts64_write_command(p, MTS64_CMD_COM_OPEN);
p                 241 sound/drivers/mts64.c 	struct parport *p = mts->pardev->port;
p                 244 sound/drivers/mts64.c 		mts64_write_command(p, MTS64_CMD_COM_CLOSE1);
p                 245 sound/drivers/mts64.c 		mts64_write_command(p, MTS64_CMD_COM_CLOSE2);
p                 277 sound/drivers/mts64.c static int mts64_probe(struct parport *p)
p                 281 sound/drivers/mts64.c 	mts64_smpte_stop(p);
p                 282 sound/drivers/mts64.c 	mts64_write_command(p, MTS64_CMD_PROBE);
p                 286 sound/drivers/mts64.c 	c = mts64_read(p);
p                 301 sound/drivers/mts64.c static u16 mts64_read(struct parport *p)
p                 305 sound/drivers/mts64.c 	mts64_device_ready(p);
p                 306 sound/drivers/mts64.c 	mts64_enable_readout(p);
p                 307 sound/drivers/mts64.c 	status = parport_read_status(p);
p                 308 sound/drivers/mts64.c 	data = mts64_read_char(p);
p                 309 sound/drivers/mts64.c 	mts64_disable_readout(p);
p                 325 sound/drivers/mts64.c static u8 mts64_read_char(struct parport *p)
p                 332 sound/drivers/mts64.c 		parport_write_data(p, i);
p                 334 sound/drivers/mts64.c 		status = parport_read_status(p);
p                 351 sound/drivers/mts64.c static void mts64_smpte_start(struct parport *p,
p                 362 sound/drivers/mts64.c 	mts64_write_command(p, MTS64_CMD_SMPTE_SET_TIME);
p                 363 sound/drivers/mts64.c 	mts64_write_command(p, frames);
p                 364 sound/drivers/mts64.c 	mts64_write_command(p, seconds);
p                 365 sound/drivers/mts64.c 	mts64_write_command(p, minutes);
p                 366 sound/drivers/mts64.c 	mts64_write_command(p, hours);
p                 368 sound/drivers/mts64.c 	mts64_write_command(p, MTS64_CMD_SMPTE_SET_FPS);
p                 369 sound/drivers/mts64.c 	mts64_write_command(p, fps[idx]);
p                 374 sound/drivers/mts64.c static void mts64_smpte_stop(struct parport *p)
p                 376 sound/drivers/mts64.c 	mts64_write_command(p, MTS64_CMD_SMPTE_STOP);
p                 381 sound/drivers/mts64.c static void mts64_write_command(struct parport *p, u8 c)
p                 383 sound/drivers/mts64.c 	mts64_device_ready(p);
p                 385 sound/drivers/mts64.c 	parport_write_data(p, c);
p                 387 sound/drivers/mts64.c 	parport_write_control(p, MTS64_CTL_WRITE_CMD);
p                 388 sound/drivers/mts64.c 	parport_write_control(p, MTS64_CTL_WRITE_CMD | MTS64_CTL_STROBE);
p                 389 sound/drivers/mts64.c 	parport_write_control(p, MTS64_CTL_WRITE_CMD);
p                 394 sound/drivers/mts64.c static void mts64_write_data(struct parport *p, u8 c)
p                 396 sound/drivers/mts64.c 	mts64_device_ready(p);
p                 398 sound/drivers/mts64.c 	parport_write_data(p, c);
p                 400 sound/drivers/mts64.c 	parport_write_control(p, MTS64_CTL_WRITE_DATA);
p                 401 sound/drivers/mts64.c 	parport_write_control(p, MTS64_CTL_WRITE_DATA | MTS64_CTL_STROBE);
p                 402 sound/drivers/mts64.c 	parport_write_control(p, MTS64_CTL_WRITE_DATA);
p                 413 sound/drivers/mts64.c 	struct parport *p = mts->pardev->port;
p                 417 sound/drivers/mts64.c 		mts64_write_command(p, midiport);
p                 420 sound/drivers/mts64.c 	mts64_write_data(p, c);
p                 837 sound/drivers/mts64.c static void snd_mts64_attach(struct parport *p)
p                 846 sound/drivers/mts64.c 	platform_set_drvdata(device, p);
p                 865 sound/drivers/mts64.c static void snd_mts64_detach(struct parport *p)
p                 905 sound/drivers/mts64.c 	struct parport *p;
p                 917 sound/drivers/mts64.c 	p = platform_get_drvdata(pdev);
p                 934 sound/drivers/mts64.c 		card->shortname, p->base, p->irq);
p                 937 sound/drivers/mts64.c 	pardev = parport_register_dev_model(p,		 /* port */
p                 961 sound/drivers/mts64.c 	err = mts64_probe(p);
p                 973 sound/drivers/mts64.c 	if ((err = mts64_device_init(p)) < 0)
p                 984 sound/drivers/mts64.c 	snd_printk(KERN_INFO "ESI Miditerminal 4140 on 0x%lx\n", p->base);
p                  14 sound/drivers/opl3/opl3_midi.c static void snd_opl3_note_off_unsafe(void *p, int note, int vel,
p                 276 sound/drivers/opl3/opl3_midi.c void snd_opl3_note_on(void *p, int note, int vel, struct snd_midi_channel *chan)
p                 304 sound/drivers/opl3/opl3_midi.c 	opl3 = p;
p                 654 sound/drivers/opl3/opl3_midi.c static void snd_opl3_note_off_unsafe(void *p, int note, int vel,
p                 662 sound/drivers/opl3/opl3_midi.c 	opl3 = p;
p                 691 sound/drivers/opl3/opl3_midi.c void snd_opl3_note_off(void *p, int note, int vel,
p                 694 sound/drivers/opl3/opl3_midi.c 	struct snd_opl3 *opl3 = p;
p                 698 sound/drivers/opl3/opl3_midi.c 	snd_opl3_note_off_unsafe(p, note, vel, chan);
p                 705 sound/drivers/opl3/opl3_midi.c void snd_opl3_key_press(void *p, int note, int vel, struct snd_midi_channel *chan)
p                 716 sound/drivers/opl3/opl3_midi.c void snd_opl3_terminate_note(void *p, int note, struct snd_midi_channel *chan)
p                 802 sound/drivers/opl3/opl3_midi.c void snd_opl3_control(void *p, int type, struct snd_midi_channel *chan)
p                 806 sound/drivers/opl3/opl3_midi.c 	opl3 = p;
p                 838 sound/drivers/opl3/opl3_midi.c void snd_opl3_nrpn(void *p, struct snd_midi_channel *chan,
p                 850 sound/drivers/opl3/opl3_midi.c void snd_opl3_sysex(void *p, unsigned char *buf, int len,
p                  18 sound/drivers/opl3/opl3_voice.h void snd_opl3_note_on(void *p, int note, int vel, struct snd_midi_channel *chan);
p                  19 sound/drivers/opl3/opl3_voice.h void snd_opl3_note_off(void *p, int note, int vel, struct snd_midi_channel *chan);
p                  20 sound/drivers/opl3/opl3_voice.h void snd_opl3_key_press(void *p, int note, int vel, struct snd_midi_channel *chan);
p                  21 sound/drivers/opl3/opl3_voice.h void snd_opl3_terminate_note(void *p, int note, struct snd_midi_channel *chan);
p                  22 sound/drivers/opl3/opl3_voice.h void snd_opl3_control(void *p, int type, struct snd_midi_channel *chan);
p                  23 sound/drivers/opl3/opl3_voice.h void snd_opl3_nrpn(void *p, struct snd_midi_channel *chan, struct snd_midi_channel_set *chset);
p                  24 sound/drivers/opl3/opl3_voice.h void snd_opl3_sysex(void *p, unsigned char *buf, int len, int parsed, struct snd_midi_channel_set *chset);
p                 225 sound/drivers/opl4/opl4_local.h void snd_opl4_note_on(void *p, int note, int vel, struct snd_midi_channel *chan);
p                 226 sound/drivers/opl4/opl4_local.h void snd_opl4_note_off(void *p, int note, int vel, struct snd_midi_channel *chan);
p                 227 sound/drivers/opl4/opl4_local.h void snd_opl4_terminate_note(void *p, int note, struct snd_midi_channel *chan);
p                 228 sound/drivers/opl4/opl4_local.h void snd_opl4_control(void *p, int type, struct snd_midi_channel *chan);
p                 229 sound/drivers/opl4/opl4_local.h void snd_opl4_sysex(void *p, unsigned char *buf, int len, int parsed, struct snd_midi_channel_set *chset);
p                 432 sound/drivers/portman2x4.c static int portman_probe(struct parport *p)
p                 438 sound/drivers/portman2x4.c 	parport_write_data(p, 0);
p                 447 sound/drivers/portman2x4.c 	parport_write_control(p, 0);
p                 451 sound/drivers/portman2x4.c 	parport_write_control(p, RXDATA0);	/* Write Strobe=0 to command reg. */
p                 455 sound/drivers/portman2x4.c 	if ((parport_read_status(p) & ESTB) == ESTB)
p                 460 sound/drivers/portman2x4.c 	parport_write_control(p, RXDATA0 + STROBE);	/* Write Strobe=1 to command reg. */
p                 463 sound/drivers/portman2x4.c 	if ((parport_read_status(p) & ESTB) != ESTB)
p                 467 sound/drivers/portman2x4.c 	parport_write_control(p, 0);	/* Reset Strobe=0. */
p                 473 sound/drivers/portman2x4.c 	parport_write_control(p, TXDATA0);	/* Tx channel 0, strobe off. */
p                 479 sound/drivers/portman2x4.c 	if ((parport_read_status(p) & TXEMPTY) == 0)
p                 636 sound/drivers/portman2x4.c static void snd_portman_attach(struct parport *p)
p                 645 sound/drivers/portman2x4.c 	platform_set_drvdata(device, p);
p                 664 sound/drivers/portman2x4.c static void snd_portman_detach(struct parport *p)
p                 704 sound/drivers/portman2x4.c 	struct parport *p;
p                 716 sound/drivers/portman2x4.c 	p = platform_get_drvdata(pdev);
p                 733 sound/drivers/portman2x4.c 		card->shortname, p->base, p->irq);
p                 736 sound/drivers/portman2x4.c 	pardev = parport_register_dev_model(p,		   /* port */
p                 760 sound/drivers/portman2x4.c 	err = portman_probe(p);
p                 783 sound/drivers/portman2x4.c 	snd_printk(KERN_INFO "Portman 2x4 on 0x%lx\n", p->base);
p                 715 sound/drivers/vx/vx_mixer.c 	.tlv = { .p = db_scale_audio_gain },
p                 732 sound/drivers/vx/vx_mixer.c 	.tlv = { .p = db_scale_audio_gain },
p                 912 sound/drivers/vx/vx_mixer.c 		temp.tlv.p = chip->hw->output_level_db_scale;
p                1119 sound/drivers/vx/vx_pcm.c 			int p, buf, capture, eob;
p                1120 sound/drivers/vx/vx_pcm.c 			p = chip->irq_rmh.Stat[i] & MASK_FIRST_FIELD;
p                1134 sound/drivers/vx/vx_pcm.c 			if (snd_BUG_ON(p < 0 || p >= chip->audio_outs))
p                1136 sound/drivers/vx/vx_pcm.c 			pipe = chip->playback_pipes[p];
p                  60 sound/firewire/amdtp-am824.c 	struct amdtp_am824 *p = s->protocol;
p                  87 sound/firewire/amdtp-am824.c 	p->pcm_channels = pcm_channels;
p                  88 sound/firewire/amdtp-am824.c 	p->midi_ports = midi_ports;
p                  96 sound/firewire/amdtp-am824.c 		p->frame_multiplier = 2;
p                  98 sound/firewire/amdtp-am824.c 		p->frame_multiplier = 1;
p                 102 sound/firewire/amdtp-am824.c 		p->pcm_positions[i] = i;
p                 103 sound/firewire/amdtp-am824.c 	p->midi_position = p->pcm_channels;
p                 111 sound/firewire/amdtp-am824.c 	p->midi_fifo_limit = rate - MIDI_BYTES_PER_SECOND * s->syt_interval + 1;
p                 127 sound/firewire/amdtp-am824.c 	struct amdtp_am824 *p = s->protocol;
p                 129 sound/firewire/amdtp-am824.c 	if (index < p->pcm_channels)
p                 130 sound/firewire/amdtp-am824.c 		p->pcm_positions[index] = position;
p                 143 sound/firewire/amdtp-am824.c 	struct amdtp_am824 *p = s->protocol;
p                 145 sound/firewire/amdtp-am824.c 	p->midi_position = position;
p                 153 sound/firewire/amdtp-am824.c 	struct amdtp_am824 *p = s->protocol;
p                 154 sound/firewire/amdtp-am824.c 	unsigned int channels = p->pcm_channels;
p                 170 sound/firewire/amdtp-am824.c 			buffer[p->pcm_positions[c]] =
p                 184 sound/firewire/amdtp-am824.c 	struct amdtp_am824 *p = s->protocol;
p                 185 sound/firewire/amdtp-am824.c 	unsigned int channels = p->pcm_channels;
p                 201 sound/firewire/amdtp-am824.c 			*dst = be32_to_cpu(buffer[p->pcm_positions[c]]) << 8;
p                 213 sound/firewire/amdtp-am824.c 	struct amdtp_am824 *p = s->protocol;
p                 214 sound/firewire/amdtp-am824.c 	unsigned int i, c, channels = p->pcm_channels;
p                 218 sound/firewire/amdtp-am824.c 			buffer[p->pcm_positions[c]] = cpu_to_be32(0x40000000);
p                 256 sound/firewire/amdtp-am824.c 	struct amdtp_am824 *p = s->protocol;
p                 258 sound/firewire/amdtp-am824.c 	if (port < p->midi_ports)
p                 259 sound/firewire/amdtp-am824.c 		WRITE_ONCE(p->midi[port], midi);
p                 275 sound/firewire/amdtp-am824.c 	struct amdtp_am824 *p = s->protocol;
p                 278 sound/firewire/amdtp-am824.c 	used = p->midi_fifo_used[port];
p                 284 sound/firewire/amdtp-am824.c 	p->midi_fifo_used[port] = used;
p                 286 sound/firewire/amdtp-am824.c 	return used < p->midi_fifo_limit;
p                 291 sound/firewire/amdtp-am824.c 	struct amdtp_am824 *p = s->protocol;
p                 293 sound/firewire/amdtp-am824.c 	p->midi_fifo_used[port] += amdtp_rate_table[s->sfc];
p                 299 sound/firewire/amdtp-am824.c 	struct amdtp_am824 *p = s->protocol;
p                 304 sound/firewire/amdtp-am824.c 		b = (u8 *)&buffer[p->midi_position];
p                 309 sound/firewire/amdtp-am824.c 		    p->midi[port] != NULL &&
p                 310 sound/firewire/amdtp-am824.c 		    snd_rawmidi_transmit(p->midi[port], &b[1], 1) == 1) {
p                 327 sound/firewire/amdtp-am824.c 	struct amdtp_am824 *p = s->protocol;
p                 338 sound/firewire/amdtp-am824.c 		b = (u8 *)&buffer[p->midi_position];
p                 341 sound/firewire/amdtp-am824.c 		if ((1 <= len) &&  (len <= 3) && (p->midi[port]))
p                 342 sound/firewire/amdtp-am824.c 			snd_rawmidi_receive(p->midi[port], b + 1, len);
p                 353 sound/firewire/amdtp-am824.c 	struct amdtp_am824 *p = s->protocol;
p                 364 sound/firewire/amdtp-am824.c 			pcm_frames += data_blocks * p->frame_multiplier;
p                 369 sound/firewire/amdtp-am824.c 		if (p->midi_ports) {
p                 383 sound/firewire/amdtp-am824.c 	struct amdtp_am824 *p = s->protocol;
p                 394 sound/firewire/amdtp-am824.c 			pcm_frames += data_blocks * p->frame_multiplier;
p                 397 sound/firewire/amdtp-am824.c 		if (p->midi_ports) {
p                 116 sound/firewire/digi00x/amdtp-dot.c 	struct amdtp_dot *p = s->protocol;
p                 132 sound/firewire/digi00x/amdtp-dot.c 	p->pcm_channels = pcm_channels;
p                 140 sound/firewire/digi00x/amdtp-dot.c 	p->midi_fifo_limit = rate - MIDI_BYTES_PER_SECOND * s->syt_interval + 1;
p                 149 sound/firewire/digi00x/amdtp-dot.c 	struct amdtp_dot *p = s->protocol;
p                 150 sound/firewire/digi00x/amdtp-dot.c 	unsigned int channels = p->pcm_channels;
p                 168 sound/firewire/digi00x/amdtp-dot.c 			dot_encode_step(&p->state, &buffer[c]);
p                 181 sound/firewire/digi00x/amdtp-dot.c 	struct amdtp_dot *p = s->protocol;
p                 182 sound/firewire/digi00x/amdtp-dot.c 	unsigned int channels = p->pcm_channels;
p                 211 sound/firewire/digi00x/amdtp-dot.c 	struct amdtp_dot *p = s->protocol;
p                 214 sound/firewire/digi00x/amdtp-dot.c 	channels = p->pcm_channels;
p                 226 sound/firewire/digi00x/amdtp-dot.c 	struct amdtp_dot *p = s->protocol;
p                 229 sound/firewire/digi00x/amdtp-dot.c 	used = p->midi_fifo_used[port];
p                 235 sound/firewire/digi00x/amdtp-dot.c 	p->midi_fifo_used[port] = used;
p                 237 sound/firewire/digi00x/amdtp-dot.c 	return used < p->midi_fifo_limit;
p                 243 sound/firewire/digi00x/amdtp-dot.c 	struct amdtp_dot *p = s->protocol;
p                 245 sound/firewire/digi00x/amdtp-dot.c 	p->midi_fifo_used[port] += amdtp_rate_table[s->sfc] * count;
p                 251 sound/firewire/digi00x/amdtp-dot.c 	struct amdtp_dot *p = s->protocol;
p                 263 sound/firewire/digi00x/amdtp-dot.c 		    p->midi[port] != NULL)
p                 264 sound/firewire/digi00x/amdtp-dot.c 			len = snd_rawmidi_transmit(p->midi[port], b + 1, 2);
p                 295 sound/firewire/digi00x/amdtp-dot.c 	struct amdtp_dot *p = s->protocol;
p                 314 sound/firewire/digi00x/amdtp-dot.c 			if (port < MAX_MIDI_PORTS && p->midi[port])
p                 315 sound/firewire/digi00x/amdtp-dot.c 				snd_rawmidi_receive(p->midi[port], b + 1, len);
p                 338 sound/firewire/digi00x/amdtp-dot.c 	struct amdtp_dot *p = s->protocol;
p                 341 sound/firewire/digi00x/amdtp-dot.c 		WRITE_ONCE(p->midi[port], midi);
p                 416 sound/firewire/digi00x/amdtp-dot.c 	struct amdtp_dot *p = s->protocol;
p                 418 sound/firewire/digi00x/amdtp-dot.c 	p->state.carry = 0x00;
p                 419 sound/firewire/digi00x/amdtp-dot.c 	p->state.idx = 0x00;
p                 420 sound/firewire/digi00x/amdtp-dot.c 	p->state.off = 0;
p                  18 sound/firewire/fireface/amdtp-ff.c 	struct amdtp_ff *p = s->protocol;
p                  24 sound/firewire/fireface/amdtp-ff.c 	p->pcm_channels = pcm_channels;
p                  34 sound/firewire/fireface/amdtp-ff.c 	struct amdtp_ff *p = s->protocol;
p                  35 sound/firewire/fireface/amdtp-ff.c 	unsigned int channels = p->pcm_channels;
p                  64 sound/firewire/fireface/amdtp-ff.c 	struct amdtp_ff *p = s->protocol;
p                  65 sound/firewire/fireface/amdtp-ff.c 	unsigned int channels = p->pcm_channels;
p                  93 sound/firewire/fireface/amdtp-ff.c 	struct amdtp_ff *p = s->protocol;
p                  94 sound/firewire/fireface/amdtp-ff.c 	unsigned int i, c, channels = p->pcm_channels;
p                 588 sound/firewire/isight.c 		ctl->tlv.p = isight->gain_tlv;
p                  61 sound/firewire/motu/amdtp-motu.c 	struct amdtp_motu *p = s->protocol;
p                  94 sound/firewire/motu/amdtp-motu.c 	p->pcm_chunks = pcm_chunks;
p                  95 sound/firewire/motu/amdtp-motu.c 	p->pcm_byte_offset = formats->pcm_byte_offset;
p                  97 sound/firewire/motu/amdtp-motu.c 	p->midi_ports = midi_ports;
p                  98 sound/firewire/motu/amdtp-motu.c 	p->midi_flag_offset = formats->midi_flag_offset;
p                  99 sound/firewire/motu/amdtp-motu.c 	p->midi_byte_offset = formats->midi_byte_offset;
p                 101 sound/firewire/motu/amdtp-motu.c 	p->midi_db_count = 0;
p                 102 sound/firewire/motu/amdtp-motu.c 	p->midi_db_interval = rate / MIDI_BYTES_PER_SECOND;
p                 110 sound/firewire/motu/amdtp-motu.c 	p->next_seconds = 0;
p                 111 sound/firewire/motu/amdtp-motu.c 	p->next_cycles = delay / 3072;
p                 112 sound/firewire/motu/amdtp-motu.c 	p->quotient_ticks_per_event = params[s->sfc].quotient_ticks_per_event;
p                 113 sound/firewire/motu/amdtp-motu.c 	p->remainder_ticks_per_event = params[s->sfc].remainder_ticks_per_event;
p                 114 sound/firewire/motu/amdtp-motu.c 	p->next_ticks = delay % 3072;
p                 115 sound/firewire/motu/amdtp-motu.c 	p->next_accumulated = 0;
p                 124 sound/firewire/motu/amdtp-motu.c 	struct amdtp_motu *p = s->protocol;
p                 125 sound/firewire/motu/amdtp-motu.c 	unsigned int channels = p->pcm_chunks;
p                 141 sound/firewire/motu/amdtp-motu.c 		byte = (u8 *)buffer + p->pcm_byte_offset;
p                 160 sound/firewire/motu/amdtp-motu.c 	struct amdtp_motu *p = s->protocol;
p                 161 sound/firewire/motu/amdtp-motu.c 	unsigned int channels = p->pcm_chunks;
p                 177 sound/firewire/motu/amdtp-motu.c 		byte = (u8 *)buffer + p->pcm_byte_offset;
p                 196 sound/firewire/motu/amdtp-motu.c 	struct amdtp_motu *p = s->protocol;
p                 200 sound/firewire/motu/amdtp-motu.c 	channels = p->pcm_chunks;
p                 203 sound/firewire/motu/amdtp-motu.c 		byte = (u8 *)buffer + p->pcm_byte_offset;
p                 232 sound/firewire/motu/amdtp-motu.c 	struct amdtp_motu *p = s->protocol;
p                 234 sound/firewire/motu/amdtp-motu.c 	if (port < p->midi_ports)
p                 235 sound/firewire/motu/amdtp-motu.c 		WRITE_ONCE(p->midi, midi);
p                 241 sound/firewire/motu/amdtp-motu.c 	struct amdtp_motu *p = s->protocol;
p                 242 sound/firewire/motu/amdtp-motu.c 	struct snd_rawmidi_substream *midi = READ_ONCE(p->midi);
p                 249 sound/firewire/motu/amdtp-motu.c 		if (midi && p->midi_db_count == 0 &&
p                 250 sound/firewire/motu/amdtp-motu.c 		    snd_rawmidi_transmit(midi, b + p->midi_byte_offset, 1) == 1) {
p                 251 sound/firewire/motu/amdtp-motu.c 			b[p->midi_flag_offset] = 0x01;
p                 253 sound/firewire/motu/amdtp-motu.c 			b[p->midi_byte_offset] = 0x00;
p                 254 sound/firewire/motu/amdtp-motu.c 			b[p->midi_flag_offset] = 0x00;
p                 259 sound/firewire/motu/amdtp-motu.c 		if (--p->midi_db_count < 0)
p                 260 sound/firewire/motu/amdtp-motu.c 			p->midi_db_count = p->midi_db_interval;
p                 267 sound/firewire/motu/amdtp-motu.c 	struct amdtp_motu *p = s->protocol;
p                 274 sound/firewire/motu/amdtp-motu.c 		midi = READ_ONCE(p->midi);
p                 276 sound/firewire/motu/amdtp-motu.c 		if (midi && (b[p->midi_flag_offset] & 0x01))
p                 277 sound/firewire/motu/amdtp-motu.c 			snd_rawmidi_receive(midi, b + p->midi_byte_offset, 1);
p                 334 sound/firewire/motu/amdtp-motu.c 	struct amdtp_motu *p = s->protocol;
p                 349 sound/firewire/motu/amdtp-motu.c 		if (p->midi_ports)
p                 361 sound/firewire/motu/amdtp-motu.c static inline void compute_next_elapse_from_start(struct amdtp_motu *p)
p                 363 sound/firewire/motu/amdtp-motu.c 	p->next_accumulated += p->remainder_ticks_per_event;
p                 364 sound/firewire/motu/amdtp-motu.c 	if (p->next_accumulated >= 441) {
p                 365 sound/firewire/motu/amdtp-motu.c 		p->next_accumulated -= 441;
p                 366 sound/firewire/motu/amdtp-motu.c 		p->next_ticks++;
p                 369 sound/firewire/motu/amdtp-motu.c 	p->next_ticks += p->quotient_ticks_per_event;
p                 370 sound/firewire/motu/amdtp-motu.c 	if (p->next_ticks >= 3072) {
p                 371 sound/firewire/motu/amdtp-motu.c 		p->next_ticks -= 3072;
p                 372 sound/firewire/motu/amdtp-motu.c 		p->next_cycles++;
p                 375 sound/firewire/motu/amdtp-motu.c 	if (p->next_cycles >= 8000) {
p                 376 sound/firewire/motu/amdtp-motu.c 		p->next_cycles -= 8000;
p                 377 sound/firewire/motu/amdtp-motu.c 		p->next_seconds++;
p                 380 sound/firewire/motu/amdtp-motu.c 	if (p->next_seconds >= 128)
p                 381 sound/firewire/motu/amdtp-motu.c 		p->next_seconds -= 128;
p                 387 sound/firewire/motu/amdtp-motu.c 	struct amdtp_motu *p = s->protocol;
p                 393 sound/firewire/motu/amdtp-motu.c 		next_cycles = (s->start_cycle + p->next_cycles) % 8000;
p                 394 sound/firewire/motu/amdtp-motu.c 		sph = ((next_cycles << 12) | p->next_ticks) & 0x01ffffff;
p                 397 sound/firewire/motu/amdtp-motu.c 		compute_next_elapse_from_start(p);
p                 408 sound/firewire/motu/amdtp-motu.c 	struct amdtp_motu *p = s->protocol;
p                 425 sound/firewire/motu/amdtp-motu.c 		if (p->midi_ports)
p                  27 sound/firewire/packets-buffer.c 	void *p;
p                  51 sound/firewire/packets-buffer.c 		p = page_address(b->iso_buffer.pages[page_index]);
p                  53 sound/firewire/packets-buffer.c 		b->packets[i].buffer = p + offset_in_page;
p                  20 sound/firewire/tascam/amdtp-tascam.c 	struct amdtp_tscm *p = s->protocol;
p                  26 sound/firewire/tascam/amdtp-tascam.c 	data_channels = p->pcm_channels;
p                  39 sound/firewire/tascam/amdtp-tascam.c 	struct amdtp_tscm *p = s->protocol;
p                  40 sound/firewire/tascam/amdtp-tascam.c 	unsigned int channels = p->pcm_channels;
p                  69 sound/firewire/tascam/amdtp-tascam.c 	struct amdtp_tscm *p = s->protocol;
p                  70 sound/firewire/tascam/amdtp-tascam.c 	unsigned int channels = p->pcm_channels;
p                 101 sound/firewire/tascam/amdtp-tascam.c 	struct amdtp_tscm *p = s->protocol;
p                 104 sound/firewire/tascam/amdtp-tascam.c 	channels = p->pcm_channels;
p                 231 sound/firewire/tascam/amdtp-tascam.c 	struct amdtp_tscm *p;
p                 257 sound/firewire/tascam/amdtp-tascam.c 	p = s->protocol;
p                 258 sound/firewire/tascam/amdtp-tascam.c 	p->pcm_channels = pcm_channels;
p                 412 sound/hda/hdac_regmap.c 	unsigned int *p = snd_array_new(&codec->vendor_verbs);
p                 414 sound/hda/hdac_regmap.c 	if (!p)
p                 416 sound/hda/hdac_regmap.c 	*p = verb | 0x800; /* set GET bit */
p                 323 sound/hda/hdac_sysfs.c 	struct kobject **p;
p                 329 sound/hda/hdac_sysfs.c 		for (p = tree->nodes; *p; p++)
p                 330 sound/hda/hdac_sysfs.c 			free_widget_node(*p, &widget_node_group);
p                 222 sound/hda/hdmi_chmap.c 	struct hdac_cea_channel_speaker_allocation *p;
p                 225 sound/hda/hdmi_chmap.c 		p = channel_allocations + i;
p                 226 sound/hda/hdmi_chmap.c 		p->channels = 0;
p                 227 sound/hda/hdmi_chmap.c 		p->spk_mask = 0;
p                 228 sound/hda/hdmi_chmap.c 		for (j = 0; j < ARRAY_SIZE(p->speakers); j++)
p                 229 sound/hda/hdmi_chmap.c 			if (p->speakers[j]) {
p                 230 sound/hda/hdmi_chmap.c 				p->channels++;
p                 231 sound/hda/hdmi_chmap.c 				p->spk_mask |= p->speakers[j];
p                 658 sound/i2c/other/ak4xxx-adda.c 			knew.tlv.p = db_scale_vol_datt;
p                 665 sound/i2c/other/ak4xxx-adda.c 			knew.tlv.p = db_scale_vol_datt;
p                 672 sound/i2c/other/ak4xxx-adda.c 			knew.tlv.p = db_scale_8bit;
p                 678 sound/i2c/other/ak4xxx-adda.c 			knew.tlv.p = db_scale_8bit;
p                 685 sound/i2c/other/ak4xxx-adda.c 			knew.tlv.p = db_scale_7bit;
p                 692 sound/i2c/other/ak4xxx-adda.c 			knew.tlv.p = db_scale_linear;
p                 698 sound/i2c/other/ak4xxx-adda.c 			knew.tlv.p = db_scale_linear;
p                 753 sound/i2c/other/ak4xxx-adda.c 		knew.tlv.p = db_scale_vol_datt;
p                 191 sound/i2c/other/pt2258.c 		knew.tlv.p = pt2258_db_scale;
p                 764 sound/isa/ad1816a/ad1816a_lib.c   .tlv = { .p = (xtlv) } }
p                 828 sound/isa/ad1816a/ad1816a_lib.c   .tlv = { .p = (xtlv) } }
p                 392 sound/isa/cs423x/cs4236_lib.c   .tlv = { .p = (xtlv) } }
p                 505 sound/isa/cs423x/cs4236_lib.c   .tlv = { .p = (xtlv) } }
p                 592 sound/isa/cs423x/cs4236_lib.c   .tlv = { .p = (xtlv) } }
p                 653 sound/isa/cs423x/cs4236_lib.c   .tlv = { .p = (xtlv) } }
p                 697 sound/isa/cs423x/cs4236_lib.c   .tlv = { .p = (xtlv) } }
p                 338 sound/isa/opl3sa2.c   .tlv = { .p = (xtlv) } }
p                 393 sound/isa/opl3sa2.c   .tlv = { .p = (xtlv) } }
p                 321 sound/isa/sb/emu8000.c 	unsigned short *p;
p                 323 sound/isa/sb/emu8000.c 	p = data;
p                 324 sound/isa/sb/emu8000.c 	for (i = 0; i < size; i++, p++)
p                 325 sound/isa/sb/emu8000.c 		EMU8000_INIT1_WRITE(emu, i, *p);
p                 326 sound/isa/sb/emu8000.c 	for (i = 0; i < size; i++, p++)
p                 327 sound/isa/sb/emu8000.c 		EMU8000_INIT2_WRITE(emu, i, *p);
p                 328 sound/isa/sb/emu8000.c 	for (i = 0; i < size; i++, p++)
p                 329 sound/isa/sb/emu8000.c 		EMU8000_INIT3_WRITE(emu, i, *p);
p                 330 sound/isa/sb/emu8000.c 	for (i = 0; i < size; i++, p++)
p                 331 sound/isa/sb/emu8000.c 		EMU8000_INIT4_WRITE(emu, i, *p);
p                  78 sound/isa/sb/sb16_csp.c static int snd_sb_csp_riff_load(struct snd_sb_csp * p,
p                  80 sound/isa/sb/sb16_csp.c static int snd_sb_csp_unload(struct snd_sb_csp * p);
p                  81 sound/isa/sb/sb16_csp.c static int snd_sb_csp_load_user(struct snd_sb_csp * p, const unsigned char __user *buf, int size, int load_flags);
p                  82 sound/isa/sb/sb16_csp.c static int snd_sb_csp_autoload(struct snd_sb_csp * p, snd_pcm_format_t pcm_sfmt, int play_rec_mode);
p                  83 sound/isa/sb/sb16_csp.c static int snd_sb_csp_check_version(struct snd_sb_csp * p);
p                  85 sound/isa/sb/sb16_csp.c static int snd_sb_csp_use(struct snd_sb_csp * p);
p                  86 sound/isa/sb/sb16_csp.c static int snd_sb_csp_unuse(struct snd_sb_csp * p);
p                  87 sound/isa/sb/sb16_csp.c static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channels);
p                  88 sound/isa/sb/sb16_csp.c static int snd_sb_csp_stop(struct snd_sb_csp * p);
p                  89 sound/isa/sb/sb16_csp.c static int snd_sb_csp_pause(struct snd_sb_csp * p);
p                  90 sound/isa/sb/sb16_csp.c static int snd_sb_csp_restart(struct snd_sb_csp * p);
p                  92 sound/isa/sb/sb16_csp.c static int snd_sb_qsound_build(struct snd_sb_csp * p);
p                  93 sound/isa/sb/sb16_csp.c static void snd_sb_qsound_destroy(struct snd_sb_csp * p);
p                  94 sound/isa/sb/sb16_csp.c static int snd_sb_csp_qsound_transfer(struct snd_sb_csp * p);
p                  96 sound/isa/sb/sb16_csp.c static int init_proc_entry(struct snd_sb_csp * p, int device);
p                 104 sound/isa/sb/sb16_csp.c 	struct snd_sb_csp *p;
p                 118 sound/isa/sb/sb16_csp.c 	if ((p = kzalloc(sizeof(*p), GFP_KERNEL)) == NULL) {
p                 122 sound/isa/sb/sb16_csp.c 	p->chip = chip;
p                 123 sound/isa/sb/sb16_csp.c 	p->version = version;
p                 126 sound/isa/sb/sb16_csp.c 	p->ops.csp_use = snd_sb_csp_use;
p                 127 sound/isa/sb/sb16_csp.c 	p->ops.csp_unuse = snd_sb_csp_unuse;
p                 128 sound/isa/sb/sb16_csp.c 	p->ops.csp_autoload = snd_sb_csp_autoload;
p                 129 sound/isa/sb/sb16_csp.c 	p->ops.csp_start = snd_sb_csp_start;
p                 130 sound/isa/sb/sb16_csp.c 	p->ops.csp_stop = snd_sb_csp_stop;
p                 131 sound/isa/sb/sb16_csp.c 	p->ops.csp_qsound_transfer = snd_sb_csp_qsound_transfer;
p                 133 sound/isa/sb/sb16_csp.c 	mutex_init(&p->access_mutex);
p                 136 sound/isa/sb/sb16_csp.c 	hw->private_data = p;
p                 145 sound/isa/sb/sb16_csp.c 	init_proc_entry(p, device);
p                 157 sound/isa/sb/sb16_csp.c 	struct snd_sb_csp *p = hwdep->private_data;
p                 158 sound/isa/sb/sb16_csp.c 	if (p) {
p                 159 sound/isa/sb/sb16_csp.c 		if (p->running & SNDRV_SB_CSP_ST_RUNNING)
p                 160 sound/isa/sb/sb16_csp.c 			snd_sb_csp_stop(p);
p                 161 sound/isa/sb/sb16_csp.c 		for (i = 0; i < ARRAY_SIZE(p->csp_programs); ++i)
p                 162 sound/isa/sb/sb16_csp.c 			release_firmware(p->csp_programs[i]);
p                 163 sound/isa/sb/sb16_csp.c 		kfree(p);
p                 174 sound/isa/sb/sb16_csp.c 	struct snd_sb_csp *p = hw->private_data;
p                 175 sound/isa/sb/sb16_csp.c 	return (snd_sb_csp_use(p));
p                 183 sound/isa/sb/sb16_csp.c 	struct snd_sb_csp *p = hw->private_data;
p                 188 sound/isa/sb/sb16_csp.c 	if (snd_BUG_ON(!p))
p                 191 sound/isa/sb/sb16_csp.c 	if (snd_sb_csp_check_version(p))
p                 198 sound/isa/sb/sb16_csp.c 		*info.codec_name = *p->codec_name;
p                 199 sound/isa/sb/sb16_csp.c 		info.func_nr = p->func_nr;
p                 200 sound/isa/sb/sb16_csp.c 		info.acc_format = p->acc_format;
p                 201 sound/isa/sb/sb16_csp.c 		info.acc_channels = p->acc_channels;
p                 202 sound/isa/sb/sb16_csp.c 		info.acc_width = p->acc_width;
p                 203 sound/isa/sb/sb16_csp.c 		info.acc_rates = p->acc_rates;
p                 204 sound/isa/sb/sb16_csp.c 		info.csp_mode = p->mode;
p                 205 sound/isa/sb/sb16_csp.c 		info.run_channels = p->run_channels;
p                 206 sound/isa/sb/sb16_csp.c 		info.run_width = p->run_width;
p                 207 sound/isa/sb/sb16_csp.c 		info.version = p->version;
p                 208 sound/isa/sb/sb16_csp.c 		info.state = p->running;
p                 217 sound/isa/sb/sb16_csp.c 		err = (p->running & SNDRV_SB_CSP_ST_RUNNING ?
p                 218 sound/isa/sb/sb16_csp.c 		       -EBUSY : snd_sb_csp_riff_load(p, (struct snd_sb_csp_microcode __user *) arg));
p                 221 sound/isa/sb/sb16_csp.c 		err = (p->running & SNDRV_SB_CSP_ST_RUNNING ?
p                 222 sound/isa/sb/sb16_csp.c 		       -EBUSY : snd_sb_csp_unload(p));
p                 230 sound/isa/sb/sb16_csp.c 			err = snd_sb_csp_start(p, start_info.sample_width, start_info.channels);
p                 233 sound/isa/sb/sb16_csp.c 		err = snd_sb_csp_stop(p);
p                 236 sound/isa/sb/sb16_csp.c 		err = snd_sb_csp_pause(p);
p                 239 sound/isa/sb/sb16_csp.c 		err = snd_sb_csp_restart(p);
p                 254 sound/isa/sb/sb16_csp.c 	struct snd_sb_csp *p = hw->private_data;
p                 255 sound/isa/sb/sb16_csp.c 	return (snd_sb_csp_unuse(p));
p                 263 sound/isa/sb/sb16_csp.c static int snd_sb_csp_use(struct snd_sb_csp * p)
p                 265 sound/isa/sb/sb16_csp.c 	mutex_lock(&p->access_mutex);
p                 266 sound/isa/sb/sb16_csp.c 	if (p->used) {
p                 267 sound/isa/sb/sb16_csp.c 		mutex_unlock(&p->access_mutex);
p                 270 sound/isa/sb/sb16_csp.c 	p->used++;
p                 271 sound/isa/sb/sb16_csp.c 	mutex_unlock(&p->access_mutex);
p                 280 sound/isa/sb/sb16_csp.c static int snd_sb_csp_unuse(struct snd_sb_csp * p)
p                 282 sound/isa/sb/sb16_csp.c 	mutex_lock(&p->access_mutex);
p                 283 sound/isa/sb/sb16_csp.c 	p->used--;
p                 284 sound/isa/sb/sb16_csp.c 	mutex_unlock(&p->access_mutex);
p                 293 sound/isa/sb/sb16_csp.c static int snd_sb_csp_riff_load(struct snd_sb_csp * p,
p                 352 sound/isa/sb/sb16_csp.c 			if (p->mode == SNDRV_SB_CSP_MODE_QSOUND) {
p                 353 sound/isa/sb/sb16_csp.c 				snd_sb_qsound_destroy(p);
p                 356 sound/isa/sb/sb16_csp.c 			p->running = 0;
p                 357 sound/isa/sb/sb16_csp.c 			p->mode = 0;
p                 370 sound/isa/sb/sb16_csp.c 				err = snd_sb_csp_load_user(p, data_ptr, le32_to_cpu(code_h.len),
p                 385 sound/isa/sb/sb16_csp.c 			err = snd_sb_csp_load_user(p, data_ptr,
p                 391 sound/isa/sb/sb16_csp.c 			strlcpy(p->codec_name, info.codec_name, sizeof(p->codec_name));
p                 392 sound/isa/sb/sb16_csp.c 			p->func_nr = func_nr;
p                 393 sound/isa/sb/sb16_csp.c 			p->mode = le16_to_cpu(funcdesc_h.flags_play_rec);
p                 397 sound/isa/sb/sb16_csp.c 					if (snd_sb_qsound_build(p) == 0)
p                 399 sound/isa/sb/sb16_csp.c 						p->mode = SNDRV_SB_CSP_MODE_QSOUND;
p                 401 sound/isa/sb/sb16_csp.c 				p->acc_format = 0;
p                 404 sound/isa/sb/sb16_csp.c 				p->acc_format = SNDRV_PCM_FMTBIT_A_LAW;
p                 407 sound/isa/sb/sb16_csp.c 				p->acc_format = SNDRV_PCM_FMTBIT_MU_LAW;
p                 411 sound/isa/sb/sb16_csp.c 				p->acc_format = SNDRV_PCM_FMTBIT_IMA_ADPCM;
p                 415 sound/isa/sb/sb16_csp.c 				p->acc_format = 0;
p                 419 sound/isa/sb/sb16_csp.c 				p->acc_format = SNDRV_PCM_FMTBIT_SPECIAL;
p                 422 sound/isa/sb/sb16_csp.c 				p->acc_format = p->acc_width = p->acc_rates = 0;
p                 423 sound/isa/sb/sb16_csp.c 				p->mode = 0;
p                 429 sound/isa/sb/sb16_csp.c 			p->acc_channels = le16_to_cpu(funcdesc_h.flags_stereo_mono);
p                 430 sound/isa/sb/sb16_csp.c 			p->acc_width = le16_to_cpu(funcdesc_h.flags_16bit_8bit);
p                 431 sound/isa/sb/sb16_csp.c 			p->acc_rates = le16_to_cpu(funcdesc_h.flags_rates);
p                 434 sound/isa/sb/sb16_csp.c 			spin_lock_irqsave(&p->chip->reg_lock, flags);
p                 435 sound/isa/sb/sb16_csp.c 			set_mode_register(p->chip, 0xfc);
p                 436 sound/isa/sb/sb16_csp.c 			set_mode_register(p->chip, 0x00);
p                 437 sound/isa/sb/sb16_csp.c 			spin_unlock_irqrestore(&p->chip->reg_lock, flags);
p                 440 sound/isa/sb/sb16_csp.c 			p->running = SNDRV_SB_CSP_ST_LOADED;	/* set LOADED flag */
p                 451 sound/isa/sb/sb16_csp.c static int snd_sb_csp_unload(struct snd_sb_csp * p)
p                 453 sound/isa/sb/sb16_csp.c 	if (p->running & SNDRV_SB_CSP_ST_RUNNING)
p                 455 sound/isa/sb/sb16_csp.c 	if (!(p->running & SNDRV_SB_CSP_ST_LOADED))
p                 459 sound/isa/sb/sb16_csp.c 	p->acc_format = 0;
p                 460 sound/isa/sb/sb16_csp.c 	p->acc_channels = p->acc_width = p->acc_rates = 0;
p                 462 sound/isa/sb/sb16_csp.c 	if (p->mode == SNDRV_SB_CSP_MODE_QSOUND) {
p                 463 sound/isa/sb/sb16_csp.c 		snd_sb_qsound_destroy(p);
p                 466 sound/isa/sb/sb16_csp.c 	p->running = 0;
p                 467 sound/isa/sb/sb16_csp.c 	p->mode = 0;
p                 595 sound/isa/sb/sb16_csp.c static int snd_sb_csp_check_version(struct snd_sb_csp * p)
p                 597 sound/isa/sb/sb16_csp.c 	if (p->version < 0x10 || p->version > 0x1f) {
p                 598 sound/isa/sb/sb16_csp.c 		snd_printd("%s: Invalid CSP version: 0x%x\n", __func__, p->version);
p                 607 sound/isa/sb/sb16_csp.c static int snd_sb_csp_load(struct snd_sb_csp * p, const unsigned char *buf, int size, int load_flags)
p                 614 sound/isa/sb/sb16_csp.c 	spin_lock_irqsave(&p->chip->reg_lock, flags);
p                 615 sound/isa/sb/sb16_csp.c 	snd_sbdsp_command(p->chip, 0x01);	/* CSP download command */
p                 616 sound/isa/sb/sb16_csp.c 	if (snd_sbdsp_get_byte(p->chip)) {
p                 621 sound/isa/sb/sb16_csp.c 	snd_sbdsp_command(p->chip, (unsigned char)(size - 1));
p                 623 sound/isa/sb/sb16_csp.c 	snd_sbdsp_command(p->chip, (unsigned char)((size - 1) >> 8));
p                 627 sound/isa/sb/sb16_csp.c 		if (!snd_sbdsp_command(p->chip, *buf++))
p                 630 sound/isa/sb/sb16_csp.c 	if (snd_sbdsp_get_byte(p->chip))
p                 637 sound/isa/sb/sb16_csp.c 			snd_sbdsp_command(p->chip, 0x03);
p                 638 sound/isa/sb/sb16_csp.c 			status = snd_sbdsp_get_byte(p->chip);
p                 653 sound/isa/sb/sb16_csp.c 		spin_lock(&p->chip->mixer_lock);
p                 654 sound/isa/sb/sb16_csp.c 		status = snd_sbmixer_read(p->chip, SB_DSP4_DMASETUP);
p                 655 sound/isa/sb/sb16_csp.c 		spin_unlock(&p->chip->mixer_lock);
p                 657 sound/isa/sb/sb16_csp.c 			err = (set_codec_parameter(p->chip, 0xaa, 0x00) ||
p                 658 sound/isa/sb/sb16_csp.c 			       set_codec_parameter(p->chip, 0xff, 0x00));
p                 659 sound/isa/sb/sb16_csp.c 			snd_sbdsp_reset(p->chip);		/* really! */
p                 662 sound/isa/sb/sb16_csp.c 			set_mode_register(p->chip, 0xc0);	/* c0 = STOP */
p                 663 sound/isa/sb/sb16_csp.c 			set_mode_register(p->chip, 0x70);	/* 70 = RUN */
p                 669 sound/isa/sb/sb16_csp.c 	spin_unlock_irqrestore(&p->chip->reg_lock, flags);
p                 673 sound/isa/sb/sb16_csp.c static int snd_sb_csp_load_user(struct snd_sb_csp * p, const unsigned char __user *buf, int size, int load_flags)
p                 682 sound/isa/sb/sb16_csp.c 	err = snd_sb_csp_load(p, kbuf, size, load_flags);
p                 688 sound/isa/sb/sb16_csp.c static int snd_sb_csp_firmware_load(struct snd_sb_csp *p, int index, int flags)
p                 700 sound/isa/sb/sb16_csp.c 	program = p->csp_programs[index];
p                 703 sound/isa/sb/sb16_csp.c 				       p->chip->card->dev);
p                 706 sound/isa/sb/sb16_csp.c 		p->csp_programs[index] = program;
p                 708 sound/isa/sb/sb16_csp.c 	return snd_sb_csp_load(p, program->data, program->size, flags);
p                 715 sound/isa/sb/sb16_csp.c static int snd_sb_csp_autoload(struct snd_sb_csp * p, snd_pcm_format_t pcm_sfmt, int play_rec_mode)
p                 721 sound/isa/sb/sb16_csp.c 	if (p->running & (SNDRV_SB_CSP_ST_RUNNING | SNDRV_SB_CSP_ST_LOADED)) 
p                 725 sound/isa/sb/sb16_csp.c 	if (((1U << (__force int)pcm_sfmt) & p->acc_format) && (play_rec_mode & p->mode)) {
p                 726 sound/isa/sb/sb16_csp.c 		p->running = SNDRV_SB_CSP_ST_AUTO;
p                 730 sound/isa/sb/sb16_csp.c 			err = snd_sb_csp_firmware_load(p, CSP_PROGRAM_MULAW, 0);
p                 731 sound/isa/sb/sb16_csp.c 			p->acc_format = SNDRV_PCM_FMTBIT_MU_LAW;
p                 732 sound/isa/sb/sb16_csp.c 			p->mode = SNDRV_SB_CSP_MODE_DSP_READ | SNDRV_SB_CSP_MODE_DSP_WRITE;
p                 735 sound/isa/sb/sb16_csp.c 			err = snd_sb_csp_firmware_load(p, CSP_PROGRAM_ALAW, 0);
p                 736 sound/isa/sb/sb16_csp.c 			p->acc_format = SNDRV_PCM_FMTBIT_A_LAW;
p                 737 sound/isa/sb/sb16_csp.c 			p->mode = SNDRV_SB_CSP_MODE_DSP_READ | SNDRV_SB_CSP_MODE_DSP_WRITE;
p                 740 sound/isa/sb/sb16_csp.c 			err = snd_sb_csp_firmware_load(p, CSP_PROGRAM_ADPCM_INIT,
p                 746 sound/isa/sb/sb16_csp.c 					(p, CSP_PROGRAM_ADPCM_PLAYBACK, 0);
p                 747 sound/isa/sb/sb16_csp.c 				p->mode = SNDRV_SB_CSP_MODE_DSP_WRITE;
p                 750 sound/isa/sb/sb16_csp.c 					(p, CSP_PROGRAM_ADPCM_CAPTURE, 0);
p                 751 sound/isa/sb/sb16_csp.c 				p->mode = SNDRV_SB_CSP_MODE_DSP_READ;
p                 753 sound/isa/sb/sb16_csp.c 			p->acc_format = SNDRV_PCM_FMTBIT_IMA_ADPCM;
p                 757 sound/isa/sb/sb16_csp.c 			if (p->running & SNDRV_SB_CSP_ST_AUTO) {
p                 758 sound/isa/sb/sb16_csp.c 				spin_lock_irqsave(&p->chip->reg_lock, flags);
p                 759 sound/isa/sb/sb16_csp.c 				set_mode_register(p->chip, 0xfc);
p                 760 sound/isa/sb/sb16_csp.c 				set_mode_register(p->chip, 0x00);
p                 761 sound/isa/sb/sb16_csp.c 				spin_unlock_irqrestore(&p->chip->reg_lock, flags);
p                 762 sound/isa/sb/sb16_csp.c 				p->running = 0;			/* clear autoloaded flag */
p                 767 sound/isa/sb/sb16_csp.c 			p->acc_format = 0;
p                 768 sound/isa/sb/sb16_csp.c 			p->acc_channels = p->acc_width = p->acc_rates = 0;
p                 770 sound/isa/sb/sb16_csp.c 			p->running = 0;				/* clear autoloaded flag */
p                 771 sound/isa/sb/sb16_csp.c 			p->mode = 0;
p                 774 sound/isa/sb/sb16_csp.c 			p->running = SNDRV_SB_CSP_ST_AUTO;	/* set autoloaded flag */
p                 775 sound/isa/sb/sb16_csp.c 			p->acc_width = SNDRV_SB_CSP_SAMPLE_16BIT;	/* only 16 bit data */
p                 776 sound/isa/sb/sb16_csp.c 			p->acc_channels = SNDRV_SB_CSP_MONO | SNDRV_SB_CSP_STEREO;
p                 777 sound/isa/sb/sb16_csp.c 			p->acc_rates = SNDRV_SB_CSP_RATE_ALL;	/* HW codecs accept all rates */
p                 781 sound/isa/sb/sb16_csp.c 	return (p->running & SNDRV_SB_CSP_ST_AUTO) ? 0 : -ENXIO;
p                 787 sound/isa/sb/sb16_csp.c static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channels)
p                 794 sound/isa/sb/sb16_csp.c 	if (!(p->running & (SNDRV_SB_CSP_ST_LOADED | SNDRV_SB_CSP_ST_AUTO))) {
p                 798 sound/isa/sb/sb16_csp.c 	if (p->running & SNDRV_SB_CSP_ST_RUNNING) {
p                 802 sound/isa/sb/sb16_csp.c 	if (!(sample_width & p->acc_width)) {
p                 806 sound/isa/sb/sb16_csp.c 	if (!(channels & p->acc_channels)) {
p                 812 sound/isa/sb/sb16_csp.c 	spin_lock_irqsave(&p->chip->mixer_lock, flags);
p                 813 sound/isa/sb/sb16_csp.c 	mixL = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV);
p                 814 sound/isa/sb/sb16_csp.c 	mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1);
p                 815 sound/isa/sb/sb16_csp.c 	snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7);
p                 816 sound/isa/sb/sb16_csp.c 	snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7);
p                 818 sound/isa/sb/sb16_csp.c 	spin_lock(&p->chip->reg_lock);
p                 819 sound/isa/sb/sb16_csp.c 	set_mode_register(p->chip, 0xc0);	/* c0 = STOP */
p                 820 sound/isa/sb/sb16_csp.c 	set_mode_register(p->chip, 0x70);	/* 70 = RUN */
p                 828 sound/isa/sb/sb16_csp.c 	if (set_codec_parameter(p->chip, 0x81, s_type)) {
p                 832 sound/isa/sb/sb16_csp.c 	if (set_codec_parameter(p->chip, 0x80, 0x00)) {
p                 836 sound/isa/sb/sb16_csp.c 	p->run_width = sample_width;
p                 837 sound/isa/sb/sb16_csp.c 	p->run_channels = channels;
p                 839 sound/isa/sb/sb16_csp.c 	p->running |= SNDRV_SB_CSP_ST_RUNNING;
p                 841 sound/isa/sb/sb16_csp.c 	if (p->mode & SNDRV_SB_CSP_MODE_QSOUND) {
p                 842 sound/isa/sb/sb16_csp.c 		set_codec_parameter(p->chip, 0xe0, 0x01);
p                 844 sound/isa/sb/sb16_csp.c 		set_codec_parameter(p->chip, 0x00, 0xff);
p                 845 sound/isa/sb/sb16_csp.c 		set_codec_parameter(p->chip, 0x01, 0xff);
p                 846 sound/isa/sb/sb16_csp.c 		p->running |= SNDRV_SB_CSP_ST_QSOUND;
p                 848 sound/isa/sb/sb16_csp.c 		snd_sb_csp_qsound_transfer(p);
p                 853 sound/isa/sb/sb16_csp.c 	spin_unlock(&p->chip->reg_lock);
p                 856 sound/isa/sb/sb16_csp.c 	snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL);
p                 857 sound/isa/sb/sb16_csp.c 	snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR);
p                 858 sound/isa/sb/sb16_csp.c 	spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
p                 866 sound/isa/sb/sb16_csp.c static int snd_sb_csp_stop(struct snd_sb_csp * p)
p                 872 sound/isa/sb/sb16_csp.c 	if (!(p->running & SNDRV_SB_CSP_ST_RUNNING))
p                 876 sound/isa/sb/sb16_csp.c 	spin_lock_irqsave(&p->chip->mixer_lock, flags);
p                 877 sound/isa/sb/sb16_csp.c 	mixL = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV);
p                 878 sound/isa/sb/sb16_csp.c 	mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1);
p                 879 sound/isa/sb/sb16_csp.c 	snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7);
p                 880 sound/isa/sb/sb16_csp.c 	snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7);
p                 882 sound/isa/sb/sb16_csp.c 	spin_lock(&p->chip->reg_lock);
p                 883 sound/isa/sb/sb16_csp.c 	if (p->running & SNDRV_SB_CSP_ST_QSOUND) {
p                 884 sound/isa/sb/sb16_csp.c 		set_codec_parameter(p->chip, 0xe0, 0x01);
p                 886 sound/isa/sb/sb16_csp.c 		set_codec_parameter(p->chip, 0x00, 0x00);
p                 887 sound/isa/sb/sb16_csp.c 		set_codec_parameter(p->chip, 0x01, 0x00);
p                 889 sound/isa/sb/sb16_csp.c 		p->running &= ~SNDRV_SB_CSP_ST_QSOUND;
p                 891 sound/isa/sb/sb16_csp.c 	result = set_mode_register(p->chip, 0xc0);	/* c0 = STOP */
p                 892 sound/isa/sb/sb16_csp.c 	spin_unlock(&p->chip->reg_lock);
p                 895 sound/isa/sb/sb16_csp.c 	snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL);
p                 896 sound/isa/sb/sb16_csp.c 	snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR);
p                 897 sound/isa/sb/sb16_csp.c 	spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
p                 900 sound/isa/sb/sb16_csp.c 		p->running &= ~(SNDRV_SB_CSP_ST_PAUSED | SNDRV_SB_CSP_ST_RUNNING);
p                 907 sound/isa/sb/sb16_csp.c static int snd_sb_csp_pause(struct snd_sb_csp * p)
p                 912 sound/isa/sb/sb16_csp.c 	if (!(p->running & SNDRV_SB_CSP_ST_RUNNING))
p                 915 sound/isa/sb/sb16_csp.c 	spin_lock_irqsave(&p->chip->reg_lock, flags);
p                 916 sound/isa/sb/sb16_csp.c 	result = set_codec_parameter(p->chip, 0x80, 0xff);
p                 917 sound/isa/sb/sb16_csp.c 	spin_unlock_irqrestore(&p->chip->reg_lock, flags);
p                 919 sound/isa/sb/sb16_csp.c 		p->running |= SNDRV_SB_CSP_ST_PAUSED;
p                 927 sound/isa/sb/sb16_csp.c static int snd_sb_csp_restart(struct snd_sb_csp * p)
p                 932 sound/isa/sb/sb16_csp.c 	if (!(p->running & SNDRV_SB_CSP_ST_PAUSED))
p                 935 sound/isa/sb/sb16_csp.c 	spin_lock_irqsave(&p->chip->reg_lock, flags);
p                 936 sound/isa/sb/sb16_csp.c 	result = set_codec_parameter(p->chip, 0x80, 0x00);
p                 937 sound/isa/sb/sb16_csp.c 	spin_unlock_irqrestore(&p->chip->reg_lock, flags);
p                 939 sound/isa/sb/sb16_csp.c 		p->running &= ~SNDRV_SB_CSP_ST_PAUSED;
p                 954 sound/isa/sb/sb16_csp.c 	struct snd_sb_csp *p = snd_kcontrol_chip(kcontrol);
p                 956 sound/isa/sb/sb16_csp.c 	ucontrol->value.integer.value[0] = p->q_enabled ? 1 : 0;
p                 962 sound/isa/sb/sb16_csp.c 	struct snd_sb_csp *p = snd_kcontrol_chip(kcontrol);
p                 968 sound/isa/sb/sb16_csp.c 	spin_lock_irqsave(&p->q_lock, flags);
p                 969 sound/isa/sb/sb16_csp.c 	change = p->q_enabled != nval;
p                 970 sound/isa/sb/sb16_csp.c 	p->q_enabled = nval;
p                 971 sound/isa/sb/sb16_csp.c 	spin_unlock_irqrestore(&p->q_lock, flags);
p                 986 sound/isa/sb/sb16_csp.c 	struct snd_sb_csp *p = snd_kcontrol_chip(kcontrol);
p                 989 sound/isa/sb/sb16_csp.c 	spin_lock_irqsave(&p->q_lock, flags);
p                 990 sound/isa/sb/sb16_csp.c 	ucontrol->value.integer.value[0] = p->qpos_left;
p                 991 sound/isa/sb/sb16_csp.c 	ucontrol->value.integer.value[1] = p->qpos_right;
p                 992 sound/isa/sb/sb16_csp.c 	spin_unlock_irqrestore(&p->q_lock, flags);
p                 998 sound/isa/sb/sb16_csp.c 	struct snd_sb_csp *p = snd_kcontrol_chip(kcontrol);
p                1009 sound/isa/sb/sb16_csp.c 	spin_lock_irqsave(&p->q_lock, flags);
p                1010 sound/isa/sb/sb16_csp.c 	change = p->qpos_left != nval1 || p->qpos_right != nval2;
p                1011 sound/isa/sb/sb16_csp.c 	p->qpos_left = nval1;
p                1012 sound/isa/sb/sb16_csp.c 	p->qpos_right = nval2;
p                1013 sound/isa/sb/sb16_csp.c 	p->qpos_changed = change;
p                1014 sound/isa/sb/sb16_csp.c 	spin_unlock_irqrestore(&p->q_lock, flags);
p                1034 sound/isa/sb/sb16_csp.c static int snd_sb_qsound_build(struct snd_sb_csp * p)
p                1039 sound/isa/sb/sb16_csp.c 	if (snd_BUG_ON(!p))
p                1042 sound/isa/sb/sb16_csp.c 	card = p->chip->card;
p                1043 sound/isa/sb/sb16_csp.c 	p->qpos_left = p->qpos_right = SNDRV_SB_CSP_QSOUND_MAX_RIGHT / 2;
p                1044 sound/isa/sb/sb16_csp.c 	p->qpos_changed = 0;
p                1046 sound/isa/sb/sb16_csp.c 	spin_lock_init(&p->q_lock);
p                1048 sound/isa/sb/sb16_csp.c 	if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0)
p                1050 sound/isa/sb/sb16_csp.c 	if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0)
p                1056 sound/isa/sb/sb16_csp.c 	snd_sb_qsound_destroy(p);
p                1060 sound/isa/sb/sb16_csp.c static void snd_sb_qsound_destroy(struct snd_sb_csp * p)
p                1065 sound/isa/sb/sb16_csp.c 	if (snd_BUG_ON(!p))
p                1068 sound/isa/sb/sb16_csp.c 	card = p->chip->card;	
p                1071 sound/isa/sb/sb16_csp.c 	if (p->qsound_switch)
p                1072 sound/isa/sb/sb16_csp.c 		snd_ctl_remove(card, p->qsound_switch);
p                1073 sound/isa/sb/sb16_csp.c 	if (p->qsound_space)
p                1074 sound/isa/sb/sb16_csp.c 		snd_ctl_remove(card, p->qsound_space);
p                1078 sound/isa/sb/sb16_csp.c 	spin_lock_irqsave (&p->q_lock, flags);
p                1079 sound/isa/sb/sb16_csp.c 	p->qpos_changed = 0;
p                1080 sound/isa/sb/sb16_csp.c 	spin_unlock_irqrestore (&p->q_lock, flags);
p                1087 sound/isa/sb/sb16_csp.c static int snd_sb_csp_qsound_transfer(struct snd_sb_csp * p)
p                1091 sound/isa/sb/sb16_csp.c 	spin_lock(&p->q_lock);
p                1092 sound/isa/sb/sb16_csp.c 	if (p->running & SNDRV_SB_CSP_ST_QSOUND) {
p                1093 sound/isa/sb/sb16_csp.c 		set_codec_parameter(p->chip, 0xe0, 0x01);
p                1095 sound/isa/sb/sb16_csp.c 		set_codec_parameter(p->chip, 0x00, p->qpos_left);
p                1096 sound/isa/sb/sb16_csp.c 		set_codec_parameter(p->chip, 0x02, 0x00);
p                1098 sound/isa/sb/sb16_csp.c 		set_codec_parameter(p->chip, 0x00, p->qpos_right);
p                1099 sound/isa/sb/sb16_csp.c 		set_codec_parameter(p->chip, 0x03, 0x00);
p                1102 sound/isa/sb/sb16_csp.c 	p->qpos_changed = 0;
p                1103 sound/isa/sb/sb16_csp.c 	spin_unlock(&p->q_lock);
p                1112 sound/isa/sb/sb16_csp.c static int init_proc_entry(struct snd_sb_csp * p, int device)
p                1117 sound/isa/sb/sb16_csp.c 	snd_card_ro_proc_new(p->chip->card, name, p, info_read);
p                1123 sound/isa/sb/sb16_csp.c 	struct snd_sb_csp *p = entry->private_data;
p                1125 sound/isa/sb/sb16_csp.c 	snd_iprintf(buffer, "Creative Signal Processor [v%d.%d]\n", (p->version >> 4), (p->version & 0x0f));
p                1126 sound/isa/sb/sb16_csp.c 	snd_iprintf(buffer, "State: %cx%c%c%c\n", ((p->running & SNDRV_SB_CSP_ST_QSOUND) ? 'Q' : '-'),
p                1127 sound/isa/sb/sb16_csp.c 		    ((p->running & SNDRV_SB_CSP_ST_PAUSED) ? 'P' : '-'),
p                1128 sound/isa/sb/sb16_csp.c 		    ((p->running & SNDRV_SB_CSP_ST_RUNNING) ? 'R' : '-'),
p                1129 sound/isa/sb/sb16_csp.c 		    ((p->running & SNDRV_SB_CSP_ST_LOADED) ? 'L' : '-'));
p                1130 sound/isa/sb/sb16_csp.c 	if (p->running & SNDRV_SB_CSP_ST_LOADED) {
p                1131 sound/isa/sb/sb16_csp.c 		snd_iprintf(buffer, "Codec: %s [func #%d]\n", p->codec_name, p->func_nr);
p                1133 sound/isa/sb/sb16_csp.c 		if (p->acc_rates == SNDRV_SB_CSP_RATE_ALL) {
p                1137 sound/isa/sb/sb16_csp.c 				    ((p->acc_rates & SNDRV_SB_CSP_RATE_8000) ? "8000Hz " : ""),
p                1138 sound/isa/sb/sb16_csp.c 				    ((p->acc_rates & SNDRV_SB_CSP_RATE_11025) ? "11025Hz " : ""),
p                1139 sound/isa/sb/sb16_csp.c 				    ((p->acc_rates & SNDRV_SB_CSP_RATE_22050) ? "22050Hz " : ""),
p                1140 sound/isa/sb/sb16_csp.c 				    ((p->acc_rates & SNDRV_SB_CSP_RATE_44100) ? "44100Hz" : ""));
p                1142 sound/isa/sb/sb16_csp.c 		if (p->mode == SNDRV_SB_CSP_MODE_QSOUND) {
p                1144 sound/isa/sb/sb16_csp.c 				    p->q_enabled ? "en" : "dis");
p                1147 sound/isa/sb/sb16_csp.c 				    p->acc_format,
p                1148 sound/isa/sb/sb16_csp.c 				    ((p->acc_width & SNDRV_SB_CSP_SAMPLE_16BIT) ? "16bit" : "-"),
p                1149 sound/isa/sb/sb16_csp.c 				    ((p->acc_width & SNDRV_SB_CSP_SAMPLE_8BIT) ? "8bit" : "-"),
p                1150 sound/isa/sb/sb16_csp.c 				    ((p->acc_channels & SNDRV_SB_CSP_MONO) ? "mono" : "-"),
p                1151 sound/isa/sb/sb16_csp.c 				    ((p->acc_channels & SNDRV_SB_CSP_STEREO) ? "stereo" : "-"),
p                1152 sound/isa/sb/sb16_csp.c 				    ((p->mode & SNDRV_SB_CSP_MODE_DSP_WRITE) ? "playback" : "-"),
p                1153 sound/isa/sb/sb16_csp.c 				    ((p->mode & SNDRV_SB_CSP_MODE_DSP_READ) ? "capture" : "-"));
p                1156 sound/isa/sb/sb16_csp.c 	if (p->running & SNDRV_SB_CSP_ST_AUTO) {
p                1159 sound/isa/sb/sb16_csp.c 	if (p->running & SNDRV_SB_CSP_ST_RUNNING) {
p                1161 sound/isa/sb/sb16_csp.c 			    ((p->run_width & SNDRV_SB_CSP_SAMPLE_16BIT) ? 16 : 8),
p                1162 sound/isa/sb/sb16_csp.c 			    ((p->run_channels & SNDRV_SB_CSP_MONO) ? "mono" : "stereo"));
p                1164 sound/isa/sb/sb16_csp.c 	if (p->running & SNDRV_SB_CSP_ST_QSOUND) {
p                1166 sound/isa/sb/sb16_csp.c 			    p->qpos_left, p->qpos_right);
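The sb16_csp.c entries above all revolve around one bracket: the PCM mixer level is saved and dropped under mixer_lock before the CSP is reprogrammed under reg_lock, then restored on the way out. A minimal sketch of that mute/restore bracket, assuming only the snd_sb chip handle and the mixer helpers declared in include/sound/sb.h (the function name is illustrative):

#include <linux/spinlock.h>
#include <sound/sb.h>

/* Sketch: temporarily lower the SB16 PCM mixer while the CSP is being
 * reprogrammed, then restore the saved levels -- the bracket visible in
 * the start/stop paths indexed above.
 */
static void csp_mixer_quiet_section(struct snd_sb *chip)
{
	unsigned long flags;
	unsigned char mixL, mixR;

	spin_lock_irqsave(&chip->mixer_lock, flags);
	mixL = snd_sbmixer_read(chip, SB_DSP4_PCM_DEV);
	mixR = snd_sbmixer_read(chip, SB_DSP4_PCM_DEV + 1);
	snd_sbmixer_write(chip, SB_DSP4_PCM_DEV, mixL & 0x7);
	snd_sbmixer_write(chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7);

	/* ... program the CSP here under chip->reg_lock ... */

	snd_sbmixer_write(chip, SB_DSP4_PCM_DEV, mixL);
	snd_sbmixer_write(chip, SB_DSP4_PCM_DEV + 1, mixR);
	spin_unlock_irqrestore(&chip->mixer_lock, flags);
}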
p                 687 sound/isa/wavefront/wavefront_synth.c 	wavefront_patch *p;
p                 698 sound/isa/wavefront/wavefront_synth.c 			p = (wavefront_patch *) patchbuf;
p                 700 sound/isa/wavefront/wavefront_synth.c 				[p->sample_number|(p->sample_msb<<7)] |=
p                 792 sound/isa/wavefront/wavefront_synth.c 	munge_buf ((unsigned char *)&header->hdr.p, bptr, WF_PATCH_BYTES);
p                1432 sound/isa/wavefront/wavefront_synth.c 		if (copy_from_user (&header->hdr.p, header->hdrptr,
p                 162 sound/oss/dmasound/dmasound_atari.c 	u_char *p = &frame[*frameUsed];
p                 172 sound/oss/dmasound/dmasound_atari.c 		*p++ = table[data];
p                 185 sound/oss/dmasound/dmasound_atari.c 	void *p = &frame[*frameUsed];
p                 191 sound/oss/dmasound/dmasound_atari.c 	if (copy_from_user(p, userPtr, count))
p                 205 sound/oss/dmasound/dmasound_atari.c 		u_char *p = &frame[*frameUsed];
p                 212 sound/oss/dmasound/dmasound_atari.c 			*p++ = data ^ 0x80;
p                 216 sound/oss/dmasound/dmasound_atari.c 		u_short *p = (u_short *)&frame[*frameUsed];
p                 224 sound/oss/dmasound/dmasound_atari.c 			*p++ = data ^ 0x8080;
p                 240 sound/oss/dmasound/dmasound_atari.c 		u_short *p = (u_short *)&frame[*frameUsed];
p                 248 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 249 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 254 sound/oss/dmasound/dmasound_atari.c 		void *p = (u_short *)&frame[*frameUsed];
p                 257 sound/oss/dmasound/dmasound_atari.c 		if (copy_from_user(p, userPtr, count))
p                 272 sound/oss/dmasound/dmasound_atari.c 		u_short *p = (u_short *)&frame[*frameUsed];
p                 281 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 282 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 287 sound/oss/dmasound/dmasound_atari.c 		u_long *p = (u_long *)&frame[*frameUsed];
p                 295 sound/oss/dmasound/dmasound_atari.c 			*p++ = data ^ 0x80008000;
p                 312 sound/oss/dmasound/dmasound_atari.c 		u_short *p = (u_short *)&frame[*frameUsed];
p                 321 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 322 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 327 sound/oss/dmasound/dmasound_atari.c 		u_long *p = (u_long *)&frame[*frameUsed];
p                 336 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 353 sound/oss/dmasound/dmasound_atari.c 		u_short *p = (u_short *)&frame[*frameUsed];
p                 362 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 363 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 367 sound/oss/dmasound/dmasound_atari.c 		u_long *p = (u_long *)&frame[*frameUsed];
p                 376 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 399 sound/oss/dmasound/dmasound_atari.c 		u_char *p = &frame[*frameUsed];
p                 412 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 418 sound/oss/dmasound/dmasound_atari.c 		u_short *p = (u_short *)&frame[*frameUsed];
p                 434 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 459 sound/oss/dmasound/dmasound_atari.c 		u_char *p = &frame[*frameUsed];
p                 470 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 476 sound/oss/dmasound/dmasound_atari.c 		u_short *p = (u_short *)&frame[*frameUsed];
p                 488 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 513 sound/oss/dmasound/dmasound_atari.c 		u_char *p = &frame[*frameUsed];
p                 525 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 531 sound/oss/dmasound/dmasound_atari.c 		u_short *p = (u_short *)&frame[*frameUsed];
p                 544 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 569 sound/oss/dmasound/dmasound_atari.c 		u_short *p = (u_short *)&frame[*frameUsed];
p                 581 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 582 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 588 sound/oss/dmasound/dmasound_atari.c 		u_long *p = (u_long *)&frame[*frameUsed];
p                 600 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 625 sound/oss/dmasound/dmasound_atari.c 		u_short *p = (u_short *)&frame[*frameUsed];
p                 638 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 639 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 645 sound/oss/dmasound/dmasound_atari.c 		u_long *p = (u_long *)&frame[*frameUsed];
p                 658 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 683 sound/oss/dmasound/dmasound_atari.c 		u_short *p = (u_short *)&frame[*frameUsed];
p                 696 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 697 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 703 sound/oss/dmasound/dmasound_atari.c 		u_long *p = (u_long *)&frame[*frameUsed];
p                 716 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 741 sound/oss/dmasound/dmasound_atari.c 		u_short *p = (u_short *)&frame[*frameUsed];
p                 754 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 755 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 761 sound/oss/dmasound/dmasound_atari.c 		u_long *p = (u_long *)&frame[*frameUsed];
p                 774 sound/oss/dmasound/dmasound_atari.c 			*p++ = data;
p                 163 sound/oss/dmasound/dmasound_paula.c 		void *p = &frame[*frameUsed];
p                 166 sound/oss/dmasound/dmasound_paula.c 		if (copy_from_user(p, userPtr, count))
p                 197 sound/oss/dmasound/dmasound_paula.c 		u_char *p = &frame[*frameUsed];				\
p                 204 sound/oss/dmasound/dmasound_paula.c 			*p++ = convsample(data);			\
p                  68 sound/oss/dmasound/dmasound_q40.c 	u_char *p = (u_char *) &frame[*frameUsed];
p                  71 sound/oss/dmasound/dmasound_q40.c 	if (copy_from_user(p,userPtr,count))
p                  74 sound/oss/dmasound/dmasound_q40.c 		*p = table[*p]+128;
p                  75 sound/oss/dmasound/dmasound_q40.c 		p++;
p                  88 sound/oss/dmasound/dmasound_q40.c 	u_char *p = (u_char *) &frame[*frameUsed];
p                  91 sound/oss/dmasound/dmasound_q40.c 	if (copy_from_user(p,userPtr,count))
p                  94 sound/oss/dmasound/dmasound_q40.c 		*p = *p + 128;
p                  95 sound/oss/dmasound/dmasound_q40.c 		p++;
p                 107 sound/oss/dmasound/dmasound_q40.c 	u_char *p = (u_char *) &frame[*frameUsed];
p                 110 sound/oss/dmasound/dmasound_q40.c 	if (copy_from_user(p,userPtr,count))
p                 125 sound/oss/dmasound/dmasound_q40.c 	u_char *p = (u_char *) &frame[*frameUsed];
p                 144 sound/oss/dmasound/dmasound_q40.c 		*p++ = data;
p                 160 sound/oss/dmasound/dmasound_q40.c 	u_char *p = (u_char *) &frame[*frameUsed];
p                 181 sound/oss/dmasound/dmasound_q40.c 		*p++ = data;
p                 197 sound/oss/dmasound/dmasound_q40.c 	u_char *p = (u_char *) &frame[*frameUsed];
p                 216 sound/oss/dmasound/dmasound_q40.c 		*p++ = data;
p                 235 sound/oss/dmasound/dmasound_q40.c 	u_char *p = (u_char *) &frame[*frameUsed];
p                 256 sound/oss/dmasound/dmasound_q40.c 		*p++ = data;
p                 273 sound/oss/dmasound/dmasound_q40.c 	u_char *p = (u_char *) &frame[*frameUsed];
p                 295 sound/oss/dmasound/dmasound_q40.c 		*p++ = data;
p                 312 sound/oss/dmasound/dmasound_q40.c 	u_char *p = (u_char *) &frame[*frameUsed];
p                 334 sound/oss/dmasound/dmasound_q40.c 		*p++ = data;
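The dmasound converters above (dmasound_atari.c, dmasound_paula.c, dmasound_q40.c) copy user samples into the DMA frame and then convert them in place, for example flipping signed 8-bit data to the unsigned form the hardware expects. A hedged sketch of that copy-and-convert step; the function name and return convention are illustrative:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Sketch: pull signed 8-bit samples from user space into the frame buffer,
 * then shift them to unsigned representation (the "+ 128" seen in
 * dmasound_q40.c above).
 */
static ssize_t copy_and_convert_s8(unsigned char *dst,
				   const unsigned char __user *src,
				   size_t count)
{
	size_t i;

	if (copy_from_user(dst, src, count))
		return -EFAULT;
	for (i = 0; i < count; i++)
		dst[i] += 128;	/* signed 8-bit -> unsigned 8-bit */
	return count;
}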
p                1229 sound/pci/ac97/ac97_codec.c 	kctl->tlv.p = tlv;
p                  48 sound/pci/ac97/ac97_patch.c 	if (kctl && kctl->tlv.p)
p                  49 sound/pci/ac97/ac97_patch.c 		kctl->tlv.p = tlv;
p                3454 sound/pci/ac97/ac97_patch.c 				   kctl->tlv.p, slave_vols_vt1616);
p                  58 sound/pci/ak4531_codec.c   .tlv = { .p = (xtlv) } }
p                 125 sound/pci/ak4531_codec.c   .tlv = { .p = (xtlv) } }
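Many of the .tlv = { .p = ... } entries indexed here (ak4531_codec.c, es1938.c, ca0106, fm801, and the ice1712 boards further down) wire a static dB-range table into a mixer control. A hedged sketch of that idiom; the control name, dB range and callbacks are illustrative, not taken from any of the drivers above:

#include <sound/control.h>
#include <sound/tlv.h>

/* -94.50 dB .. 0 dB in 1.50 dB steps; the values are examples only */
static const DECLARE_TLV_DB_SCALE(example_db_scale, -9450, 150, 0);

/* hypothetical callbacks, declared only so the table below is self-contained */
static int example_vol_info(struct snd_kcontrol *k, struct snd_ctl_elem_info *i);
static int example_vol_get(struct snd_kcontrol *k, struct snd_ctl_elem_value *v);
static int example_vol_put(struct snd_kcontrol *k, struct snd_ctl_elem_value *v);

static const struct snd_kcontrol_new example_vol = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Example Playback Volume",
	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
		  SNDRV_CTL_ELEM_ACCESS_TLV_READ,
	.info = example_vol_info,
	.get = example_vol_get,
	.put = example_vol_put,
	.tlv = { .p = example_db_scale },	/* static dB map exported to user space */
};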
p                 284 sound/pci/asihpi/asihpi.c 				struct snd_pcm_hw_params *p)
p                 290 sound/pci/asihpi/asihpi.c 		params_rate(p), params_channels(p),
p                 291 sound/pci/asihpi/asihpi.c 		params_format(p), params_subformat(p));
p                 293 sound/pci/asihpi/asihpi.c 		params_buffer_bytes(p), params_period_bytes(p),
p                 294 sound/pci/asihpi/asihpi.c 		params_period_size(p), params_periods(p));
p                 296 sound/pci/asihpi/asihpi.c 		params_buffer_size(p), params_access(p),
p                 297 sound/pci/asihpi/asihpi.c 		params_rate(p) * params_channels(p) *
p                 298 sound/pci/asihpi/asihpi.c 		snd_pcm_format_width(params_format(p)) / 8);
p                1578 sound/pci/asihpi/asihpi.c 	snd_control.tlv.p = db_scale_100;
p                1673 sound/pci/asihpi/asihpi.c 	snd_control.tlv.p = db_scale_level;
p                1020 sound/pci/asihpi/hpi_internal.h 		struct hpi_profile_msg p;
p                1081 sound/pci/asihpi/hpi_internal.h 		struct hpi_profile_res p;
p                1123 sound/pci/asihpi/hpi_internal.h 	struct hpi_adapter_res p;
p                1140 sound/pci/asihpi/hpi_internal.h 	struct hpi_msg_cobranet_hmi p;
p                1146 sound/pci/asihpi/hpi_internal.h 	struct hpi_msg_cobranet_hmi p;
p                1189 sound/pci/asihpi/hpi_internal.h 		struct hpi_profile_msg p;
p                1208 sound/pci/asihpi/hpi_internal.h 		struct hpi_profile_res p;
p                1680 sound/pci/asihpi/hpifunc.c 	hm.p.attribute = HPI_COBRANET_SET;
p                1681 sound/pci/asihpi/hpifunc.c 	hm.p.byte_count = byte_count;
p                1682 sound/pci/asihpi/hpifunc.c 	hm.p.hmi_address = hmi_address;
p                1684 sound/pci/asihpi/hpifunc.c 	hm.h.size = (u16)(sizeof(hm.h) + sizeof(hm.p) + byte_count);
p                1706 sound/pci/asihpi/hpifunc.c 	hm.p.attribute = HPI_COBRANET_GET;
p                1707 sound/pci/asihpi/hpifunc.c 	hm.p.byte_count = max_byte_count;
p                1708 sound/pci/asihpi/hpifunc.c 	hm.p.hmi_address = hmi_address;
p                1167 sound/pci/au88x0/au88x0_core.c 	int page, p, pp, delta, i;
p                1186 sound/pci/au88x0/au88x0_core.c 			p = dma->period_virt + i + 4;
p                1187 sound/pci/au88x0/au88x0_core.c 			if (p >= dma->nr_periods)
p                1188 sound/pci/au88x0/au88x0_core.c 				p -= dma->nr_periods;
p                1197 sound/pci/au88x0/au88x0_core.c 				dma->period_bytes * p));
p                1218 sound/pci/au88x0/au88x0_core.c 	int p, pp, i;
p                1223 sound/pci/au88x0/au88x0_core.c 		p = dma->period_virt + i;
p                1224 sound/pci/au88x0/au88x0_core.c 		if (p >= dma->nr_periods)
p                1225 sound/pci/au88x0/au88x0_core.c 			p -= dma->nr_periods;
p                1239 sound/pci/au88x0/au88x0_core.c 					       dma->period_bytes * p));
p                1438 sound/pci/au88x0/au88x0_core.c 	int page, p, pp, delta, i;
p                1457 sound/pci/au88x0/au88x0_core.c 			p = dma->period_virt + i + 4;
p                1458 sound/pci/au88x0/au88x0_core.c 			if (p >= dma->nr_periods)
p                1459 sound/pci/au88x0/au88x0_core.c 				p -= dma->nr_periods;
p                1468 sound/pci/au88x0/au88x0_core.c 						       dma->period_bytes * p));
p                2114 sound/pci/au88x0/au88x0_core.c 	struct pcm_vol *p;
p                2247 sound/pci/au88x0/au88x0_core.c 				p = &vortex->pcm_vol[subdev];
p                2248 sound/pci/au88x0/au88x0_core.c 				p->dma = dma;
p                2250 sound/pci/au88x0/au88x0_core.c 					p->mixin[i] = mix[i];
p                2252 sound/pci/au88x0/au88x0_core.c 					p->vol[i] = 0;
p                 546 sound/pci/au88x0/au88x0_pcm.c 	struct pcm_vol *p = &vortex->pcm_vol[subdev];
p                 549 sound/pci/au88x0/au88x0_pcm.c 		ucontrol->value.integer.value[i] = p->vol[i];
p                 562 sound/pci/au88x0/au88x0_pcm.c 	struct pcm_vol *p = &vortex->pcm_vol[subdev];
p                 565 sound/pci/au88x0/au88x0_pcm.c 		if (p->vol[i] != ucontrol->value.integer.value[i]) {
p                 566 sound/pci/au88x0/au88x0_pcm.c 			p->vol[i] = ucontrol->value.integer.value[i];
p                 567 sound/pci/au88x0/au88x0_pcm.c 			if (p->active) {
p                 568 sound/pci/au88x0/au88x0_pcm.c 				switch (vortex->dma_adb[p->dma].nr_ch) {
p                 570 sound/pci/au88x0/au88x0_pcm.c 					mixin = p->mixin[0];
p                 574 sound/pci/au88x0/au88x0_pcm.c 					mixin = p->mixin[(i < 2) ? i : (i - 2)];
p                 577 sound/pci/au88x0/au88x0_pcm.c 					mixin = p->mixin[i];
p                 580 sound/pci/au88x0/au88x0_pcm.c 				vol = p->vol[i];
p                 601 sound/pci/au88x0/au88x0_pcm.c 	.tlv = { .p = vortex_pcm_vol_db_scale },
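The put callbacks indexed above (the QSound switch and space controls in sb16_csp.c, the vortex PCM volume in au88x0_pcm.c) follow the usual ALSA convention: compare and update the driver state under the driver's own lock, and return non-zero only when the value actually changed. A minimal, hedged sketch of that shape; the private state structure is a stand-in:

#include <linux/spinlock.h>
#include <sound/control.h>

struct example_state {
	spinlock_t lock;
	int enabled;
};

static int example_switch_put(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct example_state *s = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	int nval = ucontrol->value.integer.value[0] ? 1 : 0;
	int change;

	spin_lock_irqsave(&s->lock, flags);
	change = s->enabled != nval;
	s->enabled = nval;
	spin_unlock_irqrestore(&s->lock, flags);

	return change;	/* non-zero tells the ALSA core to notify listeners */
}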
p                1020 sound/pci/azt3328.c 	const char * const *p = NULL;
p                1026 sound/pci/azt3328.c 			p = texts1;
p                1029 sound/pci/azt3328.c 			p = texts2;
p                1032 sound/pci/azt3328.c 			p = texts4;
p                1036 sound/pci/azt3328.c 		p = texts3;
p                1040 sound/pci/azt3328.c 				 reg.enum_c, p);
p                 549 sound/pci/ca0106/ca0106_mixer.c 	.tlv = { .p = snd_ca0106_db_scale1 },			\
p                 630 sound/pci/ca0106/ca0106_mixer.c 	.tlv = { .p = snd_ca0106_db_scale2 },			\
p                1050 sound/pci/cs4281.c 	.tlv = { .p = db_scale_dsp },
p                1061 sound/pci/cs4281.c 	.tlv = { .p = db_scale_dsp },
p                1258 sound/pci/ctxfi/ctatc.c 	const struct snd_pci_quirk *p;
p                1281 sound/pci/ctxfi/ctatc.c 	p = snd_pci_quirk_lookup_id(vendor_id, device_id, list);
p                1282 sound/pci/ctxfi/ctatc.c 	if (p) {
p                1283 sound/pci/ctxfi/ctatc.c 		if (p->value < 0) {
p                1289 sound/pci/ctxfi/ctatc.c 		atc->model = p->value;
p                 407 sound/pci/ctxfi/ctmixer.c 	.tlv		= { .p =  ct_vol_db_scale },
p                 361 sound/pci/ctxfi/ctsrc.c 	struct src *p;
p                 364 sound/pci/ctxfi/ctsrc.c 	for (i = 0, p = src; i < n; i++, p++) {
p                 365 sound/pci/ctxfi/ctsrc.c 		err = rsc_init(&p->rsc, idx + i, SRC, desc->msr, mgr->mgr.hw);
p                 370 sound/pci/ctxfi/ctsrc.c 		p->ops = &src_rsc_ops;
p                 371 sound/pci/ctxfi/ctsrc.c 		p->multi = (0 == i) ? desc->multi : 1;
p                 372 sound/pci/ctxfi/ctsrc.c 		p->mode = desc->mode;
p                 373 sound/pci/ctxfi/ctsrc.c 		src_default_config[desc->mode](p);
p                 374 sound/pci/ctxfi/ctsrc.c 		mgr->src_enable(mgr, p);
p                 375 sound/pci/ctxfi/ctsrc.c 		p->intlv = p + 1;
p                 377 sound/pci/ctxfi/ctsrc.c 	(--p)->intlv = NULL;	/* Set @intlv of the last SRC to NULL */
p                 384 sound/pci/ctxfi/ctsrc.c 	for (i--, p--; i >= 0; i--, p--) {
p                 385 sound/pci/ctxfi/ctsrc.c 		mgr->src_disable(mgr, p);
p                 386 sound/pci/ctxfi/ctsrc.c 		rsc_uninit(&p->rsc);
p                 395 sound/pci/ctxfi/ctsrc.c 	struct src *p;
p                 398 sound/pci/ctxfi/ctsrc.c 	for (i = 0, p = src; i < n; i++, p++) {
p                 399 sound/pci/ctxfi/ctsrc.c 		mgr->src_disable(mgr, p);
p                 400 sound/pci/ctxfi/ctsrc.c 		rsc_uninit(&p->rsc);
p                 401 sound/pci/ctxfi/ctsrc.c 		p->multi = 0;
p                 402 sound/pci/ctxfi/ctsrc.c 		p->ops = NULL;
p                 403 sound/pci/ctxfi/ctsrc.c 		p->mode = NUM_SRCMODES;
p                 404 sound/pci/ctxfi/ctsrc.c 		p->intlv = NULL;
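The init/unwind loop in ctsrc.c indexed above initializes an array element by element and, on failure, rolls back only the elements that were already set up, in reverse order. A standalone illustration of that rollback idiom with a simulated failure (all names here are illustrative):

#include <stdio.h>

struct item { int ready; };

static int item_init(struct item *it, int idx)
{
	if (idx == 3)		/* simulate a failure on the 4th element */
		return -1;
	it->ready = 1;
	return 0;
}

static void item_uninit(struct item *it)
{
	it->ready = 0;
}

static int init_all(struct item *arr, int n)
{
	struct item *p;
	int i, err;

	for (i = 0, p = arr; i < n; i++, p++) {
		err = item_init(p, i);
		if (err)
			goto error;
	}
	return 0;

error:
	for (i--, p--; i >= 0; i--, p--)	/* unwind in reverse order */
		item_uninit(p);
	return err;
}

int main(void)
{
	struct item arr[5] = { { 0 } };

	printf("init_all: %d\n", init_all(arr, 5));	/* prints -1 here */
	return 0;
}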
p                1037 sound/pci/echoaudio/echoaudio.c 	.tlv = {.p = db_scale_output_gain},
p                1047 sound/pci/echoaudio/echoaudio.c 	.tlv = {.p = db_scale_output_gain},
p                1117 sound/pci/echoaudio/echoaudio.c 	.tlv = {.p = db_scale_input_gain},
p                1317 sound/pci/echoaudio/echoaudio.c 	.tlv = {.p = db_scale_output_gain},
p                1385 sound/pci/echoaudio/echoaudio.c 	.tlv = {.p = db_scale_output_gain},
p                1749 sound/pci/echoaudio/echoaudio.c 	.tlv = {.p = db_scale_output_gain},
p                 777 sound/pci/emu10k1/emufx.c 	kfree(kctl->tlv.p);
p                 823 sound/pci/emu10k1/emufx.c 		knew.tlv.p = copy_tlv((__force const unsigned int __user *)gctl->tlv, in_kernel);
p                 824 sound/pci/emu10k1/emufx.c 		if (knew.tlv.p)
p                 844 sound/pci/emu10k1/emufx.c 				kfree(knew.tlv.p);
p                 851 sound/pci/emu10k1/emufx.c 				kfree(knew.tlv.p);
p                1049 sound/pci/emu10k1/emumixer.c 	.tlv = { .p = snd_audigy_db_scale2 },			\
p                 168 sound/pci/emu10k1/memory.c 	struct list_head *p;
p                 172 sound/pci/emu10k1/memory.c 	if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
p                 173 sound/pci/emu10k1/memory.c 		q = get_emu10k1_memblk(p, mapped_link);
p                 177 sound/pci/emu10k1/memory.c 	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
p                 178 sound/pci/emu10k1/memory.c 		q = get_emu10k1_memblk(p, mapped_link);
p                 204 sound/pci/emu10k1/memory.c 	struct list_head *p;
p                 210 sound/pci/emu10k1/memory.c 	list_for_each(p, &emu->memhdr->block) {
p                 211 sound/pci/emu10k1/memory.c 		blk = get_emu10k1_memblk(p, mem.list);
p                 221 sound/pci/emu10k1/memory.c 	blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
p                 258 sound/pci/emu10k1/memory.c 	struct list_head *p, *nextp;
p                 273 sound/pci/emu10k1/memory.c 		p = emu->mapped_order_link_head.next;
p                 274 sound/pci/emu10k1/memory.c 		for (; p != &emu->mapped_order_link_head; p = nextp) {
p                 275 sound/pci/emu10k1/memory.c 			nextp = p->next;
p                 276 sound/pci/emu10k1/memory.c 			deleted = get_emu10k1_memblk(p, mapped_order_link);
p                 453 sound/pci/emu10k1/memory.c 	struct list_head *p;
p                 457 sound/pci/emu10k1/memory.c 	if ((p = blk->mem.list.prev) != &hdr->block) {
p                 458 sound/pci/emu10k1/memory.c 		q = get_emu10k1_memblk(p, mem.list);
p                 463 sound/pci/emu10k1/memory.c 	if ((p = blk->mem.list.next) != &hdr->block) {
p                 464 sound/pci/emu10k1/memory.c 		q = get_emu10k1_memblk(p, mem.list);
p                 570 sound/pci/emu10k1/memory.c 	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;
p                 581 sound/pci/emu10k1/memory.c 		ptr = offset_ptr(emu, page + p->first_page, offset);
p                 600 sound/pci/emu10k1/memory.c 	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;
p                 611 sound/pci/emu10k1/memory.c 		ptr = offset_ptr(emu, page + p->first_page, offset);
p                 811 sound/pci/emu10k1/p16v.c 	.tlv = { .p = snd_p16v_db_scale1 },	\
p                1166 sound/pci/es1938.c   .tlv = { .p = xtlv } }
p                1227 sound/pci/es1938.c   .tlv = { .p = xtlv } }
p                1353 sound/pci/es1938.c 	.tlv = { .p = db_scale_master },
p                1407 sound/pci/es1968.c 	struct list_head *p;
p                1412 sound/pci/es1968.c 	while ((p = chip->buf_list.next) != &chip->buf_list) {
p                1413 sound/pci/es1968.c 		struct esm_memory *chunk = list_entry(p, struct esm_memory, list);
p                1414 sound/pci/es1968.c 		list_del(p);
p                 894 sound/pci/fm801.c   .tlv = { .p = (xtlv) } }
p                  98 sound/pci/hda/hda_codec.c 	struct hda_conn_list *p;
p                  99 sound/pci/hda/hda_codec.c 	list_for_each_entry(p, &codec->conn_list, list) {
p                 100 sound/pci/hda/hda_codec.c 		if (p->nid == nid)
p                 101 sound/pci/hda/hda_codec.c 			return p;
p                 109 sound/pci/hda/hda_codec.c 	struct hda_conn_list *p;
p                 111 sound/pci/hda/hda_codec.c 	p = kmalloc(struct_size(p, conns, len), GFP_KERNEL);
p                 112 sound/pci/hda/hda_codec.c 	if (!p)
p                 114 sound/pci/hda/hda_codec.c 	p->len = len;
p                 115 sound/pci/hda/hda_codec.c 	p->nid = nid;
p                 116 sound/pci/hda/hda_codec.c 	memcpy(p->conns, list, len * sizeof(hda_nid_t));
p                 117 sound/pci/hda/hda_codec.c 	list_add(&p->list, &codec->conn_list);
p                 124 sound/pci/hda/hda_codec.c 		struct hda_conn_list *p;
p                 125 sound/pci/hda/hda_codec.c 		p = list_first_entry(&codec->conn_list, typeof(*p), list);
p                 126 sound/pci/hda/hda_codec.c 		list_del(&p->list);
p                 127 sound/pci/hda/hda_codec.c 		kfree(p);
p                 175 sound/pci/hda/hda_codec.c 		const struct hda_conn_list *p;
p                 178 sound/pci/hda/hda_codec.c 		p = lookup_conn_list(codec, nid);
p                 179 sound/pci/hda/hda_codec.c 		if (p) {
p                 181 sound/pci/hda/hda_codec.c 				*listp = p->conns;
p                 182 sound/pci/hda/hda_codec.c 			return p->len;
p                 241 sound/pci/hda/hda_codec.c 	struct hda_conn_list *p;
p                 243 sound/pci/hda/hda_codec.c 	p = lookup_conn_list(codec, nid);
p                 244 sound/pci/hda/hda_codec.c 	if (p) {
p                 245 sound/pci/hda/hda_codec.c 		list_del(&p->list);
p                 246 sound/pci/hda/hda_codec.c 		kfree(p);
p                 680 sound/pci/hda/hda_codec.c 	struct hda_cvt_setup *p;
p                 683 sound/pci/hda/hda_codec.c 	snd_array_for_each(&codec->cvt_setups, i, p) {
p                 684 sound/pci/hda/hda_codec.c 		if (p->nid == nid)
p                 685 sound/pci/hda/hda_codec.c 			return p;
p                 687 sound/pci/hda/hda_codec.c 	p = snd_array_new(&codec->cvt_setups);
p                 688 sound/pci/hda/hda_codec.c 	if (p)
p                 689 sound/pci/hda/hda_codec.c 		p->nid = nid;
p                 690 sound/pci/hda/hda_codec.c 	return p;
p                1033 sound/pci/hda/hda_codec.c 				 struct hda_cvt_setup *p, hda_nid_t nid,
p                1038 sound/pci/hda/hda_codec.c 	if (p->stream_tag != stream_tag || p->channel_id != channel_id) {
p                1045 sound/pci/hda/hda_codec.c 		p->stream_tag = stream_tag;
p                1046 sound/pci/hda/hda_codec.c 		p->channel_id = channel_id;
p                1051 sound/pci/hda/hda_codec.c static void update_pcm_format(struct hda_codec *codec, struct hda_cvt_setup *p,
p                1056 sound/pci/hda/hda_codec.c 	if (p->format_id != format) {
p                1065 sound/pci/hda/hda_codec.c 		p->format_id = format;
p                1082 sound/pci/hda/hda_codec.c 	struct hda_cvt_setup *p;
p                1092 sound/pci/hda/hda_codec.c 	p = get_hda_cvt_setup(codec, nid);
p                1093 sound/pci/hda/hda_codec.c 	if (!p)
p                1099 sound/pci/hda/hda_codec.c 		update_pcm_format(codec, p, nid, format);
p                1100 sound/pci/hda/hda_codec.c 	update_pcm_stream_id(codec, p, nid, stream_tag, channel_id);
p                1102 sound/pci/hda/hda_codec.c 		update_pcm_format(codec, p, nid, format);
p                1104 sound/pci/hda/hda_codec.c 	p->active = 1;
p                1105 sound/pci/hda/hda_codec.c 	p->dirty = 0;
p                1110 sound/pci/hda/hda_codec.c 		snd_array_for_each(&c->cvt_setups, i, p) {
p                1111 sound/pci/hda/hda_codec.c 			if (!p->active && p->stream_tag == stream_tag &&
p                1112 sound/pci/hda/hda_codec.c 			    get_wcaps_type(get_wcaps(c, p->nid)) == type)
p                1113 sound/pci/hda/hda_codec.c 				p->dirty = 1;
p                1131 sound/pci/hda/hda_codec.c 	struct hda_cvt_setup *p;
p                1140 sound/pci/hda/hda_codec.c 	p = get_hda_cvt_setup(codec, nid);
p                1141 sound/pci/hda/hda_codec.c 	if (p) {
p                1147 sound/pci/hda/hda_codec.c 			really_cleanup_stream(codec, p);
p                1149 sound/pci/hda/hda_codec.c 			p->active = 0;
p                1173 sound/pci/hda/hda_codec.c 	struct hda_cvt_setup *p;
p                1177 sound/pci/hda/hda_codec.c 		snd_array_for_each(&c->cvt_setups, i, p) {
p                1178 sound/pci/hda/hda_codec.c 			if (p->dirty)
p                1179 sound/pci/hda/hda_codec.c 				really_cleanup_stream(c, p);
p                1188 sound/pci/hda/hda_codec.c 	struct hda_cvt_setup *p;
p                1191 sound/pci/hda/hda_codec.c 	snd_array_for_each(&codec->cvt_setups, i, p) {
p                1192 sound/pci/hda/hda_codec.c 		if (p->stream_tag)
p                1193 sound/pci/hda/hda_codec.c 			really_cleanup_stream(codec, p);
p                1878 sound/pci/hda/hda_codec.c 		tlv = kctl->tlv.p;
p                3425 sound/pci/hda/hda_codec.c 	const struct hda_amp_list *p;
p                3430 sound/pci/hda/hda_codec.c 	for (p = check->amplist; p->nid; p++) {
p                3431 sound/pci/hda/hda_codec.c 		if (p->nid == nid)
p                3434 sound/pci/hda/hda_codec.c 	if (!p->nid)
p                3437 sound/pci/hda/hda_codec.c 	for (p = check->amplist; p->nid; p++) {
p                3439 sound/pci/hda/hda_codec.c 			v = snd_hda_codec_amp_read(codec, p->nid, ch, p->dir,
p                3440 sound/pci/hda/hda_codec.c 						   p->idx);
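The hda_codec.c entries above include a small per-codec cache of connection lists keyed by NID: look up an existing entry on the list, otherwise allocate one whose flexible array is sized with struct_size() and link it in. A hedged, stripped-down sketch of that structure; everything except the list and allocation helpers is a stand-in:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

typedef u16 example_nid_t;	/* stand-in for hda_nid_t */

struct conn_entry {
	struct list_head list;
	example_nid_t nid;
	int len;
	example_nid_t conns[];	/* flexible array of connected NIDs */
};

static struct conn_entry *conn_lookup(struct list_head *cache, example_nid_t nid)
{
	struct conn_entry *p;

	list_for_each_entry(p, cache, list)
		if (p->nid == nid)
			return p;
	return NULL;
}

static int conn_add(struct list_head *cache, example_nid_t nid,
		    const example_nid_t *list, int len)
{
	struct conn_entry *p;

	p = kmalloc(struct_size(p, conns, len), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->nid = nid;
	p->len = len;
	memcpy(p->conns, list, len * sizeof(example_nid_t));
	list_add(&p->list, cache);
	return 0;
}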
p                5667 sound/pci/hda/hda_generic.c 	char *p;
p                5674 sound/pci/hda/hda_generic.c 	for (p = strchr(str, ' '); p; p = strchr(p + 1, ' ')) {
p                5675 sound/pci/hda/hda_generic.c 		if (!isalnum(p[1])) {
p                5676 sound/pci/hda/hda_generic.c 			*p = 0;
p                1285 sound/pci/hda/hda_intel.c 	struct pci_dev *p = get_bound_vga(chip->pci);
p                1287 sound/pci/hda/hda_intel.c 	if (p) {
p                1295 sound/pci/hda/hda_intel.c 		parent = pci_upstream_bridge(p);
p                1298 sound/pci/hda/hda_intel.c 		pci_dev_put(p);
p                1311 sound/pci/hda/hda_intel.c 	struct pci_dev *p;
p                1317 sound/pci/hda/hda_intel.c 	p = get_bound_vga(chip->pci);
p                1318 sound/pci/hda/hda_intel.c 	err = vga_switcheroo_register_audio_client(chip->pci, &azx_vs_ops, p);
p                1319 sound/pci/hda/hda_intel.c 	pci_dev_put(p);
p                1416 sound/pci/hda/hda_intel.c 	struct pci_dev *p;
p                1424 sound/pci/hda/hda_intel.c 			p = pci_get_domain_bus_and_slot(pci_domain_nr(pci->bus),
p                1426 sound/pci/hda/hda_intel.c 			if (p) {
p                1427 sound/pci/hda/hda_intel.c 				if ((p->class >> 16) == PCI_BASE_CLASS_DISPLAY)
p                1428 sound/pci/hda/hda_intel.c 					return p;
p                1429 sound/pci/hda/hda_intel.c 				pci_dev_put(p);
p                1440 sound/pci/hda/hda_intel.c 	struct pci_dev *p = get_bound_vga(pci);
p                1442 sound/pci/hda/hda_intel.c 	if (p) {
p                1443 sound/pci/hda/hda_intel.c 		if (vga_switcheroo_get_client_state(p) == VGA_SWITCHEROO_OFF)
p                1445 sound/pci/hda/hda_intel.c 		pci_dev_put(p);
p                 477 sound/pci/hda/hda_jack.c 	const hda_nid_t *p;
p                 497 sound/pci/hda/hda_jack.c 	for (i = 0, p = cfg->line_out_pins; i < cfg->line_outs; i++, p++) {
p                 498 sound/pci/hda/hda_jack.c 		err = add_jack_kctl(codec, *p, cfg, NULL);
p                 502 sound/pci/hda/hda_jack.c 	for (i = 0, p = cfg->hp_pins; i < cfg->hp_outs; i++, p++) {
p                 503 sound/pci/hda/hda_jack.c 		if (*p == *cfg->line_out_pins) /* might be duplicated */
p                 505 sound/pci/hda/hda_jack.c 		err = add_jack_kctl(codec, *p, cfg, NULL);
p                 509 sound/pci/hda/hda_jack.c 	for (i = 0, p = cfg->speaker_pins; i < cfg->speaker_outs; i++, p++) {
p                 510 sound/pci/hda/hda_jack.c 		if (*p == *cfg->line_out_pins) /* might be duplicated */
p                 512 sound/pci/hda/hda_jack.c 		err = add_jack_kctl(codec, *p, cfg, NULL);
p                 516 sound/pci/hda/hda_jack.c 	for (i = 0, p = cfg->dig_out_pins; i < cfg->dig_outs; i++, p++) {
p                 517 sound/pci/hda/hda_jack.c 		err = add_jack_kctl(codec, *p, cfg, NULL);
p                 157 sound/pci/hda/hda_sysfs.c 	char *p;
p                 160 sound/pci/hda/hda_sysfs.c 	p = strchr(s, '\n');
p                 161 sound/pci/hda/hda_sysfs.c 	if (p)
p                 162 sound/pci/hda/hda_sysfs.c 		*p = 0;
p                 296 sound/pci/hda/hda_sysfs.c 	char *p;
p                 299 sound/pci/hda/hda_sysfs.c 	p = str + strlen(str) - 1;
p                 300 sound/pci/hda/hda_sysfs.c 	for (; isspace(*p); p--) {
p                 301 sound/pci/hda/hda_sysfs.c 		*p = 0;
p                 302 sound/pci/hda/hda_sysfs.c 		if (p == str)
p                 439 sound/pci/hda/hda_sysfs.c 	const char *p;
p                 443 sound/pci/hda/hda_sysfs.c 	p = snd_hda_get_hint(codec, key);
p                 444 sound/pci/hda/hda_sysfs.c 	if (!p || !*p)
p                 447 sound/pci/hda/hda_sysfs.c 		switch (toupper(*p)) {
p                 475 sound/pci/hda/hda_sysfs.c 	const char *p;
p                 480 sound/pci/hda/hda_sysfs.c 	p = snd_hda_get_hint(codec, key);
p                 481 sound/pci/hda/hda_sysfs.c 	if (!p)
p                 483 sound/pci/hda/hda_sysfs.c 	else if (kstrtoul(p, 0, &val))
p                 682 sound/pci/hda/hda_sysfs.c 	const char *p = *fw_data_p;
p                 684 sound/pci/hda/hda_sysfs.c 	while (isspace(*p) && fw_size) {
p                 685 sound/pci/hda/hda_sysfs.c 		p++;
p                 692 sound/pci/hda/hda_sysfs.c 		if (!*p)
p                 694 sound/pci/hda/hda_sysfs.c 		if (*p == '\n') {
p                 695 sound/pci/hda/hda_sysfs.c 			p++;
p                 700 sound/pci/hda/hda_sysfs.c 			*buf++ = *p++;
p                 704 sound/pci/hda/hda_sysfs.c 	*fw_data_p = p;
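The hda_sysfs.c helpers indexed above trim a hint string in place before parsing it: chop a trailing newline, then strip trailing whitespace. A standalone version of those two trims using only the C library (names are illustrative):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static void chop_newline(char *s)
{
	char *p = strchr(s, '\n');

	if (p)
		*p = 0;
}

static void strip_trailing_space(char *str)
{
	char *p;

	if (!*str)
		return;
	p = str + strlen(str) - 1;
	for (; isspace((unsigned char)*p); p--) {
		*p = 0;
		if (p == str)
			break;
	}
}

int main(void)
{
	char buf[] = "hint = on  \n";

	chop_newline(buf);
	strip_trailing_space(buf);
	printf("[%s]\n", buf);	/* prints [hint = on] */
	return 0;
}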
p                2707 sound/pci/hda/patch_ca0132.c static bool is_valid(const struct dsp_image_seg *p)
p                2709 sound/pci/hda/patch_ca0132.c 	return p->magic == g_magic_value;
p                2712 sound/pci/hda/patch_ca0132.c static bool is_hci_prog_list_seg(const struct dsp_image_seg *p)
p                2714 sound/pci/hda/patch_ca0132.c 	return g_chip_addr_magic_value == p->chip_addr;
p                2717 sound/pci/hda/patch_ca0132.c static bool is_last(const struct dsp_image_seg *p)
p                2719 sound/pci/hda/patch_ca0132.c 	return p->count == 0;
p                2722 sound/pci/hda/patch_ca0132.c static size_t dsp_sizeof(const struct dsp_image_seg *p)
p                2724 sound/pci/hda/patch_ca0132.c 	return struct_size(p, data, p->count);
p                2728 sound/pci/hda/patch_ca0132.c 				const struct dsp_image_seg *p)
p                2730 sound/pci/hda/patch_ca0132.c 	return (struct dsp_image_seg *)((unsigned char *)(p) + dsp_sizeof(p));
p                3985 sound/pci/hda/patch_ca0132.c 	knew.tlv.p = 0;
p                3991 sound/pci/hda/patch_ca0132.c 		knew.tlv.p = voice_focus_db_scale;
p                4002 sound/pci/hda/patch_ca0132.c 		knew.tlv.p = eq_db_scale;
p                 983 sound/pci/hda/patch_cirrus.c 	.tlv = { .p = cs421x_speaker_boost_db_scale },
p                 966 sound/pci/hda/patch_conexant.c 	const hda_nid_t *p;
p                 968 sound/pci/hda/patch_conexant.c 	for (p = out_nids; *p; p++)
p                 969 sound/pci/hda/patch_conexant.c 		snd_hda_override_amp_caps(codec, *p, HDA_OUTPUT,
p                 971 sound/pci/hda/patch_conexant.c 					  query_amp_caps(codec, *p, HDA_OUTPUT));
p                 480 sound/pci/hda/patch_realtek.c 	const hda_nid_t *p;
p                 481 sound/pci/hda/patch_realtek.c 	for (p = pins; *p; p++)
p                 482 sound/pci/hda/patch_realtek.c 		set_eapd(codec, *p, on);
p                1016 sound/pci/hda/patch_realtek.c 	const struct alc_codec_rename_table *p;
p                1019 sound/pci/hda/patch_realtek.c 	for (p = rename_tbl; p->vendor_id; p++) {
p                1020 sound/pci/hda/patch_realtek.c 		if (p->vendor_id != codec->core.vendor_id)
p                1022 sound/pci/hda/patch_realtek.c 		if ((alc_get_coef0(codec) & p->coef_mask) == p->coef_bits)
p                1023 sound/pci/hda/patch_realtek.c 			return alc_codec_rename(codec, p->name);
p                4784 sound/pci/hda/patch_sigmatel.c 		const hda_nid_t *p;
p                4785 sound/pci/hda/patch_sigmatel.c 		for (p = unmute_nids; *p; p++)
p                4786 sound/pci/hda/patch_sigmatel.c 			snd_hda_codec_amp_init_stereo(codec, *p, HDA_INPUT, 0,
p                 294 sound/pci/hda/patch_via.c 	const struct hda_amp_list *p;
p                 297 sound/pci/hda/patch_via.c 	p = spec->gen.loopback.amplist;
p                 298 sound/pci/hda/patch_via.c 	if (!p)
p                 300 sound/pci/hda/patch_via.c 	for (; p->nid; p++) {
p                 302 sound/pci/hda/patch_via.c 			v = snd_hda_codec_amp_read(codec, p->nid, ch, p->dir,
p                 303 sound/pci/hda/patch_via.c 						   p->idx);
p                1408 sound/pci/ice1712/aureon.c 		.tlv = { .p = db_scale_wm_dac }
p                1427 sound/pci/ice1712/aureon.c 		.tlv = { .p = db_scale_wm_dac }
p                1446 sound/pci/ice1712/aureon.c 		.tlv = { .p = db_scale_wm_dac }
p                1465 sound/pci/ice1712/aureon.c 		.tlv = { .p = db_scale_wm_dac }
p                1484 sound/pci/ice1712/aureon.c 		.tlv = { .p = db_scale_wm_dac }
p                1503 sound/pci/ice1712/aureon.c 		.tlv = { .p = db_scale_wm_dac }
p                1523 sound/pci/ice1712/aureon.c 		.tlv = { .p = db_scale_wm_pcm }
p                1540 sound/pci/ice1712/aureon.c 		.tlv = { .p = db_scale_wm_adc }
p                1591 sound/pci/ice1712/aureon.c 		.tlv = { .p = db_scale_ac97_master }
p                1610 sound/pci/ice1712/aureon.c 		.tlv = { .p = db_scale_ac97_gain }
p                1629 sound/pci/ice1712/aureon.c 		.tlv = { .p = db_scale_ac97_gain }
p                1648 sound/pci/ice1712/aureon.c 		.tlv = { .p = db_scale_ac97_gain }
p                1667 sound/pci/ice1712/aureon.c 		.tlv = { .p = db_scale_ac97_gain }
p                1696 sound/pci/ice1712/aureon.c 		.tlv = { .p = db_scale_ac97_master }
p                1715 sound/pci/ice1712/aureon.c 		.tlv = { .p = db_scale_ac97_gain }
p                1734 sound/pci/ice1712/aureon.c 		.tlv = { .p = db_scale_ac97_gain }
p                1753 sound/pci/ice1712/aureon.c 		.tlv = { .p = db_scale_ac97_gain }
p                1772 sound/pci/ice1712/aureon.c 		.tlv = { .p = db_scale_ac97_gain }
p                1798 sound/pci/ice1712/aureon.c 		.tlv = { .p = db_scale_ac97_gain }
p                2009 sound/pci/ice1712/aureon.c 	const unsigned short *p;
p                2038 sound/pci/ice1712/aureon.c 		p = wm_inits_prodigy;
p                2040 sound/pci/ice1712/aureon.c 		p = wm_inits_aureon;
p                2041 sound/pci/ice1712/aureon.c 	for (; *p != (unsigned short)-1; p += 2)
p                2042 sound/pci/ice1712/aureon.c 		wm_put(ice, p[0], p[1]);
p                2047 sound/pci/ice1712/aureon.c 		for (p = cs_inits; *p != (unsigned short)-1; p++)
p                2048 sound/pci/ice1712/aureon.c 			aureon_spi_write(ice, AUREON_CS8415_CS, *p | 0x200000, 24);
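aureon.c above, and phase.c, prodigy192.c and wtm.c further down, all walk a register/value init table terminated by (unsigned short)-1, two entries at a time. A self-contained illustration of that table-driven init with a stand-in write function and illustrative register values:

#include <stdio.h>

static void codec_put(unsigned short reg, unsigned short val)
{
	printf("write reg 0x%02x = 0x%04x\n", reg, val);	/* stand-in for wm_put()/stac9460_put() */
}

static const unsigned short codec_inits[] = {
	0x00, 0x0000,		/* illustrative register/value pairs */
	0x01, 0x0010,
	0x02, 0x0101,
	(unsigned short)-1	/* table terminator */
};

int main(void)
{
	const unsigned short *p;

	for (p = codec_inits; *p != (unsigned short)-1; p += 2)
		codec_put(p[0], p[1]);
	return 0;
}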
p                1386 sound/pci/ice1712/ice1712.c 		.tlv = { .p = db_scale_playback }
p                1418 sound/pci/ice1712/ice1712.c 	.tlv = { .p = db_scale_playback }
p                 436 sound/pci/ice1712/maya44.c 		.tlv = { .p = db_scale_hp },
p                 448 sound/pci/ice1712/maya44.c 		.tlv = { .p = db_scale_dac },
p                 460 sound/pci/ice1712/maya44.c 		.tlv = { .p = db_scale_adc },
p                 409 sound/pci/ice1712/phase.c 	const unsigned short *p;
p                 445 sound/pci/ice1712/phase.c 	p = wm_inits_phase28;
p                 446 sound/pci/ice1712/phase.c 	for (; *p != (unsigned short)-1; p += 2)
p                 447 sound/pci/ice1712/phase.c 		wm_put(ice, p[0], p[1]);
p                 764 sound/pci/ice1712/phase.c 		.tlv = { .p = db_scale_wm_dac }
p                 783 sound/pci/ice1712/phase.c 		.tlv = { .p = db_scale_wm_dac }
p                 802 sound/pci/ice1712/phase.c 		.tlv = { .p = db_scale_wm_dac }
p                 821 sound/pci/ice1712/phase.c 		.tlv = { .p = db_scale_wm_dac }
p                 840 sound/pci/ice1712/phase.c 		.tlv = { .p = db_scale_wm_dac }
p                 859 sound/pci/ice1712/phase.c 		.tlv = { .p = db_scale_wm_dac }
p                 879 sound/pci/ice1712/phase.c 		.tlv = { .p = db_scale_wm_pcm }
p                 541 sound/pci/ice1712/pontis.c 		.tlv = { .p = db_scale_volume },
p                 551 sound/pci/ice1712/pontis.c 		.tlv = { .p = db_scale_volume },
p                 357 sound/pci/ice1712/prodigy192.c 		.tlv = { .p = db_scale_dac }
p                 368 sound/pci/ice1712/prodigy192.c 		.tlv = { .p = db_scale_dac }
p                 387 sound/pci/ice1712/prodigy192.c 		.tlv = { .p = db_scale_dac }
p                 407 sound/pci/ice1712/prodigy192.c 		.tlv = { .p = db_scale_adc }
p                 715 sound/pci/ice1712/prodigy192.c 	const unsigned short *p;
p                 731 sound/pci/ice1712/prodigy192.c 	p = stac_inits_prodigy;
p                 732 sound/pci/ice1712/prodigy192.c 	for (; *p != (unsigned short)-1; p += 2)
p                 733 sound/pci/ice1712/prodigy192.c 		stac9460_put(ice, p[0], p[1]);
p                 296 sound/pci/ice1712/prodigy_hifi.c 	.tlv = { .p = ak4396_db_scale },
p                 753 sound/pci/ice1712/prodigy_hifi.c 		.tlv = { .p = db_scale_wm_dac }
p                 763 sound/pci/ice1712/prodigy_hifi.c 		.tlv = { .p = db_scale_wm_dac },
p                 774 sound/pci/ice1712/prodigy_hifi.c 		.tlv = { .p = db_scale_wm_dac },
p                 785 sound/pci/ice1712/prodigy_hifi.c 		.tlv = { .p = db_scale_wm_dac }
p                 796 sound/pci/ice1712/prodigy_hifi.c 		.tlv = { .p = db_scale_wm_dac }
p                 807 sound/pci/ice1712/prodigy_hifi.c 		.tlv = { .p = db_scale_wm_dac },
p                 817 sound/pci/ice1712/prodigy_hifi.c 		.tlv = { .p = db_scale_wm_dac },
p                 593 sound/pci/ice1712/se.c 		cont.tlv.p = NULL;
p                 602 sound/pci/ice1712/se.c 				cont.tlv.p = db_scale_gain1;
p                 604 sound/pci/ice1712/se.c 				cont.tlv.p = db_scale_gain2;
p                 289 sound/pci/ice1712/wm8766.c 	cont.tlv.p = NULL;
p                 297 sound/pci/ice1712/wm8766.c 		cont.tlv.p = wm->ctl[num].tlv;
p                 563 sound/pci/ice1712/wm8776.c 	cont.tlv.p = NULL;
p                 571 sound/pci/ice1712/wm8776.c 		cont.tlv.p = wm->ctl[num].tlv;
p                 490 sound/pci/ice1712/wtm.c 		.tlv = { .p = db_scale_dac }
p                 527 sound/pci/ice1712/wtm.c 		.tlv = { .p = db_scale_dac }
p                 547 sound/pci/ice1712/wtm.c 		.tlv = { .p = db_scale_adc }
p                 575 sound/pci/ice1712/wtm.c 	unsigned short *p;
p                 592 sound/pci/ice1712/wtm.c 	p = stac_inits_wtm;
p                 593 sound/pci/ice1712/wtm.c 	for (; *p != (unsigned short)-1; p += 2) {
p                 594 sound/pci/ice1712/wtm.c 		stac9460_put(ice, p[0], p[1]);
p                 595 sound/pci/ice1712/wtm.c 		stac9460_2_put(ice, p[0], p[1]);
p                 724 sound/pci/lola/lola_mixer.c 	.tlv.p = lola_src_gain_tlv,
p                 811 sound/pci/lola/lola_mixer.c 	.tlv.p = lola_dest_gain_tlv,
p                 402 sound/pci/mixart/mixart_mixer.c 	.tlv = { .p = db_scale_analog },
p                 897 sound/pci/mixart/mixart_mixer.c 	.tlv = { .p = db_scale_digital },
p                1022 sound/pci/mixart/mixart_mixer.c 	.tlv = { .p = db_scale_digital },
p                 746 sound/pci/oxygen/oxygen_mixer.c 		.tlv = { .p = ac97_db_scale, }, \
p                 875 sound/pci/oxygen/oxygen_mixer.c 				.tlv = { .p = monitor_db_scale, },
p                 900 sound/pci/oxygen/oxygen_mixer.c 				.tlv = { .p = monitor_db_scale, },
p                 927 sound/pci/oxygen/oxygen_mixer.c 				.tlv = { .p = monitor_db_scale, },
p                 954 sound/pci/oxygen/oxygen_mixer.c 				.tlv = { .p = monitor_db_scale, },
p                 979 sound/pci/oxygen/oxygen_mixer.c 				.tlv = { .p = monitor_db_scale, },
p                1014 sound/pci/oxygen/oxygen_mixer.c 		.tlv = { .p = ac97_rec_db_scale, },
p                1068 sound/pci/oxygen/oxygen_mixer.c 			template.tlv.p = chip->model.dac_tlv;
p                 365 sound/pci/oxygen/xonar_dg_mixer.c 	.tlv = { .p = pga_db_scale }, \
p                 386 sound/pci/oxygen/xonar_dg_mixer.c 		.tlv = { .p = hp_db_scale, },
p                 998 sound/pci/oxygen/xonar_wm87x6.c 	.tlv = { .p = tlv_p }, \
p                1017 sound/pci/oxygen/xonar_wm87x6.c 		.tlv = { .p = wm8776_hp_db_scale },
p                1027 sound/pci/oxygen/xonar_wm87x6.c 		.tlv = { .p = wm8776_adc_db_scale },
p                1080 sound/pci/oxygen/xonar_wm87x6.c 		.tlv = { .p = wm8776_hp_db_scale },
p                1090 sound/pci/oxygen/xonar_wm87x6.c 		.tlv = { .p = wm8776_adc_db_scale },
p                 742 sound/pci/pcxhr/pcxhr_mix22.c 	.tlv = { .p = db_scale_mic_hr222 },
p                 792 sound/pci/pcxhr/pcxhr_mix22.c 	.tlv = { .p = db_scale_micboost_hr222 },
p                 409 sound/pci/pcxhr/pcxhr_mixer.c 	.tlv = { .p = db_scale_digital },
p                 507 sound/pci/pcxhr/pcxhr_mixer.c 	.tlv = { .p = db_scale_digital },
p                1120 sound/pci/pcxhr/pcxhr_mixer.c 				temp.tlv.p = db_scale_a_hr222_playback;
p                1122 sound/pci/pcxhr/pcxhr_mixer.c 				temp.tlv.p = db_scale_analog_playback;
p                1168 sound/pci/pcxhr/pcxhr_mixer.c 				temp.tlv.p = db_scale_a_hr222_capture;
p                1170 sound/pci/pcxhr/pcxhr_mixer.c 				temp.tlv.p = db_scale_analog_capture;
p                 166 sound/pci/riptide/riptide.c #define READ_PORT_ULONG(p)     inl((unsigned long)&(p))
p                 167 sound/pci/riptide/riptide.c #define WRITE_PORT_ULONG(p,x)  outl(x,(unsigned long)&(p))
p                 169 sound/pci/riptide/riptide.c #define READ_AUDIO_CONTROL(p)     READ_PORT_ULONG(p->audio_control)
p                 170 sound/pci/riptide/riptide.c #define WRITE_AUDIO_CONTROL(p,x)  WRITE_PORT_ULONG(p->audio_control,x)
p                 171 sound/pci/riptide/riptide.c #define UMASK_AUDIO_CONTROL(p,x)  WRITE_PORT_ULONG(p->audio_control,READ_PORT_ULONG(p->audio_control)|x)
p                 172 sound/pci/riptide/riptide.c #define MASK_AUDIO_CONTROL(p,x)   WRITE_PORT_ULONG(p->audio_control,READ_PORT_ULONG(p->audio_control)&x)
p                 173 sound/pci/riptide/riptide.c #define READ_AUDIO_STATUS(p)      READ_PORT_ULONG(p->audio_status)
p                 175 sound/pci/riptide/riptide.c #define SET_GRESET(p)     UMASK_AUDIO_CONTROL(p,0x0001)	/* global reset switch */
p                 176 sound/pci/riptide/riptide.c #define UNSET_GRESET(p)   MASK_AUDIO_CONTROL(p,~0x0001)
p                 177 sound/pci/riptide/riptide.c #define SET_AIE(p)        UMASK_AUDIO_CONTROL(p,0x0004)	/* interrupt enable */
p                 178 sound/pci/riptide/riptide.c #define UNSET_AIE(p)      MASK_AUDIO_CONTROL(p,~0x0004)
p                 179 sound/pci/riptide/riptide.c #define SET_AIACK(p)      UMASK_AUDIO_CONTROL(p,0x0008)	/* interrupt acknowledge */
p                 180 sound/pci/riptide/riptide.c #define UNSET_AIACKT(p)   MASKAUDIO_CONTROL(p,~0x0008)
p                 181 sound/pci/riptide/riptide.c #define SET_ECMDAE(p)     UMASK_AUDIO_CONTROL(p,0x0010)
p                 182 sound/pci/riptide/riptide.c #define UNSET_ECMDAE(p)   MASK_AUDIO_CONTROL(p,~0x0010)
p                 183 sound/pci/riptide/riptide.c #define SET_ECMDBE(p)     UMASK_AUDIO_CONTROL(p,0x0020)
p                 184 sound/pci/riptide/riptide.c #define UNSET_ECMDBE(p)   MASK_AUDIO_CONTROL(p,~0x0020)
p                 185 sound/pci/riptide/riptide.c #define SET_EDATAF(p)     UMASK_AUDIO_CONTROL(p,0x0040)
p                 186 sound/pci/riptide/riptide.c #define UNSET_EDATAF(p)   MASK_AUDIO_CONTROL(p,~0x0040)
p                 187 sound/pci/riptide/riptide.c #define SET_EDATBF(p)     UMASK_AUDIO_CONTROL(p,0x0080)
p                 188 sound/pci/riptide/riptide.c #define UNSET_EDATBF(p)   MASK_AUDIO_CONTROL(p,~0x0080)
p                 189 sound/pci/riptide/riptide.c #define SET_ESBIRQON(p)   UMASK_AUDIO_CONTROL(p,0x0100)
p                 190 sound/pci/riptide/riptide.c #define UNSET_ESBIRQON(p) MASK_AUDIO_CONTROL(p,~0x0100)
p                 191 sound/pci/riptide/riptide.c #define SET_EMPUIRQ(p)    UMASK_AUDIO_CONTROL(p,0x0200)
p                 192 sound/pci/riptide/riptide.c #define UNSET_EMPUIRQ(p)  MASK_AUDIO_CONTROL(p,~0x0200)
p                 195 sound/pci/riptide/riptide.c #define IS_READY(p)       (READ_AUDIO_STATUS(p)&0x0001)
p                 196 sound/pci/riptide/riptide.c #define IS_DLREADY(p)     (READ_AUDIO_STATUS(p)&0x0002)
p                 197 sound/pci/riptide/riptide.c #define IS_DLERR(p)       (READ_AUDIO_STATUS(p)&0x0004)
p                 198 sound/pci/riptide/riptide.c #define IS_GERR(p)        (READ_AUDIO_STATUS(p)&0x0008)	/* error ! */
p                 199 sound/pci/riptide/riptide.c #define IS_CMDAEIRQ(p)    (READ_AUDIO_STATUS(p)&0x0010)
p                 200 sound/pci/riptide/riptide.c #define IS_CMDBEIRQ(p)    (READ_AUDIO_STATUS(p)&0x0020)
p                 201 sound/pci/riptide/riptide.c #define IS_DATAFIRQ(p)    (READ_AUDIO_STATUS(p)&0x0040)
p                 202 sound/pci/riptide/riptide.c #define IS_DATBFIRQ(p)    (READ_AUDIO_STATUS(p)&0x0080)
p                 203 sound/pci/riptide/riptide.c #define IS_EOBIRQ(p)      (READ_AUDIO_STATUS(p)&0x0100)	/* interrupt status */
p                 204 sound/pci/riptide/riptide.c #define IS_EOSIRQ(p)      (READ_AUDIO_STATUS(p)&0x0200)
p                 205 sound/pci/riptide/riptide.c #define IS_EOCIRQ(p)      (READ_AUDIO_STATUS(p)&0x0400)
p                 206 sound/pci/riptide/riptide.c #define IS_UNSLIRQ(p)     (READ_AUDIO_STATUS(p)&0x0800)
p                 207 sound/pci/riptide/riptide.c #define IS_SBIRQ(p)       (READ_AUDIO_STATUS(p)&0x1000)
p                 208 sound/pci/riptide/riptide.c #define IS_MPUIRQ(p)      (READ_AUDIO_STATUS(p)&0x2000)
p                 229 sound/pci/riptide/riptide.c #define SEND_GETV(p,b)             sendcmd(p,RESP,GETV,0,RET(b))	/* get version */
p                 230 sound/pci/riptide/riptide.c #define SEND_GETC(p,b,c)           sendcmd(p,PARM|RESP,GETC,c,RET(b))
p                 231 sound/pci/riptide/riptide.c #define SEND_GUNS(p,b)             sendcmd(p,RESP,GUNS,0,RET(b))
p                 232 sound/pci/riptide/riptide.c #define SEND_SCID(p,b)             sendcmd(p,RESP,SCID,0,RET(b))
p                 233 sound/pci/riptide/riptide.c #define SEND_RMEM(p,b,c,d)         sendcmd(p,PARM|RESP,RMEM|BYTE1(b),LONG0(c),RET(d))	/* memory access for firmware write */
p                 234 sound/pci/riptide/riptide.c #define SEND_SMEM(p,b,c)           sendcmd(p,PARM,SMEM|BYTE1(b),LONG0(c),RET(0))	/* memory access for firmware write */
p                 235 sound/pci/riptide/riptide.c #define SEND_WMEM(p,b,c)           sendcmd(p,PARM,WMEM|BYTE1(b),LONG0(c),RET(0))	/* memory access for firmware write */
p                 236 sound/pci/riptide/riptide.c #define SEND_SDTM(p,b,c)           sendcmd(p,PARM|RESP,SDTM|TRINIB1(b),0,RET(c))	/* memory access for firmware write */
p                 237 sound/pci/riptide/riptide.c #define SEND_GOTO(p,b)             sendcmd(p,PARM,GOTO,LONG0(b),RET(0))	/* memory access for firmware write */
p                 238 sound/pci/riptide/riptide.c #define SEND_SETDPLL(p)	           sendcmd(p,0,ARM_SETDPLL,0,RET(0))
p                 239 sound/pci/riptide/riptide.c #define SEND_SSTR(p,b,c)           sendcmd(p,PARM,SSTR|BYTE3(b),LONG0(c),RET(0))	/* start stream */
p                 240 sound/pci/riptide/riptide.c #define SEND_PSTR(p,b)             sendcmd(p,PARM,PSTR,BYTE3(b),RET(0))	/* pause stream */
p                 241 sound/pci/riptide/riptide.c #define SEND_KSTR(p,b)             sendcmd(p,PARM,KSTR,BYTE3(b),RET(0))	/* stop stream */
p                 242 sound/pci/riptide/riptide.c #define SEND_KDMA(p)               sendcmd(p,0,KDMA,0,RET(0))	/* stop all dma */
p                 243 sound/pci/riptide/riptide.c #define SEND_GPOS(p,b,c,d)         sendcmd(p,PARM|RESP,GPOS,BYTE3(c)|BYTE2(b),RET(d))	/* get position in dma */
p                 244 sound/pci/riptide/riptide.c #define SEND_SETF(p,b,c,d,e,f,g)   sendcmd(p,PARM,SETF|WORD1(b)|BYTE3(c),d|BYTE1(e)|BYTE2(f)|BYTE3(g),RET(0))	/* set sample format at mixer */
p                 245 sound/pci/riptide/riptide.c #define SEND_GSTS(p,b,c,d)         sendcmd(p,PARM|RESP,GSTS,BYTE3(c)|BYTE2(b),RET(d))
p                 246 sound/pci/riptide/riptide.c #define SEND_NGPOS(p,b,c,d)        sendcmd(p,PARM|RESP,NGPOS,BYTE3(c)|BYTE2(b),RET(d))
p                 247 sound/pci/riptide/riptide.c #define SEND_PSEL(p,b,c)           sendcmd(p,PARM,PSEL,BYTE2(b)|BYTE3(c),RET(0))	/* activate lbus path */
p                 248 sound/pci/riptide/riptide.c #define SEND_PCLR(p,b,c)           sendcmd(p,PARM,PCLR,BYTE2(b)|BYTE3(c),RET(0))	/* deactivate lbus path */
p                 249 sound/pci/riptide/riptide.c #define SEND_PLST(p,b)             sendcmd(p,PARM,PLST,BYTE3(b),RET(0))
p                 250 sound/pci/riptide/riptide.c #define SEND_RSSV(p,b,c,d)         sendcmd(p,PARM|RESP,RSSV,BYTE2(b)|BYTE3(c),RET(d))
p                 251 sound/pci/riptide/riptide.c #define SEND_LSEL(p,b,c,d,e,f,g,h) sendcmd(p,PARM,LSEL|BYTE1(b)|BYTE2(c)|BYTE3(d),BYTE0(e)|BYTE1(f)|BYTE2(g)|BYTE3(h),RET(0))	/* select paths for internal connections */
p                 252 sound/pci/riptide/riptide.c #define SEND_SSRC(p,b,c,d,e)       sendcmd(p,PARM,SSRC|BYTE1(b)|WORD2(c),WORD0(d)|WORD2(e),RET(0))	/* configure source */
p                 253 sound/pci/riptide/riptide.c #define SEND_SLST(p,b)             sendcmd(p,PARM,SLST,BYTE3(b),RET(0))
p                 254 sound/pci/riptide/riptide.c #define SEND_RSRC(p,b,c)           sendcmd(p,RESP,RSRC|BYTE1(b),0,RET(c))	/* read source config */
p                 255 sound/pci/riptide/riptide.c #define SEND_SSRB(p,b,c)           sendcmd(p,PARM,SSRB|BYTE1(b),WORD2(c),RET(0))
p                 256 sound/pci/riptide/riptide.c #define SEND_SDGV(p,b,c,d,e)       sendcmd(p,PARM,SDGV|BYTE2(b)|BYTE3(c),WORD0(d)|WORD2(e),RET(0))	/* set digital mixer */
p                 257 sound/pci/riptide/riptide.c #define SEND_RDGV(p,b,c,d)         sendcmd(p,PARM|RESP,RDGV|BYTE2(b)|BYTE3(c),0,RET(d))	/* read digital mixer */
p                 258 sound/pci/riptide/riptide.c #define SEND_DLST(p,b)             sendcmd(p,PARM,DLST,BYTE3(b),RET(0))
p                 259 sound/pci/riptide/riptide.c #define SEND_SACR(p,b,c)           sendcmd(p,PARM,SACR,WORD0(b)|WORD2(c),RET(0))	/* set AC97 register */
p                 260 sound/pci/riptide/riptide.c #define SEND_RACR(p,b,c)           sendcmd(p,PARM|RESP,RACR,WORD2(b),RET(c))	/* get AC97 register */
p                 261 sound/pci/riptide/riptide.c #define SEND_ALST(p,b)             sendcmd(p,PARM,ALST,BYTE3(b),RET(0))
p                 262 sound/pci/riptide/riptide.c #define SEND_TXAC(p,b,c,d,e,f)     sendcmd(p,PARM,TXAC|BYTE1(b)|WORD2(c),WORD0(d)|BYTE2(e)|BYTE3(f),RET(0))
p                 263 sound/pci/riptide/riptide.c #define SEND_RXAC(p,b,c,d)         sendcmd(p,PARM|RESP,RXAC,BYTE2(b)|BYTE3(c),RET(d))
p                 264 sound/pci/riptide/riptide.c #define SEND_SI2S(p,b)             sendcmd(p,PARM,SI2S,WORD2(b),RET(0))
p                 675 sound/pci/riptide/riptide.c 	const unsigned char *p;
p                 681 sound/pci/riptide/riptide.c 	p = in + 9;
p                 683 sound/pci/riptide/riptide.c 		data = atoh(p, 8);
p                 689 sound/pci/riptide/riptide.c 		p += 8;
p                 976 sound/pci/riptide/riptide.c 	unsigned int p[2] = { 0, 0 };
p                 986 sound/pci/riptide/riptide.c 			p[i] += rptr.retwords[1];
p                 987 sound/pci/riptide/riptide.c 			p[i] *= rptr.retwords[2];
p                 988 sound/pci/riptide/riptide.c 			p[i] += rptr.retwords[3];
p                 989 sound/pci/riptide/riptide.c 			p[i] /= 65536;
p                 993 sound/pci/riptide/riptide.c 	if (p[0]) {
p                 994 sound/pci/riptide/riptide.c 		if (p[1] != p[0])
p                 995 sound/pci/riptide/riptide.c 			snd_printdd("rates differ %d %d\n", p[0], p[1]);
p                 996 sound/pci/riptide/riptide.c 		*rate = (unsigned int)p[0];
p                 998 sound/pci/riptide/riptide.c 		*rate = (unsigned int)p[1];
p                1477 sound/pci/riptide/riptide.c 		struct sgd *c, *p = NULL;
p                1495 sound/pci/riptide/riptide.c 			if (p)
p                1496 sound/pci/riptide/riptide.c 				p->dwNextLink = cpu_to_le32(data->sgdlist.addr +
p                1511 sound/pci/riptide/riptide.c 			p = c;
p                1897 sound/pci/riptide/riptide.c 	unsigned char p[256];
p                1952 sound/pci/riptide/riptide.c 	i = getpaths(cif, p);
p                1955 sound/pci/riptide/riptide.c 		snd_iprintf(buffer, "%x->%x ", p[i], p[i + 1]);
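
The riptide.c macros indexed above (MASK_/UMASK_AUDIO_CONTROL, the IS_* tests) are a read-modify-write and bit-test idiom over a hardware control/status word. A minimal userspace sketch of that idiom, with made-up register accessors standing in for the driver's real MMIO helpers:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_reg;                    /* stands in for a device register */

static uint32_t read_reg(void)         { return fake_reg; }
static void     write_reg(uint32_t v)  { fake_reg = v; }

/* set bits: read, OR in the mask, write back */
#define REG_SET(bits)    write_reg(read_reg() | (bits))
/* clear bits: read, AND with the complement, write back */
#define REG_CLEAR(bits)  write_reg(read_reg() & ~(bits))
/* test a status bit without modifying the register */
#define REG_TEST(bit)    ((read_reg() & (bit)) != 0)

int main(void)
{
	REG_SET(0x0100);                     /* analogous to SET_ESBIRQON(p)   */
	printf("bit set? %d\n", REG_TEST(0x0100));
	REG_CLEAR(0x0100);                   /* analogous to UNSET_ESBIRQON(p) */
	printf("bit set? %d\n", REG_TEST(0x0100));
	return 0;
}
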
p                2805 sound/pci/rme9652/hdspm.c 	int p = 0;
p                2892 sound/pci/rme9652/hdspm.c 			case 0: p = 0; break;  /* WC */
p                2893 sound/pci/rme9652/hdspm.c 			case 1: p = 3; break;  /* ADAT 1 */
p                2894 sound/pci/rme9652/hdspm.c 			case 2: p = 4; break;  /* ADAT 2 */
p                2895 sound/pci/rme9652/hdspm.c 			case 3: p = 5; break;  /* ADAT 3 */
p                2896 sound/pci/rme9652/hdspm.c 			case 4: p = 6; break;  /* ADAT 4 */
p                2897 sound/pci/rme9652/hdspm.c 			case 5: p = 1; break;  /* AES */
p                2898 sound/pci/rme9652/hdspm.c 			case 6: p = 2; break;  /* SPDIF */
p                2899 sound/pci/rme9652/hdspm.c 			case 7: p = 9; break;  /* TCO */
p                2900 sound/pci/rme9652/hdspm.c 			case 8: p = 10; break; /* SYNC_IN */
p                2905 sound/pci/rme9652/hdspm.c 			case 0: p = 0; break;  /* WC */
p                2906 sound/pci/rme9652/hdspm.c 			case 1: p = 3; break;  /* ADAT 1 */
p                2907 sound/pci/rme9652/hdspm.c 			case 2: p = 4; break;  /* ADAT 2 */
p                2908 sound/pci/rme9652/hdspm.c 			case 3: p = 5; break;  /* ADAT 3 */
p                2909 sound/pci/rme9652/hdspm.c 			case 4: p = 6; break;  /* ADAT 4 */
p                2910 sound/pci/rme9652/hdspm.c 			case 5: p = 1; break;  /* AES */
p                2911 sound/pci/rme9652/hdspm.c 			case 6: p = 2; break;  /* SPDIF */
p                2912 sound/pci/rme9652/hdspm.c 			case 7: p = 10; break; /* SYNC_IN */
p                2921 sound/pci/rme9652/hdspm.c 			case 0: p = 0; break;  /* WC */
p                2922 sound/pci/rme9652/hdspm.c 			case 1: p = 3; break;  /* ADAT */
p                2923 sound/pci/rme9652/hdspm.c 			case 2: p = 1; break;  /* AES */
p                2924 sound/pci/rme9652/hdspm.c 			case 3: p = 2; break;  /* SPDIF */
p                2925 sound/pci/rme9652/hdspm.c 			case 4: p = 9; break;  /* TCO */
p                2926 sound/pci/rme9652/hdspm.c 			case 5: p = 10; break; /* SYNC_IN */
p                2931 sound/pci/rme9652/hdspm.c 			case 0: p = 0; break;  /* WC */
p                2932 sound/pci/rme9652/hdspm.c 			case 1: p = 3; break;  /* ADAT */
p                2933 sound/pci/rme9652/hdspm.c 			case 2: p = 1; break;  /* AES */
p                2934 sound/pci/rme9652/hdspm.c 			case 3: p = 2; break;  /* SPDIF */
p                2935 sound/pci/rme9652/hdspm.c 			case 4: p = 10; break; /* SYNC_IN */
p                2946 sound/pci/rme9652/hdspm.c 		hdspm->settings_register |= HDSPM_c0_SyncRef0 * p;
p                2619 sound/pci/trident/trident_main.c 	.tlv = { .p = db_scale_gvol },
p                2630 sound/pci/trident/trident_main.c 	.tlv = { .p = db_scale_gvol },
p                2819 sound/pci/trident/trident_main.c 	.tlv = { .p = db_scale_crvol },
p                2875 sound/pci/trident/trident_main.c 	.tlv = { .p = db_scale_crvol },
p                 129 sound/pci/trident/trident_memory.c 	struct list_head *p;
p                 133 sound/pci/trident/trident_memory.c 	list_for_each(p, &hdr->block) {
p                 134 sound/pci/trident/trident_memory.c 		blk = list_entry(p, struct snd_util_memblk, list);
p                 144 sound/pci/trident/trident_memory.c 	blk = __snd_util_memblk_new(hdr, psize * ALIGN_PAGE_SIZE, p->prev);
p                1771 sound/pci/via82xx.c 	.tlv = { .p = db_scale_dxs }
p                1785 sound/pci/via82xx.c 	.tlv = { .p = db_scale_dxs }
p                 943 sound/pci/vx222/vx222_ops.c 	.tlv = { .p = db_scale_mic },
p                 954 sound/pci/vx222/vx222_ops.c 	.tlv = { .p = db_scale_mic },
p                1501 sound/pci/ymfpci/ymfpci_main.c   .tlv = { .p = db_scale_native } }
p                1617 sound/pci/ymfpci/ymfpci_main.c 	.tlv = { .p = db_scale_native },
p                  67 sound/pcmcia/vx/vxp_mixer.c 	.tlv = { .p = db_scale_mic },
p                  96 sound/ppc/beep.c 	short *p;
p                 141 sound/ppc/beep.c 		p = beep->buf;
p                 142 sound/ppc/beep.c 		for (i = 0; i < nsamples; ++i, p += 2) {
p                 143 sound/ppc/beep.c 			p[0] = p[1] = beep_wform[j >> 8] * beep->volume;
p                 723 sound/soc/codecs/hdac_hdmi.c 	struct hdac_hdmi_port *p;
p                 729 sound/soc/codecs/hdac_hdmi.c 		list_for_each_entry(p, &pcm->port_list, head) {
p                 730 sound/soc/codecs/hdac_hdmi.c 			if (p->id == port->id && port->pin == p->pin)
p                 902 sound/soc/codecs/hdac_hdmi.c 	struct hdac_hdmi_port *p, *p_next;
p                 924 sound/soc/codecs/hdac_hdmi.c 		list_for_each_entry_safe(p, p_next, &pcm->port_list, head) {
p                 925 sound/soc/codecs/hdac_hdmi.c 			if (p == port && p->id == port->id &&
p                 926 sound/soc/codecs/hdac_hdmi.c 					p->pin == port->pin) {
p                 928 sound/soc/codecs/hdac_hdmi.c 				list_del(&p->head);
p                 857 sound/soc/codecs/msm8916-wcd-digital.c 	struct msm8916_wcd_digital_priv *p = dev_get_drvdata(component->dev);
p                 859 sound/soc/codecs/msm8916-wcd-digital.c 	return clk_set_rate(p->mclk, freq);
p                 190 sound/soc/codecs/tas2552.c 		u8 p = snd_soc_component_read32(component, TAS2552_PLL_CTRL_1);
p                 192 sound/soc/codecs/tas2552.c 		p = (p >> 7);
p                 195 sound/soc/codecs/tas2552.c 		t = (pll_clk * 2) << p;
p                 149 sound/soc/codecs/tas571x.c 	uint8_t *buf, *p;
p                 158 sound/soc/codecs/tas571x.c 	for (i = 0, p = buf + 1; i < len; i++, p += sizeof(uint32_t))
p                 159 sound/soc/codecs/tas571x.c 		put_unaligned_be32(values[i], p);
p                 181 sound/soc/codecs/tas571x.c 	uint8_t *recv_buf, *p;
p                 210 sound/soc/codecs/tas571x.c 	for (i = 0, p = recv_buf; i < len; i++, p += sizeof(uint32_t))
p                 211 sound/soc/codecs/tas571x.c 		values[i] = get_unaligned_be32(p);
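
The tas571x.c entries pack 32-bit coefficient words into a byte buffer with put_unaligned_be32()/get_unaligned_be32(), stepping the cursor by sizeof(uint32_t) each iteration. A portable sketch of the same big-endian packing done by hand (the helper names below are invented for illustration):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* store a 32-bit value big-endian at an arbitrary (possibly unaligned) address */
static void be32_put(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

/* read a big-endian 32-bit value back out of the buffer */
static uint32_t be32_get(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	uint32_t values[3] = { 0x11223344, 0xAABBCCDD, 7 };
	uint8_t buf[1 + sizeof(values)];     /* one header byte + payload */
	uint8_t *p = buf + 1;
	size_t i;

	for (i = 0; i < 3; i++, p += sizeof(uint32_t))
		be32_put(p, values[i]);      /* pack, as in the write path */

	for (i = 0; i < 3; i++)
		printf("%08x\n", (unsigned)be32_get(buf + 1 + i * sizeof(uint32_t)));
	return 0;
}
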
p                 273 sound/soc/codecs/tda7419.c 	.tlv.p = (xtlv_array), \
p                 287 sound/soc/codecs/tda7419.c 	.tlv.p = (xtlv_array), \
p                  33 sound/soc/codecs/tlv320aic32x4-clk.c 	u8 p;
p                  88 sound/soc/codecs/tlv320aic32x4-clk.c 	settings->p = (val & AIC32X4_PLL_P_MASK) >> AIC32X4_PLL_P_SHIFT;
p                 121 sound/soc/codecs/tlv320aic32x4-clk.c 				settings->p << AIC32X4_PLL_P_SHIFT);
p                 151 sound/soc/codecs/tlv320aic32x4-clk.c 	return (unsigned long) DIV_ROUND_UP_ULL(rate, settings->p * 10000);
p                 159 sound/soc/codecs/tlv320aic32x4-clk.c 	settings->p = parent_rate / AIC32X4_MAX_PLL_CLKIN + 1;
p                 160 sound/soc/codecs/tlv320aic32x4-clk.c 	if (settings->p > 8)
p                 168 sound/soc/codecs/tlv320aic32x4-clk.c 	multiplier = (u64) rate * settings->p * 10000;
p                1050 sound/soc/codecs/tlv320aic3x.c 	u8 data, j, r, p, pll_q, pll_p = 1, pll_r = 1, pll_j = 1;
p                1129 sound/soc/codecs/tlv320aic3x.c 		for (p = 1; p <= 8; p++) {
p                1135 sound/soc/codecs/tlv320aic3x.c 				int tmp_clk = (1000 * j * r) / p;
p                1143 sound/soc/codecs/tlv320aic3x.c 					pll_r = r; pll_p = p;
p                1154 sound/soc/codecs/tlv320aic3x.c 	for (p = 1; p <= 8; p++) {
p                1155 sound/soc/codecs/tlv320aic3x.c 		j = codec_clk * p / 1000;
p                1161 sound/soc/codecs/tlv320aic3x.c 		d = ((2048 * p * fsref) - j * aic3x->sysclk)
p                1164 sound/soc/codecs/tlv320aic3x.c 		clk = (10000 * j + d) / (10 * p);
p                1169 sound/soc/codecs/tlv320aic3x.c 			pll_j = j; pll_d = d; pll_r = 1; pll_p = p;
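
The tlv320aic3x.c lines belong to a brute-force search over PLL divider candidates (p, r, j, d) that keeps the combination closest to the wanted clock. A simplified, hypothetical sketch of that search style; the ranges and clock values are illustrative, not the codec's real constraints:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const long sysclk = 12000000;    /* input clock in Hz (illustrative)      */
	const long target = 98304000;    /* desired PLL output in Hz (illustrative) */
	long best_err = -1;
	int best_p = 1, best_j = 1;
	int p, j;

	/* try every divider P and integer multiplier J, keep the closest match */
	for (p = 1; p <= 8; p++) {
		for (j = 1; j <= 63; j++) {
			long clk = sysclk * j / p;
			long err = labs(clk - target);
			if (best_err < 0 || err < best_err) {
				best_err = err;
				best_p = p;
				best_j = j;
			}
		}
	}
	printf("P=%d J=%d -> %ld Hz (error %ld Hz)\n",
	       best_p, best_j, sysclk * best_j / best_p, best_err);
	return 0;
}
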
p                  89 sound/soc/codecs/wcd9335.c #define WCD9335_SLIM_RX_CH(p) \
p                  90 sound/soc/codecs/wcd9335.c 	{.port = p + WCD9335_RX_START, .shift = p,}
p                  92 sound/soc/codecs/wcd9335.c #define WCD9335_SLIM_TX_CH(p) \
p                  93 sound/soc/codecs/wcd9335.c 	{.port = p, .shift = p,}
p                 617 sound/soc/codecs/wcd9335.h #define WCD9335_SLIM_PGD_RX_PORT_CFG(p)	WCD9335_REG(0, (0x30 + p))
p                 618 sound/soc/codecs/wcd9335.h #define WCD9335_SLIM_PGD_PORT_CFG(p)	WCD9335_REG(0, (0x40 + p))
p                 619 sound/soc/codecs/wcd9335.h #define WCD9335_SLIM_PGD_TX_PORT_CFG(p)	WCD9335_REG(0, (0x50 + p))
p                 620 sound/soc/codecs/wcd9335.h #define WCD9335_SLIM_PGD_PORT_INT_SRC(p)	WCD9335_REG(0, (0x60 + p))
p                 621 sound/soc/codecs/wcd9335.h #define WCD9335_SLIM_PGD_PORT_INT_STATUS(p)	WCD9335_REG(0, (0x80 + p))
p                 622 sound/soc/codecs/wcd9335.h #define WCD9335_SLIM_PGD_TX_PORT_MULTI_CHNL_0(p) WCD9335_REG(0, (0x100 + 4 * p))
p                 624 sound/soc/codecs/wcd9335.h #define WCD9335_SLIM_PGD_TX_PORT_MULTI_CHNL_1(p) WCD9335_REG(0, (0x101 + 4 * p))
p                 625 sound/soc/codecs/wcd9335.h #define WCD9335_SLIM_PGD_RX_PORT_MULTI_CHNL_0(p) WCD9335_REG(0, (0x140 + 4 * p))
p                 579 sound/soc/codecs/wm0010.c 	u32 *p, len;
p                 687 sound/soc/codecs/wm0010.c 		p = (u32 *)out;
p                 691 sound/soc/codecs/wm0010.c 			if (*p == 0x0e00ed0f) {
p                 696 sound/soc/codecs/wm0010.c 			p++;
p                1067 sound/soc/codecs/wm_adsp.c 	char *p = ucontrol->value.bytes.data;
p                1075 sound/soc/codecs/wm_adsp.c 		memcpy(ctl->cache, p, ctl->len);
p                1079 sound/soc/codecs/wm_adsp.c 		ret = wm_coeff_write_control(ctl, p, ctl->len);
p                1172 sound/soc/codecs/wm_adsp.c 	char *p = ucontrol->value.bytes.data;
p                1179 sound/soc/codecs/wm_adsp.c 			ret = wm_coeff_read_control(ctl, p, ctl->len);
p                1186 sound/soc/codecs/wm_adsp.c 		memcpy(p, ctl->cache, ctl->len);
p                2650 sound/soc/codecs/wm_adsp.c 	char *p;
p                2660 sound/soc/codecs/wm_adsp.c 		p = devm_kstrdup(dsp->dev, dsp->name, GFP_KERNEL);
p                2661 sound/soc/codecs/wm_adsp.c 		if (!p)
p                2664 sound/soc/codecs/wm_adsp.c 		dsp->fwf_name = p;
p                2665 sound/soc/codecs/wm_adsp.c 		for (; *p != 0; ++p)
p                2666 sound/soc/codecs/wm_adsp.c 			*p = tolower(*p);
p                  29 sound/soc/dwc/dwc-pcm.c 	const u##sample_bits (*p)[2] = (void *)runtime->dma_area; \
p                  34 sound/soc/dwc/dwc-pcm.c 		iowrite32(p[tx_ptr][0], dev->i2s_base + LRBR_LTHR(0)); \
p                  35 sound/soc/dwc/dwc-pcm.c 		iowrite32(p[tx_ptr][1], dev->i2s_base + RRBR_RTHR(0)); \
p                  49 sound/soc/dwc/dwc-pcm.c 	u##sample_bits (*p)[2] = (void *)runtime->dma_area; \
p                  54 sound/soc/dwc/dwc-pcm.c 		p[rx_ptr][0] = ioread32(dev->i2s_base + LRBR_LTHR(0)); \
p                  55 sound/soc/dwc/dwc-pcm.c 		p[rx_ptr][1] = ioread32(dev->i2s_base + RRBR_RTHR(0)); \
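
The dwc-pcm.c entries show a macro that token-pastes the sample width into the pointer type (u##sample_bits (*p)[2]) so one body can be instantiated for 16- and 32-bit interleaved stereo frames. A small sketch of that generation pattern, using a hypothetical checksum helper instead of FIFO I/O:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef uint16_t u16;
typedef uint32_t u32;

/* stamp out one function per sample width; u##bits selects u16 or u32 */
#define DEFINE_SUM_FRAMES(bits)                                          \
static unsigned long sum_frames_##bits(const void *area, size_t frames) \
{                                                                        \
	const u##bits (*p)[2] = area;   /* interleaved stereo frames */  \
	unsigned long sum = 0;                                           \
	size_t i;                                                        \
	for (i = 0; i < frames; i++)                                     \
		sum += p[i][0] + p[i][1];                                \
	return sum;                                                      \
}

DEFINE_SUM_FRAMES(16)
DEFINE_SUM_FRAMES(32)

int main(void)
{
	u16 buf16[2][2] = { {1, 2}, {3, 4} };
	u32 buf32[2][2] = { {10, 20}, {30, 40} };
	printf("%lu %lu\n", sum_frames_16(buf16, 2), sum_frames_32(buf32, 2));
	return 0;
}
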
p                1379 sound/soc/fsl/fsl_ssi.c 	const char *p, *sprop;
p                1447 sound/soc/fsl/fsl_ssi.c 		p = strrchr(sprop, ',');
p                1448 sound/soc/fsl/fsl_ssi.c 		if (p)
p                1449 sound/soc/fsl/fsl_ssi.c 			sprop = p + 1;
p                1310 sound/soc/intel/atom/sst-atom-controls.c 	struct snd_soc_dapm_path *p = NULL;
p                1318 sound/soc/intel/atom/sst-atom-controls.c 		snd_soc_dapm_widget_for_each_sink_path(w, p) {
p                1319 sound/soc/intel/atom/sst-atom-controls.c 			if (p->connected && !p->connected(w, p->sink))
p                1322 sound/soc/intel/atom/sst-atom-controls.c 			if (p->connect && p->sink->power &&
p                1323 sound/soc/intel/atom/sst-atom-controls.c 					is_sst_dapm_widget(p->sink)) {
p                1324 sound/soc/intel/atom/sst-atom-controls.c 				struct sst_ids *ids = p->sink->priv;
p                1327 sound/soc/intel/atom/sst-atom-controls.c 						p->sink->name);
p                1337 sound/soc/intel/atom/sst-atom-controls.c 		snd_soc_dapm_widget_for_each_source_path(w, p) {
p                1338 sound/soc/intel/atom/sst-atom-controls.c 			if (p->connected && !p->connected(w, p->source))
p                1341 sound/soc/intel/atom/sst-atom-controls.c 			if (p->connect &&  p->source->power &&
p                1342 sound/soc/intel/atom/sst-atom-controls.c 					is_sst_dapm_widget(p->source)) {
p                1343 sound/soc/intel/atom/sst-atom-controls.c 				struct sst_ids *ids = p->source->priv;
p                1346 sound/soc/intel/atom/sst-atom-controls.c 						p->source->name);
p                 314 sound/soc/intel/atom/sst-atom-controls.h 		dst.location_id.p.cell_nbr_idx = (cell_idx);		\
p                 315 sound/soc/intel/atom/sst-atom-controls.h 		dst.location_id.p.path_id = (pipe_id);			\
p                 345 sound/soc/intel/atom/sst-atom-controls.h 		} __packed	p;		/* part */
p                 718 sound/soc/intel/atom/sst-atom-controls.h 	.tlv.p = (tlv_array), \
p                 208 sound/soc/intel/atom/sst-mfld-dsp.h 	} p;
p                  63 sound/soc/intel/atom/sst/sst.c 		header.p.header_high.part.done = 0;
p                  85 sound/soc/intel/atom/sst/sst.c 		if (sst_create_ipc_msg(&msg, header.p.header_high.part.large)) {
p                  90 sound/soc/intel/atom/sst/sst.c 		if (header.p.header_high.part.large) {
p                  91 sound/soc/intel/atom/sst/sst.c 			size = header.p.header_low_payload;
p                  98 sound/soc/intel/atom/sst/sst.c 				header.p.header_low_payload = 0;
p                 104 sound/soc/intel/atom/sst/sst.c 			sst_is_process_reply(header.p.header_high.part.msg_id);
p                 129 sound/soc/intel/atom/sst/sst_ipc.c 		while (header.p.header_high.part.busy) {
p                 149 sound/soc/intel/atom/sst/sst_ipc.c 		if (header.p.header_high.part.busy) {
p                 161 sound/soc/intel/atom/sst/sst_ipc.c 				msg->mrfld_header.p.header_high.full);
p                 163 sound/soc/intel/atom/sst/sst_ipc.c 			msg->mrfld_header.p.header_low_payload);
p                 165 sound/soc/intel/atom/sst/sst_ipc.c 	if (msg->mrfld_header.p.header_high.part.large)
p                 168 sound/soc/intel/atom/sst/sst_ipc.c 			msg->mrfld_header.p.header_low_payload);
p                 197 sound/soc/intel/atom/sst/sst_ipc.c 	clear_ipc.p.header_high.part.busy = 0;
p                 198 sound/soc/intel/atom/sst/sst_ipc.c 	clear_ipc.p.header_high.part.done = 1;
p                 199 sound/soc/intel/atom/sst/sst_ipc.c 	clear_ipc.p.header_low_payload = IPC_ACK_SUCCESS;
p                 259 sound/soc/intel/atom/sst/sst_ipc.c 	msg_low = msg->mrfld_header.p.header_low_payload;
p                 328 sound/soc/intel/atom/sst/sst_ipc.c 	msg_high = msg->mrfld_header.p.header_high;
p                 329 sound/soc/intel/atom/sst/sst_ipc.c 	msg_low = msg->mrfld_header.p.header_low_payload;
p                 332 sound/soc/intel/atom/sst/sst_ipc.c 			msg->mrfld_header.p.header_high.full,
p                 333 sound/soc/intel/atom/sst/sst_ipc.c 			msg->mrfld_header.p.header_low_payload);
p                 250 sound/soc/intel/atom/sst/sst_pvt.c 	msg->mrfld_header.p.header_low_payload = sizeof(dsp_hdr) + mbox_data_len;
p                 251 sound/soc/intel/atom/sst/sst_pvt.c 	msg->mrfld_header.p.header_high.part.res_rqd = !sync;
p                 253 sound/soc/intel/atom/sst/sst_pvt.c 			msg->mrfld_header.p.header_high.full);
p                 255 sound/soc/intel/atom/sst/sst_pvt.c 			msg->mrfld_header.p.header_high.part.res_rqd);
p                 257 sound/soc/intel/atom/sst/sst_pvt.c 			msg->mrfld_header.p.header_low_payload);
p                 307 sound/soc/intel/atom/sst/sst_pvt.c 	header->p.header_high.part.msg_id = msg;
p                 308 sound/soc/intel/atom/sst/sst_pvt.c 	header->p.header_high.part.task_id = task_id;
p                 309 sound/soc/intel/atom/sst/sst_pvt.c 	header->p.header_high.part.large = large;
p                 310 sound/soc/intel/atom/sst/sst_pvt.c 	header->p.header_high.part.drv_id = drv_id;
p                 311 sound/soc/intel/atom/sst/sst_pvt.c 	header->p.header_high.part.done = 0;
p                 312 sound/soc/intel/atom/sst/sst_pvt.c 	header->p.header_high.part.busy = 1;
p                 313 sound/soc/intel/atom/sst/sst_pvt.c 	header->p.header_high.part.res_rqd = 1;
p                 189 sound/soc/intel/atom/sst/sst_stream.c 	msg->mrfld_header.p.header_high.part.res_rqd = bytes->block;
p                 191 sound/soc/intel/atom/sst/sst_stream.c 	msg->mrfld_header.p.header_low_payload = length;
p                 205 sound/soc/intel/atom/sst/sst_stream.c 			msg->mrfld_header.p.header_low_payload);
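
The atom/sst entries all go through header.p.header_high.part.<field>, i.e. a union that overlays named bitfields on the raw IPC mailbox words. A hedged sketch of that overlay technique; the field names and widths below are assumptions for illustration, and bitfield layout is compiler-defined, so real headers pin the layout down explicitly:

#include <stdio.h>
#include <stdint.h>

/* hypothetical 32-bit "high" word split into named fields (layout illustrative only) */
union header_high {
	uint32_t full;
	struct {
		uint32_t msg_id  : 8;
		uint32_t task_id : 8;
		uint32_t large   : 1;
		uint32_t done    : 1;
		uint32_t busy    : 1;
		uint32_t rsvd    : 13;
	} part;
};

struct ipc_header {
	union header_high header_high;
	uint32_t header_low_payload;     /* payload size or small inline data */
};

int main(void)
{
	struct ipc_header h = { { 0 }, 0 };

	h.header_high.part.msg_id = 0x12;
	h.header_high.part.busy = 1;     /* mark the mailbox as occupied */
	h.header_low_payload = 64;       /* bytes of payload that follow */

	printf("raw high word: 0x%08x, busy=%u\n",
	       (unsigned)h.header_high.full, (unsigned)h.header_high.part.busy);
	return 0;
}
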
p                  29 sound/soc/intel/boards/kbl_rt5663_max98927.c #define DMIC_CH(p) p->list[p->count-1]
p                  37 sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c #define DMIC_CH(p) p->list[p->count-1]
p                  22 sound/soc/intel/boards/skl_nau88l25_max98357a.c #define DMIC_CH(p)     p->list[p->count-1]
p                  26 sound/soc/intel/boards/skl_nau88l25_ssm4567.c #define DMIC_CH(p)     p->list[p->count-1]
p                1382 sound/soc/intel/skylake/skl-pcm.c 	struct skl_pipeline *p;
p                1388 sound/soc/intel/skylake/skl-pcm.c 	list_for_each_entry(p, &skl->ppl_list, node) {
p                1389 sound/soc/intel/skylake/skl-pcm.c 		list_for_each_entry(m, &p->pipe->w_list, node) {
p                 922 sound/soc/intel/skylake/skl-topology.c 	struct snd_soc_dapm_path *p;
p                 927 sound/soc/intel/skylake/skl-topology.c 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
p                 928 sound/soc/intel/skylake/skl-topology.c 		if (!p->connect)
p                 934 sound/soc/intel/skylake/skl-topology.c 			"%s: sink widget=%s\n", __func__, p->sink->name);
p                 936 sound/soc/intel/skylake/skl-topology.c 		next_sink = p->sink;
p                 938 sound/soc/intel/skylake/skl-topology.c 		if (!is_skl_dsp_widget_type(p->sink, skl->dev))
p                 939 sound/soc/intel/skylake/skl-topology.c 			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);
p                 946 sound/soc/intel/skylake/skl-topology.c 		if ((p->sink->priv != NULL) &&
p                 947 sound/soc/intel/skylake/skl-topology.c 				is_skl_dsp_widget_type(p->sink, skl->dev)) {
p                 949 sound/soc/intel/skylake/skl-topology.c 			sink = p->sink;
p                1044 sound/soc/intel/skylake/skl-topology.c 	struct snd_soc_dapm_path *p;
p                1047 sound/soc/intel/skylake/skl-topology.c 	snd_soc_dapm_widget_for_each_source_path(w, p) {
p                1048 sound/soc/intel/skylake/skl-topology.c 		src_w = p->source;
p                1049 sound/soc/intel/skylake/skl-topology.c 		if (!p->connect)
p                1053 sound/soc/intel/skylake/skl-topology.c 		dev_dbg(skl->dev, "src widget=%s\n", p->source->name);
p                1060 sound/soc/intel/skylake/skl-topology.c 		if ((p->source->priv != NULL) &&
p                1061 sound/soc/intel/skylake/skl-topology.c 				is_skl_dsp_widget_type(p->source, skl->dev)) {
p                1062 sound/soc/intel/skylake/skl-topology.c 			return p->source;
p                1601 sound/soc/intel/skylake/skl-topology.c 	struct snd_soc_dapm_path *p = NULL;
p                1605 sound/soc/intel/skylake/skl-topology.c 		snd_soc_dapm_widget_for_each_sink_path(w, p) {
p                1606 sound/soc/intel/skylake/skl-topology.c 			if (p->connect && p->sink->power &&
p                1607 sound/soc/intel/skylake/skl-topology.c 				!is_skl_dsp_widget_type(p->sink, dai->dev))
p                1610 sound/soc/intel/skylake/skl-topology.c 			if (p->sink->priv) {
p                1612 sound/soc/intel/skylake/skl-topology.c 						p->sink->name);
p                1613 sound/soc/intel/skylake/skl-topology.c 				return p->sink->priv;
p                1618 sound/soc/intel/skylake/skl-topology.c 		snd_soc_dapm_widget_for_each_source_path(w, p) {
p                1619 sound/soc/intel/skylake/skl-topology.c 			if (p->connect && p->source->power &&
p                1620 sound/soc/intel/skylake/skl-topology.c 				!is_skl_dsp_widget_type(p->source, dai->dev))
p                1623 sound/soc/intel/skylake/skl-topology.c 			if (p->source->priv) {
p                1625 sound/soc/intel/skylake/skl-topology.c 						p->source->name);
p                1626 sound/soc/intel/skylake/skl-topology.c 				return p->source->priv;
p                1637 sound/soc/intel/skylake/skl-topology.c 	struct snd_soc_dapm_path *p;
p                1640 sound/soc/intel/skylake/skl-topology.c 	snd_soc_dapm_widget_for_each_source_path(w, p) {
p                1642 sound/soc/intel/skylake/skl-topology.c 			if (p->connect &&
p                1643 sound/soc/intel/skylake/skl-topology.c 				    (p->sink->id == snd_soc_dapm_aif_out) &&
p                1644 sound/soc/intel/skylake/skl-topology.c 				    p->source->priv) {
p                1645 sound/soc/intel/skylake/skl-topology.c 				mconfig = p->source->priv;
p                1648 sound/soc/intel/skylake/skl-topology.c 			mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
p                1659 sound/soc/intel/skylake/skl-topology.c 	struct snd_soc_dapm_path *p;
p                1662 sound/soc/intel/skylake/skl-topology.c 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
p                1664 sound/soc/intel/skylake/skl-topology.c 			if (p->connect &&
p                1665 sound/soc/intel/skylake/skl-topology.c 				    (p->source->id == snd_soc_dapm_aif_in) &&
p                1666 sound/soc/intel/skylake/skl-topology.c 				    p->sink->priv) {
p                1667 sound/soc/intel/skylake/skl-topology.c 				mconfig = p->sink->priv;
p                1670 sound/soc/intel/skylake/skl-topology.c 			mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
p                1768 sound/soc/intel/skylake/skl-topology.c 	struct snd_soc_dapm_path *p;
p                1771 sound/soc/intel/skylake/skl-topology.c 	snd_soc_dapm_widget_for_each_source_path(w, p) {
p                1772 sound/soc/intel/skylake/skl-topology.c 		if (p->connect && is_skl_dsp_widget_type(p->source, dai->dev) &&
p                1773 sound/soc/intel/skylake/skl-topology.c 						p->source->priv) {
p                1776 sound/soc/intel/skylake/skl-topology.c 						p->source->priv, params);
p                1781 sound/soc/intel/skylake/skl-topology.c 						p->source, params);
p                1793 sound/soc/intel/skylake/skl-topology.c 	struct snd_soc_dapm_path *p = NULL;
p                1796 sound/soc/intel/skylake/skl-topology.c 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
p                1797 sound/soc/intel/skylake/skl-topology.c 		if (p->connect && is_skl_dsp_widget_type(p->sink, dai->dev) &&
p                1798 sound/soc/intel/skylake/skl-topology.c 						p->sink->priv) {
p                1801 sound/soc/intel/skylake/skl-topology.c 						p->sink->priv, params);
p                1806 sound/soc/intel/skylake/skl-topology.c 						dai, p->sink, params);
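
The skl-topology.c excerpts walk DAPM paths, following p->sink (or p->source) from widget to widget until a connected DSP widget is found. A toy, self-contained sketch of that graph walk; the structs are stand-ins, not the real snd_soc_dapm types:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct widget;

struct path {
	bool connect;             /* is this route currently active?          */
	struct widget *sink;      /* downstream widget                        */
	struct path *next;        /* next outgoing path of the same widget    */
};

struct widget {
	const char *name;
	bool is_dsp;              /* stand-in for is_skl_dsp_widget_type()    */
	struct path *sinks;       /* list of outgoing (sink) paths            */
};

/* follow connected sink paths until a DSP widget is reached */
static struct widget *find_dsp_sink(struct widget *w)
{
	struct path *p;

	for (p = w->sinks; p; p = p->next) {
		struct widget *found;

		if (!p->connect)
			continue;
		if (p->sink->is_dsp)
			return p->sink;
		found = find_dsp_sink(p->sink);
		if (found)
			return found;
	}
	return NULL;
}

int main(void)
{
	struct widget dsp = { "copier", true, NULL };
	struct path to_dsp = { true, &dsp, NULL };
	struct widget aif = { "aif_in", false, &to_dsp };
	struct path to_aif = { true, &aif, NULL };
	struct widget fe = { "frontend", false, &to_aif };
	struct widget *hit = find_dsp_sink(&fe);

	printf("dsp widget: %s\n", hit ? hit->name : "none");
	return 0;
}
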
p                 629 sound/soc/meson/axg-card.c 						  const char *p))
p                  60 sound/soc/meson/axg-tdmin.c 	struct snd_soc_dapm_path *p = NULL;
p                  63 sound/soc/meson/axg-tdmin.c 	snd_soc_dapm_widget_for_each_source_path(w, p) {
p                  64 sound/soc/meson/axg-tdmin.c 		if (!p->connect)
p                  67 sound/soc/meson/axg-tdmin.c 		if (p->source->id == snd_soc_dapm_dai_out)
p                  68 sound/soc/meson/axg-tdmin.c 			return (struct snd_soc_dai *)p->source->priv;
p                  70 sound/soc/meson/axg-tdmin.c 		be = axg_tdmin_get_be(p->source);
p                  58 sound/soc/meson/axg-tdmout.c 	struct snd_soc_dapm_path *p = NULL;
p                  61 sound/soc/meson/axg-tdmout.c 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
p                  62 sound/soc/meson/axg-tdmout.c 		if (!p->connect)
p                  65 sound/soc/meson/axg-tdmout.c 		if (p->sink->id == snd_soc_dapm_dai_in)
p                  66 sound/soc/meson/axg-tdmout.c 			return (struct snd_soc_dai *)p->sink->priv;
p                  68 sound/soc/meson/axg-tdmout.c 		be = axg_tdmout_get_be(p->sink);
p                  38 sound/soc/meson/g12a-tohdmitx.c 	struct snd_soc_dapm_path *p = NULL;
p                  41 sound/soc/meson/g12a-tohdmitx.c 	snd_soc_dapm_widget_for_each_source_path(w, p) {
p                  42 sound/soc/meson/g12a-tohdmitx.c 		if (!p->connect)
p                  47 sound/soc/meson/g12a-tohdmitx.c 		    snd_soc_dapm_to_component(p->source->dapm))
p                  50 sound/soc/meson/g12a-tohdmitx.c 		if (p->source->id == snd_soc_dapm_dai_in)
p                  51 sound/soc/meson/g12a-tohdmitx.c 			return p->source;
p                  53 sound/soc/meson/g12a-tohdmitx.c 		in = g12a_tohdmitx_get_input(p->source);
p                 328 sound/soc/qcom/qdsp6/q6adm.c 	void *p;
p                 332 sound/soc/qcom/qdsp6/q6adm.c 	p = kzalloc(pkt_size, GFP_KERNEL);
p                 333 sound/soc/qcom/qdsp6/q6adm.c 	if (!p)
p                 336 sound/soc/qcom/qdsp6/q6adm.c 	pkt = p;
p                 337 sound/soc/qcom/qdsp6/q6adm.c 	open = p + APR_HDR_SIZE;
p                 729 sound/soc/qcom/qdsp6/q6afe.c 	struct q6afe_port *p = NULL;
p                 734 sound/soc/qcom/qdsp6/q6afe.c 	list_for_each_entry(p, &afe->port_list, node)
p                 735 sound/soc/qcom/qdsp6/q6afe.c 		if (p->token == token) {
p                 736 sound/soc/qcom/qdsp6/q6afe.c 			ret = p;
p                 737 sound/soc/qcom/qdsp6/q6afe.c 			kref_get(&p->refcount);
p                 848 sound/soc/qcom/qdsp6/q6afe.c 	void *p, *pl;
p                 851 sound/soc/qcom/qdsp6/q6afe.c 	p = kzalloc(pkt_size, GFP_KERNEL);
p                 852 sound/soc/qcom/qdsp6/q6afe.c 	if (!p)
p                 855 sound/soc/qcom/qdsp6/q6afe.c 	pkt = p;
p                 856 sound/soc/qcom/qdsp6/q6afe.c 	param = p + APR_HDR_SIZE;
p                 857 sound/soc/qcom/qdsp6/q6afe.c 	pdata = p + APR_HDR_SIZE + sizeof(*param);
p                 858 sound/soc/qcom/qdsp6/q6afe.c 	pl = p + APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata);
p                 896 sound/soc/qcom/qdsp6/q6afe.c 	void *p, *pl;
p                 899 sound/soc/qcom/qdsp6/q6afe.c 	p = kzalloc(pkt_size, GFP_KERNEL);
p                 900 sound/soc/qcom/qdsp6/q6afe.c 	if (!p)
p                 903 sound/soc/qcom/qdsp6/q6afe.c 	pkt = p;
p                 904 sound/soc/qcom/qdsp6/q6afe.c 	param = p + APR_HDR_SIZE;
p                 905 sound/soc/qcom/qdsp6/q6afe.c 	pdata = p + APR_HDR_SIZE + sizeof(*param);
p                 906 sound/soc/qcom/qdsp6/q6afe.c 	pl = p + APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata);
p                1029 sound/soc/qcom/qdsp6/q6afe.c 	void *p;
p                1039 sound/soc/qcom/qdsp6/q6afe.c 	p = kzalloc(pkt_size, GFP_KERNEL);
p                1040 sound/soc/qcom/qdsp6/q6afe.c 	if (!p)
p                1043 sound/soc/qcom/qdsp6/q6afe.c 	pkt = p;
p                1044 sound/soc/qcom/qdsp6/q6afe.c 	stop = p + APR_HDR_SIZE;
p                1306 sound/soc/qcom/qdsp6/q6afe.c 	void *p;
p                1329 sound/soc/qcom/qdsp6/q6afe.c 	p = kzalloc(pkt_size, GFP_KERNEL);
p                1330 sound/soc/qcom/qdsp6/q6afe.c 	if (!p)
p                1333 sound/soc/qcom/qdsp6/q6afe.c 	pkt = p;
p                1334 sound/soc/qcom/qdsp6/q6afe.c 	start = p + APR_HDR_SIZE;
p                 257 sound/soc/qcom/qdsp6/q6asm.c 	void *p;
p                 265 sound/soc/qcom/qdsp6/q6asm.c 	p = kzalloc(pkt_size, GFP_KERNEL);
p                 266 sound/soc/qcom/qdsp6/q6asm.c 	if (!p)
p                 269 sound/soc/qcom/qdsp6/q6asm.c 	pkt = p;
p                 270 sound/soc/qcom/qdsp6/q6asm.c 	mem_unmap = p + APR_HDR_SIZE;
p                 353 sound/soc/qcom/qdsp6/q6asm.c 	void *p;
p                 372 sound/soc/qcom/qdsp6/q6asm.c 	p = kzalloc(pkt_size, GFP_KERNEL);
p                 373 sound/soc/qcom/qdsp6/q6asm.c 	if (!p)
p                 376 sound/soc/qcom/qdsp6/q6asm.c 	pkt = p;
p                 377 sound/soc/qcom/qdsp6/q6asm.c 	cmd = p + APR_HDR_SIZE;
p                 378 sound/soc/qcom/qdsp6/q6asm.c 	mregions = p + APR_HDR_SIZE +  sizeof(*cmd);
p                 850 sound/soc/qcom/qdsp6/q6asm.c 	void *p;
p                 855 sound/soc/qcom/qdsp6/q6asm.c 	p = kzalloc(pkt_size, GFP_KERNEL);
p                 856 sound/soc/qcom/qdsp6/q6asm.c 	if (!p)
p                 859 sound/soc/qcom/qdsp6/q6asm.c 	pkt = p;
p                 860 sound/soc/qcom/qdsp6/q6asm.c 	open = p + APR_HDR_SIZE;
p                 903 sound/soc/qcom/qdsp6/q6asm.c 	void *p;
p                 906 sound/soc/qcom/qdsp6/q6asm.c 	p = kzalloc(pkt_size, GFP_ATOMIC);
p                 907 sound/soc/qcom/qdsp6/q6asm.c 	if (!p)
p                 910 sound/soc/qcom/qdsp6/q6asm.c 	pkt = p;
p                 911 sound/soc/qcom/qdsp6/q6asm.c 	run = p + APR_HDR_SIZE;
p                 984 sound/soc/qcom/qdsp6/q6asm.c 	void *p;
p                 988 sound/soc/qcom/qdsp6/q6asm.c 	p = kzalloc(pkt_size, GFP_KERNEL);
p                 989 sound/soc/qcom/qdsp6/q6asm.c 	if (!p)
p                 992 sound/soc/qcom/qdsp6/q6asm.c 	pkt = p;
p                 993 sound/soc/qcom/qdsp6/q6asm.c 	fmt = p + APR_HDR_SIZE;
p                1042 sound/soc/qcom/qdsp6/q6asm.c 	void *p;
p                1045 sound/soc/qcom/qdsp6/q6asm.c 	p = kzalloc(pkt_size, GFP_KERNEL);
p                1046 sound/soc/qcom/qdsp6/q6asm.c 	if (!p)
p                1049 sound/soc/qcom/qdsp6/q6asm.c 	pkt = p;
p                1050 sound/soc/qcom/qdsp6/q6asm.c 	enc_cfg = p + APR_HDR_SIZE;
p                1094 sound/soc/qcom/qdsp6/q6asm.c 	void *p;
p                1097 sound/soc/qcom/qdsp6/q6asm.c 	p = kzalloc(pkt_size, GFP_ATOMIC);
p                1098 sound/soc/qcom/qdsp6/q6asm.c 	if (!p)
p                1101 sound/soc/qcom/qdsp6/q6asm.c 	pkt = p;
p                1102 sound/soc/qcom/qdsp6/q6asm.c 	read = p + APR_HDR_SIZE;
p                1140 sound/soc/qcom/qdsp6/q6asm.c 	void *p;
p                1143 sound/soc/qcom/qdsp6/q6asm.c 	p = kzalloc(pkt_size, GFP_KERNEL);
p                1144 sound/soc/qcom/qdsp6/q6asm.c 	if (!p)
p                1147 sound/soc/qcom/qdsp6/q6asm.c 	pkt = p;
p                1148 sound/soc/qcom/qdsp6/q6asm.c 	open = p + APR_HDR_SIZE;
p                1214 sound/soc/qcom/qdsp6/q6asm.c 	void *p;
p                1217 sound/soc/qcom/qdsp6/q6asm.c 	p = kzalloc(pkt_size, GFP_ATOMIC);
p                1218 sound/soc/qcom/qdsp6/q6asm.c 	if (!p)
p                1221 sound/soc/qcom/qdsp6/q6asm.c 	pkt = p;
p                1222 sound/soc/qcom/qdsp6/q6asm.c 	write = p + APR_HDR_SIZE;
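
The q6afe.c/q6asm.c entries repeat one packet-building pattern: allocate a single zeroed buffer, treat its start as the APR header, and place the payload structs behind it with pointer arithmetic (p + APR_HDR_SIZE, + sizeof(*param), ...). A userspace sketch of that single-allocation layout, with made-up header and payload types:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct hdr { uint32_t opcode; uint32_t payload_size; };  /* hypothetical */
struct cmd { uint32_t stream_id; uint32_t flags; };      /* hypothetical */

int main(void)
{
	size_t pkt_size = sizeof(struct hdr) + sizeof(struct cmd);
	void *p = calloc(1, pkt_size);   /* one zeroed allocation, like kzalloc */
	struct hdr *pkt;
	struct cmd *cmd;

	if (!p)
		return 1;

	pkt = p;                                            /* header at the front   */
	cmd = (struct cmd *)((char *)p + sizeof(struct hdr)); /* payload right after */

	pkt->opcode = 0x10DB;            /* illustrative values only */
	pkt->payload_size = sizeof(*cmd);
	cmd->stream_id = 1;
	cmd->flags = 0;

	printf("packet %zu bytes, payload %u bytes\n",
	       pkt_size, (unsigned)pkt->payload_size);
	free(p);
	return 0;
}
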
p                 334 sound/soc/sh/fsi.c #define fsi_reg_write(p, r, d)\
p                 335 sound/soc/sh/fsi.c 	__fsi_reg_write((p->base + REG_##r), d)
p                 337 sound/soc/sh/fsi.c #define fsi_reg_read(p, r)\
p                 338 sound/soc/sh/fsi.c 	__fsi_reg_read((p->base + REG_##r))
p                 340 sound/soc/sh/fsi.c #define fsi_reg_mask_set(p, r, m, d)\
p                 341 sound/soc/sh/fsi.c 	__fsi_reg_mask_set((p->base + REG_##r), m, d)
p                 343 sound/soc/sh/fsi.c #define fsi_master_read(p, r) _fsi_master_read(p, MST_##r)
p                 344 sound/soc/sh/fsi.c #define fsi_core_read(p, r)   _fsi_master_read(p, p->core->r)
p                 357 sound/soc/sh/fsi.c #define fsi_master_mask_set(p, r, m, d) _fsi_master_mask_set(p, MST_##r, m, d)
p                 358 sound/soc/sh/fsi.c #define fsi_core_mask_set(p, r, m, d)  _fsi_master_mask_set(p, p->core->r, m, d)
p                 884 sound/soc/sh/rcar/core.c 	struct snd_interval p;
p                 888 sound/soc/sh/rcar/core.c 	snd_interval_any(&p);
p                 889 sound/soc/sh/rcar/core.c 	p.min = UINT_MAX;
p                 890 sound/soc/sh/rcar/core.c 	p.max = 0;
p                 900 sound/soc/sh/rcar/core.c 			p.min = min(p.min, list[i]);
p                 901 sound/soc/sh/rcar/core.c 			p.max = max(p.max, list[i]);
p                 907 sound/soc/sh/rcar/core.c 			p.min = min(p.min, list[i]);
p                 908 sound/soc/sh/rcar/core.c 			p.max = max(p.max, list[i]);
p                 912 sound/soc/sh/rcar/core.c 	return snd_interval_refine(iv, &p);
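
The rcar/core.c lines reduce a list of supported rates to a min/max interval and then refine the runtime interval against it. A rough sketch of that reduce-then-clamp step without the ALSA snd_interval helpers (interval_refine below is a crude stand-in):

#include <stdio.h>

struct interval { unsigned int min, max; };

/* shrink 'iv' so it also fits inside 'limit' (stand-in for snd_interval_refine) */
static void interval_refine(struct interval *iv, const struct interval *limit)
{
	if (iv->min < limit->min) iv->min = limit->min;
	if (iv->max > limit->max) iv->max = limit->max;
}

int main(void)
{
	unsigned int list[] = { 44100, 48000, 96000 };   /* supported rates (example) */
	struct interval p = { ~0u, 0 };                  /* start "empty", like the driver */
	struct interval runtime = { 8000, 192000 };
	unsigned int i;

	for (i = 0; i < sizeof(list) / sizeof(list[0]); i++) {
		if (list[i] < p.min) p.min = list[i];    /* track the span of the list */
		if (list[i] > p.max) p.max = list[i];
	}
	interval_refine(&runtime, &p);
	printf("refined: %u..%u\n", runtime.min, runtime.max);
	return 0;
}
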
p                  52 sound/soc/sh/rcar/dma.c #define rsnd_priv_to_dmac(p)	((struct rsnd_dma_ctrl *)(p)->dma)
p                  33 sound/soc/sh/rcar/gen.c #define rsnd_priv_to_gen(p)	((struct rsnd_gen *)(p)->gen)
p                 695 sound/soc/sh/rcar/rsnd.h #define rsnd_flags_has(p, f) ((p)->flags & (f))
p                 696 sound/soc/sh/rcar/rsnd.h #define rsnd_flags_set(p, f) ((p)->flags |= (f))
p                 697 sound/soc/sh/rcar/rsnd.h #define rsnd_flags_del(p, f) ((p)->flags &= ~(f))
p                 305 sound/soc/soc-ac97.c 	struct pinctrl *p;
p                 310 sound/soc/soc-ac97.c 	p = devm_pinctrl_get(dev);
p                 311 sound/soc/soc-ac97.c 	if (IS_ERR(p)) {
p                 313 sound/soc/soc-ac97.c 		return PTR_ERR(p);
p                 315 sound/soc/soc-ac97.c 	cfg->pctl = p;
p                 317 sound/soc/soc-ac97.c 	state = pinctrl_lookup_state(p, "ac97-reset");
p                 324 sound/soc/soc-ac97.c 	state = pinctrl_lookup_state(p, "ac97-warm-reset");
p                 331 sound/soc/soc-ac97.c 	state = pinctrl_lookup_state(p, "ac97-running");
p                 209 sound/soc/soc-dapm.c 	struct snd_soc_dapm_path *p;
p                 221 sound/soc/soc-dapm.c 		snd_soc_dapm_widget_for_each_path(w, dir, p) {
p                 222 sound/soc/soc-dapm.c 			if (p->is_supply || p->weak || !p->connect)
p                 224 sound/soc/soc-dapm.c 			node = p->node[rdir];
p                 278 sound/soc/soc-dapm.c static void dapm_path_invalidate(struct snd_soc_dapm_path *p)
p                 284 sound/soc/soc-dapm.c 	if (p->weak || p->is_supply)
p                 293 sound/soc/soc-dapm.c 	if (p->source->endpoints[SND_SOC_DAPM_DIR_IN] != 0)
p                 294 sound/soc/soc-dapm.c 		dapm_widget_invalidate_input_paths(p->sink);
p                 295 sound/soc/soc-dapm.c 	if (p->sink->endpoints[SND_SOC_DAPM_DIR_OUT] != 0)
p                 296 sound/soc/soc-dapm.c 		dapm_widget_invalidate_output_paths(p->source);
p                 782 sound/soc/soc-dapm.c static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i,
p                 786 sound/soc/soc-dapm.c 		p->sink->kcontrol_news[i].private_value;
p                 795 sound/soc/soc-dapm.c 		soc_dapm_read(p->sink->dapm, reg, &val);
p                 810 sound/soc/soc-dapm.c 				soc_dapm_read(p->sink->dapm, mc->rreg, &val);
p                 817 sound/soc/soc-dapm.c 		p->connect = !!val;
p                 825 sound/soc/soc-dapm.c 		p->connect = invert;
p                1374 sound/soc/soc-dapm.c 	struct pinctrl *p = w->pinctrl;
p                1377 sound/soc/soc-dapm.c 	if (!p || !priv)
p                1381 sound/soc/soc-dapm.c 		s = pinctrl_lookup_state(p, priv->active_state);
p                1383 sound/soc/soc-dapm.c 		s = pinctrl_lookup_state(p, priv->sleep_state);
p                1388 sound/soc/soc-dapm.c 	return pinctrl_select_state(p, s);
p                2105 sound/soc/soc-dapm.c 	struct snd_soc_dapm_path *p = NULL;
p                2140 sound/soc/soc-dapm.c 		snd_soc_dapm_widget_for_each_path(w, dir, p) {
p                2141 sound/soc/soc-dapm.c 			if (p->connected && !p->connected(p->source, p->sink))
p                2144 sound/soc/soc-dapm.c 			if (!p->connect)
p                2150 sound/soc/soc-dapm.c 					p->name ? p->name : "static",
p                2151 sound/soc/soc-dapm.c 					p->node[rdir]->name);
p                2483 sound/soc/soc-dapm.c 	struct snd_soc_dapm_path *p, *next_p;
p                2493 sound/soc/soc-dapm.c 		snd_soc_dapm_widget_for_each_path_safe(w, dir, p, next_p)
p                2494 sound/soc/soc-dapm.c 			dapm_free_path(p);
p                2613 sound/soc/soc-dapm.c static int dapm_update_dai_chan(struct snd_soc_dapm_path *p,
p                2627 sound/soc/soc-dapm.c 		p->source->name, p->sink->name);
p                2630 sound/soc/soc-dapm.c 		soc_dapm_connect_path(p, true, "dai update");
p                2632 sound/soc/soc-dapm.c 		soc_dapm_connect_path(p, false, "dai update");
p                2643 sound/soc/soc-dapm.c 	struct snd_soc_dapm_path *p;
p                2658 sound/soc/soc-dapm.c 	snd_soc_dapm_widget_for_each_sink_path(w, p) {
p                2659 sound/soc/soc-dapm.c 		ret = dapm_update_dai_chan(p, p->sink, channels);
p                2664 sound/soc/soc-dapm.c 	snd_soc_dapm_widget_for_each_source_path(w, p) {
p                2665 sound/soc/soc-dapm.c 		ret = dapm_update_dai_chan(p, p->source, channels);
p                2700 sound/soc/soc-dapm.c 	struct snd_soc_dapm_path *p;
p                2709 sound/soc/soc-dapm.c 		snd_soc_dapm_widget_for_each_source_path(w, p) {
p                2710 sound/soc/soc-dapm.c 			if (p->source->id == snd_soc_dapm_micbias ||
p                2711 sound/soc/soc-dapm.c 				p->source->id == snd_soc_dapm_mic ||
p                2712 sound/soc/soc-dapm.c 				p->source->id == snd_soc_dapm_line ||
p                2713 sound/soc/soc-dapm.c 				p->source->id == snd_soc_dapm_output) {
p                2724 sound/soc/soc-dapm.c 		snd_soc_dapm_widget_for_each_sink_path(w, p) {
p                2725 sound/soc/soc-dapm.c 			if (p->sink->id == snd_soc_dapm_spk ||
p                2726 sound/soc/soc-dapm.c 				p->sink->id == snd_soc_dapm_hp ||
p                2727 sound/soc/soc-dapm.c 				p->sink->id == snd_soc_dapm_line ||
p                2728 sound/soc/soc-dapm.c 				p->sink->id == snd_soc_dapm_input) {
p                2997 sound/soc/soc-dapm.c 	struct snd_soc_dapm_path *path, *p;
p                3024 sound/soc/soc-dapm.c 	list_for_each_entry(p, &dapm->card->paths, list) {
p                3025 sound/soc/soc-dapm.c 		if (strcmp(p->source->name, source) != 0)
p                3027 sound/soc/soc-dapm.c 		if (strcmp(p->sink->name, sink) != 0)
p                3029 sound/soc/soc-dapm.c 		path = p;
p                  31 sound/soc/soc-generic-dmaengine-pcm.c static struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p)
p                  33 sound/soc/soc-generic-dmaengine-pcm.c 	return container_of(p, struct dmaengine_pcm, component);
p                 375 sound/soc/soc-topology.c 	const unsigned int *p = NULL;
p                 383 sound/soc/soc-topology.c 	if (dobj->control.kcontrol->tlv.p)
p                 384 sound/soc/soc-topology.c 		p = dobj->control.kcontrol->tlv.p;
p                 388 sound/soc/soc-topology.c 	kfree(p);
p                 493 sound/soc/soc-topology.c 				kfree(kcontrol->tlv.p);
p                 698 sound/soc/soc-topology.c 	unsigned int *p;
p                 700 sound/soc/soc-topology.c 	p = kzalloc(item_len + 2 * sizeof(unsigned int), GFP_KERNEL);
p                 701 sound/soc/soc-topology.c 	if (!p)
p                 704 sound/soc/soc-topology.c 	p[0] = SNDRV_CTL_TLVT_DB_SCALE;
p                 705 sound/soc/soc-topology.c 	p[1] = item_len;
p                 706 sound/soc/soc-topology.c 	p[2] = le32_to_cpu(scale->min);
p                 707 sound/soc/soc-topology.c 	p[3] = (le32_to_cpu(scale->step) & TLV_DB_SCALE_MASK)
p                 710 sound/soc/soc-topology.c 	kc->tlv.p = (void *)p;
p                 744 sound/soc/soc-topology.c 	kfree(kc->tlv.p);
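
soc-topology.c above builds a four-word dB-scale TLV block (type, payload length, minimum, step with mute flag) and points kc->tlv.p at it. A plain-C sketch of assembling such a block; the two constants are placeholders for SNDRV_CTL_TLVT_DB_SCALE and TLV_DB_SCALE_MUTE:

#include <stdio.h>
#include <stdlib.h>

#define TLVT_DB_SCALE   1        /* placeholder for SNDRV_CTL_TLVT_DB_SCALE */
#define DB_SCALE_MUTE   0x10000  /* placeholder for TLV_DB_SCALE_MUTE       */

int main(void)
{
	unsigned int item_len = 2 * sizeof(unsigned int);  /* min + step words */
	/* two words of header (type, length) followed by the payload */
	unsigned int *p = calloc(1, item_len + 2 * sizeof(unsigned int));

	if (!p)
		return 1;

	p[0] = TLVT_DB_SCALE;          /* TLV type                                  */
	p[1] = item_len;               /* payload length in bytes                   */
	p[2] = (unsigned int)-6000;    /* minimum, in 0.01 dB (example: -60 dB)     */
	p[3] = 50 | DB_SCALE_MUTE;     /* step in 0.01 dB, with the mute flag set   */

	printf("tlv: type=%u len=%u min=%d step=0x%x\n",
	       p[0], p[1], (int)p[2], p[3]);
	free(p);
	return 0;
}
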
p                 346 sound/soc/sof/imx/imx8.c 			      void *p, size_t sz)
p                 348 sound/soc/sof/imx/imx8.c 	sof_mailbox_read(sdev, sdev->dsp_box.offset, p, sz);
p                 281 sound/soc/sof/intel/hda-ipc.c 		      void *p, size_t sz)
p                 284 sound/soc/sof/intel/hda-ipc.c 		sof_mailbox_read(sdev, sdev->dsp_box.offset, p, sz);
p                 296 sound/soc/sof/intel/hda-ipc.c 					 p, sz);
p                 514 sound/soc/sof/intel/hda.h 		      void *p, size_t sz);
p                  30 sound/soc/sof/intel/intel-ipc.c 			void *p, size_t sz)
p                  33 sound/soc/sof/intel/intel-ipc.c 		sof_mailbox_read(sdev, sdev->dsp_box.offset, p, sz);
p                  39 sound/soc/sof/intel/intel-ipc.c 			sof_mailbox_read(sdev, stream->posn_offset, p, sz);
p                 359 sound/soc/sof/ops.h 					void *p, size_t sz)
p                 361 sound/soc/sof/ops.h 	sof_ops(sdev)->ipc_msg_data(sdev, substream, p, sz);
p                 162 sound/soc/sof/sof-priv.h 			     void *p, size_t sz); /* mandatory */
p                 658 sound/soc/sof/sof-priv.h 			void *p, size_t sz);
p                 195 sound/soc/sof/topology.c static inline int get_tlv_data(const int *p, int tlv[TLV_ITEMS])
p                 198 sound/soc/sof/topology.c 	if ((int)p[SNDRV_CTL_TLVO_TYPE] != SNDRV_CTL_TLVT_DB_SCALE)
p                 202 sound/soc/sof/topology.c 	tlv[TLV_MIN] = (int)p[SNDRV_CTL_TLVO_DB_SCALE_MIN] / 100;
p                 205 sound/soc/sof/topology.c 	tlv[TLV_STEP] = (int)(p[SNDRV_CTL_TLVO_DB_SCALE_MUTE_AND_STEP] &
p                 209 sound/soc/sof/topology.c 	if ((p[SNDRV_CTL_TLVO_DB_SCALE_MUTE_AND_STEP] &
p                 477 sound/soc/sof/topology.c 	if (get_tlv_data(kc->tlv.p, tlv) < 0) {
p                1222 sound/soc/sti/uniperif.h #define UNIPERIF_TYPE_IS_HDMI(p) \
p                1223 sound/soc/sti/uniperif.h 	((p)->type == SND_ST_UNIPERIF_TYPE_HDMI)
p                1224 sound/soc/sti/uniperif.h #define UNIPERIF_TYPE_IS_PCM(p) \
p                1225 sound/soc/sti/uniperif.h 	((p)->type == SND_ST_UNIPERIF_TYPE_PCM)
p                1226 sound/soc/sti/uniperif.h #define UNIPERIF_TYPE_IS_SPDIF(p) \
p                1227 sound/soc/sti/uniperif.h 	((p)->type == SND_ST_UNIPERIF_TYPE_SPDIF)
p                1228 sound/soc/sti/uniperif.h #define UNIPERIF_TYPE_IS_IEC958(p) \
p                1229 sound/soc/sti/uniperif.h 	(UNIPERIF_TYPE_IS_HDMI(p) || \
p                1230 sound/soc/sti/uniperif.h 		UNIPERIF_TYPE_IS_SPDIF(p))
p                1231 sound/soc/sti/uniperif.h #define UNIPERIF_TYPE_IS_TDM(p) \
p                1232 sound/soc/sti/uniperif.h 	((p)->type == SND_ST_UNIPERIF_TYPE_TDM)
p                 453 sound/soc/stm/stm32_sai_sub.c 	char *mclk_name, *p, *s = (char *)pname;
p                 469 sound/soc/stm/stm32_sai_sub.c 	p = mclk_name;
p                 471 sound/soc/stm/stm32_sai_sub.c 		*p++ = *s++;
p                 474 sound/soc/stm/stm32_sai_sub.c 	STM_SAI_IS_SUB_A(sai) ? strcat(p, "a_mclk") : strcat(p, "b_mclk");
p                  67 sound/soc/uniphier/aio-dma.c static irqreturn_t aiodma_irq(int irq, void *p)
p                  69 sound/soc/uniphier/aio-dma.c 	struct platform_device *pdev = p;
p                 134 sound/soc/xtensa/xtfpga-i2s.c 	const u##sample_bits (*p)[channels] = \
p                 139 sound/soc/xtensa/xtfpga-i2s.c 		iowrite32(p[tx_ptr][0], \
p                 141 sound/soc/xtensa/xtfpga-i2s.c 		iowrite32(p[tx_ptr][channels - 1], \
p                 372 sound/soc/xtensa/xtfpga-i2s.c 	void *p;
p                 375 sound/soc/xtensa/xtfpga-i2s.c 	p = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
p                 376 sound/soc/xtensa/xtfpga-i2s.c 	runtime->private_data = p;
p                 219 sound/sound_core.c 		struct sound_unit *p=*list;
p                 220 sound/sound_core.c 		if(p->unit_minor==unit)
p                 222 sound/sound_core.c 			*list=p->next;
p                 223 sound/sound_core.c 			return p;
p                 225 sound/sound_core.c 		list=&(p->next);
p                 300 sound/sound_core.c 	struct sound_unit *p;
p                 303 sound/sound_core.c 	p = __sound_remove_unit(list, unit);
p                 305 sound/sound_core.c 	if (p) {
p                 307 sound/sound_core.c 			__unregister_chrdev(SOUND_MAJOR, p->unit_minor, 1,
p                 308 sound/sound_core.c 					    p->name);
p                 309 sound/sound_core.c 		device_destroy(sound_class, MKDEV(SOUND_MAJOR, p->unit_minor));
p                 310 sound/sound_core.c 		kfree(p);
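
sound_core.c walks its hand-rolled unit list through a struct sound_unit **list cursor, so the matching node can be unlinked without special-casing the head. A standalone sketch of that unlink-by-indirection idiom:

#include <stdio.h>
#include <stdlib.h>

struct unit {
	int minor;
	struct unit *next;
};

/* unlink and return the node with the given minor, or NULL if absent */
static struct unit *remove_unit(struct unit **list, int minor)
{
	while (*list) {
		struct unit *p = *list;
		if (p->minor == minor) {
			*list = p->next;  /* works for head and interior nodes alike */
			return p;
		}
		list = &p->next;          /* advance the cursor to the next link */
	}
	return NULL;
}

int main(void)
{
	struct unit c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct unit *head = &a;
	struct unit *gone = remove_unit(&head, 2);

	printf("removed %d, head now %d -> %d\n",
	       gone ? gone->minor : -1, head->minor, head->next->minor);
	return 0;
}
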
p                1085 sound/sparc/amd7930.c 	struct snd_amd7930 *p = amd7930_list;
p                1087 sound/sparc/amd7930.c 	while (p != NULL) {
p                1088 sound/sparc/amd7930.c 		struct snd_amd7930 *next = p->next;
p                1090 sound/sparc/amd7930.c 		snd_card_free(p->card);
p                1092 sound/sparc/amd7930.c 		p = next;
p                1884 sound/sparc/cs4231.c static void snd_cs4231_ebus_play_callback(struct ebus_dma_info *p, int event,
p                1892 sound/sparc/cs4231.c static void snd_cs4231_ebus_capture_callback(struct ebus_dma_info *p,
p                 266 sound/synth/emux/emux_effect.c snd_emux_create_effect(struct snd_emux_port *p)
p                 269 sound/synth/emux/emux_effect.c 	p->effect = kcalloc(p->chset.max_channels,
p                 271 sound/synth/emux/emux_effect.c 	if (p->effect) {
p                 272 sound/synth/emux/emux_effect.c 		for (i = 0; i < p->chset.max_channels; i++)
p                 273 sound/synth/emux/emux_effect.c 			p->chset.channels[i].private = p->effect + i;
p                 275 sound/synth/emux/emux_effect.c 		for (i = 0; i < p->chset.max_channels; i++)
p                 276 sound/synth/emux/emux_effect.c 			p->chset.channels[i].private = NULL;
p                 281 sound/synth/emux/emux_effect.c snd_emux_delete_effect(struct snd_emux_port *p)
p                 283 sound/synth/emux/emux_effect.c 	kfree(p->effect);
p                 284 sound/synth/emux/emux_effect.c 	p->effect = NULL;
p                 288 sound/synth/emux/emux_effect.c snd_emux_clear_effect(struct snd_emux_port *p)
p                 290 sound/synth/emux/emux_effect.c 	if (p->effect) {
p                 291 sound/synth/emux/emux_effect.c 		memset(p->effect, 0, sizeof(struct snd_emux_effect_table) *
p                 292 sound/synth/emux/emux_effect.c 		       p->chset.max_channels);
p                 273 sound/synth/emux/emux_nrpn.c snd_emux_nrpn(void *p, struct snd_midi_channel *chan,
p                 278 sound/synth/emux/emux_nrpn.c 	port = p;
p                 362 sound/synth/emux/emux_nrpn.c snd_emux_sysex(void *p, unsigned char *buf, int len, int parsed,
p                 368 sound/synth/emux/emux_nrpn.c 	port = p;
p                  90 sound/synth/emux/emux_oss.c #define SF_CLIENT_NO(p)	((p) + 0x1000)
p                  99 sound/synth/emux/emux_oss.c 	struct snd_emux_port *p;
p                 115 sound/synth/emux/emux_oss.c 	p = snd_emux_create_port(emu, tmpname, 32,
p                 117 sound/synth/emux/emux_oss.c 	if (p == NULL) {
p                 124 sound/synth/emux/emux_oss.c 	arg->private_data = p;
p                 125 sound/synth/emux/emux_oss.c 	arg->addr.client = p->chset.client;
p                 126 sound/synth/emux/emux_oss.c 	arg->addr.port = p->chset.port;
p                 127 sound/synth/emux/emux_oss.c 	p->oss_arg = arg;
p                 129 sound/synth/emux/emux_oss.c 	reset_port_mode(p, arg->seq_mode);
p                 131 sound/synth/emux/emux_oss.c 	snd_emux_reset_port(p);
p                 165 sound/synth/emux/emux_oss.c 	struct snd_emux_port *p;
p                 169 sound/synth/emux/emux_oss.c 	p = arg->private_data;
p                 170 sound/synth/emux/emux_oss.c 	if (snd_BUG_ON(!p))
p                 173 sound/synth/emux/emux_oss.c 	emu = p->emu;
p                 177 sound/synth/emux/emux_oss.c 	snd_emux_sounds_off_all(p);
p                 178 sound/synth/emux/emux_oss.c 	snd_soundfont_close_check(emu->sflist, SF_CLIENT_NO(p->chset.port));
p                 179 sound/synth/emux/emux_oss.c 	snd_seq_event_port_detach(p->chset.client, p->chset.port);
p                 194 sound/synth/emux/emux_oss.c 	struct snd_emux_port *p;
p                 199 sound/synth/emux/emux_oss.c 	p = arg->private_data;
p                 200 sound/synth/emux/emux_oss.c 	if (snd_BUG_ON(!p))
p                 203 sound/synth/emux/emux_oss.c 	emu = p->emu;
p                 209 sound/synth/emux/emux_oss.c 						 SF_CLIENT_NO(p->chset.port));
p                 218 sound/synth/emux/emux_oss.c 			rc = snd_soundfont_load(emu->sflist, buf, count, SF_CLIENT_NO(p->chset.port));
p                 237 sound/synth/emux/emux_oss.c 	struct snd_emux_port *p;
p                 242 sound/synth/emux/emux_oss.c 	p = arg->private_data;
p                 243 sound/synth/emux/emux_oss.c 	if (snd_BUG_ON(!p))
p                 246 sound/synth/emux/emux_oss.c 	emu = p->emu;
p                 271 sound/synth/emux/emux_oss.c 	struct snd_emux_port *p;
p                 275 sound/synth/emux/emux_oss.c 	p = arg->private_data;
p                 276 sound/synth/emux/emux_oss.c 	if (snd_BUG_ON(!p))
p                 278 sound/synth/emux/emux_oss.c 	snd_emux_reset_port(p);
p                 291 sound/synth/emux/emux_oss.c 	struct snd_emux_port *p;
p                 294 sound/synth/emux/emux_oss.c 	p = private_data;
p                 295 sound/synth/emux/emux_oss.c 	if (snd_BUG_ON(!p))
p                 297 sound/synth/emux/emux_oss.c 	emu = p->emu;
p                 309 sound/synth/emux/emux_oss.c 		emuspec_control(emu, p, cmd, data, atomic, hop);
p                 311 sound/synth/emux/emux_oss.c 		gusspec_control(emu, p, cmd, data, atomic, hop);
p                  15 sound/synth/emux/emux_seq.c static void snd_emux_init_port(struct snd_emux_port *p);
p                  84 sound/synth/emux/emux_seq.c 		struct snd_emux_port *p;
p                  87 sound/synth/emux/emux_seq.c 		p = snd_emux_create_port(emu, tmpname, MIDI_CHANNELS,
p                  89 sound/synth/emux/emux_seq.c 		if (!p) {
p                  94 sound/synth/emux/emux_seq.c 		p->port_mode =  SNDRV_EMUX_PORT_MODE_MIDI;
p                  95 sound/synth/emux/emux_seq.c 		snd_emux_init_port(p);
p                  96 sound/synth/emux/emux_seq.c 		emu->ports[i] = p->chset.port;
p                  97 sound/synth/emux/emux_seq.c 		emu->portptrs[i] = p;
p                 130 sound/synth/emux/emux_seq.c 	struct snd_emux_port *p;
p                 134 sound/synth/emux/emux_seq.c 	p = kzalloc(sizeof(*p), GFP_KERNEL);
p                 135 sound/synth/emux/emux_seq.c 	if (!p)
p                 138 sound/synth/emux/emux_seq.c 	p->chset.channels = kcalloc(max_channels, sizeof(*p->chset.channels),
p                 140 sound/synth/emux/emux_seq.c 	if (!p->chset.channels) {
p                 141 sound/synth/emux/emux_seq.c 		kfree(p);
p                 145 sound/synth/emux/emux_seq.c 		p->chset.channels[i].number = i;
p                 146 sound/synth/emux/emux_seq.c 	p->chset.private_data = p;
p                 147 sound/synth/emux/emux_seq.c 	p->chset.max_channels = max_channels;
p                 148 sound/synth/emux/emux_seq.c 	p->emu = emu;
p                 149 sound/synth/emux/emux_seq.c 	p->chset.client = emu->client;
p                 151 sound/synth/emux/emux_seq.c 	snd_emux_create_effect(p);
p                 154 sound/synth/emux/emux_seq.c 	callback->private_data = p;
p                 164 sound/synth/emux/emux_seq.c 	p->chset.port = snd_seq_event_port_attach(emu->client, callback,
p                 168 sound/synth/emux/emux_seq.c 	return p;
p                 178 sound/synth/emux/emux_seq.c 	struct snd_emux_port *p;
p                 180 sound/synth/emux/emux_seq.c 	p = private_data;
p                 181 sound/synth/emux/emux_seq.c 	if (p) {
p                 183 sound/synth/emux/emux_seq.c 		snd_emux_delete_effect(p);
p                 185 sound/synth/emux/emux_seq.c 		kfree(p->chset.channels);
p                 186 sound/synth/emux/emux_seq.c 		kfree(p);
p                 197 sound/synth/emux/emux_seq.c snd_emux_init_port(struct snd_emux_port *p)
p                 199 sound/synth/emux/emux_seq.c 	p->drum_flags = DEFAULT_DRUM_FLAGS;
p                 200 sound/synth/emux/emux_seq.c 	p->volume_atten = 0;
p                 202 sound/synth/emux/emux_seq.c 	snd_emux_reset_port(p);
p                 308 sound/synth/emux/emux_seq.c 	struct snd_emux_port *p;
p                 311 sound/synth/emux/emux_seq.c 	p = private_data;
p                 312 sound/synth/emux/emux_seq.c 	if (snd_BUG_ON(!p))
p                 314 sound/synth/emux/emux_seq.c 	emu = p->emu;
p                 319 sound/synth/emux/emux_seq.c 	snd_emux_init_port(p);
p                 331 sound/synth/emux/emux_seq.c 	struct snd_emux_port *p;
p                 334 sound/synth/emux/emux_seq.c 	p = private_data;
p                 335 sound/synth/emux/emux_seq.c 	if (snd_BUG_ON(!p))
p                 337 sound/synth/emux/emux_seq.c 	emu = p->emu;
p                 342 sound/synth/emux/emux_seq.c 	snd_emux_sounds_off_all(p);
p                  46 sound/synth/emux/emux_synth.c snd_emux_note_on(void *p, int note, int vel, struct snd_midi_channel *chan)
p                  55 sound/synth/emux/emux_synth.c 	port = p;
p                 145 sound/synth/emux/emux_synth.c snd_emux_note_off(void *p, int note, int vel, struct snd_midi_channel *chan)
p                 153 sound/synth/emux/emux_synth.c 	port = p;
p                 222 sound/synth/emux/emux_synth.c snd_emux_key_press(void *p, int note, int vel, struct snd_midi_channel *chan)
p                 230 sound/synth/emux/emux_synth.c 	port = p;
p                 311 sound/synth/emux/emux_synth.c snd_emux_control(void *p, int type, struct snd_midi_channel *chan)
p                 315 sound/synth/emux/emux_synth.c 	port = p;
p                 385 sound/synth/emux/emux_synth.c snd_emux_terminate_note(void *p, int note, struct snd_midi_channel *chan)
p                 390 sound/synth/emux/emux_synth.c 	port = p;
p                  34 sound/synth/emux/emux_voice.h void snd_emux_note_on(void *p, int note, int vel, struct snd_midi_channel *chan);
p                  35 sound/synth/emux/emux_voice.h void snd_emux_note_off(void *p, int note, int vel, struct snd_midi_channel *chan);
p                  36 sound/synth/emux/emux_voice.h void snd_emux_key_press(void *p, int note, int vel, struct snd_midi_channel *chan);
p                  37 sound/synth/emux/emux_voice.h void snd_emux_terminate_note(void *p, int note, struct snd_midi_channel *chan);
p                  38 sound/synth/emux/emux_voice.h void snd_emux_control(void *p, int type, struct snd_midi_channel *chan);
p                  49 sound/synth/emux/emux_voice.h void snd_emux_create_effect(struct snd_emux_port *p);
p                  50 sound/synth/emux/emux_voice.h void snd_emux_delete_effect(struct snd_emux_port *p);
p                  51 sound/synth/emux/emux_voice.h void snd_emux_clear_effect(struct snd_emux_port *p);
p                 480 sound/synth/emux/soundfont.c 	struct snd_sf_zone *prev, *next, *p;
p                 484 sound/synth/emux/soundfont.c 	for (p = sf->zones; p; p = next) {
p                 485 sound/synth/emux/soundfont.c 		next = p->next;
p                 486 sound/synth/emux/soundfont.c 		if (! p->mapped &&
p                 487 sound/synth/emux/soundfont.c 		    p->bank == bank && p->instr == instr) {
p                 494 sound/synth/emux/soundfont.c 			kfree(p);
p                 496 sound/synth/emux/soundfont.c 			prev = p;
p                 675 sound/synth/emux/soundfont.c 	struct snd_sf_sample *p;
p                 680 sound/synth/emux/soundfont.c 	for (p = sf->samples; p; p = p->next) {
p                 681 sound/synth/emux/soundfont.c 		if (p->v.sample == sample_id)
p                 682 sound/synth/emux/soundfont.c 			return p;
p                 843 sound/synth/emux/soundfont.c 	int r, p, t;
p                 845 sound/synth/emux/soundfont.c 	p = rate & 0x3f;
p                 846 sound/synth/emux/soundfont.c 	if (!p)
p                 847 sound/synth/emux/soundfont.c 		p = 1;
p                 854 sound/synth/emux/soundfont.c 	return (t * 10) / (p * 441);
p                1171 sound/synth/emux/soundfont.c 		struct snd_sf_zone *p;
p                1173 sound/synth/emux/soundfont.c 		for (p = zone; p; p = p->next_zone) {
p                1174 sound/synth/emux/soundfont.c 			if (p->counter > cur->counter)
p                1198 sound/synth/emux/soundfont.c 	struct snd_sf_zone *p;
p                1202 sound/synth/emux/soundfont.c 	for (p = sflist->presets[index]; p; p = p->next_instr) {
p                1203 sound/synth/emux/soundfont.c 		while (p->next_instr == zp) {
p                1204 sound/synth/emux/soundfont.c 			p->next_instr = zp->next_instr;
p                  19 sound/synth/util_mem.c #define get_memblk(p)	list_entry(p, struct snd_util_memblk, list)
p                  44 sound/synth/util_mem.c 	struct list_head *p;
p                  49 sound/synth/util_mem.c 	while ((p = hdr->block.next) != &hdr->block) {
p                  50 sound/synth/util_mem.c 		list_del(p);
p                  51 sound/synth/util_mem.c 		kfree(get_memblk(p));
p                  64 sound/synth/util_mem.c 	struct list_head *p;
p                  78 sound/synth/util_mem.c 	list_for_each(p, &hdr->block) {
p                  79 sound/synth/util_mem.c 		blk = get_memblk(p);
p                  88 sound/synth/util_mem.c 	return __snd_util_memblk_new(hdr, units, p->prev);
p                 110 sound/synth/util_mem.c 		struct snd_util_memblk *p = get_memblk(prev);
p                 111 sound/synth/util_mem.c 		blk->offset = p->offset + p->size;
p                 411 sound/usb/6fire/control.c 		.tlv = { .p = tlv_output }
p                 423 sound/usb/6fire/control.c 		.tlv = { .p = tlv_output }
p                 435 sound/usb/6fire/control.c 		.tlv = { .p = tlv_output }
p                 511 sound/usb/6fire/control.c 		.tlv = { .p = tlv_input }
p                 531 sound/usb/card.c 	const struct usb_device_id *p;
p                 533 sound/usb/card.c 	for (p = usb_audio_ids; p->match_flags; p++) {
p                 535 sound/usb/card.c 		if ((p->match_flags & USB_DEVICE_ID_MATCH_DEVICE) ==
p                 537 sound/usb/card.c 		    p->idVendor == USB_ID_VENDOR(id) &&
p                 538 sound/usb/card.c 		    p->idProduct == USB_ID_PRODUCT(id))
p                 539 sound/usb/card.c 			return (const struct snd_usb_audio_quirk *)p->driver_info;
p                 700 sound/usb/card.c 	struct list_head *p;
p                 728 sound/usb/card.c 		list_for_each(p, &chip->midi_list) {
p                 729 sound/usb/card.c 			snd_usbmidi_disconnect(p);
p                 808 sound/usb/card.c 	struct list_head *p;
p                 819 sound/usb/card.c 		list_for_each(p, &chip->midi_list)
p                 820 sound/usb/card.c 			snd_usbmidi_suspend(p);
p                 838 sound/usb/card.c 	struct list_head *p;
p                 864 sound/usb/card.c 	list_for_each(p, &chip->midi_list) {
p                 865 sound/usb/card.c 		snd_usbmidi_resume(p);
p                  38 sound/usb/clock.c static bool validate_clock_source_v2(void *p, int id)
p                  40 sound/usb/clock.c 	struct uac_clock_source_descriptor *cs = p;
p                  44 sound/usb/clock.c static bool validate_clock_source_v3(void *p, int id)
p                  46 sound/usb/clock.c 	struct uac3_clock_source_descriptor *cs = p;
p                  50 sound/usb/clock.c static bool validate_clock_selector_v2(void *p, int id)
p                  52 sound/usb/clock.c 	struct uac_clock_selector_descriptor *cs = p;
p                  56 sound/usb/clock.c static bool validate_clock_selector_v3(void *p, int id)
p                  58 sound/usb/clock.c 	struct uac3_clock_selector_descriptor *cs = p;
p                  62 sound/usb/clock.c static bool validate_clock_multiplier_v2(void *p, int id)
p                  64 sound/usb/clock.c 	struct uac_clock_multiplier_descriptor *cs = p;
p                  68 sound/usb/clock.c static bool validate_clock_multiplier_v3(void *p, int id)
p                  70 sound/usb/clock.c 	struct uac3_clock_multiplier_descriptor *cs = p;
p                  33 sound/usb/helper.c 	u8 *p, *end, *next;
p                  35 sound/usb/helper.c 	p = descstart;
p                  36 sound/usb/helper.c 	end = p + desclen;
p                  37 sound/usb/helper.c 	for (; p < end;) {
p                  38 sound/usb/helper.c 		if (p[0] < 2)
p                  40 sound/usb/helper.c 		next = p + p[0];
p                  43 sound/usb/helper.c 		if (p[1] == dtype && (!after || (void *)p > after)) {
p                  44 sound/usb/helper.c 			return p;
p                  46 sound/usb/helper.c 		p = next;
p                  56 sound/usb/helper.c 	unsigned char *p = after;
p                  58 sound/usb/helper.c 	while ((p = snd_usb_find_desc(buffer, buflen, p,
p                  60 sound/usb/helper.c 		if (p[0] >= 3 && p[2] == dsubtype)
p                  61 sound/usb/helper.c 			return p;
p                  35 sound/usb/helper.h bool snd_usb_validate_audio_desc(void *p, int protocol);
p                  36 sound/usb/helper.h bool snd_usb_validate_midi_desc(void *p);
p                  30 sound/usb/line6/playback.c 		__le16 *p, *buf_end;
p                  32 sound/usb/line6/playback.c 		p = (__le16 *)urb_out->transfer_buffer;
p                  33 sound/usb/line6/playback.c 		buf_end = p + urb_out->transfer_buffer_length / sizeof(*p);
p                  35 sound/usb/line6/playback.c 		for (; p < buf_end; ++p) {
p                  36 sound/usb/line6/playback.c 			short pv = le16_to_cpu(*p);
p                  39 sound/usb/line6/playback.c 			*p = cpu_to_le16(pv);
p                  43 sound/usb/line6/playback.c 		unsigned char *p, *buf_end;
p                  45 sound/usb/line6/playback.c 		p = (unsigned char *)urb_out->transfer_buffer;
p                  46 sound/usb/line6/playback.c 		buf_end = p + urb_out->transfer_buffer_length;
p                  48 sound/usb/line6/playback.c 		for (; p < buf_end; p += 3) {
p                  51 sound/usb/line6/playback.c 			val = p[0] + (p[1] << 8) + ((signed char)p[2] << 16);
p                  54 sound/usb/line6/playback.c 			p[0] = val;
p                  55 sound/usb/line6/playback.c 			p[1] = val >> 8;
p                  56 sound/usb/line6/playback.c 			p[2] = val >> 16;
p                 657 sound/usb/midi.c 	int p;
p                 660 sound/usb/midi.c 	for (p = 0; p < 0x10; ++p) {
p                 661 sound/usb/midi.c 		struct usbmidi_out_port *port = &ep->ports[p];
p                1485 sound/usb/midi.c void snd_usbmidi_disconnect(struct list_head *p)
p                1490 sound/usb/midi.c 	umidi = list_entry(p, struct snd_usb_midi, list);
p                2267 sound/usb/midi.c void snd_usbmidi_input_stop(struct list_head *p)
p                2272 sound/usb/midi.c 	umidi = list_entry(p, struct snd_usb_midi, list);
p                2301 sound/usb/midi.c void snd_usbmidi_input_start(struct list_head *p)
p                2306 sound/usb/midi.c 	umidi = list_entry(p, struct snd_usb_midi, list);
p                2318 sound/usb/midi.c void snd_usbmidi_suspend(struct list_head *p)
p                2322 sound/usb/midi.c 	umidi = list_entry(p, struct snd_usb_midi, list);
p                2324 sound/usb/midi.c 	snd_usbmidi_input_stop(p);
p                2332 sound/usb/midi.c void snd_usbmidi_resume(struct list_head *p)
p                2336 sound/usb/midi.c 	umidi = list_entry(p, struct snd_usb_midi, list);
p                2338 sound/usb/midi.c 	snd_usbmidi_input_start(p);
p                  57 sound/usb/midi.h void snd_usbmidi_input_stop(struct list_head *p);
p                  58 sound/usb/midi.h void snd_usbmidi_input_start(struct list_head *p);
p                  59 sound/usb/midi.h void snd_usbmidi_disconnect(struct list_head *p);
p                  60 sound/usb/midi.h void snd_usbmidi_suspend(struct list_head *p);
p                  61 sound/usb/midi.h void snd_usbmidi_resume(struct list_head *p);
p                 101 sound/usb/mixer.c find_map(const struct usbmix_name_map *p, int unitid, int control)
p                 103 sound/usb/mixer.c 	if (!p)
p                 106 sound/usb/mixer.c 	for (; p->id; p++) {
p                 107 sound/usb/mixer.c 		if (p->id == unitid &&
p                 108 sound/usb/mixer.c 		    (!control || !p->control || control == p->control))
p                 109 sound/usb/mixer.c 			return p;
p                 116 sound/usb/mixer.c check_mapped_name(const struct usbmix_name_map *p, char *buf, int buflen)
p                 118 sound/usb/mixer.c 	if (!p || !p->name)
p                 122 sound/usb/mixer.c 	return strlcpy(buf, p->name, buflen);
p                 131 sound/usb/mixer.c check_ignored_ctl(const struct usbmix_name_map *p)
p                 133 sound/usb/mixer.c 	if (!p || p->name || p->dB)
p                 139 sound/usb/mixer.c static inline void check_mapped_dB(const struct usbmix_name_map *p,
p                 142 sound/usb/mixer.c 	if (p && p->dB) {
p                 143 sound/usb/mixer.c 		cval->dBmin = p->dB->min;
p                 144 sound/usb/mixer.c 		cval->dBmax = p->dB->max;
p                 153 sound/usb/mixer.c 	const struct usbmix_selector_map *p;
p                 157 sound/usb/mixer.c 	for (p = state->selector_map; p->id; p++) {
p                 158 sound/usb/mixer.c 		if (p->id == unitid && index < p->count)
p                 159 sound/usb/mixer.c 			return strlcpy(buf, p->names[index], buflen);
p                3094 sound/usb/mixer.c 	void *p;
p                3113 sound/usb/mixer.c 	p = NULL;
p                3114 sound/usb/mixer.c 	while ((p = snd_usb_find_csint_desc(mixer->hostif->extra,
p                3116 sound/usb/mixer.c 					    p, UAC_OUTPUT_TERMINAL)) != NULL) {
p                3117 sound/usb/mixer.c 		if (!snd_usb_validate_audio_desc(p, mixer->protocol))
p                3121 sound/usb/mixer.c 			struct uac1_output_terminal_descriptor *desc = p;
p                3132 sound/usb/mixer.c 			struct uac2_output_terminal_descriptor *desc = p;
p                3158 sound/usb/mixer.c 			struct uac3_output_terminal_descriptor *desc = p;
p                2349 sound/usb/mixer_quirks.c 		kctl->tlv.p = scale;
p                 487 sound/usb/mixer_scarlett.c 	.tlv = { .p = db_scale_scarlett_gain }
p                 499 sound/usb/mixer_scarlett.c 	.tlv = { .p = db_scale_scarlett_gain }
p                1099 sound/usb/mixer_scarlett_gen2.c 	.tlv = { .p = db_scale_scarlett2_gain }
p                1111 sound/usb/mixer_scarlett_gen2.c 	.tlv = { .p = db_scale_scarlett2_gain }
p                1534 sound/usb/mixer_scarlett_gen2.c 	.tlv = { .p = db_scale_scarlett2_mixer }
p                  21 sound/usb/power.c 	void *p;
p                  27 sound/usb/power.c 	p = NULL;
p                  28 sound/usb/power.c 	while ((p = snd_usb_find_csint_desc(ctrl_iface->extra,
p                  30 sound/usb/power.c 					    p, UAC3_POWER_DOMAIN)) != NULL) {
p                  31 sound/usb/power.c 		struct uac3_power_domain_descriptor *pd_desc = p;
p                  34 sound/usb/power.c 		if (!snd_usb_validate_audio_desc(p, UAC_VERSION_3))
p                 329 sound/usb/stream.c 	void *p = cluster;
p                 341 sound/usb/stream.c 	p += sizeof(struct uac3_cluster_header_descriptor);
p                 343 sound/usb/stream.c 	while (((p - (void *)cluster) < len) && (c < channels)) {
p                 344 sound/usb/stream.c 		struct uac3_cluster_segment_descriptor *cs_desc = p;
p                 352 sound/usb/stream.c 			struct uac3_cluster_information_segment_descriptor *is = p;
p                 455 sound/usb/stream.c 		p += cs_len;
p                 291 sound/usb/usx2y/us122l.c 	struct list_head *p;
p                 292 sound/usb/usx2y/us122l.c 	list_for_each(p, &us122l->midi_list)
p                 293 sound/usb/usx2y/us122l.c 		snd_usbmidi_input_stop(p);
p                 320 sound/usb/usx2y/us122l.c 	struct list_head *p;
p                 358 sound/usb/usx2y/us122l.c 	list_for_each(p, &us122l->midi_list)
p                 359 sound/usb/usx2y/us122l.c 		snd_usbmidi_input_start(p);
p                 496 sound/usb/usx2y/us122l.c 		struct list_head *p;
p                 497 sound/usb/usx2y/us122l.c 		list_for_each(p, &us122l->midi_list)
p                 498 sound/usb/usx2y/us122l.c 			snd_usbmidi_disconnect(p);
p                 617 sound/usb/usx2y/us122l.c 	struct list_head *p;
p                 631 sound/usb/usx2y/us122l.c 	list_for_each(p, &us122l->midi_list) {
p                 632 sound/usb/usx2y/us122l.c 		snd_usbmidi_disconnect(p);
p                 649 sound/usb/usx2y/us122l.c 	struct list_head *p;
p                 660 sound/usb/usx2y/us122l.c 	list_for_each(p, &us122l->midi_list)
p                 661 sound/usb/usx2y/us122l.c 		snd_usbmidi_input_stop(p);
p                 674 sound/usb/usx2y/us122l.c 	struct list_head *p;
p                 713 sound/usb/usx2y/us122l.c 	list_for_each(p, &us122l->midi_list)
p                 714 sound/usb/usx2y/us122l.c 		snd_usbmidi_input_start(p);
p                  50 sound/usb/usx2y/usb_stream.c 	int u, p;
p                  74 sound/usb/usx2y/usb_stream.c 		for (p = 1; p < sk->n_o_ps; ++p) {
p                  75 sound/usb/usx2y/usb_stream.c 			desc[p].offset = desc[p - 1].offset + maxpacket;
p                  76 sound/usb/usx2y/usb_stream.c 			desc[p].length = maxpacket;
p                 280 sound/usb/usx2y/usb_stream.c 	int p = 0, lb = 0, l = 0;
p                 285 sound/usb/usx2y/usb_stream.c 	for (; s->sync_packet < 0; ++p, ++s->sync_packet) {
p                 291 sound/usb/usx2y/usb_stream.c 		od[p].length = l;
p                 292 sound/usb/usx2y/usb_stream.c 		od[p].offset = lb;
p                 297 sound/usb/usx2y/usb_stream.c 	     s->sync_packet < inurb->number_of_packets && p < sk->n_o_ps;
p                 298 sound/usb/usx2y/usb_stream.c 	     ++p, ++s->sync_packet) {
p                 304 sound/usb/usx2y/usb_stream.c 		od[p].length = l;
p                 305 sound/usb/usx2y/usb_stream.c 		od[p].offset = lb;
p                 314 sound/usb/usx2y/usb_stream.c 			   s->sync_packet, p, inurb->number_of_packets,
p                 326 sound/usb/usx2y/usb_stream.c 	io->number_of_packets = p;
p                 338 sound/usb/usx2y/usb_stream.c 	int p;
p                 343 sound/usb/usx2y/usb_stream.c 	for (p = 0; p < iu->number_of_packets - 1; ++p)
p                 344 sound/usb/usx2y/usb_stream.c 		id[p + 1].offset = id[p].offset + id[p].length;
p                 381 sound/usb/usx2y/usb_stream.c 	int il, ol, l, p;
p                 392 sound/usb/usx2y/usb_stream.c 		p = s->insplit_pack;
p                 396 sound/usb/usx2y/usb_stream.c 	for (; p < iu->number_of_packets && l < s->period_size; ++p) {
p                 397 sound/usb/usx2y/usb_stream.c 		i = iu->transfer_buffer + id[p].offset;
p                 398 sound/usb/usx2y/usb_stream.c 		il = id[p].actual_length;
p                 425 sound/usb/usx2y/usb_stream.c 	p = 0;
p                 439 sound/usb/usx2y/usb_stream.c 	int l, p;
p                 448 sound/usb/usx2y/usb_stream.c 	for (p = 0; p < inurb->number_of_packets; ++p) {
p                 450 sound/usb/usx2y/usb_stream.c 		l = id[p].actual_length;
p                 451 sound/usb/usx2y/usb_stream.c 		if (unlikely(l == 0 || id[p].status)) {
p                 453 sound/usb/usx2y/usb_stream.c 				   id[p].status);
p                 462 sound/usb/usx2y/usb_stream.c 			id[p].offset + (inurb->transfer_buffer - (void *)s);
p                 522 sound/usb/usx2y/usb_stream.c 		int l, p, max_diff, max_diff_0;
p                 537 sound/usb/usx2y/usb_stream.c 		for (p = 0; p < inurb->number_of_packets; ++p) {
p                 539 sound/usb/usx2y/usb_stream.c 			l = inurb->iso_frame_desc[p].actual_length;
p                 602 sound/usb/usx2y/usb_stream.c 	int p;
p                 610 sound/usb/usx2y/usb_stream.c 	for (p = 0; p < urb->number_of_packets; ++p) {
p                 611 sound/usb/usx2y/usb_stream.c 		int l = id[p].actual_length;
p                 622 sound/usb/usx2y/usb_stream.c 			id[p].offset + (urb->transfer_buffer - (void *)s);
p                 439 sound/usb/usx2y/usbusx2y.c 		struct list_head *p;
p                 445 sound/usb/usx2y/usbusx2y.c 		list_for_each(p, &usX2Y->midi_list) {
p                 446 sound/usb/usx2y/usbusx2y.c 			snd_usbmidi_disconnect(p);
p                 708 sound/usb/usx2y/usbusx2yaudio.c 	struct list_head* p;
p                 716 sound/usb/usx2y/usbusx2yaudio.c 	list_for_each(p, &usX2Y->midi_list) {
p                 717 sound/usb/usx2y/usbusx2yaudio.c 		snd_usbmidi_input_stop(p);
p                 726 sound/usb/usx2y/usbusx2yaudio.c 	list_for_each(p, &usX2Y->midi_list) {
p                 727 sound/usb/usx2y/usbusx2yaudio.c 		snd_usbmidi_input_start(p);
p                 393 sound/usb/usx2y/usx2yhwdeppcm.c 	int	p, u, err,
p                 402 sound/usb/usx2y/usx2yhwdeppcm.c 	for (p = 0; 3 >= (stream + p); p += 2) {
p                 403 sound/usb/usx2y/usx2yhwdeppcm.c 		struct snd_usX2Y_substream *subs = usX2Y->subs[stream + p];
p                 411 sound/usb/usx2y/usx2yhwdeppcm.c 	for (p = 0; p < 4; p++) {
p                 412 sound/usb/usx2y/usx2yhwdeppcm.c 		struct snd_usX2Y_substream *subs = usX2Y->subs[p];
p                 420 sound/usb/usx2y/usx2yhwdeppcm.c 		for (p = 0; 3 >= (stream + p); p += 2) {
p                 421 sound/usb/usx2y/usx2yhwdeppcm.c 			struct snd_usX2Y_substream *subs = usX2Y->subs[stream + p];
p                  18 sound/usb/validate.c 	bool (*func)(const void *p, const struct usb_desc_validator *v);
p                  25 sound/usb/validate.c static bool validate_uac1_header(const void *p,
p                  28 sound/usb/validate.c 	const struct uac1_ac_header_descriptor *d = p;
p                  35 sound/usb/validate.c static bool validate_mixer_unit(const void *p,
p                  38 sound/usb/validate.c 	const struct uac_mixer_unit_descriptor *d = p;
p                  69 sound/usb/validate.c static bool validate_processing_unit(const void *p,
p                  72 sound/usb/validate.c 	const struct uac_processing_unit_descriptor *d = p;
p                  73 sound/usb/validate.c 	const unsigned char *hdr = p;
p                 167 sound/usb/validate.c static bool validate_selector_unit(const void *p,
p                 170 sound/usb/validate.c 	const struct uac_selector_unit_descriptor *d = p;
p                 191 sound/usb/validate.c static bool validate_uac1_feature_unit(const void *p,
p                 194 sound/usb/validate.c 	const struct uac_feature_unit_descriptor *d = p;
p                 202 sound/usb/validate.c static bool validate_uac2_feature_unit(const void *p,
p                 205 sound/usb/validate.c 	const struct uac2_feature_unit_descriptor *d = p;
p                 213 sound/usb/validate.c static bool validate_uac3_feature_unit(const void *p,
p                 216 sound/usb/validate.c 	const struct uac3_feature_unit_descriptor *d = p;
p                 224 sound/usb/validate.c static bool validate_midi_out_jack(const void *p,
p                 227 sound/usb/validate.c 	const struct usb_midi_out_jack_descriptor *d = p;
p                 233 sound/usb/validate.c #define FIXED(p, t, s) { .protocol = (p), .type = (t), .size = sizeof(s) }
p                 234 sound/usb/validate.c #define FUNC(p, t, f) { .protocol = (p), .type = (t), .func = (f) }
p                 323 sound/usb/validate.c bool snd_usb_validate_audio_desc(void *p, int protocol)
p                 325 sound/usb/validate.c 	return validate_desc(p, protocol, audio_validators);
p                 328 sound/usb/validate.c bool snd_usb_validate_midi_desc(void *p)
p                 330 sound/usb/validate.c 	return validate_desc(p, UAC_VERSION_1, midi_validators);
p                 394 sound/x86/intel_hdmi_audio.c 	struct cea_channel_speaker_allocation *p;
p                 397 sound/x86/intel_hdmi_audio.c 		p = channel_allocations + i;
p                 398 sound/x86/intel_hdmi_audio.c 		p->channels = 0;
p                 399 sound/x86/intel_hdmi_audio.c 		p->spk_mask = 0;
p                 400 sound/x86/intel_hdmi_audio.c 		for (j = 0; j < ARRAY_SIZE(p->speakers); j++)
p                 401 sound/x86/intel_hdmi_audio.c 			if (p->speakers[j]) {
p                 402 sound/x86/intel_hdmi_audio.c 				p->channels++;
p                 403 sound/x86/intel_hdmi_audio.c 				p->spk_mask |= p->speakers[j];
p                  27 tools/arch/arm64/include/asm/barrier.h #define smp_store_release(p, v)						\
p                  29 tools/arch/arm64/include/asm/barrier.h 	union { typeof(*p) __val; char __c[1]; } __u =			\
p                  32 tools/arch/arm64/include/asm/barrier.h 	switch (sizeof(*p)) {						\
p                  35 tools/arch/arm64/include/asm/barrier.h 				: "=Q" (*p)				\
p                  41 tools/arch/arm64/include/asm/barrier.h 				: "=Q" (*p)				\
p                  47 tools/arch/arm64/include/asm/barrier.h 				: "=Q" (*p)				\
p                  53 tools/arch/arm64/include/asm/barrier.h 				: "=Q" (*p)				\
p                  64 tools/arch/arm64/include/asm/barrier.h #define smp_load_acquire(p)						\
p                  66 tools/arch/arm64/include/asm/barrier.h 	union { typeof(*p) __val; char __c[1]; } __u =			\
p                  69 tools/arch/arm64/include/asm/barrier.h 	switch (sizeof(*p)) {						\
p                  73 tools/arch/arm64/include/asm/barrier.h 			: "Q" (*p) : "memory");				\
p                  78 tools/arch/arm64/include/asm/barrier.h 			: "Q" (*p) : "memory");				\
p                  83 tools/arch/arm64/include/asm/barrier.h 			: "Q" (*p) : "memory");				\
p                  88 tools/arch/arm64/include/asm/barrier.h 			: "Q" (*p) : "memory");				\
p                  49 tools/arch/ia64/include/asm/barrier.h #define smp_store_release(p, v)			\
p                  52 tools/arch/ia64/include/asm/barrier.h 	WRITE_ONCE(*p, v);			\
p                  55 tools/arch/ia64/include/asm/barrier.h #define smp_load_acquire(p)			\
p                  57 tools/arch/ia64/include/asm/barrier.h 	typeof(*p) ___p1 = READ_ONCE(*p);	\
p                  33 tools/arch/powerpc/include/asm/barrier.h #define smp_store_release(p, v)			\
p                  36 tools/arch/powerpc/include/asm/barrier.h 	WRITE_ONCE(*p, v);			\
p                  39 tools/arch/powerpc/include/asm/barrier.h #define smp_load_acquire(p)			\
p                  41 tools/arch/powerpc/include/asm/barrier.h 	typeof(*p) ___p1 = READ_ONCE(*p);	\
p                  31 tools/arch/s390/include/asm/barrier.h #define smp_store_release(p, v)			\
p                  34 tools/arch/s390/include/asm/barrier.h 	WRITE_ONCE(*p, v);			\
p                  37 tools/arch/s390/include/asm/barrier.h #define smp_load_acquire(p)			\
p                  39 tools/arch/s390/include/asm/barrier.h 	typeof(*p) ___p1 = READ_ONCE(*p);	\
p                  43 tools/arch/sparc/include/asm/barrier_64.h #define smp_store_release(p, v)			\
p                  46 tools/arch/sparc/include/asm/barrier_64.h 	WRITE_ONCE(*p, v);			\
p                  49 tools/arch/sparc/include/asm/barrier_64.h #define smp_load_acquire(p)			\
p                  51 tools/arch/sparc/include/asm/barrier_64.h 	typeof(*p) ___p1 = READ_ONCE(*p);	\
p                  33 tools/arch/x86/include/asm/barrier.h #define smp_store_release(p, v)			\
p                  36 tools/arch/x86/include/asm/barrier.h 	WRITE_ONCE(*p, v);			\
p                  39 tools/arch/x86/include/asm/barrier.h #define smp_load_acquire(p)			\
p                  41 tools/arch/x86/include/asm/barrier.h 	typeof(*p) ___p1 = READ_ONCE(*p);	\
p                 217 tools/arch/x86/lib/insn.c 		insn_byte_t m, p;
p                 219 tools/arch/x86/lib/insn.c 		p = insn_vex_p_bits(insn);
p                 220 tools/arch/x86/lib/insn.c 		insn->attr = inat_get_avx_attribute(op, m, p);
p                 238 tools/bpf/bpftool/btf.c 		const struct btf_param *p = (const void *)(t + 1);
p                 250 tools/bpf/bpftool/btf.c 		for (i = 0; i < vlen; i++, p++) {
p                 251 tools/bpf/bpftool/btf.c 			const char *name = btf_str(btf, p->name_off);
p                 256 tools/bpf/bpftool/btf.c 				jsonw_uint_field(w, "type_id", p->type);
p                 259 tools/bpf/bpftool/btf.c 				printf("\n\t'%s' type_id=%u", name, p->type);
p                  94 tools/bpf/bpftool/xlated_dumper.c 	char buf[64], *p;
p                 101 tools/bpf/bpftool/xlated_dumper.c 	p = buf;
p                 102 tools/bpf/bpftool/xlated_dumper.c 	while (*p != '\0') {
p                 103 tools/bpf/bpftool/xlated_dumper.c 		if (*p == '\n') {
p                 104 tools/bpf/bpftool/xlated_dumper.c 			memmove(p + 3, p, strlen(buf) + 1 - (p - buf));
p                 106 tools/bpf/bpftool/xlated_dumper.c 			*p++ = '\\';
p                 107 tools/bpf/bpftool/xlated_dumper.c 			*p++ = 'l';
p                 109 tools/bpf/bpftool/xlated_dumper.c 			*p++ = '\\';
p                 110 tools/bpf/bpftool/xlated_dumper.c 		} else if (*p == '<' || *p == '>' || *p == '|' || *p == '&') {
p                 111 tools/bpf/bpftool/xlated_dumper.c 			memmove(p + 1, p, strlen(buf) + 1 - (p - buf));
p                 113 tools/bpf/bpftool/xlated_dumper.c 			*p++ = '\\';
p                 116 tools/bpf/bpftool/xlated_dumper.c 		p++;
p                   9 tools/build/feature/test-jvmti-cmlr.c 	PCStackInfo				p   __attribute__((unused));
p                  51 tools/build/fixdep.c 	char *p;
p                  62 tools/build/fixdep.c 		p = m;
p                  63 tools/build/fixdep.c 		while (p < end && *p != ' ' && *p != '\\' && *p != '\n')
p                  64 tools/build/fixdep.c 			p++;
p                  66 tools/build/fixdep.c 		is_target = (*(p-1) == ':');
p                  74 tools/build/fixdep.c 			memcpy(s, m, p-m);
p                  75 tools/build/fixdep.c 			s[p - m] = 0;
p                 109 tools/build/fixdep.c 		m = p + 1;
p                 484 tools/firewire/nosy-dump.c 	struct link_packet *p = (struct link_packet *) data;
p                 488 tools/firewire/nosy-dump.c 	t = link_transaction_lookup(p->common.source, p->common.destination,
p                 489 tools/firewire/nosy-dump.c 			p->common.tlabel);
p                 517 tools/firewire/nosy-dump.c 		if (p->common.tcode != TCODE_WRITE_QUADLET_REQUEST &&
p                 518 tools/firewire/nosy-dump.c 		    p->common.tcode != TCODE_WRITE_BLOCK_REQUEST)
p                 549 tools/firewire/nosy-dump.c 	struct link_packet *p = (struct link_packet *) data;
p                 553 tools/firewire/nosy-dump.c 	t = link_transaction_lookup(p->common.destination, p->common.source,
p                 554 tools/firewire/nosy-dump.c 			p->common.tlabel);
p                 625 tools/firewire/nosy-dump.c 		struct link_packet *p = (struct link_packet *) data;
p                 627 tools/firewire/nosy-dump.c 		switch (packet_info[p->common.tcode].type) {
p                 256 tools/firmware/ihex2fw.c 	struct ihex_binrec **p = &records;
p                 258 tools/firmware/ihex2fw.c 	while ((*p) && (!sort_records || (*p)->addr < record->addr))
p                 259 tools/firmware/ihex2fw.c 		p = &((*p)->next);
p                 261 tools/firmware/ihex2fw.c 	record->next = *p;
p                 262 tools/firmware/ihex2fw.c 	*p = record;
p                 265 tools/firmware/ihex2fw.c static uint16_t ihex_binrec_size(struct ihex_binrec *p)
p                 267 tools/firmware/ihex2fw.c 	return p->len + sizeof(p->addr) + sizeof(p->len);
p                 273 tools/firmware/ihex2fw.c 	struct ihex_binrec *p = records;
p                 275 tools/firmware/ihex2fw.c 	while (p) {
p                 276 tools/firmware/ihex2fw.c 		uint16_t writelen = ALIGN(ihex_binrec_size(p), 4);
p                 278 tools/firmware/ihex2fw.c 		p->addr = htonl(p->addr);
p                 279 tools/firmware/ihex2fw.c 		p->len = htons(p->len);
p                 280 tools/firmware/ihex2fw.c 		if (write(outfd, &p->addr, writelen) != writelen)
p                 282 tools/firmware/ihex2fw.c 		p = p->next;
p                  31 tools/hv/hv_fcopy_daemon.c 	char *q, *p;
p                  34 tools/hv/hv_fcopy_daemon.c 	p = (char *)smsg->path_name;
p                  43 tools/hv/hv_fcopy_daemon.c 	while ((q = strchr(p, '/')) != NULL) {
p                  44 tools/hv/hv_fcopy_daemon.c 		if (q == p) {
p                  45 tools/hv/hv_fcopy_daemon.c 			p++;
p                  62 tools/hv/hv_fcopy_daemon.c 		p = q + 1;
p                 420 tools/hv/hv_kvp_daemon.c 	char	*p, buf[512];
p                 434 tools/hv/hv_kvp_daemon.c 	p = strchr(os_version, '-');
p                 435 tools/hv/hv_kvp_daemon.c 	if (p)
p                 436 tools/hv/hv_kvp_daemon.c 		*p = '\0';
p                 452 tools/hv/hv_kvp_daemon.c 			p = strchr(buf, '=');
p                 453 tools/hv/hv_kvp_daemon.c 			if (!p)
p                 455 tools/hv/hv_kvp_daemon.c 			*p++ = 0;
p                 458 tools/hv/hv_kvp_daemon.c 			value = p;
p                 459 tools/hv/hv_kvp_daemon.c 			q = p;
p                 460 tools/hv/hv_kvp_daemon.c 			while (*p) {
p                 461 tools/hv/hv_kvp_daemon.c 				if (*p == '\\') {
p                 462 tools/hv/hv_kvp_daemon.c 					++p;
p                 463 tools/hv/hv_kvp_daemon.c 					if (!*p)
p                 465 tools/hv/hv_kvp_daemon.c 					*q++ = *p++;
p                 466 tools/hv/hv_kvp_daemon.c 				} else if (*p == '\'' || *p == '"' ||
p                 467 tools/hv/hv_kvp_daemon.c 					   *p == '\n') {
p                 468 tools/hv/hv_kvp_daemon.c 					++p;
p                 470 tools/hv/hv_kvp_daemon.c 					*q++ = *p++;
p                 476 tools/hv/hv_kvp_daemon.c 				p = strdup(value);
p                 477 tools/hv/hv_kvp_daemon.c 				if (!p)
p                 479 tools/hv/hv_kvp_daemon.c 				os_name = p;
p                 481 tools/hv/hv_kvp_daemon.c 				p = strdup(value);
p                 482 tools/hv/hv_kvp_daemon.c 				if (!p)
p                 484 tools/hv/hv_kvp_daemon.c 				os_major = p;
p                 506 tools/hv/hv_kvp_daemon.c 	p = fgets(buf, sizeof(buf), file);
p                 507 tools/hv/hv_kvp_daemon.c 	if (p) {
p                 508 tools/hv/hv_kvp_daemon.c 		p = strchr(buf, '\n');
p                 509 tools/hv/hv_kvp_daemon.c 		if (p)
p                 510 tools/hv/hv_kvp_daemon.c 			*p = '\0';
p                 511 tools/hv/hv_kvp_daemon.c 		p = strdup(buf);
p                 512 tools/hv/hv_kvp_daemon.c 		if (!p)
p                 514 tools/hv/hv_kvp_daemon.c 		os_name = p;
p                 517 tools/hv/hv_kvp_daemon.c 		p = fgets(buf, sizeof(buf), file);
p                 518 tools/hv/hv_kvp_daemon.c 		if (p) {
p                 519 tools/hv/hv_kvp_daemon.c 			p = strchr(buf, '\n');
p                 520 tools/hv/hv_kvp_daemon.c 			if (p)
p                 521 tools/hv/hv_kvp_daemon.c 				*p = '\0';
p                 522 tools/hv/hv_kvp_daemon.c 			p = strdup(buf);
p                 523 tools/hv/hv_kvp_daemon.c 			if (!p)
p                 525 tools/hv/hv_kvp_daemon.c 			os_major = p;
p                 528 tools/hv/hv_kvp_daemon.c 			p = fgets(buf, sizeof(buf), file);
p                 529 tools/hv/hv_kvp_daemon.c 			if (p)  {
p                 530 tools/hv/hv_kvp_daemon.c 				p = strchr(buf, '\n');
p                 531 tools/hv/hv_kvp_daemon.c 				if (p)
p                 532 tools/hv/hv_kvp_daemon.c 					*p = '\0';
p                 533 tools/hv/hv_kvp_daemon.c 				p = strdup(buf);
p                 534 tools/hv/hv_kvp_daemon.c 				if (p)
p                 535 tools/hv/hv_kvp_daemon.c 					os_minor = p;
p                 560 tools/hv/hv_kvp_daemon.c 	char    *p, *x;
p                 580 tools/hv/hv_kvp_daemon.c 		p = fgets(buf, sizeof(buf), file);
p                 581 tools/hv/hv_kvp_daemon.c 		if (p) {
p                 582 tools/hv/hv_kvp_daemon.c 			x = strchr(p, '\n');
p                 586 tools/hv/hv_kvp_daemon.c 			if (!strcmp(p, guid)) {
p                 610 tools/hv/hv_kvp_daemon.c 	char    *p, *x;
p                 623 tools/hv/hv_kvp_daemon.c 	p = fgets(buf, sizeof(buf), file);
p                 624 tools/hv/hv_kvp_daemon.c 	if (p) {
p                 625 tools/hv/hv_kvp_daemon.c 		x = strchr(p, '\n');
p                 628 tools/hv/hv_kvp_daemon.c 		for (i = 0; i < strlen(p); i++)
p                 629 tools/hv/hv_kvp_daemon.c 			p[i] = toupper(p[i]);
p                 630 tools/hv/hv_kvp_daemon.c 		mac_addr = strdup(p);
p                 642 tools/hv/hv_kvp_daemon.c 	char *p;
p                 655 tools/hv/hv_kvp_daemon.c 	while ((p = fgets(buf, sizeof(buf), file)) != NULL) {
p                 659 tools/hv/hv_kvp_daemon.c 		x = strchr(p, '\n');
p                 663 tools/hv/hv_kvp_daemon.c 		strcat(config_buf, p);
p                 674 tools/hv/hv_kvp_daemon.c 	char *p;
p                 740 tools/hv/hv_kvp_daemon.c 	p = fgets(dhcp_info, sizeof(dhcp_info), file);
p                 741 tools/hv/hv_kvp_daemon.c 	if (p == NULL) {
p                 746 tools/hv/hv_kvp_daemon.c 	if (!strncmp(p, "Enabled", 7))
p                 951 tools/hv/hv_kvp_daemon.c 	char    *p, *x;
p                 973 tools/hv/hv_kvp_daemon.c 		p = fgets(buf, sizeof(buf), file);
p                 975 tools/hv/hv_kvp_daemon.c 		if (!p)
p                 978 tools/hv/hv_kvp_daemon.c 		x = strchr(p, '\n');
p                 982 tools/hv/hv_kvp_daemon.c 		for (i = 0; i < strlen(p); i++)
p                 983 tools/hv/hv_kvp_daemon.c 			p[i] = toupper(p[i]);
p                 985 tools/hv/hv_kvp_daemon.c 		if (strcmp(p, mac))
p                1366 tools/hv/hv_kvp_daemon.c 	char    *p;
p                1479 tools/hv/hv_kvp_daemon.c 			p = (char *)hv_msg->body.kvp_register.version;
p                1480 tools/hv/hv_kvp_daemon.c 			lic_version = malloc(strlen(p) + 1);
p                1482 tools/hv/hv_kvp_daemon.c 				strcpy(lic_version, p);
p                  19 tools/include/asm-generic/bitops/non-atomic.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  21 tools/include/asm-generic/bitops/non-atomic.h 	*p  |= mask;
p                  27 tools/include/asm-generic/bitops/non-atomic.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  29 tools/include/asm-generic/bitops/non-atomic.h 	*p &= ~mask;
p                  44 tools/include/asm-generic/bitops/non-atomic.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  46 tools/include/asm-generic/bitops/non-atomic.h 	*p ^= mask;
p                  61 tools/include/asm-generic/bitops/non-atomic.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  62 tools/include/asm-generic/bitops/non-atomic.h 	unsigned long old = *p;
p                  64 tools/include/asm-generic/bitops/non-atomic.h 	*p = old | mask;
p                  80 tools/include/asm-generic/bitops/non-atomic.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  81 tools/include/asm-generic/bitops/non-atomic.h 	unsigned long old = *p;
p                  83 tools/include/asm-generic/bitops/non-atomic.h 	*p = old & ~mask;
p                  92 tools/include/asm-generic/bitops/non-atomic.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  93 tools/include/asm-generic/bitops/non-atomic.h 	unsigned long old = *p;
p                  95 tools/include/asm-generic/bitops/non-atomic.h 	*p = old ^ mask;
p                  51 tools/include/asm/barrier.h # define smp_store_release(p, v)		\
p                  54 tools/include/asm/barrier.h 	WRITE_ONCE(*p, v);			\
p                  59 tools/include/asm/barrier.h # define smp_load_acquire(p)			\
p                  61 tools/include/asm/barrier.h 	typeof(*p) ___p1 = READ_ONCE(*p);	\
p                  91 tools/include/linux/bitmap.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                  94 tools/include/linux/bitmap.h 	old = *p;
p                  95 tools/include/linux/bitmap.h 	*p = old | mask;
p                 108 tools/include/linux/bitmap.h 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
p                 111 tools/include/linux/bitmap.h 	old = *p;
p                 112 tools/include/linux/bitmap.h 	*p = old & ~mask;
p                 105 tools/include/linux/compiler.h static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
p                 108 tools/include/linux/compiler.h 	case 1: *(__u8_alias_t  *) res = *(volatile __u8_alias_t  *) p; break;
p                 109 tools/include/linux/compiler.h 	case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
p                 110 tools/include/linux/compiler.h 	case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
p                 111 tools/include/linux/compiler.h 	case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
p                 114 tools/include/linux/compiler.h 		__builtin_memcpy((void *)res, (const void *)p, size);
p                 119 tools/include/linux/compiler.h static __always_inline void __write_once_size(volatile void *p, void *res, int size)
p                 122 tools/include/linux/compiler.h 	case 1: *(volatile  __u8_alias_t *) p = *(__u8_alias_t  *) res; break;
p                 123 tools/include/linux/compiler.h 	case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
p                 124 tools/include/linux/compiler.h 	case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
p                 125 tools/include/linux/compiler.h 	case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
p                 128 tools/include/linux/compiler.h 		__builtin_memcpy((void *)p, (const void *)res, size);
p                   5 tools/include/linux/irqflags.h # define trace_hardirq_context(p)	0
p                   6 tools/include/linux/irqflags.h # define trace_softirq_context(p)	0
p                   7 tools/include/linux/irqflags.h # define trace_hardirqs_enabled(p)	0
p                   8 tools/include/linux/irqflags.h # define trace_softirqs_enabled(p)	0
p                 273 tools/include/linux/overflow.h #define struct_size(p, member, n)					\
p                 275 tools/include/linux/overflow.h 		    sizeof(*(p)->member) + __must_be_array((p)->member),\
p                 276 tools/include/linux/overflow.h 		    sizeof(*(p)))
p                 159 tools/include/linux/rbtree_augmented.h static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
p                 161 tools/include/linux/rbtree_augmented.h 	rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
p                 165 tools/include/linux/rbtree_augmented.h 				       struct rb_node *p, int color)
p                 167 tools/include/linux/rbtree_augmented.h 	rb->__rb_parent_color = (unsigned long)p | color;
p                  22 tools/include/linux/rcu.h #define rcu_assign_pointer(p, v)	do { (p) = (v); } while (0)
p                  23 tools/include/linux/rcu.h #define RCU_INIT_POINTER(p, v)	do { (p) = (v); } while (0)
p                  11 tools/include/linux/unaligned/packed_struct.h static inline u16 __get_unaligned_cpu16(const void *p)
p                  13 tools/include/linux/unaligned/packed_struct.h 	const struct __una_u16 *ptr = (const struct __una_u16 *)p;
p                  17 tools/include/linux/unaligned/packed_struct.h static inline u32 __get_unaligned_cpu32(const void *p)
p                  19 tools/include/linux/unaligned/packed_struct.h 	const struct __una_u32 *ptr = (const struct __una_u32 *)p;
p                  23 tools/include/linux/unaligned/packed_struct.h static inline u64 __get_unaligned_cpu64(const void *p)
p                  25 tools/include/linux/unaligned/packed_struct.h 	const struct __una_u64 *ptr = (const struct __una_u64 *)p;
p                  29 tools/include/linux/unaligned/packed_struct.h static inline void __put_unaligned_cpu16(u16 val, void *p)
p                  31 tools/include/linux/unaligned/packed_struct.h 	struct __una_u16 *ptr = (struct __una_u16 *)p;
p                  35 tools/include/linux/unaligned/packed_struct.h static inline void __put_unaligned_cpu32(u32 val, void *p)
p                  37 tools/include/linux/unaligned/packed_struct.h 	struct __una_u32 *ptr = (struct __una_u32 *)p;
p                  41 tools/include/linux/unaligned/packed_struct.h static inline void __put_unaligned_cpu64(u64 val, void *p)
p                  43 tools/include/linux/unaligned/packed_struct.h 	struct __una_u64 *ptr = (struct __una_u64 *)p;
p                2299 tools/include/nolibc/nolibc.h 	char *p = dst;
p                2302 tools/include/nolibc/nolibc.h 		*(p++) = b;
p                   7 tools/include/tools/be_byteshift.h static inline uint16_t __get_unaligned_be16(const uint8_t *p)
p                   9 tools/include/tools/be_byteshift.h 	return p[0] << 8 | p[1];
p                  12 tools/include/tools/be_byteshift.h static inline uint32_t __get_unaligned_be32(const uint8_t *p)
p                  14 tools/include/tools/be_byteshift.h 	return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
p                  17 tools/include/tools/be_byteshift.h static inline uint64_t __get_unaligned_be64(const uint8_t *p)
p                  19 tools/include/tools/be_byteshift.h 	return (uint64_t)__get_unaligned_be32(p) << 32 |
p                  20 tools/include/tools/be_byteshift.h 	       __get_unaligned_be32(p + 4);
p                  23 tools/include/tools/be_byteshift.h static inline void __put_unaligned_be16(uint16_t val, uint8_t *p)
p                  25 tools/include/tools/be_byteshift.h 	*p++ = val >> 8;
p                  26 tools/include/tools/be_byteshift.h 	*p++ = val;
p                  29 tools/include/tools/be_byteshift.h static inline void __put_unaligned_be32(uint32_t val, uint8_t *p)
p                  31 tools/include/tools/be_byteshift.h 	__put_unaligned_be16(val >> 16, p);
p                  32 tools/include/tools/be_byteshift.h 	__put_unaligned_be16(val, p + 2);
p                  35 tools/include/tools/be_byteshift.h static inline void __put_unaligned_be64(uint64_t val, uint8_t *p)
p                  37 tools/include/tools/be_byteshift.h 	__put_unaligned_be32(val >> 32, p);
p                  38 tools/include/tools/be_byteshift.h 	__put_unaligned_be32(val, p + 4);
p                  41 tools/include/tools/be_byteshift.h static inline uint16_t get_unaligned_be16(const void *p)
p                  43 tools/include/tools/be_byteshift.h 	return __get_unaligned_be16((const uint8_t *)p);
p                  46 tools/include/tools/be_byteshift.h static inline uint32_t get_unaligned_be32(const void *p)
p                  48 tools/include/tools/be_byteshift.h 	return __get_unaligned_be32((const uint8_t *)p);
p                  51 tools/include/tools/be_byteshift.h static inline uint64_t get_unaligned_be64(const void *p)
p                  53 tools/include/tools/be_byteshift.h 	return __get_unaligned_be64((const uint8_t *)p);
p                  56 tools/include/tools/be_byteshift.h static inline void put_unaligned_be16(uint16_t val, void *p)
p                  58 tools/include/tools/be_byteshift.h 	__put_unaligned_be16(val, p);
p                  61 tools/include/tools/be_byteshift.h static inline void put_unaligned_be32(uint32_t val, void *p)
p                  63 tools/include/tools/be_byteshift.h 	__put_unaligned_be32(val, p);
p                  66 tools/include/tools/be_byteshift.h static inline void put_unaligned_be64(uint64_t val, void *p)
p                  68 tools/include/tools/be_byteshift.h 	__put_unaligned_be64(val, p);
p                   7 tools/include/tools/le_byteshift.h static inline uint16_t __get_unaligned_le16(const uint8_t *p)
p                   9 tools/include/tools/le_byteshift.h 	return p[0] | p[1] << 8;
p                  12 tools/include/tools/le_byteshift.h static inline uint32_t __get_unaligned_le32(const uint8_t *p)
p                  14 tools/include/tools/le_byteshift.h 	return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
p                  17 tools/include/tools/le_byteshift.h static inline uint64_t __get_unaligned_le64(const uint8_t *p)
p                  19 tools/include/tools/le_byteshift.h 	return (uint64_t)__get_unaligned_le32(p + 4) << 32 |
p                  20 tools/include/tools/le_byteshift.h 	       __get_unaligned_le32(p);
p                  23 tools/include/tools/le_byteshift.h static inline void __put_unaligned_le16(uint16_t val, uint8_t *p)
p                  25 tools/include/tools/le_byteshift.h 	*p++ = val;
p                  26 tools/include/tools/le_byteshift.h 	*p++ = val >> 8;
p                  29 tools/include/tools/le_byteshift.h static inline void __put_unaligned_le32(uint32_t val, uint8_t *p)
p                  31 tools/include/tools/le_byteshift.h 	__put_unaligned_le16(val >> 16, p + 2);
p                  32 tools/include/tools/le_byteshift.h 	__put_unaligned_le16(val, p);
p                  35 tools/include/tools/le_byteshift.h static inline void __put_unaligned_le64(uint64_t val, uint8_t *p)
p                  37 tools/include/tools/le_byteshift.h 	__put_unaligned_le32(val >> 32, p + 4);
p                  38 tools/include/tools/le_byteshift.h 	__put_unaligned_le32(val, p);
p                  41 tools/include/tools/le_byteshift.h static inline uint16_t get_unaligned_le16(const void *p)
p                  43 tools/include/tools/le_byteshift.h 	return __get_unaligned_le16((const uint8_t *)p);
p                  46 tools/include/tools/le_byteshift.h static inline uint32_t get_unaligned_le32(const void *p)
p                  48 tools/include/tools/le_byteshift.h 	return __get_unaligned_le32((const uint8_t *)p);
p                  51 tools/include/tools/le_byteshift.h static inline uint64_t get_unaligned_le64(const void *p)
p                  53 tools/include/tools/le_byteshift.h 	return __get_unaligned_le64((const uint8_t *)p);
p                  56 tools/include/tools/le_byteshift.h static inline void put_unaligned_le16(uint16_t val, void *p)
p                  58 tools/include/tools/le_byteshift.h 	__put_unaligned_le16(val, p);
p                  61 tools/include/tools/le_byteshift.h static inline void put_unaligned_le32(uint32_t val, void *p)
p                  63 tools/include/tools/le_byteshift.h 	__put_unaligned_le32(val, p);
p                  66 tools/include/tools/le_byteshift.h static inline void put_unaligned_le64(uint64_t val, void *p)
p                  68 tools/include/tools/le_byteshift.h 	__put_unaligned_le64(val, p);
p                  26 tools/include/uapi/linux/erspan.h 		p:1;
p                  32 tools/include/uapi/linux/erspan.h 	__u8	p:1,
p                 650 tools/include/uapi/linux/pkt_sched.h 	__u32 p;
p                 393 tools/io_uring/io_uring-bench.c 	struct io_uring_params p;
p                 397 tools/io_uring/io_uring-bench.c 	memset(&p, 0, sizeof(p));
p                 400 tools/io_uring/io_uring-bench.c 		p.flags |= IORING_SETUP_IOPOLL;
p                 402 tools/io_uring/io_uring-bench.c 		p.flags |= IORING_SETUP_SQPOLL;
p                 404 tools/io_uring/io_uring-bench.c 			p.flags |= IORING_SETUP_SQ_AFF;
p                 405 tools/io_uring/io_uring-bench.c 			p.sq_thread_cpu = sq_thread_cpu;
p                 409 tools/io_uring/io_uring-bench.c 	fd = io_uring_setup(DEPTH, &p);
p                 432 tools/io_uring/io_uring-bench.c 	ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
p                 436 tools/io_uring/io_uring-bench.c 	sring->head = ptr + p.sq_off.head;
p                 437 tools/io_uring/io_uring-bench.c 	sring->tail = ptr + p.sq_off.tail;
p                 438 tools/io_uring/io_uring-bench.c 	sring->ring_mask = ptr + p.sq_off.ring_mask;
p                 439 tools/io_uring/io_uring-bench.c 	sring->ring_entries = ptr + p.sq_off.ring_entries;
p                 440 tools/io_uring/io_uring-bench.c 	sring->flags = ptr + p.sq_off.flags;
p                 441 tools/io_uring/io_uring-bench.c 	sring->array = ptr + p.sq_off.array;
p                 444 tools/io_uring/io_uring-bench.c 	s->sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),
p                 449 tools/io_uring/io_uring-bench.c 	ptr = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
p                 453 tools/io_uring/io_uring-bench.c 	cring->head = ptr + p.cq_off.head;
p                 454 tools/io_uring/io_uring-bench.c 	cring->tail = ptr + p.cq_off.tail;
p                 455 tools/io_uring/io_uring-bench.c 	cring->ring_mask = ptr + p.cq_off.ring_mask;
p                 456 tools/io_uring/io_uring-bench.c 	cring->ring_entries = ptr + p.cq_off.ring_entries;
p                 457 tools/io_uring/io_uring-bench.c 	cring->cqes = ptr + p.cq_off.cqes;
p                 466 tools/io_uring/io_uring-bench.c 	char *p;
p                 469 tools/io_uring/io_uring-bench.c 	p = buf;
p                 474 tools/io_uring/io_uring-bench.c 			p += sprintf(p, "%d", f->pending_ios);
p                 476 tools/io_uring/io_uring-bench.c 			p += sprintf(p, "%d, ", f->pending_ios);
p                  54 tools/io_uring/liburing.h extern int io_uring_setup(unsigned entries, struct io_uring_params *p);
p                  65 tools/io_uring/liburing.h extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
p                  10 tools/io_uring/setup.c static int io_uring_mmap(int fd, struct io_uring_params *p,
p                  17 tools/io_uring/setup.c 	sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
p                  22 tools/io_uring/setup.c 	sq->khead = ptr + p->sq_off.head;
p                  23 tools/io_uring/setup.c 	sq->ktail = ptr + p->sq_off.tail;
p                  24 tools/io_uring/setup.c 	sq->kring_mask = ptr + p->sq_off.ring_mask;
p                  25 tools/io_uring/setup.c 	sq->kring_entries = ptr + p->sq_off.ring_entries;
p                  26 tools/io_uring/setup.c 	sq->kflags = ptr + p->sq_off.flags;
p                  27 tools/io_uring/setup.c 	sq->kdropped = ptr + p->sq_off.dropped;
p                  28 tools/io_uring/setup.c 	sq->array = ptr + p->sq_off.array;
p                  30 tools/io_uring/setup.c 	size = p->sq_entries * sizeof(struct io_uring_sqe);
p                  41 tools/io_uring/setup.c 	cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
p                  46 tools/io_uring/setup.c 		munmap(sq->sqes, p->sq_entries * sizeof(struct io_uring_sqe));
p                  49 tools/io_uring/setup.c 	cq->khead = ptr + p->cq_off.head;
p                  50 tools/io_uring/setup.c 	cq->ktail = ptr + p->cq_off.tail;
p                  51 tools/io_uring/setup.c 	cq->kring_mask = ptr + p->cq_off.ring_mask;
p                  52 tools/io_uring/setup.c 	cq->kring_entries = ptr + p->cq_off.ring_entries;
p                  53 tools/io_uring/setup.c 	cq->koverflow = ptr + p->cq_off.overflow;
p                  54 tools/io_uring/setup.c 	cq->cqes = ptr + p->cq_off.cqes;
p                  64 tools/io_uring/setup.c int io_uring_queue_mmap(int fd, struct io_uring_params *p, struct io_uring *ring)
p                  69 tools/io_uring/setup.c 	ret = io_uring_mmap(fd, p, &ring->sq, &ring->cq);
p                  81 tools/io_uring/setup.c 	struct io_uring_params p;
p                  84 tools/io_uring/setup.c 	memset(&p, 0, sizeof(p));
p                  85 tools/io_uring/setup.c 	p.flags = flags;
p                  87 tools/io_uring/setup.c 	fd = io_uring_setup(entries, &p);
p                  91 tools/io_uring/setup.c 	ret = io_uring_queue_mmap(fd, &p, ring);
p                  42 tools/io_uring/syscall.c int io_uring_setup(unsigned int entries, struct io_uring_params *p)
p                  44 tools/io_uring/syscall.c 	return syscall(__NR_io_uring_setup, entries, p);
p                  42 tools/lib/argv_split.c 	char **p;
p                  43 tools/lib/argv_split.c 	for (p = argv; *p; p++) {
p                  44 tools/lib/argv_split.c 		free(*p);
p                  45 tools/lib/argv_split.c 		*p = NULL;
p                  81 tools/lib/argv_split.c 			const char *p = str;
p                  86 tools/lib/argv_split.c 			t = strndup(p, str-p);
p                1557 tools/lib/bpf/btf.c 	const struct btf_str_ptr *p = pelem;
p                1559 tools/lib/bpf/btf.c 	if (str_ptr != p->str)
p                1560 tools/lib/bpf/btf.c 		return (const char *)str_ptr < p->str ? -1 : 1;
p                1614 tools/lib/bpf/btf.c 	char *p = start, *tmp_strs = NULL;
p                1625 tools/lib/bpf/btf.c 	while (p < end) {
p                1639 tools/lib/bpf/btf.c 		strs.ptrs[strs.cnt].str = p;
p                1642 tools/lib/bpf/btf.c 		p += strlen(p) + 1;
p                1666 tools/lib/bpf/btf.c 	p = tmp_strs;
p                1688 tools/lib/bpf/btf.c 			int new_off = p - tmp_strs;
p                1691 tools/lib/bpf/btf.c 			memmove(p, strs.ptrs[grp_idx].str, len + 1);
p                1694 tools/lib/bpf/btf.c 			p += len + 1;
p                1704 tools/lib/bpf/btf.c 	d->btf->hdr->str_len = p - tmp_strs;
p                2686 tools/lib/bpf/btf.c 	char *types_start, *p;
p                2695 tools/lib/bpf/btf.c 	p = types_start;
p                2705 tools/lib/bpf/btf.c 		memmove(p, d->btf->types[i], len);
p                2707 tools/lib/bpf/btf.c 		d->btf->types[next_type_id] = (struct btf_type *)p;
p                2708 tools/lib/bpf/btf.c 		p += len;
p                2715 tools/lib/bpf/btf.c 	d->btf->hdr->type_len = p - types_start;
p                2723 tools/lib/bpf/btf.c 	d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data;
p                2724 tools/lib/bpf/btf.c 	memmove(p, d->btf->strings, d->btf->hdr->str_len);
p                2725 tools/lib/bpf/btf.c 	d->btf->strings = p;
p                2726 tools/lib/bpf/btf.c 	p += d->btf->hdr->str_len;
p                2728 tools/lib/bpf/btf.c 	d->btf->data_size = p - (char *)d->btf->data;
p                 291 tools/lib/bpf/btf_dump.c 			const struct btf_param *p = btf_params(t);
p                 293 tools/lib/bpf/btf_dump.c 			for (j = 0; j < vlen; j++, p++)
p                 294 tools/lib/bpf/btf_dump.c 				d->type_states[p->type].referenced = 1;
p                 517 tools/lib/bpf/btf_dump.c 		const struct btf_param *p = btf_params(t);
p                 526 tools/lib/bpf/btf_dump.c 		for (i = 0; i < vlen; i++, p++) {
p                 527 tools/lib/bpf/btf_dump.c 			err = btf_dump_order_type(d, p->type, through_ptr);
p                 740 tools/lib/bpf/btf_dump.c 		const struct btf_param *p = btf_params(t);
p                 745 tools/lib/bpf/btf_dump.c 		for (i = 0; i < vlen; i++, p++)
p                 746 tools/lib/bpf/btf_dump.c 			btf_dump_emit_type(d, p->type, cont_id);
p                1273 tools/lib/bpf/btf_dump.c 			const struct btf_param *p = btf_params(t);
p                1292 tools/lib/bpf/btf_dump.c 			if (vlen == 1 && p->type == 0) {
p                1297 tools/lib/bpf/btf_dump.c 			for (i = 0; i < vlen; i++, p++) {
p                1302 tools/lib/bpf/btf_dump.c 				if (i == vlen - 1 && p->type == 0) {
p                1307 tools/lib/bpf/btf_dump.c 				name = btf_name_of(d, p->name_off);
p                1308 tools/lib/bpf/btf_dump.c 				btf_dump_emit_type_decl(d, p->type, name, lvl);
p                 349 tools/lib/bpf/libbpf.c 	char *name, *p;
p                 351 tools/lib/bpf/libbpf.c 	name = p = strdup(prog->section_name);
p                 352 tools/lib/bpf/libbpf.c 	while ((p = strchr(p, '/')))
p                 353 tools/lib/bpf/libbpf.c 		*p = '_';
p                4282 tools/lib/bpf/libbpf.c __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
p                4291 tools/lib/bpf/libbpf.c 	if (!p)
p                4296 tools/lib/bpf/libbpf.c 	if (p->obj != obj) {
p                4301 tools/lib/bpf/libbpf.c 	idx = (p - obj->programs) + (forward ? 1 : -1);
p                5426 tools/lib/bpf/libbpf.c 					      struct perf_buffer_params *p);
p                5431 tools/lib/bpf/libbpf.c 	struct perf_buffer_params p = {};
p                5440 tools/lib/bpf/libbpf.c 	p.attr = &attr;
p                5441 tools/lib/bpf/libbpf.c 	p.sample_cb = opts ? opts->sample_cb : NULL;
p                5442 tools/lib/bpf/libbpf.c 	p.lost_cb = opts ? opts->lost_cb : NULL;
p                5443 tools/lib/bpf/libbpf.c 	p.ctx = opts ? opts->ctx : NULL;
p                5445 tools/lib/bpf/libbpf.c 	return __perf_buffer__new(map_fd, page_cnt, &p);
p                5452 tools/lib/bpf/libbpf.c 	struct perf_buffer_params p = {};
p                5454 tools/lib/bpf/libbpf.c 	p.attr = opts->attr;
p                5455 tools/lib/bpf/libbpf.c 	p.event_cb = opts->event_cb;
p                5456 tools/lib/bpf/libbpf.c 	p.ctx = opts->ctx;
p                5457 tools/lib/bpf/libbpf.c 	p.cpu_cnt = opts->cpu_cnt;
p                5458 tools/lib/bpf/libbpf.c 	p.cpus = opts->cpus;
p                5459 tools/lib/bpf/libbpf.c 	p.map_keys = opts->map_keys;
p                5461 tools/lib/bpf/libbpf.c 	return __perf_buffer__new(map_fd, page_cnt, &p);
p                5465 tools/lib/bpf/libbpf.c 					      struct perf_buffer_params *p)
p                5498 tools/lib/bpf/libbpf.c 	pb->event_cb = p->event_cb;
p                5499 tools/lib/bpf/libbpf.c 	pb->sample_cb = p->sample_cb;
p                5500 tools/lib/bpf/libbpf.c 	pb->lost_cb = p->lost_cb;
p                5501 tools/lib/bpf/libbpf.c 	pb->ctx = p->ctx;
p                5515 tools/lib/bpf/libbpf.c 	if (p->cpu_cnt > 0) {
p                5516 tools/lib/bpf/libbpf.c 		pb->cpu_cnt = p->cpu_cnt;
p                5544 tools/lib/bpf/libbpf.c 		cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
p                5545 tools/lib/bpf/libbpf.c 		map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
p                5547 tools/lib/bpf/libbpf.c 		cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
p                  31 tools/lib/string.c 	void *p = malloc(len);
p                  33 tools/lib/string.c 	if (p)
p                  34 tools/lib/string.c 		memcpy(p, src, len);
p                  36 tools/lib/string.c 	return p;
p                  48 tools/lib/subcmd/parse-options.c static int get_arg(struct parse_opt_ctx_t *p, const struct option *opt,
p                  53 tools/lib/subcmd/parse-options.c 	if (p->opt) {
p                  54 tools/lib/subcmd/parse-options.c 		res = p->opt;
p                  55 tools/lib/subcmd/parse-options.c 		p->opt = NULL;
p                  56 tools/lib/subcmd/parse-options.c 	} else if ((opt->flags & PARSE_OPT_LASTARG_DEFAULT) && (p->argc == 1 ||
p                  57 tools/lib/subcmd/parse-options.c 		    **(p->argv + 1) == '-')) {
p                  59 tools/lib/subcmd/parse-options.c 	} else if (p->argc > 1) {
p                  60 tools/lib/subcmd/parse-options.c 		p->argc--;
p                  61 tools/lib/subcmd/parse-options.c 		res = *++p->argv;
p                  69 tools/lib/subcmd/parse-options.c static int get_value(struct parse_opt_ctx_t *p,
p                  76 tools/lib/subcmd/parse-options.c 	if (unset && p->opt)
p                  84 tools/lib/subcmd/parse-options.c 		if (p->excl_opt && p->excl_opt != opt) {
p                  87 tools/lib/subcmd/parse-options.c 			if (((flags & OPT_SHORT) && p->excl_opt->short_name) ||
p                  88 tools/lib/subcmd/parse-options.c 			    p->excl_opt->long_name == NULL) {
p                  90 tools/lib/subcmd/parse-options.c 					 p->excl_opt->short_name);
p                  93 tools/lib/subcmd/parse-options.c 					 p->excl_opt->long_name);
p                  98 tools/lib/subcmd/parse-options.c 		p->excl_opt = opt;
p                 100 tools/lib/subcmd/parse-options.c 	if (!(flags & OPT_SHORT) && p->opt) {
p                 151 tools/lib/subcmd/parse-options.c 		if (opt->flags & PARSE_OPT_OPTARG && !p->opt)
p                 177 tools/lib/subcmd/parse-options.c 			err = get_arg(p, opt, flags, NULL);
p                 215 tools/lib/subcmd/parse-options.c 		else if (opt->flags & PARSE_OPT_OPTARG && !p->opt)
p                 218 tools/lib/subcmd/parse-options.c 			err = get_arg(p, opt, flags, (const char **)opt->value);
p                 244 tools/lib/subcmd/parse-options.c 		if (opt->flags & PARSE_OPT_OPTARG && !p->opt)
p                 246 tools/lib/subcmd/parse-options.c 		if (get_arg(p, opt, flags, &arg))
p                 255 tools/lib/subcmd/parse-options.c 		if (opt->flags & PARSE_OPT_OPTARG && !p->opt) {
p                 259 tools/lib/subcmd/parse-options.c 		if (get_arg(p, opt, flags, &arg))
p                 271 tools/lib/subcmd/parse-options.c 		if (opt->flags & PARSE_OPT_OPTARG && !p->opt) {
p                 275 tools/lib/subcmd/parse-options.c 		if (get_arg(p, opt, flags, &arg))
p                 289 tools/lib/subcmd/parse-options.c 		if (opt->flags & PARSE_OPT_OPTARG && !p->opt) {
p                 293 tools/lib/subcmd/parse-options.c 		if (get_arg(p, opt, flags, &arg))
p                 305 tools/lib/subcmd/parse-options.c 		if (opt->flags & PARSE_OPT_OPTARG && !p->opt) {
p                 309 tools/lib/subcmd/parse-options.c 		if (get_arg(p, opt, flags, &arg))
p                 321 tools/lib/subcmd/parse-options.c 		if (opt->flags & PARSE_OPT_OPTARG && !p->opt) {
p                 325 tools/lib/subcmd/parse-options.c 		if (get_arg(p, opt, flags, &arg))
p                 342 tools/lib/subcmd/parse-options.c static int parse_short_opt(struct parse_opt_ctx_t *p, const struct option *options)
p                 346 tools/lib/subcmd/parse-options.c 		if (options->short_name == *p->opt) {
p                 347 tools/lib/subcmd/parse-options.c 			p->opt = p->opt[1] ? p->opt + 1 : NULL;
p                 348 tools/lib/subcmd/parse-options.c 			return get_value(p, options, OPT_SHORT);
p                 360 tools/lib/subcmd/parse-options.c static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg,
p                 386 tools/lib/subcmd/parse-options.c 			p->out[p->cpidx++] = arg - 2;
p                 422 tools/lib/subcmd/parse-options.c 					p->opt = arg_end + 1;
p                 447 tools/lib/subcmd/parse-options.c 			p->opt = rest + 1;
p                 449 tools/lib/subcmd/parse-options.c 		return get_value(p, options, flags);
p                 463 tools/lib/subcmd/parse-options.c 		return get_value(p, abbrev_option, abbrev_flags);
p                 121 tools/lib/subcmd/parse-options.h #define OPT_PARENT(p)               { .type = OPTION_END, .parent = (p) }
p                 133 tools/lib/subcmd/parse-options.h #define OPT_SET_PTR(s, l, v, h, p)  { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) }
p                  24 tools/lib/symbol/kallsyms.c 	char *p;
p                  26 tools/lib/symbol/kallsyms.c 	*long_val = strtoull(ptr, &p, 16);
p                  28 tools/lib/symbol/kallsyms.c 	return p - ptr;
p                 758 tools/lib/traceevent/event-parse.c 	char *p;
p                 773 tools/lib/traceevent/event-parse.c 	p = item->printk + strlen(item->printk) - 1;
p                 774 tools/lib/traceevent/event-parse.c 	if (*p == '"')
p                 775 tools/lib/traceevent/event-parse.c 		*p = 0;
p                 777 tools/lib/traceevent/event-parse.c 	p -= 2;
p                 778 tools/lib/traceevent/event-parse.c 	if (strcmp(p, "\\n") == 0)
p                 779 tools/lib/traceevent/event-parse.c 		*p = 0;
p                4865 tools/lib/traceevent/event-parse.c static int is_printable_array(char *p, unsigned int len)
p                4869 tools/lib/traceevent/event-parse.c 	for (i = 0; i < len && p[i]; i++)
p                4870 tools/lib/traceevent/event-parse.c 		if (!isprint(p[i]) && !isspace(p[i]))
p                4963 tools/lib/traceevent/event-parse.c 	struct trace_seq p;
p                5120 tools/lib/traceevent/event-parse.c 					char *p;
p                5123 tools/lib/traceevent/event-parse.c 					if (ls == 1 && (p = strchr(format, 'l')))
p                5124 tools/lib/traceevent/event-parse.c 						memmove(p+1, p, strlen(p)+1);
p                5188 tools/lib/traceevent/event-parse.c 				trace_seq_init(&p);
p                5189 tools/lib/traceevent/event-parse.c 				print_str_arg(&p, data, size, event,
p                5191 tools/lib/traceevent/event-parse.c 				trace_seq_terminate(&p);
p                5192 tools/lib/traceevent/event-parse.c 				trace_seq_puts(s, p.buffer);
p                5193 tools/lib/traceevent/event-parse.c 				trace_seq_destroy(&p);
p                 165 tools/lib/traceevent/event-plugin.c 		char *p;
p                 169 tools/lib/traceevent/event-plugin.c 		p = strstr(plugin, ".");
p                 170 tools/lib/traceevent/event-plugin.c 		if (p)
p                 171 tools/lib/traceevent/event-plugin.c 			*p = '\0';
p                 157 tools/lib/traceevent/plugins/plugin_scsi.c scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
p                 159 tools/lib/traceevent/plugins/plugin_scsi.c 	const char *ret = p->buffer + p->len;
p                 167 tools/lib/traceevent/plugins/plugin_scsi.c 	trace_seq_printf(p, "lba=%llu txlen=%llu",
p                 169 tools/lib/traceevent/plugins/plugin_scsi.c 	trace_seq_putc(p, 0);
p                 174 tools/lib/traceevent/plugins/plugin_scsi.c scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
p                 176 tools/lib/traceevent/plugins/plugin_scsi.c 	const char *ret = p->buffer + p->len;
p                 186 tools/lib/traceevent/plugins/plugin_scsi.c 	trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
p                 191 tools/lib/traceevent/plugins/plugin_scsi.c 		trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
p                 193 tools/lib/traceevent/plugins/plugin_scsi.c 	trace_seq_putc(p, 0);
p                 198 tools/lib/traceevent/plugins/plugin_scsi.c scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
p                 200 tools/lib/traceevent/plugins/plugin_scsi.c 	const char *ret = p->buffer + p->len;
p                 212 tools/lib/traceevent/plugins/plugin_scsi.c 	trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
p                 215 tools/lib/traceevent/plugins/plugin_scsi.c 	trace_seq_putc(p, 0);
p                 220 tools/lib/traceevent/plugins/plugin_scsi.c scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
p                 222 tools/lib/traceevent/plugins/plugin_scsi.c 	const char *ret = p->buffer + p->len;
p                 238 tools/lib/traceevent/plugins/plugin_scsi.c 	trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
p                 243 tools/lib/traceevent/plugins/plugin_scsi.c 		trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
p                 245 tools/lib/traceevent/plugins/plugin_scsi.c 	trace_seq_putc(p, 0);
p                 250 tools/lib/traceevent/plugins/plugin_scsi.c scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
p                 252 tools/lib/traceevent/plugins/plugin_scsi.c 	const char *ret = p->buffer + p->len, *cmd;
p                 270 tools/lib/traceevent/plugins/plugin_scsi.c 		trace_seq_printf(p, "UNKNOWN");
p                 291 tools/lib/traceevent/plugins/plugin_scsi.c 	trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u",
p                 296 tools/lib/traceevent/plugins/plugin_scsi.c 		trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1);
p                 299 tools/lib/traceevent/plugins/plugin_scsi.c 	trace_seq_putc(p, 0);
p                 304 tools/lib/traceevent/plugins/plugin_scsi.c scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
p                 306 tools/lib/traceevent/plugins/plugin_scsi.c 	const char *ret = p->buffer + p->len;
p                 309 tools/lib/traceevent/plugins/plugin_scsi.c 	trace_seq_printf(p, "regions=%u", (regions - 8) / 16);
p                 310 tools/lib/traceevent/plugins/plugin_scsi.c 	trace_seq_putc(p, 0);
p                 315 tools/lib/traceevent/plugins/plugin_scsi.c scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
p                 317 tools/lib/traceevent/plugins/plugin_scsi.c 	const char *ret = p->buffer + p->len, *cmd;
p                 329 tools/lib/traceevent/plugins/plugin_scsi.c 		trace_seq_printf(p, "UNKNOWN");
p                 346 tools/lib/traceevent/plugins/plugin_scsi.c 	trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd,
p                 350 tools/lib/traceevent/plugins/plugin_scsi.c 	trace_seq_putc(p, 0);
p                 355 tools/lib/traceevent/plugins/plugin_scsi.c scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len)
p                 362 tools/lib/traceevent/plugins/plugin_scsi.c 		return scsi_trace_rw32(p, cdb, len);
p                 364 tools/lib/traceevent/plugins/plugin_scsi.c 		return scsi_trace_misc(p, cdb, len);
p                 369 tools/lib/traceevent/plugins/plugin_scsi.c scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len)
p                 371 tools/lib/traceevent/plugins/plugin_scsi.c 	const char *ret = p->buffer + p->len;
p                 373 tools/lib/traceevent/plugins/plugin_scsi.c 	trace_seq_printf(p, "-");
p                 374 tools/lib/traceevent/plugins/plugin_scsi.c 	trace_seq_putc(p, 0);
p                 379 tools/lib/traceevent/plugins/plugin_scsi.c scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len)
p                 384 tools/lib/traceevent/plugins/plugin_scsi.c 		return scsi_trace_rw6(p, cdb, len);
p                 389 tools/lib/traceevent/plugins/plugin_scsi.c 		return scsi_trace_rw10(p, cdb, len);
p                 393 tools/lib/traceevent/plugins/plugin_scsi.c 		return scsi_trace_rw12(p, cdb, len);
p                 398 tools/lib/traceevent/plugins/plugin_scsi.c 		return scsi_trace_rw16(p, cdb, len);
p                 400 tools/lib/traceevent/plugins/plugin_scsi.c 		return scsi_trace_unmap(p, cdb, len);
p                 402 tools/lib/traceevent/plugins/plugin_scsi.c 		return scsi_trace_service_action_in(p, cdb, len);
p                 404 tools/lib/traceevent/plugins/plugin_scsi.c 		return scsi_trace_varlen(p, cdb, len);
p                 406 tools/lib/traceevent/plugins/plugin_scsi.c 		return scsi_trace_misc(p, cdb, len);
p                  91 tools/objtool/objtool.c 		struct cmd_struct *p = objtool_cmds+i;
p                  93 tools/objtool/objtool.c 		if (strcmp(p->name, cmd))
p                  96 tools/objtool/objtool.c 		ret = p->fn(argc, argv);
p                  12 tools/pcmcia/crc32hash.c static unsigned int crc32(unsigned char const *p, unsigned int len)
p                  17 tools/pcmcia/crc32hash.c 		crc ^= *p++;
p                  44 tools/perf/arch/s390/util/machine.c void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
p                  46 tools/perf/arch/s390/util/machine.c 	if (strchr(p->name, '[') == NULL && strchr(c->name, '['))
p                  48 tools/perf/arch/s390/util/machine.c 		p->end = roundup(p->end, page_size);
p                  50 tools/perf/arch/s390/util/machine.c 		p->end = c->start;
p                  51 tools/perf/arch/s390/util/machine.c 	pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end);
p                 435 tools/perf/arch/x86/util/intel-pt.c 	int p = 0;
p                 444 tools/perf/arch/x86/util/intel-pt.c 				p += scnprintf(str + p, len - p, ",");
p                 447 tools/perf/arch/x86/util/intel-pt.c 				p += scnprintf(str + p, len - p, "%u", val);
p                 462 tools/perf/arch/x86/util/intel-pt.c 				p += scnprintf(str + p, len - p, ",%u", last);
p                 466 tools/perf/arch/x86/util/intel-pt.c 				p += scnprintf(str + p, len - p, "-%u", last);
p                 368 tools/perf/bench/epoll-wait.c static void *writerfn(void *p)
p                 370 tools/perf/bench/epoll-wait.c 	struct worker *worker = p;
p                  47 tools/perf/bench/numa.c #define tprintf(x...) do { if (g && g->p.show_details >= 0) printf(x); } while (0)
p                  53 tools/perf/bench/numa.c #define dprintf(x...) do { if (g && g->p.show_details >= 1) printf(x); } while (0)
p                 158 tools/perf/bench/numa.c 	struct params		p;
p                 229 tools/perf/bench/numa.c 	for (i = 0; i < g->p.nr_nodes; i++) {
p                 276 tools/perf/bench/numa.c 		for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
p                 279 tools/perf/bench/numa.c 		BUG_ON(target_cpu < 0 || target_cpu >= g->p.nr_cpus);
p                 291 tools/perf/bench/numa.c 	int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();
p                 296 tools/perf/bench/numa.c 	BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);
p                 305 tools/perf/bench/numa.c 		for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
p                 311 tools/perf/bench/numa.c 		BUG_ON(cpu_stop > g->p.nr_cpus);
p                 335 tools/perf/bench/numa.c 	ret = set_mempolicy(MPOL_DEFAULT, NULL, g->p.nr_nodes-1);
p                 348 tools/perf/bench/numa.c 	BUG_ON(g->p.nr_nodes > (int)sizeof(nodemask)*8);
p                 450 tools/perf/bench/numa.c 	return alloc_data(bytes, MAP_SHARED, 1, g->p.init_cpu0,  g->p.thp, g->p.init_random);
p                 458 tools/perf/bench/numa.c 	return alloc_data(bytes, MAP_SHARED, 0, g->p.init_cpu0,  g->p.thp, g->p.init_random);
p                 467 tools/perf/bench/numa.c 	return alloc_data(bytes, MAP_PRIVATE, 0, g->p.init_cpu0,  g->p.thp, g->p.init_random);
p                 497 tools/perf/bench/numa.c 	if (!g->p.cpu_list_str)
p                 500 tools/perf/bench/numa.c 	dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);
p                 502 tools/perf/bench/numa.c 	str0 = str = strdup(g->p.cpu_list_str);
p                 537 tools/perf/bench/numa.c 			BUG_ON(step <= 0 || step >= g->p.nr_cpus);
p                 549 tools/perf/bench/numa.c 			BUG_ON(bind_len <= 0 || bind_len > g->p.nr_cpus);
p                 562 tools/perf/bench/numa.c 		if (bind_cpu_0 >= g->p.nr_cpus || bind_cpu_1 >= g->p.nr_cpus) {
p                 563 tools/perf/bench/numa.c 			printf("\nTest not applicable, system has only %d CPUs.\n", g->p.nr_cpus);
p                 576 tools/perf/bench/numa.c 				if (t >= g->p.nr_tasks) {
p                 592 tools/perf/bench/numa.c 					BUG_ON(cpu < 0 || cpu >= g->p.nr_cpus);
p                 603 tools/perf/bench/numa.c 	if (t < g->p.nr_tasks)
p                 604 tools/perf/bench/numa.c 		printf("# NOTE: %d tasks bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
p                 634 tools/perf/bench/numa.c 	if (!g->p.node_list_str)
p                 637 tools/perf/bench/numa.c 	dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);
p                 639 tools/perf/bench/numa.c 	str0 = str = strdup(g->p.node_list_str);
p                 673 tools/perf/bench/numa.c 			BUG_ON(step <= 0 || step >= g->p.nr_nodes);
p                 686 tools/perf/bench/numa.c 		if (bind_node_0 >= g->p.nr_nodes || bind_node_1 >= g->p.nr_nodes) {
p                 687 tools/perf/bench/numa.c 			printf("\nTest not applicable, system has only %d nodes.\n", g->p.nr_nodes);
p                 698 tools/perf/bench/numa.c 				if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
p                 718 tools/perf/bench/numa.c 	if (t < g->p.nr_tasks)
p                 719 tools/perf/bench/numa.c 		printf("# NOTE: %d tasks mem-bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
p                 752 tools/perf/bench/numa.c 	if (g->p.data_reads)
p                 754 tools/perf/bench/numa.c 	if (g->p.data_writes)
p                 783 tools/perf/bench/numa.c 	if (g->p.data_zero_memset && !g->p.data_rand_walk) {
p                 790 tools/perf/bench/numa.c 	chunk_1 = words/g->p.nr_loops;
p                 796 tools/perf/bench/numa.c 	if (g->p.data_rand_walk) {
p                 808 tools/perf/bench/numa.c 			if (g->p.data_zero_memset) {
p                 815 tools/perf/bench/numa.c 	} else if (!g->p.data_backwards || (nr + loop) & 1) {
p                 881 tools/perf/bench/numa.c 	for (t = 0; t < g->p.nr_threads; t++) {
p                 886 tools/perf/bench/numa.c 		task_nr = process_nr*g->p.nr_threads + t;
p                 914 tools/perf/bench/numa.c 	int t, p;
p                 916 tools/perf/bench/numa.c 	for (p = 0; p < g->p.nr_proc; p++) {
p                 917 tools/perf/bench/numa.c 		for (t = 0; t < g->p.nr_threads; t++) {
p                 922 tools/perf/bench/numa.c 			task_nr = p*g->p.nr_threads + t;
p                 939 tools/perf/bench/numa.c 	int p;
p                 944 tools/perf/bench/numa.c 	for (p = 0; p < g->p.nr_proc; p++) {
p                 945 tools/perf/bench/numa.c 		unsigned int nodes = count_process_nodes(p);
p                 980 tools/perf/bench/numa.c 	if (!g->p.show_convergence && !g->p.measure_convergence)
p                 983 tools/perf/bench/numa.c 	for (node = 0; node < g->p.nr_nodes; node++)
p                 989 tools/perf/bench/numa.c 	for (t = 0; t < g->p.nr_tasks; t++) {
p                1009 tools/perf/bench/numa.c 	nr_min = g->p.nr_tasks;
p                1012 tools/perf/bench/numa.c 	for (node = 0; node < g->p.nr_nodes; node++) {
p                1022 tools/perf/bench/numa.c 	BUG_ON(sum > g->p.nr_tasks);
p                1024 tools/perf/bench/numa.c 	if (0 && (sum < g->p.nr_tasks))
p                1034 tools/perf/bench/numa.c 	for (node = 0; node < g->p.nr_nodes; node++) {
p                1061 tools/perf/bench/numa.c 	if (strong && process_groups == g->p.nr_proc) {
p                1065 tools/perf/bench/numa.c 			if (g->p.measure_convergence) {
p                1082 tools/perf/bench/numa.c 		(double)(l+1)/g->p.nr_loops*100.0, runtime_ns_max / NSEC_PER_SEC / 60.0);
p                1086 tools/perf/bench/numa.c 	if (g->p.show_details >= 0)
p                1098 tools/perf/bench/numa.c 	int details = g->p.show_details;
p                1118 tools/perf/bench/numa.c 	thread_data = setup_private_data(g->p.bytes_thread);
p                1123 tools/perf/bench/numa.c 	if (process_nr == g->p.nr_proc-1 && thread_nr == g->p.nr_threads-1)
p                1135 tools/perf/bench/numa.c 	if (g->p.serialize_startup) {
p                1145 tools/perf/bench/numa.c 		if (g->nr_tasks_working == g->p.nr_tasks)
p                1156 tools/perf/bench/numa.c 	for (l = 0; l < g->p.nr_loops; l++) {
p                1162 tools/perf/bench/numa.c 		val += do_work(global_data,  g->p.bytes_global,  process_nr, g->p.nr_proc,	l, val);
p                1163 tools/perf/bench/numa.c 		val += do_work(process_data, g->p.bytes_process, thread_nr,  g->p.nr_threads,	l, val);
p                1164 tools/perf/bench/numa.c 		val += do_work(thread_data,  g->p.bytes_thread,  0,          1,		l, val);
p                1166 tools/perf/bench/numa.c 		if (g->p.sleep_usecs) {
p                1168 tools/perf/bench/numa.c 			usleep(g->p.sleep_usecs);
p                1174 tools/perf/bench/numa.c 		if (g->p.bytes_process_locked) {
p                1176 tools/perf/bench/numa.c 			val += do_work(process_data, g->p.bytes_process_locked, thread_nr,  g->p.nr_threads,	l, val);
p                1180 tools/perf/bench/numa.c 		work_done = g->p.bytes_global + g->p.bytes_process +
p                1181 tools/perf/bench/numa.c 			    g->p.bytes_process_locked + g->p.bytes_thread;
p                1186 tools/perf/bench/numa.c 		if (details < 0 && !g->p.perturb_secs && !g->p.measure_convergence && !g->p.nr_secs)
p                1194 tools/perf/bench/numa.c 		if (g->p.nr_secs) {
p                1196 tools/perf/bench/numa.c 			if ((u32)diff.tv_sec >= g->p.nr_secs) {
p                1210 tools/perf/bench/numa.c 		if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) {
p                1223 tools/perf/bench/numa.c 			if (this_cpu < g->p.nr_cpus/2)
p                1224 tools/perf/bench/numa.c 				target_cpu = g->p.nr_cpus-1;
p                1271 tools/perf/bench/numa.c 	free_data(thread_data, g->p.bytes_thread);
p                1300 tools/perf/bench/numa.c 	task_nr = process_nr*g->p.nr_threads;
p                1306 tools/perf/bench/numa.c 	pthreads = zalloc(g->p.nr_threads * sizeof(pthread_t));
p                1307 tools/perf/bench/numa.c 	process_data = setup_private_data(g->p.bytes_process);
p                1309 tools/perf/bench/numa.c 	if (g->p.show_details >= 3) {
p                1314 tools/perf/bench/numa.c 	for (t = 0; t < g->p.nr_threads; t++) {
p                1315 tools/perf/bench/numa.c 		task_nr = process_nr*g->p.nr_threads + t;
p                1330 tools/perf/bench/numa.c 	for (t = 0; t < g->p.nr_threads; t++) {
p                1335 tools/perf/bench/numa.c 	free_data(process_data, g->p.bytes_process);
p                1341 tools/perf/bench/numa.c 	if (g->p.show_details < 0)
p                1346 tools/perf/bench/numa.c 		g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
p                1348 tools/perf/bench/numa.c 			g->p.nr_loops, g->p.bytes_global/1024/1024);
p                1350 tools/perf/bench/numa.c 			g->p.nr_loops, g->p.bytes_process/1024/1024);
p                1352 tools/perf/bench/numa.c 			g->p.nr_loops, g->p.bytes_thread/1024/1024);
p                1361 tools/perf/bench/numa.c 	ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
p                1366 tools/perf/bench/numa.c 	for (t = 0; t < g->p.nr_tasks; t++) {
p                1375 tools/perf/bench/numa.c 		for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
p                1382 tools/perf/bench/numa.c 	ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
p                1392 tools/perf/bench/numa.c 	g->p = p0;
p                1394 tools/perf/bench/numa.c 	g->p.nr_cpus = numa_num_configured_cpus();
p                1396 tools/perf/bench/numa.c 	g->p.nr_nodes = numa_max_node() + 1;
p                1399 tools/perf/bench/numa.c 	BUG_ON(g->p.nr_nodes > MAX_NR_NODES || g->p.nr_nodes < 0);
p                1401 tools/perf/bench/numa.c 	if (g->p.show_quiet && !g->p.show_details)
p                1402 tools/perf/bench/numa.c 		g->p.show_details = -1;
p                1405 tools/perf/bench/numa.c 	if (!g->p.mb_global_str && !g->p.mb_proc_str && !g->p.mb_thread_str)
p                1408 tools/perf/bench/numa.c 	if (g->p.mb_global_str) {
p                1409 tools/perf/bench/numa.c 		g->p.mb_global = atof(g->p.mb_global_str);
p                1410 tools/perf/bench/numa.c 		BUG_ON(g->p.mb_global < 0);
p                1413 tools/perf/bench/numa.c 	if (g->p.mb_proc_str) {
p                1414 tools/perf/bench/numa.c 		g->p.mb_proc = atof(g->p.mb_proc_str);
p                1415 tools/perf/bench/numa.c 		BUG_ON(g->p.mb_proc < 0);
p                1418 tools/perf/bench/numa.c 	if (g->p.mb_proc_locked_str) {
p                1419 tools/perf/bench/numa.c 		g->p.mb_proc_locked = atof(g->p.mb_proc_locked_str);
p                1420 tools/perf/bench/numa.c 		BUG_ON(g->p.mb_proc_locked < 0);
p                1421 tools/perf/bench/numa.c 		BUG_ON(g->p.mb_proc_locked > g->p.mb_proc);
p                1424 tools/perf/bench/numa.c 	if (g->p.mb_thread_str) {
p                1425 tools/perf/bench/numa.c 		g->p.mb_thread = atof(g->p.mb_thread_str);
p                1426 tools/perf/bench/numa.c 		BUG_ON(g->p.mb_thread < 0);
p                1429 tools/perf/bench/numa.c 	BUG_ON(g->p.nr_threads <= 0);
p                1430 tools/perf/bench/numa.c 	BUG_ON(g->p.nr_proc <= 0);
p                1432 tools/perf/bench/numa.c 	g->p.nr_tasks = g->p.nr_proc*g->p.nr_threads;
p                1434 tools/perf/bench/numa.c 	g->p.bytes_global		= g->p.mb_global	*1024L*1024L;
p                1435 tools/perf/bench/numa.c 	g->p.bytes_process		= g->p.mb_proc		*1024L*1024L;
p                1436 tools/perf/bench/numa.c 	g->p.bytes_process_locked	= g->p.mb_proc_locked	*1024L*1024L;
p                1437 tools/perf/bench/numa.c 	g->p.bytes_thread		= g->p.mb_thread	*1024L*1024L;
p                1439 tools/perf/bench/numa.c 	g->data = setup_shared_data(g->p.bytes_global);
p                1461 tools/perf/bench/numa.c 	free_data(g->data, g->p.bytes_global);
p                1479 tools/perf/bench/numa.c 	if (!g->p.show_quiet)
p                1496 tools/perf/bench/numa.c 	int i, t, p;
p                1501 tools/perf/bench/numa.c 	pids = zalloc(g->p.nr_proc * sizeof(*pids));
p                1507 tools/perf/bench/numa.c 	if (g->p.serialize_startup) {
p                1514 tools/perf/bench/numa.c 	for (i = 0; i < g->p.nr_proc; i++) {
p                1529 tools/perf/bench/numa.c 	while (g->nr_tasks_started != g->p.nr_tasks)
p                1532 tools/perf/bench/numa.c 	BUG_ON(g->nr_tasks_started != g->p.nr_tasks);
p                1534 tools/perf/bench/numa.c 	if (g->p.serialize_startup) {
p                1565 tools/perf/bench/numa.c 	for (i = 0; i < g->p.nr_proc; i++) {
p                1575 tools/perf/bench/numa.c 	for (t = 0; t < g->p.nr_tasks; t++) {
p                1597 tools/perf/bench/numa.c 	runtime_avg = (double)runtime_ns_sum / g->p.nr_tasks / NSEC_PER_SEC;
p                1599 tools/perf/bench/numa.c 	if (g->p.measure_convergence) {
p                1617 tools/perf/bench/numa.c 	print_res(name, bytes / g->p.nr_tasks / 1e9,
p                1623 tools/perf/bench/numa.c 	print_res(name, runtime_sec_max * NSEC_PER_SEC / (bytes / g->p.nr_tasks),
p                1626 tools/perf/bench/numa.c 	print_res(name, bytes / g->p.nr_tasks / 1e9 / runtime_sec_max,
p                1632 tools/perf/bench/numa.c 	if (g->p.show_details >= 2) {
p                1635 tools/perf/bench/numa.c 		for (p = 0; p < g->p.nr_proc; p++) {
p                1636 tools/perf/bench/numa.c 			for (t = 0; t < g->p.nr_threads; t++) {
p                1638 tools/perf/bench/numa.c 				td = g->threads + p*g->p.nr_threads + t;
p                1639 tools/perf/bench/numa.c 				snprintf(tname, sizeof(tname), "process%d:thread%d", p, t);
p                1673 tools/perf/bench/numa.c static void init_params(struct params *p, const char *name, int argc, const char **argv)
p                1684 tools/perf/bench/numa.c 	memset(p, 0, sizeof(*p));
p                1688 tools/perf/bench/numa.c 	p->serialize_startup		= 1;
p                1689 tools/perf/bench/numa.c 	p->data_reads			= true;
p                1690 tools/perf/bench/numa.c 	p->data_writes			= true;
p                1691 tools/perf/bench/numa.c 	p->data_backwards		= true;
p                1692 tools/perf/bench/numa.c 	p->data_rand_walk		= true;
p                1693 tools/perf/bench/numa.c 	p->nr_loops			= -1;
p                1694 tools/perf/bench/numa.c 	p->init_random			= true;
p                1695 tools/perf/bench/numa.c 	p->mb_global_str		= "1";
p                1696 tools/perf/bench/numa.c 	p->nr_proc			= 1;
p                1697 tools/perf/bench/numa.c 	p->nr_threads			= 1;
p                1698 tools/perf/bench/numa.c 	p->nr_secs			= 5;
p                1699 tools/perf/bench/numa.c 	p->run_all			= argc == 1;
p                  36 tools/perf/builtin-buildid-cache.c 	char *p;
p                  40 tools/perf/builtin-buildid-cache.c 	p = strrchr(root_dir, '/');
p                  41 tools/perf/builtin-buildid-cache.c 	if (!p)
p                  43 tools/perf/builtin-buildid-cache.c 	*p = '\0';
p                 121 tools/perf/builtin-buildid-cache.c 	char *p;
p                 125 tools/perf/builtin-buildid-cache.c 	p = strrchr(from_dir, '/');
p                 126 tools/perf/builtin-buildid-cache.c 	if (!p || strcmp(p + 1, "kcore"))
p                 128 tools/perf/builtin-buildid-cache.c 	*p = '\0';
p                 154 tools/perf/builtin-buildid-cache.c 			p = strrchr(to_dir, '/');
p                 155 tools/perf/builtin-buildid-cache.c 			if (p)
p                 156 tools/perf/builtin-buildid-cache.c 				*p = '\0';
p                 159 tools/perf/builtin-buildid-cache.c 				p = strrchr(to_dir, '/');
p                 160 tools/perf/builtin-buildid-cache.c 				if (p)
p                 161 tools/perf/builtin-buildid-cache.c 					*p = '\0';
p                 822 tools/perf/builtin-c2c.c 	double p;
p                 844 tools/perf/builtin-c2c.c 	p = tot ? (double) st / tot : 0;
p                 846 tools/perf/builtin-c2c.c 	return 100 * p;
p                1088 tools/perf/builtin-diff.c 	char *p = *pstr;
p                1095 tools/perf/builtin-diff.c 	p = strchr(*pstr, ':');
p                1096 tools/perf/builtin-diff.c 	if (p) {
p                1097 tools/perf/builtin-diff.c 		if (p == *pstr) {
p                1102 tools/perf/builtin-diff.c 		*p = 0;
p                1103 tools/perf/builtin-diff.c 		p++;
p                1104 tools/perf/builtin-diff.c 		if (*p == 0) {
p                1117 tools/perf/builtin-diff.c 	if (!p || *p == 0)
p                1120 tools/perf/builtin-diff.c 		*pstr = p;
p                 191 tools/perf/builtin-help.c 	struct man_viewer_list **p = &man_viewer_list;
p                 194 tools/perf/builtin-help.c 	while (*p)
p                 195 tools/perf/builtin-help.c 		p = &((*p)->next);
p                 196 tools/perf/builtin-help.c 	*p = zalloc(sizeof(**p) + len + 1);
p                 197 tools/perf/builtin-help.c 	strcpy((*p)->name, name);
p                 512 tools/perf/builtin-kvm.c 	struct kvm_event *p;
p                 515 tools/perf/builtin-kvm.c 		p = container_of(*rb, struct kvm_event, rb);
p                 518 tools/perf/builtin-kvm.c 		if (bigger(event, p, vcpu))
p                 146 tools/perf/builtin-lock.c 	struct thread_stat *p;
p                 149 tools/perf/builtin-lock.c 		p = container_of(*rb, struct thread_stat, rb);
p                 152 tools/perf/builtin-lock.c 		if (new->tid < p->tid)
p                 154 tools/perf/builtin-lock.c 		else if (new->tid > p->tid)
p                 287 tools/perf/builtin-lock.c 	struct lock_stat *p;
p                 290 tools/perf/builtin-lock.c 		p = container_of(*rb, struct lock_stat, rb);
p                 293 tools/perf/builtin-lock.c 		if (bigger(st, p))
p                 340 tools/perf/builtin-mem.c 	char *s, *os = NULL, *p;
p                 357 tools/perf/builtin-mem.c 			p = strchr(s, ',');
p                 358 tools/perf/builtin-mem.c 			if (p)
p                 359 tools/perf/builtin-mem.c 				*p = '\0';
p                 373 tools/perf/builtin-mem.c 			if (!p)
p                 376 tools/perf/builtin-mem.c 			s = p + 1;
p                1944 tools/perf/builtin-record.c 	char *s, *p;
p                1955 tools/perf/builtin-record.c 	p = strchr(s, ',');
p                1956 tools/perf/builtin-record.c 	if (p)
p                1957 tools/perf/builtin-record.c 		*p = '\0';
p                1966 tools/perf/builtin-record.c 	if (!p) {
p                1971 tools/perf/builtin-record.c 	ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
p                1873 tools/perf/builtin-sched.c 		void *p = r->last_time;
p                1875 tools/perf/builtin-sched.c 		p = realloc(r->last_time, n * sizeof(u64));
p                1876 tools/perf/builtin-sched.c 		if (!p)
p                1879 tools/perf/builtin-sched.c 		r->last_time = p;
p                2256 tools/perf/builtin-sched.c 		void *p;
p                2258 tools/perf/builtin-sched.c 		p = realloc(idle_threads, j * sizeof(struct thread *));
p                2259 tools/perf/builtin-sched.c 		if (!p)
p                2262 tools/perf/builtin-sched.c 		idle_threads = (struct thread **) p;
p                2887 tools/perf/builtin-script.c 	const char *p = str;
p                2890 tools/perf/builtin-script.c 		p = str + strlen(str) - suffix_len;
p                2891 tools/perf/builtin-script.c 		if (!strncmp(p, suffix, suffix_len))
p                2892 tools/perf/builtin-script.c 			return p;
p                2900 tools/perf/builtin-script.c 	char line[BUFSIZ], *p;
p                2908 tools/perf/builtin-script.c 		p = skip_spaces(line);
p                2909 tools/perf/builtin-script.c 		if (strlen(p) == 0)
p                2911 tools/perf/builtin-script.c 		if (*p != '#')
p                2913 tools/perf/builtin-script.c 		p++;
p                2914 tools/perf/builtin-script.c 		if (strlen(p) && *p == '!')
p                2917 tools/perf/builtin-script.c 		p = skip_spaces(p);
p                2918 tools/perf/builtin-script.c 		if (strlen(p) && p[strlen(p) - 1] == '\n')
p                2919 tools/perf/builtin-script.c 			p[strlen(p) - 1] = '\0';
p                2921 tools/perf/builtin-script.c 		if (!strncmp(p, "description:", strlen("description:"))) {
p                2922 tools/perf/builtin-script.c 			p += strlen("description:");
p                2923 tools/perf/builtin-script.c 			desc->half_liner = strdup(skip_spaces(p));
p                2927 tools/perf/builtin-script.c 		if (!strncmp(p, "args:", strlen("args:"))) {
p                2928 tools/perf/builtin-script.c 			p += strlen("args:");
p                2929 tools/perf/builtin-script.c 			desc->args = strdup(skip_spaces(p));
p                3024 tools/perf/builtin-script.c 	char line[BUFSIZ], *p;
p                3036 tools/perf/builtin-script.c 		p = skip_spaces(line);
p                3037 tools/perf/builtin-script.c 		if (*p == '#')
p                3040 tools/perf/builtin-script.c 		while (strlen(p)) {
p                3041 tools/perf/builtin-script.c 			p = strstr(p, "-e");
p                3042 tools/perf/builtin-script.c 			if (!p)
p                3045 tools/perf/builtin-script.c 			p += 2;
p                3046 tools/perf/builtin-script.c 			p = skip_spaces(p);
p                3047 tools/perf/builtin-script.c 			len = strcspn(p, " \t");
p                3051 tools/perf/builtin-script.c 			snprintf(evname, len + 1, "%s", p);
p                3199 tools/perf/builtin-script.c 	char *p;
p                3209 tools/perf/builtin-script.c 	for (p = desc->args; *p; p++)
p                3210 tools/perf/builtin-script.c 		if (*p == '<')
p                 218 tools/perf/builtin-timechart.c 	struct per_pid *p;
p                 220 tools/perf/builtin-timechart.c 	p = find_create_pid(tchart, pid);
p                 221 tools/perf/builtin-timechart.c 	c = p->all;
p                 224 tools/perf/builtin-timechart.c 			p->current = c;
p                 229 tools/perf/builtin-timechart.c 			p->current = c;
p                 237 tools/perf/builtin-timechart.c 	p->current = c;
p                 238 tools/perf/builtin-timechart.c 	c->next = p->all;
p                 239 tools/perf/builtin-timechart.c 	p->all = c;
p                 244 tools/perf/builtin-timechart.c 	struct per_pid *p, *pp;
p                 245 tools/perf/builtin-timechart.c 	p = find_create_pid(tchart, pid);
p                 247 tools/perf/builtin-timechart.c 	p->ppid = ppid;
p                 248 tools/perf/builtin-timechart.c 	if (pp->current && pp->current->comm && !p->current)
p                 251 tools/perf/builtin-timechart.c 	p->start_time = timestamp;
p                 252 tools/perf/builtin-timechart.c 	if (p->current && !p->current->start_time) {
p                 253 tools/perf/builtin-timechart.c 		p->current->start_time = timestamp;
p                 254 tools/perf/builtin-timechart.c 		p->current->state_since = timestamp;
p                 260 tools/perf/builtin-timechart.c 	struct per_pid *p;
p                 261 tools/perf/builtin-timechart.c 	p = find_create_pid(tchart, pid);
p                 262 tools/perf/builtin-timechart.c 	p->end_time = timestamp;
p                 263 tools/perf/builtin-timechart.c 	if (p->current)
p                 264 tools/perf/builtin-timechart.c 		p->current->end_time = timestamp;
p                 271 tools/perf/builtin-timechart.c 	struct per_pid *p;
p                 275 tools/perf/builtin-timechart.c 	p = find_create_pid(tchart, pid);
p                 276 tools/perf/builtin-timechart.c 	c = p->current;
p                 280 tools/perf/builtin-timechart.c 		p->current = c;
p                 281 tools/perf/builtin-timechart.c 		c->next = p->all;
p                 282 tools/perf/builtin-timechart.c 		p->all = c;
p                 297 tools/perf/builtin-timechart.c 		p->total_time += (end-start);
p                 302 tools/perf/builtin-timechart.c 	if (p->start_time == 0 || p->start_time > start)
p                 303 tools/perf/builtin-timechart.c 		p->start_time = start;
p                 409 tools/perf/builtin-timechart.c 	struct per_pid *p;
p                 425 tools/perf/builtin-timechart.c 	p = find_create_pid(tchart, we->wakee);
p                 427 tools/perf/builtin-timechart.c 	if (p && p->current && p->current->state == TYPE_NONE) {
p                 428 tools/perf/builtin-timechart.c 		p->current->state_since = timestamp;
p                 429 tools/perf/builtin-timechart.c 		p->current->state = TYPE_WAITING;
p                 431 tools/perf/builtin-timechart.c 	if (p && p->current && p->current->state == TYPE_BLOCKED) {
p                 432 tools/perf/builtin-timechart.c 		pid_put_sample(tchart, p->pid, p->current->state, cpu,
p                 433 tools/perf/builtin-timechart.c 			       p->current->state_since, timestamp, NULL);
p                 434 tools/perf/builtin-timechart.c 		p->current->state_since = timestamp;
p                 435 tools/perf/builtin-timechart.c 		p->current->state = TYPE_WAITING;
p                 443 tools/perf/builtin-timechart.c 	struct per_pid *p = NULL, *prev_p;
p                 447 tools/perf/builtin-timechart.c 	p = find_create_pid(tchart, next_pid);
p                 453 tools/perf/builtin-timechart.c 	if (p && p->current) {
p                 454 tools/perf/builtin-timechart.c 		if (p->current->state != TYPE_NONE)
p                 455 tools/perf/builtin-timechart.c 			pid_put_sample(tchart, next_pid, p->current->state, cpu,
p                 456 tools/perf/builtin-timechart.c 				       p->current->state_since, timestamp,
p                 459 tools/perf/builtin-timechart.c 		p->current->state_since = timestamp;
p                 460 tools/perf/builtin-timechart.c 		p->current->state = TYPE_RUNNING;
p                 479 tools/perf/builtin-timechart.c 	char *p = NULL;
p                 484 tools/perf/builtin-timechart.c 	FILE *f = open_memstream(&p, &p_len);
p                 527 tools/perf/builtin-timechart.c 				zfree(&p);
p                 544 tools/perf/builtin-timechart.c 	return p;
p                 721 tools/perf/builtin-timechart.c 	struct per_pid *p = find_create_pid(tchart, pid);
p                 722 tools/perf/builtin-timechart.c 	struct per_pidcomm *c = p->current;
p                 730 tools/perf/builtin-timechart.c 		p->current = c;
p                 731 tools/perf/builtin-timechart.c 		c->next = p->all;
p                 732 tools/perf/builtin-timechart.c 		p->all = c;
p                 766 tools/perf/builtin-timechart.c 	struct per_pid *p = find_create_pid(tchart, pid);
p                 767 tools/perf/builtin-timechart.c 	struct per_pidcomm *c = p->current;
p                 814 tools/perf/builtin-timechart.c 		p->total_bytes += ret;
p                 966 tools/perf/builtin-timechart.c 	struct per_pid *new_list, *p, *cursor, *prev;
p                 972 tools/perf/builtin-timechart.c 		p = tchart->all_data;
p                 973 tools/perf/builtin-timechart.c 		tchart->all_data = p->next;
p                 974 tools/perf/builtin-timechart.c 		p->next = NULL;
p                 977 tools/perf/builtin-timechart.c 			new_list = p;
p                 978 tools/perf/builtin-timechart.c 			p->next = NULL;
p                 984 tools/perf/builtin-timechart.c 			if (cursor->ppid > p->ppid ||
p                 985 tools/perf/builtin-timechart.c 				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
p                 988 tools/perf/builtin-timechart.c 					p->next = prev->next;
p                 989 tools/perf/builtin-timechart.c 					prev->next = p;
p                 993 tools/perf/builtin-timechart.c 					p->next = new_list;
p                 994 tools/perf/builtin-timechart.c 					new_list = p;
p                1003 tools/perf/builtin-timechart.c 				prev->next = p;
p                1038 tools/perf/builtin-timechart.c 	struct per_pid *p;
p                1047 tools/perf/builtin-timechart.c 		p = tchart->all_data;
p                1048 tools/perf/builtin-timechart.c 		while (p) {
p                1049 tools/perf/builtin-timechart.c 			if (p->pid == we->waker || p->pid == we->wakee) {
p                1050 tools/perf/builtin-timechart.c 				c = p->all;
p                1053 tools/perf/builtin-timechart.c 						if (p->pid == we->waker && !from) {
p                1057 tools/perf/builtin-timechart.c 						if (p->pid == we->wakee && !to) {
p                1064 tools/perf/builtin-timechart.c 				c = p->all;
p                1066 tools/perf/builtin-timechart.c 					if (p->pid == we->waker && !from) {
p                1070 tools/perf/builtin-timechart.c 					if (p->pid == we->wakee && !to) {
p                1077 tools/perf/builtin-timechart.c 			p = p->next;
p                1105 tools/perf/builtin-timechart.c 	struct per_pid *p;
p                1108 tools/perf/builtin-timechart.c 	p = tchart->all_data;
p                1109 tools/perf/builtin-timechart.c 	while (p) {
p                1110 tools/perf/builtin-timechart.c 		c = p->all;
p                1118 tools/perf/builtin-timechart.c 						    p->pid,
p                1127 tools/perf/builtin-timechart.c 		p = p->next;
p                1136 tools/perf/builtin-timechart.c 	struct per_pid *p;
p                1141 tools/perf/builtin-timechart.c 	p = tchart->all_data;
p                1142 tools/perf/builtin-timechart.c 	while (p) {
p                1143 tools/perf/builtin-timechart.c 		c = p->all;
p                1235 tools/perf/builtin-timechart.c 			sprintf(comm, "%s:%i (%3.1f %sbytes)", c->comm ?: "", p->pid, bytes, suf);
p                1242 tools/perf/builtin-timechart.c 		p = p->next;
p                1248 tools/perf/builtin-timechart.c 	struct per_pid *p;
p                1255 tools/perf/builtin-timechart.c 	p = tchart->all_data;
p                1256 tools/perf/builtin-timechart.c 	while (p) {
p                1257 tools/perf/builtin-timechart.c 		c = p->all;
p                1289 tools/perf/builtin-timechart.c 					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / (double)NSEC_PER_SEC);
p                1291 tools/perf/builtin-timechart.c 					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / (double)NSEC_PER_MSEC);
p                1299 tools/perf/builtin-timechart.c 		p = p->next;
p                1318 tools/perf/builtin-timechart.c static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
p                1326 tools/perf/builtin-timechart.c 		if (filt->pid && p->pid == filt->pid)
p                1337 tools/perf/builtin-timechart.c 	struct per_pid *p;
p                1341 tools/perf/builtin-timechart.c 	p = tchart->all_data;
p                1342 tools/perf/builtin-timechart.c 	while (p) {
p                1343 tools/perf/builtin-timechart.c 		p->display = 0;
p                1344 tools/perf/builtin-timechart.c 		if (p->start_time == 1)
p                1345 tools/perf/builtin-timechart.c 			p->start_time = tchart->first_time;
p                1348 tools/perf/builtin-timechart.c 		if (p->end_time == 0)
p                1349 tools/perf/builtin-timechart.c 			p->end_time = tchart->last_time;
p                1351 tools/perf/builtin-timechart.c 		c = p->all;
p                1359 tools/perf/builtin-timechart.c 			if (passes_filter(p, c)) {
p                1361 tools/perf/builtin-timechart.c 				p->display = 1;
p                1370 tools/perf/builtin-timechart.c 		p = p->next;
p                1377 tools/perf/builtin-timechart.c 	struct per_pid *p;
p                1381 tools/perf/builtin-timechart.c 	p = tchart->all_data;
p                1382 tools/perf/builtin-timechart.c 	while (p) {
p                1383 tools/perf/builtin-timechart.c 		p->display = 0;
p                1384 tools/perf/builtin-timechart.c 		if (p->start_time == 1)
p                1385 tools/perf/builtin-timechart.c 			p->start_time = tchart->first_time;
p                1388 tools/perf/builtin-timechart.c 		if (p->end_time == 0)
p                1389 tools/perf/builtin-timechart.c 			p->end_time = tchart->last_time;
p                1390 tools/perf/builtin-timechart.c 		if (p->total_time >= threshold)
p                1391 tools/perf/builtin-timechart.c 			p->display = 1;
p                1393 tools/perf/builtin-timechart.c 		c = p->all;
p                1411 tools/perf/builtin-timechart.c 		p = p->next;
p                1418 tools/perf/builtin-timechart.c 	struct per_pid *p;
p                1422 tools/perf/builtin-timechart.c 	p = timechart->all_data;
p                1423 tools/perf/builtin-timechart.c 	while (p) {
p                1425 tools/perf/builtin-timechart.c 		if (p->end_time == 0)
p                1426 tools/perf/builtin-timechart.c 			p->end_time = timechart->last_time;
p                1428 tools/perf/builtin-timechart.c 		c = p->all;
p                1443 tools/perf/builtin-timechart.c 		p = p->next;
p                1645 tools/perf/builtin-timechart.c 	const char **p;
p                1731 tools/perf/builtin-timechart.c 	p = rec_argv;
p                1733 tools/perf/builtin-timechart.c 		*p++ = strdup(common_args[i]);
p                1741 tools/perf/builtin-timechart.c 		*p++ = "-e";
p                1742 tools/perf/builtin-timechart.c 		*p++ = strdup(disk_events[i]);
p                1743 tools/perf/builtin-timechart.c 		*p++ = "--filter";
p                1744 tools/perf/builtin-timechart.c 		*p++ = filter;
p                1752 tools/perf/builtin-timechart.c 		*p++ = "-e";
p                1753 tools/perf/builtin-timechart.c 		*p++ = strdup(net_events[i]);
p                1754 tools/perf/builtin-timechart.c 		*p++ = "--filter";
p                1755 tools/perf/builtin-timechart.c 		*p++ = filter;
p                1763 tools/perf/builtin-timechart.c 		*p++ = "-e";
p                1764 tools/perf/builtin-timechart.c 		*p++ = strdup(poll_events[i]);
p                1765 tools/perf/builtin-timechart.c 		*p++ = "--filter";
p                1766 tools/perf/builtin-timechart.c 		*p++ = filter;
p                1770 tools/perf/builtin-timechart.c 		*p++ = argv[i];
p                1780 tools/perf/builtin-timechart.c 	const char **p;
p                1844 tools/perf/builtin-timechart.c 	p = rec_argv;
p                1846 tools/perf/builtin-timechart.c 		*p++ = strdup(common_args[i]);
p                1849 tools/perf/builtin-timechart.c 		*p++ = strdup(backtrace_args[i]);
p                1852 tools/perf/builtin-timechart.c 		*p++ = strdup(tasks_args[i]);
p                1855 tools/perf/builtin-timechart.c 		*p++ = strdup(power_args[i]);
p                1858 tools/perf/builtin-timechart.c 		*p++ = strdup(old_power_args[i]);
p                1861 tools/perf/builtin-timechart.c 		*p++ = argv[j];
p                 354 tools/perf/builtin-top.c 	char *buf = malloc(0), *p;
p                 362 tools/perf/builtin-top.c 	p = strchr(buf, '\n');
p                 363 tools/perf/builtin-top.c 	if (p)
p                 364 tools/perf/builtin-top.c 		*p = 0;
p                 366 tools/perf/builtin-top.c 	p = buf;
p                 367 tools/perf/builtin-top.c 	while(*p) {
p                 368 tools/perf/builtin-top.c 		if (!isdigit(*p))
p                 370 tools/perf/builtin-top.c 		p++;
p                 389 tools/perf/builtin-top.c 	char *buf = malloc(0), *p;
p                 405 tools/perf/builtin-top.c 	p = strchr(buf, '\n');
p                 406 tools/perf/builtin-top.c 	if (p)
p                 407 tools/perf/builtin-top.c 		*p = 0;
p                1663 tools/perf/builtin-trace.c 	unsigned char *p = arg->args + sizeof(unsigned long) * idx;
p                1665 tools/perf/builtin-trace.c 	memcpy(&val, p, sizeof(val));
p                 133 tools/perf/jvmti/jvmti_agent.c 	char *base, *p;
p                 187 tools/perf/jvmti/jvmti_agent.c 	p = mkdtemp(jit_path);
p                 188 tools/perf/jvmti/jvmti_agent.c 	if (p != jit_path) {
p                 341 tools/perf/jvmti/jvmti_agent.c 	rec.p.id = JIT_CODE_CLOSE;
p                 342 tools/perf/jvmti/jvmti_agent.c 	rec.p.total_size = sizeof(rec);
p                 344 tools/perf/jvmti/jvmti_agent.c 	rec.p.timestamp = perf_get_timestamp();
p                 379 tools/perf/jvmti/jvmti_agent.c 	rec.p.id           = JIT_CODE_LOAD;
p                 380 tools/perf/jvmti/jvmti_agent.c 	rec.p.total_size   = sizeof(rec) + sym_len;
p                 381 tools/perf/jvmti/jvmti_agent.c 	rec.p.timestamp    = perf_get_timestamp();
p                 390 tools/perf/jvmti/jvmti_agent.c 		rec.p.total_size += size;
p                 442 tools/perf/jvmti/jvmti_agent.c 	rec.p.id        = JIT_CODE_DEBUG_INFO;
p                 444 tools/perf/jvmti/jvmti_agent.c 	rec.p.timestamp = perf_get_timestamp();
p                 457 tools/perf/jvmti/jvmti_agent.c 	rec.p.total_size = size;
p                 150 tools/perf/jvmti/libjvmti.c 		char *p = strrchr(class_sign, '/');
p                 151 tools/perf/jvmti/libjvmti.c 		if (p) {
p                 153 tools/perf/jvmti/libjvmti.c 			for (i = 0; i < (p - class_sign); i++)
p                 161 tools/perf/lib/cpumap.c 	char *p = NULL;
p                 178 tools/perf/lib/cpumap.c 		p = NULL;
p                 179 tools/perf/lib/cpumap.c 		start_cpu = strtoul(cpu_list, &p, 0);
p                 181 tools/perf/lib/cpumap.c 		    || (*p != '\0' && *p != ',' && *p != '-'))
p                 184 tools/perf/lib/cpumap.c 		if (*p == '-') {
p                 185 tools/perf/lib/cpumap.c 			cpu_list = ++p;
p                 186 tools/perf/lib/cpumap.c 			p = NULL;
p                 187 tools/perf/lib/cpumap.c 			end_cpu = strtoul(cpu_list, &p, 0);
p                 189 tools/perf/lib/cpumap.c 			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
p                 216 tools/perf/lib/cpumap.c 		if (*p)
p                 217 tools/perf/lib/cpumap.c 			++p;
p                 219 tools/perf/lib/cpumap.c 		cpu_list = p;
p                 256 tools/perf/perf.c 				struct cmd_struct *p = commands+i;
p                 257 tools/perf/perf.c 				printf("%s ", p->cmd);
p                 265 tools/perf/perf.c 				struct option *p = options+i;
p                 266 tools/perf/perf.c 				printf("--%s ", p->long_name);
p                 295 tools/perf/perf.c static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
p                 302 tools/perf/perf.c 		use_browser = check_browser_config(p->cmd);
p                 304 tools/perf/perf.c 	if (use_pager == -1 && p->option & RUN_SETUP)
p                 305 tools/perf/perf.c 		use_pager = check_pager_config(p->cmd);
p                 306 tools/perf/perf.c 	if (use_pager == -1 && p->option & USE_PAGER)
p                 312 tools/perf/perf.c 	status = p->fn(argc, argv);
p                 361 tools/perf/perf.c 		struct cmd_struct *p = commands+i;
p                 362 tools/perf/perf.c 		if (strcmp(p->cmd, cmd))
p                 364 tools/perf/perf.c 		exit(run_builtin(p, argc, argv));
p                 127 tools/perf/pmu-events/jevents.c 	char *p, *q;
p                 130 tools/perf/pmu-events/jevents.c 	for (p = s; *p; p++) {
p                 132 tools/perf/pmu-events/jevents.c 		if (*p == '\\')
p                 146 tools/perf/pmu-events/jevents.c 	for (p = s; *p; p++) {
p                 147 tools/perf/pmu-events/jevents.c 		if (*p == '\\') {
p                 151 tools/perf/pmu-events/jevents.c 		*q = *p;
p                 758 tools/perf/pmu-events/jevents.c 	char *line, *p;
p                 780 tools/perf/pmu-events/jevents.c 	p = fgets(line, n, mapfp);
p                 781 tools/perf/pmu-events/jevents.c 	if (!p)
p                 789 tools/perf/pmu-events/jevents.c 		p = fgets(line, n, mapfp);
p                 790 tools/perf/pmu-events/jevents.c 		if (!p)
p                 805 tools/perf/pmu-events/jevents.c 		cpuid = fixregex(strtok_r(p, ",", &save));
p                 108 tools/perf/tests/code-reading.c 	const char *p;
p                 112 tools/perf/tests/code-reading.c 	p = strchr(line, ':');
p                 113 tools/perf/tests/code-reading.c 	if (!p)
p                 115 tools/perf/tests/code-reading.c 	p++;
p                 118 tools/perf/tests/code-reading.c 	while (*p) {
p                 119 tools/perf/tests/code-reading.c 		if (!isspace(*p))
p                 121 tools/perf/tests/code-reading.c 		p++;
p                 125 tools/perf/tests/code-reading.c 		ret = read_objdump_chunk(&p, &buf, &buf_len);
p                 127 tools/perf/tests/code-reading.c 		p++;
p                  21 tools/perf/tests/expr.c 	const char *p;
p                  46 tools/perf/tests/expr.c 	p = "FOO/0";
p                  47 tools/perf/tests/expr.c 	ret = expr__parse(&val, &ctx, &p);
p                  50 tools/perf/tests/expr.c 	p = "BAR/";
p                  51 tools/perf/tests/expr.c 	ret = expr__parse(&val, &ctx, &p);
p                 248 tools/perf/ui/browsers/annotate.c 	struct rb_node **p = &root->rb_node;
p                 252 tools/perf/ui/browsers/annotate.c 	while (*p != NULL) {
p                 253 tools/perf/ui/browsers/annotate.c 		parent = *p;
p                 257 tools/perf/ui/browsers/annotate.c 			p = &(*p)->rb_left;
p                 259 tools/perf/ui/browsers/annotate.c 			p = &(*p)->rb_right;
p                 261 tools/perf/ui/browsers/annotate.c 	rb_link_node(&al->rb_node, parent, p);
p                  10 tools/perf/ui/gtk/progress.c static void gtk_ui_progress__update(struct ui_progress *p)
p                  12 tools/perf/ui/gtk/progress.c 	double fraction = p->total ? 1.0 * p->curr / p->total : 0.0;
p                  17 tools/perf/ui/gtk/progress.c 		GtkWidget *label = gtk_label_new(p->title);
p                  35 tools/perf/ui/gtk/progress.c 	snprintf(buf, sizeof(buf), "%"PRIu64" / %"PRIu64, p->curr, p->total);
p                  80 tools/perf/ui/gtk/util.c 	char *msg, *p;
p                  94 tools/perf/ui/gtk/util.c 	p = strchr(msg, '\n');
p                  95 tools/perf/ui/gtk/util.c 	if (p)
p                  96 tools/perf/ui/gtk/util.c 		*p = '\0';
p                 748 tools/perf/ui/hist.c 		char *p;
p                 750 tools/perf/ui/hist.c 		int len = strtol(ptr, &p, 10);
p                 753 tools/perf/ui/hist.c 		if (*p == ',')
p                 754 tools/perf/ui/hist.c 			ptr = p + 1;
p                   5 tools/perf/ui/progress.c static void null_progress__update(struct ui_progress *p __maybe_unused)
p                  16 tools/perf/ui/progress.c void ui_progress__update(struct ui_progress *p, u64 adv)
p                  18 tools/perf/ui/progress.c 	u64 last = p->curr;
p                  20 tools/perf/ui/progress.c 	p->curr += adv;
p                  22 tools/perf/ui/progress.c 	if (p->curr >= p->next) {
p                  23 tools/perf/ui/progress.c 		u64 nr = DIV_ROUND_UP(p->curr - last, p->step);
p                  25 tools/perf/ui/progress.c 		p->next += nr * p->step;
p                  26 tools/perf/ui/progress.c 		ui_progress__ops->update(p);
p                  30 tools/perf/ui/progress.c void __ui_progress__init(struct ui_progress *p, u64 total,
p                  33 tools/perf/ui/progress.c 	p->curr = 0;
p                  34 tools/perf/ui/progress.c 	p->next = p->step = total / 16 ?: 1;
p                  35 tools/perf/ui/progress.c 	p->total = total;
p                  36 tools/perf/ui/progress.c 	p->title = title;
p                  37 tools/perf/ui/progress.c 	p->size  = size;
p                  40 tools/perf/ui/progress.c 		ui_progress__ops->init(p);
p                  15 tools/perf/ui/progress.h void __ui_progress__init(struct ui_progress *p, u64 total,
p                  18 tools/perf/ui/progress.h #define ui_progress__init(p, total, title) \
p                  19 tools/perf/ui/progress.h 	__ui_progress__init(p, total, title, false)
p                  21 tools/perf/ui/progress.h #define ui_progress__init_size(p, total, title) \
p                  22 tools/perf/ui/progress.h 	__ui_progress__init(p, total, title, true)
p                  24 tools/perf/ui/progress.h void ui_progress__update(struct ui_progress *p, u64 adv);
p                  27 tools/perf/ui/progress.h 	void (*init)(struct ui_progress *p);
p                  28 tools/perf/ui/progress.h 	void (*update)(struct ui_progress *p);
p                  10 tools/perf/ui/tui/progress.c static void __tui_progress__init(struct ui_progress *p)
p                  12 tools/perf/ui/tui/progress.c 	p->next = p->step = p->total / (SLtt_Screen_Cols - 2) ?: 1;
p                  15 tools/perf/ui/tui/progress.c static int get_title(struct ui_progress *p, char *buf, size_t size)
p                  21 tools/perf/ui/tui/progress.c 	ret  = unit_number__scnprintf(buf_cur, sizeof(buf_cur), p->curr);
p                  22 tools/perf/ui/tui/progress.c 	ret += unit_number__scnprintf(buf_tot, sizeof(buf_tot), p->total);
p                  25 tools/perf/ui/tui/progress.c 			       p->title, buf_cur, buf_tot);
p                  28 tools/perf/ui/tui/progress.c static void tui_progress__update(struct ui_progress *p)
p                  30 tools/perf/ui/tui/progress.c 	char buf[100], *title = (char *) p->title;
p                  39 tools/perf/ui/tui/progress.c 	if (p->total == 0)
p                  42 tools/perf/ui/tui/progress.c 	if (p->size) {
p                  43 tools/perf/ui/tui/progress.c 		get_title(p, buf, sizeof(buf));
p                  56 tools/perf/ui/tui/progress.c 	bar = ((SLtt_Screen_Cols - 2) * p->curr) / p->total;
p                1311 tools/perf/util/annotate.c 		double p;
p                1319 tools/perf/util/annotate.c 		p = 100 *(double)br->entry / branch->coverage;
p                1321 tools/perf/util/annotate.c 		if (p > 0.1) {
p                1331 tools/perf/util/annotate.c 			printf(" +%.2f%%", p);
p                1336 tools/perf/util/annotate.c 		double p = 100*(double)br->taken / br->coverage;
p                1338 tools/perf/util/annotate.c 		if (p > 0.1) {
p                1348 tools/perf/util/annotate.c 			printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred  / br->taken);
p                2119 tools/perf/util/annotate.c 	struct rb_node **p = &root->rb_node;
p                2123 tools/perf/util/annotate.c 	while (*p != NULL) {
p                2124 tools/perf/util/annotate.c 		parent = *p;
p                2137 tools/perf/util/annotate.c 			p = &(*p)->rb_left;
p                2139 tools/perf/util/annotate.c 			p = &(*p)->rb_right;
p                2147 tools/perf/util/annotate.c 	rb_link_node(&al->rb_node, parent, p);
p                2167 tools/perf/util/annotate.c 	struct rb_node **p = &root->rb_node;
p                2170 tools/perf/util/annotate.c 	while (*p != NULL) {
p                2171 tools/perf/util/annotate.c 		parent = *p;
p                2175 tools/perf/util/annotate.c 			p = &(*p)->rb_left;
p                2177 tools/perf/util/annotate.c 			p = &(*p)->rb_right;
p                2180 tools/perf/util/annotate.c 	rb_link_node(&al->rb_node, parent, p);
p                 217 tools/perf/util/auxtrace.c 	void *p;
p                 223 tools/perf/util/auxtrace.c 	p = malloc(size);
p                 224 tools/perf/util/auxtrace.c 	if (!p)
p                 227 tools/perf/util/auxtrace.c 	ret = readn(fd, p, size);
p                 229 tools/perf/util/auxtrace.c 		free(p);
p                 233 tools/perf/util/auxtrace.c 	return p;
p                1005 tools/perf/util/auxtrace.c 	const char *p;
p                1023 tools/perf/util/auxtrace.c 	for (p = str; *p;) {
p                1024 tools/perf/util/auxtrace.c 		switch (*p++) {
p                1027 tools/perf/util/auxtrace.c 			while (*p == ' ' || *p == ',')
p                1028 tools/perf/util/auxtrace.c 				p += 1;
p                1029 tools/perf/util/auxtrace.c 			if (isdigit(*p)) {
p                1030 tools/perf/util/auxtrace.c 				synth_opts->period = strtoull(p, &endptr, 10);
p                1032 tools/perf/util/auxtrace.c 				p = endptr;
p                1033 tools/perf/util/auxtrace.c 				while (*p == ' ' || *p == ',')
p                1034 tools/perf/util/auxtrace.c 					p += 1;
p                1035 tools/perf/util/auxtrace.c 				switch (*p++) {
p                1053 tools/perf/util/auxtrace.c 					if (*p++ != 's')
p                1099 tools/perf/util/auxtrace.c 			while (*p == ' ' || *p == ',')
p                1100 tools/perf/util/auxtrace.c 				p += 1;
p                1101 tools/perf/util/auxtrace.c 			if (isdigit(*p)) {
p                1104 tools/perf/util/auxtrace.c 				val = strtoul(p, &endptr, 10);
p                1105 tools/perf/util/auxtrace.c 				p = endptr;
p                1115 tools/perf/util/auxtrace.c 			while (*p == ' ' || *p == ',')
p                1116 tools/perf/util/auxtrace.c 				p += 1;
p                1117 tools/perf/util/auxtrace.c 			if (isdigit(*p)) {
p                1120 tools/perf/util/auxtrace.c 				val = strtoul(p, &endptr, 10);
p                1121 tools/perf/util/auxtrace.c 				p = endptr;
p                1129 tools/perf/util/auxtrace.c 			synth_opts->initial_skip = strtoul(p, &endptr, 10);
p                1130 tools/perf/util/auxtrace.c 			if (p == endptr)
p                1132 tools/perf/util/auxtrace.c 			p = endptr;
p                  35 tools/perf/util/block-range.c 	struct rb_node **p = &block_ranges.root.rb_node;
p                  39 tools/perf/util/block-range.c 	while (*p != NULL) {
p                  40 tools/perf/util/block-range.c 		parent = *p;
p                  44 tools/perf/util/block-range.c 			p = &parent->rb_left;
p                  46 tools/perf/util/block-range.c 			p = &parent->rb_right;
p                  56 tools/perf/util/block-range.c 	struct rb_node **p = &node->rb_left;
p                  57 tools/perf/util/block-range.c 	while (*p) {
p                  58 tools/perf/util/block-range.c 		node = *p;
p                  59 tools/perf/util/block-range.c 		p = &node->rb_right;
p                  61 tools/perf/util/block-range.c 	rb_link_node(left, node, p);
p                  66 tools/perf/util/block-range.c 	struct rb_node **p = &node->rb_right;
p                  67 tools/perf/util/block-range.c 	while (*p) {
p                  68 tools/perf/util/block-range.c 		node = *p;
p                  69 tools/perf/util/block-range.c 		p = &node->rb_left;
p                  71 tools/perf/util/block-range.c 	rb_link_node(right, node, p);
p                  83 tools/perf/util/block-range.c 	struct rb_node **p = &block_ranges.root.rb_node;
p                  88 tools/perf/util/block-range.c 	while (*p != NULL) {
p                  89 tools/perf/util/block-range.c 		parent = *p;
p                  93 tools/perf/util/block-range.c 			p = &parent->rb_left;
p                  95 tools/perf/util/block-range.c 			p = &parent->rb_right;
p                 104 tools/perf/util/block-range.c 	if (!*p) {
p                 154 tools/perf/util/block-range.c 		rb_link_node(&entry->node, parent, p);
p                 103 tools/perf/util/bpf-prologue.c #define ins(i, p) append_insn((i), (p))
p                 196 tools/perf/util/build-id.c 	char *ret = NULL, *p;
p                 210 tools/perf/util/build-id.c 	p = strrchr(buf, '/');	/* Cut off the "/<sbuild_id>" */
p                 211 tools/perf/util/build-id.c 	if (p && (p > buf + offs)) {
p                 212 tools/perf/util/build-id.c 		*p = '\0';
p                  81 tools/perf/util/call-path.c 	struct rb_node **p;
p                  92 tools/perf/util/call-path.c 	p = &parent->children.rb_node;
p                  93 tools/perf/util/call-path.c 	while (*p != NULL) {
p                  94 tools/perf/util/call-path.c 		node_parent = *p;
p                 101 tools/perf/util/call-path.c 			p = &(*p)->rb_left;
p                 103 tools/perf/util/call-path.c 			p = &(*p)->rb_right;
p                 110 tools/perf/util/call-path.c 	rb_link_node(&cp->rb_node, node_parent, p);
p                 375 tools/perf/util/callchain.c 	struct rb_node **p = &root->rb_node;
p                 380 tools/perf/util/callchain.c 	while (*p) {
p                 383 tools/perf/util/callchain.c 		parent = *p;
p                 391 tools/perf/util/callchain.c 				p = &(*p)->rb_left;
p                 393 tools/perf/util/callchain.c 				p = &(*p)->rb_right;
p                 398 tools/perf/util/callchain.c 				p = &(*p)->rb_left;
p                 400 tools/perf/util/callchain.c 				p = &(*p)->rb_right;
p                 408 tools/perf/util/callchain.c 	rb_link_node(&chain->rb_node, parent, p);
p                 826 tools/perf/util/callchain.c 		struct rb_node *p, **pp;
p                 842 tools/perf/util/callchain.c 		p = parent->rb_root_in.rb_node;
p                 843 tools/perf/util/callchain.c 		first = rb_entry(p, struct callchain_node, rb_node_in);
p                 848 tools/perf/util/callchain.c 			pp = &p->rb_left;
p                 850 tools/perf/util/callchain.c 			pp = &p->rb_right;
p                 852 tools/perf/util/callchain.c 		rb_link_node(&new->rb_node_in, p, pp);
p                 873 tools/perf/util/callchain.c 	struct rb_node **p = &root->rb_root_in.rb_node;
p                 881 tools/perf/util/callchain.c 	while (*p) {
p                 884 tools/perf/util/callchain.c 		parent = *p;
p                 895 tools/perf/util/callchain.c 			p = &parent->rb_left;
p                 897 tools/perf/util/callchain.c 			p = &parent->rb_right;
p                 904 tools/perf/util/callchain.c 	rb_link_node(&rnode->rb_node_in, parent, p);
p                 208 tools/perf/util/cgroup.c 	const char *p, *e, *eos = str + strlen(str);
p                 218 tools/perf/util/cgroup.c 		p = strchr(str, ',');
p                 219 tools/perf/util/cgroup.c 		e = p ? p : eos;
p                 234 tools/perf/util/cgroup.c 		if (!p)
p                 236 tools/perf/util/cgroup.c 		str = p+1;
p                 108 tools/perf/util/color.c 		char *p = memchr(buf, '\n', count);
p                 110 tools/perf/util/color.c 		if (p != buf && (fputs(color, fp) < 0 ||
p                 111 tools/perf/util/color.c 				fwrite(buf, p ? (size_t)(p - buf) : count, 1, fp) != 1 ||
p                 114 tools/perf/util/color.c 		if (!p)
p                 118 tools/perf/util/color.c 		count -= p + 1 - buf;
p                 119 tools/perf/util/color.c 		buf = p + 1;
p                  63 tools/perf/util/comm.c 	struct rb_node **p = &root->rb_node;
p                  68 tools/perf/util/comm.c 	while (*p != NULL) {
p                  69 tools/perf/util/comm.c 		parent = *p;
p                  82 tools/perf/util/comm.c 			p = &(*p)->rb_left;
p                  84 tools/perf/util/comm.c 			p = &(*p)->rb_right;
p                  91 tools/perf/util/comm.c 	rb_link_node(&new->rb_node, parent, p);
p                  35 tools/perf/util/cputopo.c 	char *buf = NULL, *p;
p                  52 tools/perf/util/cputopo.c 	p = strchr(buf, '\n');
p                  53 tools/perf/util/cputopo.c 	if (p)
p                  54 tools/perf/util/cputopo.c 		*p = '\0';
p                  83 tools/perf/util/cputopo.c 	p = strchr(buf, '\n');
p                  84 tools/perf/util/cputopo.c 	if (p)
p                  85 tools/perf/util/cputopo.c 		*p = '\0';
p                 113 tools/perf/util/cputopo.c 	p = strchr(buf, '\n');
p                 114 tools/perf/util/cputopo.c 	if (p)
p                 115 tools/perf/util/cputopo.c 		*p = '\0';
p                 237 tools/perf/util/cputopo.c 	char *buf = NULL, *p;
p                 278 tools/perf/util/cputopo.c 	p = strchr(buf, '\n');
p                 279 tools/perf/util/cputopo.c 	if (p)
p                 280 tools/perf/util/cputopo.c 		*p = '\0';
p                 251 tools/perf/util/data-convert-bt.c 	size_t len = strlen(string), i, p;
p                 254 tools/perf/util/data-convert-bt.c 	for (i = p = 0; i < len; i++, p++) {
p                 258 tools/perf/util/data-convert-bt.c 			buffer[p] = string[i];
p                 274 tools/perf/util/data-convert-bt.c 			memcpy(buffer + p, numstr, 4);
p                 275 tools/perf/util/data-convert-bt.c 			p += 3;
p                 160 tools/perf/util/demangle-java.c 	char *p;
p                 167 tools/perf/util/demangle-java.c 	p = strrchr(str, ')');
p                 168 tools/perf/util/demangle-java.c 	if (!p)
p                 184 tools/perf/util/demangle-java.c 		ptr = __demangle_java_sym(p + 1, NULL, buf, len, MODE_TYPE);
p                 194 tools/perf/util/demangle-java.c 	ptr = __demangle_java_sym(str, p + 1, buf + l1, len - l1, MODE_PREFIX);
p                 774 tools/perf/util/dso.c 	struct rb_node * const *p = &root->rb_node;
p                 778 tools/perf/util/dso.c 	while (*p != NULL) {
p                 781 tools/perf/util/dso.c 		parent = *p;
p                 786 tools/perf/util/dso.c 			p = &(*p)->rb_left;
p                 788 tools/perf/util/dso.c 			p = &(*p)->rb_right;
p                 800 tools/perf/util/dso.c 	struct rb_node **p = &root->rb_node;
p                 806 tools/perf/util/dso.c 	while (*p != NULL) {
p                 809 tools/perf/util/dso.c 		parent = *p;
p                 814 tools/perf/util/dso.c 			p = &(*p)->rb_left;
p                 816 tools/perf/util/dso.c 			p = &(*p)->rb_right;
p                 821 tools/perf/util/dso.c 	rb_link_node(&new->rb_node, parent, p);
p                 925 tools/perf/util/dso.c 	u8 *p = data;
p                 930 tools/perf/util/dso.c 		ret = dso_cache_read(dso, machine, offset, p, size);
p                 941 tools/perf/util/dso.c 		p      += ret;
p                  44 tools/perf/util/dsos.c 	struct rb_node **p = &root->rb_node;
p                  52 tools/perf/util/dsos.c 	while (*p) {
p                  53 tools/perf/util/dsos.c 		struct dso *this = rb_entry(*p, struct dso, rb_node);
p                  56 tools/perf/util/dsos.c 		parent = *p;
p                  77 tools/perf/util/dsos.c 			p = &parent->rb_left;
p                  79 tools/perf/util/dsos.c 			p = &parent->rb_right;
p                  83 tools/perf/util/dsos.c 		rb_link_node(&dso->rb_node, parent, p);
p                  22 tools/perf/util/env.c 	struct rb_node **p;
p                  25 tools/perf/util/env.c 	p = &env->bpf_progs.infos.rb_node;
p                  27 tools/perf/util/env.c 	while (*p != NULL) {
p                  28 tools/perf/util/env.c 		parent = *p;
p                  31 tools/perf/util/env.c 			p = &(*p)->rb_left;
p                  33 tools/perf/util/env.c 			p = &(*p)->rb_right;
p                  40 tools/perf/util/env.c 	rb_link_node(&info_node->rb_node, parent, p);
p                  77 tools/perf/util/env.c 	struct rb_node **p;
p                  80 tools/perf/util/env.c 	p = &env->bpf_progs.btfs.rb_node;
p                  82 tools/perf/util/env.c 	while (*p != NULL) {
p                  83 tools/perf/util/env.c 		parent = *p;
p                  86 tools/perf/util/env.c 			p = &(*p)->rb_left;
p                  88 tools/perf/util/env.c 			p = &(*p)->rb_right;
p                  95 tools/perf/util/env.c 	rb_link_node(&btf_node->rb_node, parent, p);
p                 266 tools/perf/util/event.h static inline void *perf_synth__raw_data(void *p)
p                 268 tools/perf/util/event.h 	return p + 4;
p                  23 tools/perf/util/expr.h int expr__find_other(const char *p, const char *one, const char ***other,
p                  96 tools/perf/util/expr.y static int expr__symbol(YYSTYPE *res, const char *p, const char **pp)
p                  99 tools/perf/util/expr.y 	const char *s = p;
p                 101 tools/perf/util/expr.y 	if (*p == '#')
p                 102 tools/perf/util/expr.y 		*dst++ = *p++;
p                 104 tools/perf/util/expr.y 	while (isalnum(*p) || *p == '_' || *p == '.' || *p == ':' || *p == '@' || *p == '\\') {
p                 105 tools/perf/util/expr.y 		if (p - s >= MAXIDLEN)
p                 111 tools/perf/util/expr.y 		if (*p == '@')
p                 113 tools/perf/util/expr.y 		else if (*p == '\\')
p                 114 tools/perf/util/expr.y 			*dst++ = *++p;
p                 116 tools/perf/util/expr.y 			*dst++ = *p;
p                 117 tools/perf/util/expr.y 		p++;
p                 120 tools/perf/util/expr.y 	*pp = p;
p                 149 tools/perf/util/expr.y 	const char *p = *pp;
p                 151 tools/perf/util/expr.y 	while (isspace(*p))
p                 152 tools/perf/util/expr.y 		p++;
p                 153 tools/perf/util/expr.y 	s = p;
p                 154 tools/perf/util/expr.y 	switch (*p++) {
p                 158 tools/perf/util/expr.y 		return expr__symbol(res, p - 1, pp);
p                 160 tools/perf/util/expr.y 		res->num = strtod(s, (char **)&p);
p                 167 tools/perf/util/expr.y 	*pp = p;
p                 199 tools/perf/util/expr.y int expr__find_other(const char *p, const char *one, const char ***other,
p                 202 tools/perf/util/expr.y 	const char *orig = p;
p                 213 tools/perf/util/expr.y 		int tok = expr__lex(&val, &p);
p                 138 tools/perf/util/header.c 	u64 *p = (u64 *) set;
p                 146 tools/perf/util/header.c 		ret = do_write(ff, p + i, sizeof(*p));
p                 270 tools/perf/util/header.c 	u64 size, *p;
p                 281 tools/perf/util/header.c 	p = (u64 *) set;
p                 284 tools/perf/util/header.c 		ret = do_read_u64(ff, p + i);
p                 378 tools/perf/util/header.c 	char *s, *p;
p                 403 tools/perf/util/header.c 	p = strchr(buf, ':');
p                 404 tools/perf/util/header.c 	if (p && *(p+1) == ' ' && *(p+2))
p                 405 tools/perf/util/header.c 		s = p + 2;
p                 406 tools/perf/util/header.c 	p = strchr(s, '\n');
p                 407 tools/perf/util/header.c 	if (p)
p                 408 tools/perf/util/header.c 		*p = '\0';
p                 411 tools/perf/util/header.c 	p = s;
p                 412 tools/perf/util/header.c 	while (*p) {
p                 413 tools/perf/util/header.c 		if (isspace(*p)) {
p                 414 tools/perf/util/header.c 			char *r = p + 1;
p                 416 tools/perf/util/header.c 			*p = ' ';
p                 417 tools/perf/util/header.c 			if (q != (p+1))
p                 420 tools/perf/util/header.c 		p++;
p                2950 tools/perf/util/header.c 			 struct perf_file_section **p,
p                2963 tools/perf/util/header.c 		(*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
p                2970 tools/perf/util/header.c 			lseek(ff->fd, (*p)->offset, SEEK_SET);
p                2974 tools/perf/util/header.c 		(*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
p                2975 tools/perf/util/header.c 		(*p)++;
p                2985 tools/perf/util/header.c 	struct perf_file_section *feat_sec, *p;
p                3000 tools/perf/util/header.c 	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
p                3010 tools/perf/util/header.c 		if (do_write_feat(&ff, feat, &p, evlist))
p                 569 tools/perf/util/hist.c 	struct rb_node **p;
p                 577 tools/perf/util/hist.c 	p = &hists->entries_in->rb_root.rb_node;
p                 579 tools/perf/util/hist.c 	while (*p != NULL) {
p                 580 tools/perf/util/hist.c 		parent = *p;
p                 621 tools/perf/util/hist.c 			p = &(*p)->rb_left;
p                 623 tools/perf/util/hist.c 			p = &(*p)->rb_right;
p                 636 tools/perf/util/hist.c 	rb_link_node(&he->rb_node_in, parent, p);
p                1400 tools/perf/util/hist.c 	struct rb_node **p = &root->rb_root.rb_node;
p                1407 tools/perf/util/hist.c 	while (*p != NULL) {
p                1408 tools/perf/util/hist.c 		parent = *p;
p                1424 tools/perf/util/hist.c 			p = &parent->rb_left;
p                1426 tools/perf/util/hist.c 			p = &parent->rb_right;
p                1461 tools/perf/util/hist.c 	rb_link_node(&new->rb_node_in, parent, p);
p                1517 tools/perf/util/hist.c 	struct rb_node **p = &root->rb_root.rb_node;
p                1526 tools/perf/util/hist.c 	while (*p != NULL) {
p                1527 tools/perf/util/hist.c 		parent = *p;
p                1551 tools/perf/util/hist.c 			p = &(*p)->rb_left;
p                1553 tools/perf/util/hist.c 			p = &(*p)->rb_right;
p                1559 tools/perf/util/hist.c 	rb_link_node(&he->rb_node_in, parent, p);
p                1703 tools/perf/util/hist.c 	struct rb_node **p = &root->rb_root.rb_node;
p                1709 tools/perf/util/hist.c 	while (*p != NULL) {
p                1710 tools/perf/util/hist.c 		parent = *p;
p                1714 tools/perf/util/hist.c 			p = &parent->rb_left;
p                1716 tools/perf/util/hist.c 			p = &parent->rb_right;
p                1721 tools/perf/util/hist.c 	rb_link_node(&he->rb_node, parent, p);
p                1790 tools/perf/util/hist.c 	struct rb_node **p = &entries->rb_root.rb_node;
p                1809 tools/perf/util/hist.c 	while (*p != NULL) {
p                1810 tools/perf/util/hist.c 		parent = *p;
p                1814 tools/perf/util/hist.c 			p = &(*p)->rb_left;
p                1816 tools/perf/util/hist.c 			p = &(*p)->rb_right;
p                1821 tools/perf/util/hist.c 	rb_link_node(&he->rb_node, parent, p);
p                2113 tools/perf/util/hist.c 	struct rb_node **p = &root->rb_root.rb_node;
p                2120 tools/perf/util/hist.c 	while (*p != NULL) {
p                2121 tools/perf/util/hist.c 		parent = *p;
p                2125 tools/perf/util/hist.c 			p = &(*p)->rb_left;
p                2127 tools/perf/util/hist.c 			p = &(*p)->rb_right;
p                2132 tools/perf/util/hist.c 	rb_link_node(&he->rb_node, parent, p);
p                2280 tools/perf/util/hist.c 	struct rb_node **p;
p                2291 tools/perf/util/hist.c 	p = &root->rb_root.rb_node;
p                2293 tools/perf/util/hist.c 	while (*p != NULL) {
p                2294 tools/perf/util/hist.c 		parent = *p;
p                2303 tools/perf/util/hist.c 			p = &(*p)->rb_left;
p                2305 tools/perf/util/hist.c 			p = &(*p)->rb_right;
p                2316 tools/perf/util/hist.c 		rb_link_node(&he->rb_node_in, parent, p);
p                2329 tools/perf/util/hist.c 	struct rb_node **p;
p                2335 tools/perf/util/hist.c 	p = &root->rb_root.rb_node;
p                2336 tools/perf/util/hist.c 	while (*p != NULL) {
p                2339 tools/perf/util/hist.c 		parent = *p;
p                2351 tools/perf/util/hist.c 			p = &parent->rb_left;
p                2353 tools/perf/util/hist.c 			p = &parent->rb_right;
p                2360 tools/perf/util/hist.c 		rb_link_node(&he->rb_node_in, parent, p);
p                2718 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 	unsigned char *p;
p                2726 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 		p = memrchr(buf, n[0], k);
p                2727 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 		if (!p)
p                2729 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 		if (!memcmp(p + 1, n + 1, INTEL_PT_PSB_LEN - 1))
p                2730 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 			return p;
p                2731 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 		k = p - buf;
p                2823 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 	unsigned char *p = buf_b - MAX_PADDING;
p                2827 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 	for (i = MAX_PADDING; i; i--, p++, q++) {
p                2828 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 		if (*p != *q)
p                2832 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 	return p;
p                2859 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 	unsigned char *p;
p                2862 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 	p = intel_pt_last_psb(buf_a, len_a);
p                2863 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 	if (!p)
p                2866 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 	len = len_a - (p - buf_a);
p                2867 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 	if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a)) {
p                2870 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 		p = intel_pt_last_psb(buf_a, len_a);
p                2871 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 		if (!p)
p                2873 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 		len = len_a - (p - buf_a);
p                2874 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c 		if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a))
p                2993 tools/perf/util/intel-pt.c 	struct perf_time_interval *p = opts->ptime_range;
p                2997 tools/perf/util/intel-pt.c 	if (!n || !p || pt->timeless_decoding)
p                3010 tools/perf/util/intel-pt.c 		u64 ts = p[i].start;
p                3011 tools/perf/util/intel-pt.c 		u64 te = p[i].end;
p                 412 tools/perf/util/jitdump.c 	code  = (unsigned long)jr + jr->load.p.total_size - csize;
p                 475 tools/perf/util/jitdump.c 		id->time = convert_timestamp(jd, jr->load.p.timestamp);
p                 567 tools/perf/util/jitdump.c 		id->time = convert_timestamp(jd, jr->load.p.timestamp);
p                 700 tools/perf/util/jitdump.c 	char *p;
p                 709 tools/perf/util/jitdump.c 	p = strrchr(mmap_name, '/');
p                 710 tools/perf/util/jitdump.c 	if (!p)
p                 716 tools/perf/util/jitdump.c 	if (strncmp(p, "/jit-", 5))
p                 722 tools/perf/util/jitdump.c 	p += 5;
p                 727 tools/perf/util/jitdump.c 	if (!isdigit(*p))
p                 730 tools/perf/util/jitdump.c 	pid2 = (int)strtol(p, &end, 10);
p                  66 tools/perf/util/jitdump.h 	struct jr_prefix p;
p                  77 tools/perf/util/jitdump.h 	struct jr_prefix p;
p                  81 tools/perf/util/jitdump.h 	struct jr_prefix p;
p                 100 tools/perf/util/jitdump.h 	struct jr_prefix p;
p                 108 tools/perf/util/jitdump.h 	struct jr_prefix p;
p                 399 tools/perf/util/llvm-utils.c 	char *p;
p                 406 tools/perf/util/llvm-utils.c 	p = strrchr(obj_path, '.');
p                 407 tools/perf/util/llvm-utils.c 	if (!p || (strcmp(p, ".c") != 0)) {
p                 413 tools/perf/util/llvm-utils.c 	p[1] = 'o';
p                 263 tools/perf/util/machine.c 	struct rb_node **p = &machines->guests.rb_root.rb_node;
p                 276 tools/perf/util/machine.c 	while (*p != NULL) {
p                 277 tools/perf/util/machine.c 		parent = *p;
p                 280 tools/perf/util/machine.c 			p = &(*p)->rb_left;
p                 282 tools/perf/util/machine.c 			p = &(*p)->rb_right;
p                 287 tools/perf/util/machine.c 	rb_link_node(&machine->rb_node, parent, p);
p                 308 tools/perf/util/machine.c 	struct rb_node **p = &machines->guests.rb_root.rb_node;
p                 316 tools/perf/util/machine.c 	while (*p != NULL) {
p                 317 tools/perf/util/machine.c 		parent = *p;
p                 320 tools/perf/util/machine.c 			p = &(*p)->rb_left;
p                 322 tools/perf/util/machine.c 			p = &(*p)->rb_right;
p                 498 tools/perf/util/machine.c 	struct rb_node **p = &threads->entries.rb_root.rb_node;
p                 507 tools/perf/util/machine.c 	while (*p != NULL) {
p                 508 tools/perf/util/machine.c 		parent = *p;
p                 518 tools/perf/util/machine.c 			p = &(*p)->rb_left;
p                 520 tools/perf/util/machine.c 			p = &(*p)->rb_right;
p                 530 tools/perf/util/machine.c 		rb_link_node(&th->rb_node, parent, p);
p                2539 tools/perf/util/machine.c 			     int (*fn)(struct thread *thread, void *p),
p                2568 tools/perf/util/machine.c 			      int (*fn)(struct thread *thread, void *p),
p                 248 tools/perf/util/machine.h 			     int (*fn)(struct thread *thread, void *p),
p                 251 tools/perf/util/machine.h 			      int (*fn)(struct thread *thread, void *p),
p                 913 tools/perf/util/map.c 	struct rb_node **p = &maps->entries.rb_node;
p                 918 tools/perf/util/map.c 	while (*p != NULL) {
p                 919 tools/perf/util/map.c 		parent = *p;
p                 922 tools/perf/util/map.c 			p = &(*p)->rb_left;
p                 924 tools/perf/util/map.c 			p = &(*p)->rb_right;
p                 927 tools/perf/util/map.c 	rb_link_node(&map->rb_node, parent, p);
p                 934 tools/perf/util/map.c 	struct rb_node **p = &maps->names.rb_node;
p                 939 tools/perf/util/map.c 	while (*p != NULL) {
p                 940 tools/perf/util/map.c 		parent = *p;
p                 944 tools/perf/util/map.c 			p = &(*p)->rb_left;
p                 946 tools/perf/util/map.c 			p = &(*p)->rb_right;
p                 948 tools/perf/util/map.c 	rb_link_node(&map->rb_node_name, parent, p);
p                 979 tools/perf/util/map.c 	struct rb_node *p;
p                 984 tools/perf/util/map.c 	p = maps->entries.rb_node;
p                 985 tools/perf/util/map.c 	while (p != NULL) {
p                 986 tools/perf/util/map.c 		m = rb_entry(p, struct map, rb_node);
p                 988 tools/perf/util/map.c 			p = p->rb_left;
p                 990 tools/perf/util/map.c 			p = p->rb_right;
p                  19 tools/perf/util/mem2node.c 	struct rb_node **p = &root->rb_node;
p                  23 tools/perf/util/mem2node.c 	while (*p != NULL) {
p                  24 tools/perf/util/mem2node.c 		parent = *p;
p                  28 tools/perf/util/mem2node.c 			p = &(*p)->rb_left;
p                  30 tools/perf/util/mem2node.c 			p = &(*p)->rb_right;
p                  33 tools/perf/util/mem2node.c 	rb_link_node(&entry->rb_node, parent, p);
p                 119 tools/perf/util/mem2node.c 	struct rb_node **p, *parent = NULL;
p                 122 tools/perf/util/mem2node.c 	p = &map->root.rb_node;
p                 123 tools/perf/util/mem2node.c 	while (*p != NULL) {
p                 124 tools/perf/util/mem2node.c 		parent = *p;
p                 127 tools/perf/util/mem2node.c 			p = &(*p)->rb_left;
p                 129 tools/perf/util/mem2node.c 			p = &(*p)->rb_right;
p                 476 tools/perf/util/metricgroup.c 	char *llist, *nlist, *p;
p                 487 tools/perf/util/metricgroup.c 	while ((p = strsep(&llist, ",")) != NULL) {
p                 488 tools/perf/util/metricgroup.c 		ret = metricgroup__add_metric(p, events, group_list);
p                 491 tools/perf/util/metricgroup.c 					p);
p                  22 tools/perf/util/ordered-events.c 	struct list_head *p;
p                  42 tools/perf/util/ordered-events.c 			p = last->list.next;
p                  43 tools/perf/util/ordered-events.c 			if (p == &oe->events) {
p                  48 tools/perf/util/ordered-events.c 			last = list_entry(p, struct ordered_event, list);
p                  53 tools/perf/util/ordered-events.c 			p = last->list.prev;
p                  54 tools/perf/util/ordered-events.c 			if (p == &oe->events) {
p                  58 tools/perf/util/ordered-events.c 			last = list_entry(p, struct ordered_event, list);
p                  46 tools/perf/util/parse-branch-options.c 	char *p, *s;
p                  61 tools/perf/util/parse-branch-options.c 		p = strchr(s, ',');
p                  62 tools/perf/util/parse-branch-options.c 		if (p)
p                  63 tools/perf/util/parse-branch-options.c 			*p = '\0';
p                  78 tools/perf/util/parse-branch-options.c 		if (!p)
p                  81 tools/perf/util/parse-branch-options.c 		s = p + 1;
p                1704 tools/perf/util/parse-events.c 	char *p = str;
p                1710 tools/perf/util/parse-events.c 	while (*p) {
p                1711 tools/perf/util/parse-events.c 		if (*p != 'p' && strchr(p + 1, *p))
p                1713 tools/perf/util/parse-events.c 		p++;
p                1780 tools/perf/util/parse-events.c 		struct perf_pmu_event_symbol *p;
p                1784 tools/perf/util/parse-events.c 			p = perf_pmu_events_list + i;
p                1785 tools/perf/util/parse-events.c 			zfree(&p->symbol);
p                1794 tools/perf/util/parse-events.c 	p->symbol = str;		\
p                1795 tools/perf/util/parse-events.c 	if (!p->symbol)			\
p                1797 tools/perf/util/parse-events.c 	p->type = stype;		\
p                1833 tools/perf/util/parse-events.c 			struct perf_pmu_event_symbol *p = perf_pmu_events_list + len;
p                1839 tools/perf/util/parse-events.c 				p++;
p                1859 tools/perf/util/parse-events.c 	struct perf_pmu_event_symbol p, *r;
p                1872 tools/perf/util/parse-events.c 	p.symbol = strdup(name);
p                1873 tools/perf/util/parse-events.c 	r = bsearch(&p, perf_pmu_events_list,
p                1876 tools/perf/util/parse-events.c 	zfree(&p.symbol);
p                  17 tools/perf/util/parse-regs-options.c 	char *s, *os = NULL, *p;
p                  43 tools/perf/util/parse-regs-options.c 			p = strchr(s, ',');
p                  44 tools/perf/util/parse-regs-options.c 			if (p)
p                  45 tools/perf/util/parse-regs-options.c 				*p = '\0';
p                  69 tools/perf/util/parse-regs-options.c 			if (!p)
p                  72 tools/perf/util/parse-regs-options.c 			s = p + 1;
p                  44 tools/perf/util/print_binary.c int is_printable_array(char *p, unsigned int len)
p                  48 tools/perf/util/print_binary.c 	if (!p || !len || p[len - 1] != 0)
p                  54 tools/perf/util/print_binary.c 		if (!isprint(p[i]) && !isspace(p[i]))
p                  35 tools/perf/util/print_binary.h int is_printable_array(char *p, unsigned int len);
p                1734 tools/perf/util/probe-event.c 	char *p;
p                1777 tools/perf/util/probe-event.c 	p = strchr(argv[1], ':');
p                1778 tools/perf/util/probe-event.c 	if (p) {
p                1779 tools/perf/util/probe-event.c 		tp->module = strndup(argv[1], p - argv[1]);
p                1785 tools/perf/util/probe-event.c 		p++;
p                1787 tools/perf/util/probe-event.c 		p = argv[1];
p                1788 tools/perf/util/probe-event.c 	fmt1_str = strtok_r(p, "+", &fmt);
p                1828 tools/perf/util/probe-event.c 		fmt2_str = strchr(p, '(');
p                1840 tools/perf/util/probe-event.c 		p = strchr(argv[i + 2], '=');
p                1841 tools/perf/util/probe-event.c 		if (p)	/* We don't need which register is assigned. */
p                1842 tools/perf/util/probe-event.c 			*p++ = '\0';
p                1844 tools/perf/util/probe-event.c 			p = argv[i + 2];
p                1847 tools/perf/util/probe-event.c 		tev->args[i].value = strdup(p);
p                2360 tools/perf/util/probe-event.c 	char buf[PATH_MAX], *p;
p                2388 tools/perf/util/probe-event.c 		p = strchr(buf, '\t');
p                2389 tools/perf/util/probe-event.c 		if (p) {
p                2390 tools/perf/util/probe-event.c 			p++;
p                2391 tools/perf/util/probe-event.c 			if (p[strlen(p) - 1] == '\n')
p                2392 tools/perf/util/probe-event.c 				p[strlen(p) - 1] = '\0';
p                2394 tools/perf/util/probe-event.c 			p = (char *)"unknown";
p                2395 tools/perf/util/probe-event.c 		node->symbol = strdup(p);
p                2600 tools/perf/util/probe-event.c 	char *p, *nbase;
p                2609 tools/perf/util/probe-event.c 	p = strpbrk(nbase, ".@");
p                2610 tools/perf/util/probe-event.c 	if (p && p != nbase)
p                2611 tools/perf/util/probe-event.c 		*p = '\0';
p                 138 tools/perf/util/probe-file.c 	char *p;
p                 157 tools/perf/util/probe-file.c 		p = fgets(buf, MAX_CMDLEN, fp);
p                 158 tools/perf/util/probe-file.c 		if (!p)
p                 161 tools/perf/util/probe-file.c 		idx = strlen(p) - 1;
p                 162 tools/perf/util/probe-file.c 		if (p[idx] == '\n')
p                 163 tools/perf/util/probe-file.c 			p[idx] = '\0';
p                 256 tools/perf/util/probe-file.c 	char *p;
p                 265 tools/perf/util/probe-file.c 	p = strchr(buf + 2, ':');
p                 266 tools/perf/util/probe-file.c 	if (!p) {
p                 272 tools/perf/util/probe-file.c 	*p = '/';
p                 293 tools/perf/util/probe-file.c 	const char *p;
p                 304 tools/perf/util/probe-file.c 		p = strchr(ent->s, ':');
p                 305 tools/perf/util/probe-file.c 		if ((p && strfilter__compare(filter, p + 1)) ||
p                 473 tools/perf/util/probe-file.c 	char buf[MAX_CMDLEN], *p;
p                 489 tools/perf/util/probe-file.c 		p = strchr(buf, '\n');
p                 490 tools/perf/util/probe-file.c 		if (p)
p                 491 tools/perf/util/probe-file.c 			*p = '\0';
p                1551 tools/perf/util/probe-finder.c 	const char *p;
p                1563 tools/perf/util/probe-finder.c 		p = dwfl_module_relocation_info(dbg->mod, i, &shndx);
p                1564 tools/perf/util/probe-finder.c 		if (strcmp(p, ".text") == 0) {
p                  79 tools/perf/util/rb_resort.h 	struct rb_node **p = &sorted->entries.rb_node, *parent = NULL;		\
p                  80 tools/perf/util/rb_resort.h 	while (*p != NULL) {							\
p                  81 tools/perf/util/rb_resort.h 		parent = *p;							\
p                  83 tools/perf/util/rb_resort.h 			p = &(*p)->rb_left;					\
p                  85 tools/perf/util/rb_resort.h 			p = &(*p)->rb_right;					\
p                  87 tools/perf/util/rb_resort.h 	rb_link_node(sorted_nd, parent, p);					\
p                  15 tools/perf/util/rblist.c 	struct rb_node **p = &rblist->entries.rb_root.rb_node;
p                  19 tools/perf/util/rblist.c 	while (*p != NULL) {
p                  22 tools/perf/util/rblist.c 		parent = *p;
p                  26 tools/perf/util/rblist.c 			p = &(*p)->rb_left;
p                  28 tools/perf/util/rblist.c 			p = &(*p)->rb_right;
p                  39 tools/perf/util/rblist.c 	rb_link_node(new_node, parent, p);
p                  57 tools/perf/util/rblist.c 	struct rb_node **p = &rblist->entries.rb_root.rb_node;
p                  61 tools/perf/util/rblist.c 	while (*p != NULL) {
p                  64 tools/perf/util/rblist.c 		parent = *p;
p                  68 tools/perf/util/rblist.c 			p = &(*p)->rb_left;
p                  70 tools/perf/util/rblist.c 			p = &(*p)->rb_right;
p                  80 tools/perf/util/rblist.c 			rb_link_node(new_node, parent, p);
p                 164 tools/perf/util/s390-sample-raw.c 	u64 *p;
p                 184 tools/perf/util/s390-sample-raw.c 		for (i = 0, p = (u64 *)(cep + 1); i < ce.ctr; ++i, ++p) {
p                 189 tools/perf/util/s390-sample-raw.c 				      ev_name ?: "<unknown>", be64_to_cpu(*p));
p                1345 tools/perf/util/scripting-engines/trace-event-python.c 	char *p = str;
p                1349 tools/perf/util/scripting-engines/trace-event-python.c 	while ((p = strchr(p, ':'))) {
p                1350 tools/perf/util/scripting-engines/trace-event-python.c 		*p = '_';
p                1351 tools/perf/util/scripting-engines/trace-event-python.c 		p++;
p                 710 tools/perf/util/session.c static void swap_bitfield(u8 *p, unsigned len)
p                 715 tools/perf/util/session.c 		*p = revbyte(*p);
p                 716 tools/perf/util/session.c 		p++;
p                1836 tools/perf/util/session.c 					    void *p __maybe_unused)
p                1863 tools/perf/util/session.c 	void *p;
p                1904 tools/perf/util/session.c 	p = event;
p                1905 tools/perf/util/session.c 	p += sizeof(struct perf_event_header);
p                1908 tools/perf/util/session.c 		err = readn(fd, p, size - sizeof(struct perf_event_header));
p                 501 tools/perf/util/sort.c 	char *sf, *p;
p                 511 tools/perf/util/sort.c 	p = strchr(sf, ':');
p                 512 tools/perf/util/sort.c 	if (p && *sf) {
p                 513 tools/perf/util/sort.c 		*p = 0;
p                 950 tools/perf/util/sort.c 	unsigned char mp, p;
p                 956 tools/perf/util/sort.c 	p  = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
p                 957 tools/perf/util/sort.c 	return mp || p;
p                1507 tools/perf/util/sort.c static inline char *add_str(char *p, const char *str)
p                1509 tools/perf/util/sort.c 	strcpy(p, str);
p                1510 tools/perf/util/sort.c 	return p + strlen(str);
p                1547 tools/perf/util/sort.c 	char *p = buf;
p                1553 tools/perf/util/sort.c 			p = add_str(p, txbits[i].name);
p                1555 tools/perf/util/sort.c 		p = add_str(p, "NEITHER ");
p                1557 tools/perf/util/sort.c 		sprintf(p, ":%" PRIx64,
p                1560 tools/perf/util/sort.c 		p += strlen(p);
p                  51 tools/perf/util/srccode.c 	char *p = map;
p                  56 tools/perf/util/srccode.c 	while (p < end && (p = memchr(p, '\n', end - p)) != NULL) {
p                  58 tools/perf/util/srccode.c 		p++;
p                  60 tools/perf/util/srccode.c 	if (p < end)
p                  69 tools/perf/util/srccode.c 	char *p = map;
p                  75 tools/perf/util/srccode.c 	while (p < end && (p = memchr(p, '\n', end - p)) != NULL) {
p                  78 tools/perf/util/srccode.c 		lines[l++] = ++p;
p                  80 tools/perf/util/srccode.c 	if (p < end)
p                  81 tools/perf/util/srccode.c 		lines[l] = p;
p                 166 tools/perf/util/srccode.c 	char *l, *p;
p                 176 tools/perf/util/srccode.c 	p = memchr(l, '\n', sf->map + sf->maplen - l);
p                 177 tools/perf/util/srccode.c 	*lenp = p - l;
p                 616 tools/perf/util/srcline.c 	struct rb_node **p = &tree->rb_root.rb_node;
p                 630 tools/perf/util/srcline.c 	while (*p != NULL) {
p                 631 tools/perf/util/srcline.c 		parent = *p;
p                 634 tools/perf/util/srcline.c 			p = &(*p)->rb_left;
p                 636 tools/perf/util/srcline.c 			p = &(*p)->rb_right;
p                 640 tools/perf/util/srcline.c 	rb_link_node(&node->rb_node, parent, p);
p                 708 tools/perf/util/srcline.c 	struct rb_node **p = &tree->rb_root.rb_node;
p                 714 tools/perf/util/srcline.c 	while (*p != NULL) {
p                 715 tools/perf/util/srcline.c 		parent = *p;
p                 718 tools/perf/util/srcline.c 			p = &(*p)->rb_left;
p                 720 tools/perf/util/srcline.c 			p = &(*p)->rb_right;
p                 724 tools/perf/util/srcline.c 	rb_link_node(&inlines->rb_node, parent, p);
p                 780 tools/perf/util/stat-shadow.c 		const char *p = metric_expr;
p                 782 tools/perf/util/stat-shadow.c 		if (expr__parse(&ratio, &pctx, &p) == 0) {
p                  22 tools/perf/util/strfilter.c 		if (node->p && !is_operator(*node->p))
p                  23 tools/perf/util/strfilter.c 			zfree((char **)&node->p);
p                  40 tools/perf/util/strfilter.c 	const char *p;
p                  45 tools/perf/util/strfilter.c 		p = s;
p                  49 tools/perf/util/strfilter.c 	p = s + 1;
p                  53 tools/perf/util/strfilter.c 		while (*p && !is_separator(*p) && !isspace(*p))
p                  54 tools/perf/util/strfilter.c 			p++;
p                  56 tools/perf/util/strfilter.c 		if (*(p - 1) == '\\' || (*p == '!' && *(p - 1) == '[')) {
p                  57 tools/perf/util/strfilter.c 			p++;
p                  62 tools/perf/util/strfilter.c 	*e = p;
p                  73 tools/perf/util/strfilter.c 		node->p = op;
p                 138 tools/perf/util/strfilter.c 			cur->r->p = strndup(s, e - s);
p                 139 tools/perf/util/strfilter.c 			if (!cur->r->p)
p                 221 tools/perf/util/strfilter.c 	if (!node || !node->p)
p                 224 tools/perf/util/strfilter.c 	switch (*node->p) {
p                 234 tools/perf/util/strfilter.c 		return strglobmatch(str, node->p);
p                 268 tools/perf/util/strfilter.c 	if (!node || !node->p)
p                 271 tools/perf/util/strfilter.c 	switch (*node->p) {
p                 280 tools/perf/util/strfilter.c 			*(buf + len++) = *node->p;
p                 290 tools/perf/util/strfilter.c 		len = strlen(node->p);
p                 292 tools/perf/util/strfilter.c 			strcpy(buf, node->p);
p                  13 tools/perf/util/strfilter.h 	const char *p;		/* Operator or rule */
p                  27 tools/perf/util/string.c 	char *p;
p                  33 tools/perf/util/string.c 	length = strtoll(str, &p, 10);
p                  34 tools/perf/util/string.c 	switch (c = *p++) {
p                  36 tools/perf/util/string.c 			if (*p)
p                  60 tools/perf/util/string.c 		if (strcmp(p, "b") != 0)
p                  63 tools/perf/util/string.c 		if (strcmp(p, "B") != 0)
p                 270 tools/perf/util/string.c 	char *s, *d, *p, *ret = strdup(str);
p                 285 tools/perf/util/string.c 		p = strchr(s + 1, '\\');
p                 286 tools/perf/util/string.c 		if (p) {
p                 287 tools/perf/util/string.c 			memmove(d, s, p - s);
p                 288 tools/perf/util/string.c 			d += p - s;
p                 289 tools/perf/util/string.c 			s = p + 1;
p                 292 tools/perf/util/string.c 	} while (p);
p                1458 tools/perf/util/symbol-elf.c #define kcore_copy__for_each_phdr(k, p) \
p                1459 tools/perf/util/symbol-elf.c 	list_for_each_entry((p), &(k)->phdrs, node)
p                1463 tools/perf/util/symbol-elf.c 	struct phdr_data *p = zalloc(sizeof(*p));
p                1465 tools/perf/util/symbol-elf.c 	if (p) {
p                1466 tools/perf/util/symbol-elf.c 		p->addr   = addr;
p                1467 tools/perf/util/symbol-elf.c 		p->len    = len;
p                1468 tools/perf/util/symbol-elf.c 		p->offset = offset;
p                1471 tools/perf/util/symbol-elf.c 	return p;
p                1478 tools/perf/util/symbol-elf.c 	struct phdr_data *p = phdr_data__new(addr, len, offset);
p                1480 tools/perf/util/symbol-elf.c 	if (p)
p                1481 tools/perf/util/symbol-elf.c 		list_add_tail(&p->node, &kci->phdrs);
p                1483 tools/perf/util/symbol-elf.c 	return p;
p                1488 tools/perf/util/symbol-elf.c 	struct phdr_data *p, *tmp;
p                1490 tools/perf/util/symbol-elf.c 	list_for_each_entry_safe(p, tmp, &kci->phdrs, node) {
p                1491 tools/perf/util/symbol-elf.c 		list_del_init(&p->node);
p                1492 tools/perf/util/symbol-elf.c 		free(p);
p                1648 tools/perf/util/symbol-elf.c 	struct phdr_data *p, *k = NULL;
p                1655 tools/perf/util/symbol-elf.c 	kcore_copy__for_each_phdr(kci, p) {
p                1656 tools/perf/util/symbol-elf.c 		u64 pend = p->addr + p->len - 1;
p                1658 tools/perf/util/symbol-elf.c 		if (p->addr <= kci->stext && pend >= kci->stext) {
p                1659 tools/perf/util/symbol-elf.c 			k = p;
p                1670 tools/perf/util/symbol-elf.c 	kcore_copy__for_each_phdr(kci, p) {
p                1671 tools/perf/util/symbol-elf.c 		u64 pend = p->offset + p->len;
p                1673 tools/perf/util/symbol-elf.c 		if (p == k)
p                1676 tools/perf/util/symbol-elf.c 		if (p->offset >= k->offset && pend <= kend)
p                1677 tools/perf/util/symbol-elf.c 			p->remaps = k;
p                1683 tools/perf/util/symbol-elf.c 	struct phdr_data *p;
p                1688 tools/perf/util/symbol-elf.c 	kcore_copy__for_each_phdr(kci, p) {
p                1689 tools/perf/util/symbol-elf.c 		if (!p->remaps) {
p                1690 tools/perf/util/symbol-elf.c 			p->rel = rel;
p                1691 tools/perf/util/symbol-elf.c 			rel += p->len;
p                1696 tools/perf/util/symbol-elf.c 	kcore_copy__for_each_phdr(kci, p) {
p                1697 tools/perf/util/symbol-elf.c 		struct phdr_data *k = p->remaps;
p                1700 tools/perf/util/symbol-elf.c 			p->rel = p->offset - k->offset + k->rel;
p                1872 tools/perf/util/symbol-elf.c 	struct phdr_data *p;
p                1902 tools/perf/util/symbol-elf.c 	kcore_copy__for_each_phdr(&kci, p) {
p                1903 tools/perf/util/symbol-elf.c 		off_t offs = p->rel + offset;
p                1905 tools/perf/util/symbol-elf.c 		if (kcore__add_phdr(&extract, idx++, offs, p->addr, p->len))
p                1913 tools/perf/util/symbol-elf.c 	kcore_copy__for_each_phdr(&kci, p) {
p                1914 tools/perf/util/symbol-elf.c 		off_t offs = p->rel + offset;
p                1916 tools/perf/util/symbol-elf.c 		if (p->remaps)
p                1918 tools/perf/util/symbol-elf.c 		if (copy_bytes(kcore.fd, p->offset, extract.fd, offs, p->len))
p                 103 tools/perf/util/symbol.c void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
p                 105 tools/perf/util/symbol.c 	p->end = c->start;
p                 320 tools/perf/util/symbol.c 	struct rb_node **p = &symbols->rb_root.rb_node;
p                 337 tools/perf/util/symbol.c 	while (*p != NULL) {
p                 338 tools/perf/util/symbol.c 		parent = *p;
p                 341 tools/perf/util/symbol.c 			p = &(*p)->rb_left;
p                 343 tools/perf/util/symbol.c 			p = &(*p)->rb_right;
p                 347 tools/perf/util/symbol.c 	rb_link_node(&sym->rb_node, parent, p);
p                 411 tools/perf/util/symbol.c 	struct rb_node **p = &symbols->rb_root.rb_node;
p                 418 tools/perf/util/symbol.c 	while (*p != NULL) {
p                 419 tools/perf/util/symbol.c 		parent = *p;
p                 422 tools/perf/util/symbol.c 			p = &(*p)->rb_left;
p                 424 tools/perf/util/symbol.c 			p = &(*p)->rb_right;
p                 428 tools/perf/util/symbol.c 	rb_link_node(&symn->rb_node, parent, p);
p                 927 tools/perf/util/symbol.c 	struct rb_node **p = &modules->rb_node;
p                 931 tools/perf/util/symbol.c 	while (*p != NULL) {
p                 932 tools/perf/util/symbol.c 		parent = *p;
p                 935 tools/perf/util/symbol.c 			p = &(*p)->rb_left;
p                 937 tools/perf/util/symbol.c 			p = &(*p)->rb_right;
p                 939 tools/perf/util/symbol.c 	rb_link_node(&mi->rb_node, parent, p);
p                 236 tools/perf/util/symbol.h void arch__symbols__fixup_end(struct symbol *p, struct symbol *c);
p                 788 tools/perf/util/thread-stack.c 	const char *p = strstr(name, "__x86_indirect_thunk_");
p                 790 tools/perf/util/thread-stack.c 	return p == name || !strcmp(name, "__indirect_thunk_start");
p                 120 tools/perf/util/thread.h static inline void thread__set_priv(struct thread *thread, void *p)
p                 122 tools/perf/util/thread.h 	thread->priv = p;
p                 124 tools/perf/util/time-utils.c 	char *str, *arg, *p;
p                 141 tools/perf/util/time-utils.c 	for (i = 0, p = str; i < num - 1; i++) {
p                 142 tools/perf/util/time-utils.c 		arg = p;
p                 144 tools/perf/util/time-utils.c 		p = skip_spaces(strchr(p, ',') + 1);
p                 146 tools/perf/util/time-utils.c 		while (*p && !isspace(*p)) {
p                 147 tools/perf/util/time-utils.c 			if (*p++ == ',') {
p                 153 tools/perf/util/time-utils.c 		if (*p)
p                 154 tools/perf/util/time-utils.c 			*p++ = 0;
p                 161 tools/perf/util/time-utils.c 	rc = perf_time__parse_str(ptime + i, p);
p                 221 tools/perf/util/time-utils.c 	char *p, *end_str;
p                 231 tools/perf/util/time-utils.c 	p = strchr(str, '/');
p                 232 tools/perf/util/time-utils.c 	if (!p)
p                 235 tools/perf/util/time-utils.c 	*p = '\0';
p                 239 tools/perf/util/time-utils.c 	p++;
p                 240 tools/perf/util/time-utils.c 	i = (int)strtol(p, &end_str, 10);
p                 111 tools/perf/util/unwind-libunwind-local.c static int __dw_read_encoded_value(u8 **p, u8 *end, u64 *val,
p                 114 tools/perf/util/unwind-libunwind-local.c 	u8 *cur = *p;
p                 159 tools/perf/util/unwind-libunwind-local.c 	*p = cur;
p                 125 tools/power/acpi/tools/acpidbg/acpidbg.c 	char *p;
p                 128 tools/power/acpi/tools/acpidbg/acpidbg.c 	p = &crc->buf[crc->head];
p                 130 tools/power/acpi/tools/acpidbg/acpidbg.c 	len = read(fd, p, len);
p                 140 tools/power/acpi/tools/acpidbg/acpidbg.c 	char *p;
p                 144 tools/power/acpi/tools/acpidbg/acpidbg.c 	p = &crc->buf[crc->head];
p                 147 tools/power/acpi/tools/acpidbg/acpidbg.c 		memcpy(p, acpi_aml_batch_pos, remained);
p                 151 tools/power/acpi/tools/acpidbg/acpidbg.c 		memcpy(p, acpi_aml_batch_pos, len);
p                 161 tools/power/acpi/tools/acpidbg/acpidbg.c 	char *p;
p                 165 tools/power/acpi/tools/acpidbg/acpidbg.c 	p = &crc->buf[crc->head];
p                 169 tools/power/acpi/tools/acpidbg/acpidbg.c 			*p = acpi_aml_batch_roll;
p                 175 tools/power/acpi/tools/acpidbg/acpidbg.c 			len = read(fd, p, 1);
p                 185 tools/power/acpi/tools/acpidbg/acpidbg.c 			if (*p == '\n')
p                 191 tools/power/acpi/tools/acpidbg/acpidbg.c 			if (*p == ACPI_DEBUGGER_COMMAND_PROMPT ||
p                 192 tools/power/acpi/tools/acpidbg/acpidbg.c 			    *p == ACPI_DEBUGGER_EXECUTE_PROMPT) {
p                 193 tools/power/acpi/tools/acpidbg/acpidbg.c 				acpi_aml_batch_prompt = *p;
p                 196 tools/power/acpi/tools/acpidbg/acpidbg.c 				if (*p != '\n')
p                 203 tools/power/acpi/tools/acpidbg/acpidbg.c 			if (*p == ' ') {
p                 209 tools/power/acpi/tools/acpidbg/acpidbg.c 				acpi_aml_batch_roll = *p;
p                 210 tools/power/acpi/tools/acpidbg/acpidbg.c 				*p = acpi_aml_batch_prompt;
p                 225 tools/power/acpi/tools/acpidbg/acpidbg.c 	char *p;
p                 228 tools/power/acpi/tools/acpidbg/acpidbg.c 	p = &crc->buf[crc->tail];
p                 230 tools/power/acpi/tools/acpidbg/acpidbg.c 	len = write(fd, p, len);
p                 240 tools/power/acpi/tools/acpidbg/acpidbg.c 	char *p;
p                 243 tools/power/acpi/tools/acpidbg/acpidbg.c 	p = &crc->buf[crc->tail];
p                 246 tools/power/acpi/tools/acpidbg/acpidbg.c 		len = write(fd, p, len);
p                  61 tools/power/cpupower/debug/i386/dump_psb.c decode_pst(char *p, int npstates)
p                  67 tools/power/cpupower/debug/i386/dump_psb.c 		fid = *p++;
p                  68 tools/power/cpupower/debug/i386/dump_psb.c 		vid = *p++;
p                  82 tools/power/cpupower/debug/i386/dump_psb.c void decode_psb(char *p, int numpst)
p                  88 tools/power/cpupower/debug/i386/dump_psb.c 	psb = (struct psb_header*) p;
p                 104 tools/power/cpupower/debug/i386/dump_psb.c 	p = ((char *) psb) + sizeof(struct psb_header);
p                 112 tools/power/cpupower/debug/i386/dump_psb.c 		pst = (struct pst_header*) p;
p                 128 tools/power/cpupower/debug/i386/dump_psb.c 		decode_pst(p + sizeof(struct pst_header), pst->numpstates);
p                 131 tools/power/cpupower/debug/i386/dump_psb.c 		p += sizeof(struct pst_header) + 2*pst->numpstates;
p                 155 tools/power/cpupower/debug/i386/dump_psb.c 	char *p;
p                 187 tools/power/cpupower/debug/i386/dump_psb.c 	for (p = mem; p - mem < LEN; p+=16) {
p                 188 tools/power/cpupower/debug/i386/dump_psb.c 		if (memcmp(p, "AMDK7PNOW!", 10) == 0) {
p                 189 tools/power/cpupower/debug/i386/dump_psb.c 			decode_psb(p, numpst);
p                 222 tools/power/cpupower/utils/cpupower.c 		struct cmd_struct *p = commands + i;
p                 223 tools/power/cpupower/utils/cpupower.c 		if (strcmp(p->cmd, cmd))
p                 225 tools/power/cpupower/utils/cpupower.c 		if (!run_as_root && p->needs_root) {
p                 230 tools/power/cpupower/utils/cpupower.c 		ret = p->main(argc, argv);
p                 194 tools/power/cpupower/utils/helpers/bitmask.c 	const char *p, *q;
p                 199 tools/power/cpupower/utils/helpers/bitmask.c 	while (p = q, q = nexttoken(q, ','), p) {
p                 207 tools/power/cpupower/utils/helpers/bitmask.c 		sret = sscanf(p, "%u%c", &a, &nextc);
p                 212 tools/power/cpupower/utils/helpers/bitmask.c 		c1 = nexttoken(p, '-');
p                 213 tools/power/cpupower/utils/helpers/bitmask.c 		c2 = nexttoken(p, ',');
p                 332 tools/power/x86/turbostat/turbostat.c 					struct pkg_data *p;
p                 343 tools/power/x86/turbostat/turbostat.c 					p = GET_PKG(pkg_base, pkg_no);
p                 345 tools/power/x86/turbostat/turbostat.c 					retval = func(t, c, p);
p                 800 tools/power/x86/turbostat/turbostat.c 	struct pkg_data *p)
p                 805 tools/power/x86/turbostat/turbostat.c 	outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p);
p                 841 tools/power/x86/turbostat/turbostat.c 	if (p) {
p                 842 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "package: %d\n", p->package_id);
p                 844 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "Weighted cores: %016llX\n", p->pkg_wtd_core_c0);
p                 845 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "Any cores: %016llX\n", p->pkg_any_core_c0);
p                 846 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "Any GFX: %016llX\n", p->pkg_any_gfxe_c0);
p                 847 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "CPU + GFX: %016llX\n", p->pkg_both_core_gfxe_c0);
p                 849 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "pc2: %016llX\n", p->pc2);
p                 851 tools/power/x86/turbostat/turbostat.c 			outp += sprintf(outp, "pc3: %016llX\n", p->pc3);
p                 853 tools/power/x86/turbostat/turbostat.c 			outp += sprintf(outp, "pc6: %016llX\n", p->pc6);
p                 855 tools/power/x86/turbostat/turbostat.c 			outp += sprintf(outp, "pc7: %016llX\n", p->pc7);
p                 856 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
p                 857 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
p                 858 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
p                 859 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "cpu_lpi: %016llX\n", p->cpu_lpi);
p                 860 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "sys_lpi: %016llX\n", p->sys_lpi);
p                 861 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg);
p                 862 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "Joules COR: %0X\n", p->energy_cores);
p                 863 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "Joules GFX: %0X\n", p->energy_gfx);
p                 864 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "Joules RAM: %0X\n", p->energy_dram);
p                 866 tools/power/x86/turbostat/turbostat.c 			p->rapl_pkg_perf_status);
p                 868 tools/power/x86/turbostat/turbostat.c 			p->rapl_dram_perf_status);
p                 869 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c);
p                 873 tools/power/x86/turbostat/turbostat.c 				i, mp->msr_num, p->counter[i]);
p                 886 tools/power/x86/turbostat/turbostat.c 	struct pkg_data *p)
p                 942 tools/power/x86/turbostat/turbostat.c 			if (p)
p                 943 tools/power/x86/turbostat/turbostat.c 				outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->package_id);
p                1085 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->pkg_temp_c);
p                1089 tools/power/x86/turbostat/turbostat.c 		if (p->gfx_rc6_ms == -1) {	/* detect GFX counter reset */
p                1093 tools/power/x86/turbostat/turbostat.c 				p->gfx_rc6_ms / 10.0 / interval_float);
p                1099 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->gfx_mhz);
p                1103 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_wtd_core_c0/tsc);
p                1105 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_core_c0/tsc);
p                1107 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_gfxe_c0/tsc);
p                1109 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_both_core_gfxe_c0/tsc);
p                1112 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc2/tsc);
p                1114 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc3/tsc);
p                1116 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc6/tsc);
p                1118 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc7/tsc);
p                1120 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc8/tsc);
p                1122 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc9/tsc);
p                1124 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc10/tsc);
p                1127 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->cpu_lpi / 1000000.0 / interval_float);
p                1129 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float);
p                1132 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float);
p                1134 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float);
p                1136 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float);
p                1138 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float);
p                1140 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units);
p                1142 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units);
p                1144 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units);
p                1146 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units);
p                1148 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, fmt8, (printed++ ? delim : ""), 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
p                1150 tools/power/x86/turbostat/turbostat.c 		outp += sprintf(outp, fmt8, (printed++ ? delim : ""), 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
p                1155 tools/power/x86/turbostat/turbostat.c 				outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) p->counter[i]);
p                1157 tools/power/x86/turbostat/turbostat.c 				outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), p->counter[i]);
p                1160 tools/power/x86/turbostat/turbostat.c 				outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), p->counter[i]);
p                1162 tools/power/x86/turbostat/turbostat.c 				outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), p->counter[i]);
p                1164 tools/power/x86/turbostat/turbostat.c 			outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->counter[i]/tsc);
p                1195 tools/power/x86/turbostat/turbostat.c void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
p                1209 tools/power/x86/turbostat/turbostat.c 	for_all_cpus(format_counters, t, c, p);
p                1393 tools/power/x86/turbostat/turbostat.c 	struct pkg_data *p, struct thread_data *t2,
p                1409 tools/power/x86/turbostat/turbostat.c 		retval = delta_package(p, p2);
p                1414 tools/power/x86/turbostat/turbostat.c void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
p                1444 tools/power/x86/turbostat/turbostat.c 	p->pkg_wtd_core_c0 = 0;
p                1445 tools/power/x86/turbostat/turbostat.c 	p->pkg_any_core_c0 = 0;
p                1446 tools/power/x86/turbostat/turbostat.c 	p->pkg_any_gfxe_c0 = 0;
p                1447 tools/power/x86/turbostat/turbostat.c 	p->pkg_both_core_gfxe_c0 = 0;
p                1449 tools/power/x86/turbostat/turbostat.c 	p->pc2 = 0;
p                1451 tools/power/x86/turbostat/turbostat.c 		p->pc3 = 0;
p                1453 tools/power/x86/turbostat/turbostat.c 		p->pc6 = 0;
p                1455 tools/power/x86/turbostat/turbostat.c 		p->pc7 = 0;
p                1456 tools/power/x86/turbostat/turbostat.c 	p->pc8 = 0;
p                1457 tools/power/x86/turbostat/turbostat.c 	p->pc9 = 0;
p                1458 tools/power/x86/turbostat/turbostat.c 	p->pc10 = 0;
p                1459 tools/power/x86/turbostat/turbostat.c 	p->cpu_lpi = 0;
p                1460 tools/power/x86/turbostat/turbostat.c 	p->sys_lpi = 0;
p                1462 tools/power/x86/turbostat/turbostat.c 	p->energy_pkg = 0;
p                1463 tools/power/x86/turbostat/turbostat.c 	p->energy_dram = 0;
p                1464 tools/power/x86/turbostat/turbostat.c 	p->energy_cores = 0;
p                1465 tools/power/x86/turbostat/turbostat.c 	p->energy_gfx = 0;
p                1466 tools/power/x86/turbostat/turbostat.c 	p->rapl_pkg_perf_status = 0;
p                1467 tools/power/x86/turbostat/turbostat.c 	p->rapl_dram_perf_status = 0;
p                1468 tools/power/x86/turbostat/turbostat.c 	p->pkg_temp_c = 0;
p                1470 tools/power/x86/turbostat/turbostat.c 	p->gfx_rc6_ms = 0;
p                1471 tools/power/x86/turbostat/turbostat.c 	p->gfx_mhz = 0;
p                1479 tools/power/x86/turbostat/turbostat.c 		p->counter[i] = 0;
p                1482 tools/power/x86/turbostat/turbostat.c 	struct pkg_data *p)
p                1538 tools/power/x86/turbostat/turbostat.c 		average.packages.pkg_wtd_core_c0 += p->pkg_wtd_core_c0;
p                1540 tools/power/x86/turbostat/turbostat.c 		average.packages.pkg_any_core_c0 += p->pkg_any_core_c0;
p                1542 tools/power/x86/turbostat/turbostat.c 		average.packages.pkg_any_gfxe_c0 += p->pkg_any_gfxe_c0;
p                1544 tools/power/x86/turbostat/turbostat.c 		average.packages.pkg_both_core_gfxe_c0 += p->pkg_both_core_gfxe_c0;
p                1546 tools/power/x86/turbostat/turbostat.c 	average.packages.pc2 += p->pc2;
p                1548 tools/power/x86/turbostat/turbostat.c 		average.packages.pc3 += p->pc3;
p                1550 tools/power/x86/turbostat/turbostat.c 		average.packages.pc6 += p->pc6;
p                1552 tools/power/x86/turbostat/turbostat.c 		average.packages.pc7 += p->pc7;
p                1553 tools/power/x86/turbostat/turbostat.c 	average.packages.pc8 += p->pc8;
p                1554 tools/power/x86/turbostat/turbostat.c 	average.packages.pc9 += p->pc9;
p                1555 tools/power/x86/turbostat/turbostat.c 	average.packages.pc10 += p->pc10;
p                1557 tools/power/x86/turbostat/turbostat.c 	average.packages.cpu_lpi = p->cpu_lpi;
p                1558 tools/power/x86/turbostat/turbostat.c 	average.packages.sys_lpi = p->sys_lpi;
p                1560 tools/power/x86/turbostat/turbostat.c 	average.packages.energy_pkg += p->energy_pkg;
p                1561 tools/power/x86/turbostat/turbostat.c 	average.packages.energy_dram += p->energy_dram;
p                1562 tools/power/x86/turbostat/turbostat.c 	average.packages.energy_cores += p->energy_cores;
p                1563 tools/power/x86/turbostat/turbostat.c 	average.packages.energy_gfx += p->energy_gfx;
p                1565 tools/power/x86/turbostat/turbostat.c 	average.packages.gfx_rc6_ms = p->gfx_rc6_ms;
p                1566 tools/power/x86/turbostat/turbostat.c 	average.packages.gfx_mhz = p->gfx_mhz;
p                1568 tools/power/x86/turbostat/turbostat.c 	average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c);
p                1570 tools/power/x86/turbostat/turbostat.c 	average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status;
p                1571 tools/power/x86/turbostat/turbostat.c 	average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status;
p                1576 tools/power/x86/turbostat/turbostat.c 		average.packages.counter[i] += p->counter[i];
p                1585 tools/power/x86/turbostat/turbostat.c 	struct pkg_data *p)
p                1592 tools/power/x86/turbostat/turbostat.c 	for_all_cpus(sum_counters, t, c, p);
p                1778 tools/power/x86/turbostat/turbostat.c int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
p                1919 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0))
p                1923 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0))
p                1927 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0))
p                1931 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0))
p                1935 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
p                1939 tools/power/x86/turbostat/turbostat.c 			if (get_msr(cpu, MSR_ATOM_PKG_C6_RESIDENCY, &p->pc6))
p                1942 tools/power/x86/turbostat/turbostat.c 			if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
p                1948 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
p                1951 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
p                1954 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
p                1957 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
p                1960 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
p                1964 tools/power/x86/turbostat/turbostat.c 		p->cpu_lpi = cpuidle_cur_cpu_lpi_us;
p                1966 tools/power/x86/turbostat/turbostat.c 		p->sys_lpi = cpuidle_cur_sys_lpi_us;
p                1971 tools/power/x86/turbostat/turbostat.c 		p->energy_pkg = msr & 0xFFFFFFFF;
p                1976 tools/power/x86/turbostat/turbostat.c 		p->energy_cores = msr & 0xFFFFFFFF;
p                1981 tools/power/x86/turbostat/turbostat.c 		p->energy_dram = msr & 0xFFFFFFFF;
p                1986 tools/power/x86/turbostat/turbostat.c 		p->energy_gfx = msr & 0xFFFFFFFF;
p                1991 tools/power/x86/turbostat/turbostat.c 		p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
p                1996 tools/power/x86/turbostat/turbostat.c 		p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
p                2001 tools/power/x86/turbostat/turbostat.c 		p->energy_pkg = msr & 0xFFFFFFFF;
p                2006 tools/power/x86/turbostat/turbostat.c 		p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
p                2010 tools/power/x86/turbostat/turbostat.c 		p->gfx_rc6_ms = gfx_cur_rc6_ms;
p                2013 tools/power/x86/turbostat/turbostat.c 		p->gfx_mhz = gfx_cur_mhz;
p                2016 tools/power/x86/turbostat/turbostat.c 		if (get_mp(cpu, mp, &p->counter[i]))
p                2699 tools/power/x86/turbostat/turbostat.c 					struct pkg_data *p, *p2;
p                2718 tools/power/x86/turbostat/turbostat.c 					p = GET_PKG(pkg_base, pkg_no);
p                2721 tools/power/x86/turbostat/turbostat.c 					retval = func(t, c, p, t2, c2, p2);
p                3594 tools/power/x86/turbostat/turbostat.c int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
p                3639 tools/power/x86/turbostat/turbostat.c int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
p                3730 tools/power/x86/turbostat/turbostat.c int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p)
p                4097 tools/power/x86/turbostat/turbostat.c int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
p                4170 tools/power/x86/turbostat/turbostat.c int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
p                4456 tools/power/x86/turbostat/turbostat.c int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
p                5102 tools/power/x86/turbostat/turbostat.c 		  struct pkg_data **p)
p                5123 tools/power/x86/turbostat/turbostat.c 	*p = calloc(topo.num_packages, sizeof(struct pkg_data));
p                5124 tools/power/x86/turbostat/turbostat.c 	if (*p == NULL)
p                5128 tools/power/x86/turbostat/turbostat.c 		(*p)[i].package_id = i;
p                5148 tools/power/x86/turbostat/turbostat.c 	struct pkg_data *p;
p                5159 tools/power/x86/turbostat/turbostat.c 	p = GET_PKG(pkg_base, pkg_id);
p                5169 tools/power/x86/turbostat/turbostat.c 	p->package_id = pkg_id;
p                 398 tools/testing/nvdimm/test/iomap.c 		struct acpi_object_list *p, struct acpi_buffer *buf)
p                 404 tools/testing/nvdimm/test/iomap.c 		return acpi_evaluate_object(handle, path, p, buf);
p                   7 tools/testing/radix-tree/bitmap.c 	unsigned long *p = map + BIT_WORD(start);
p                  13 tools/testing/radix-tree/bitmap.c 		*p &= ~mask_to_clear;
p                  17 tools/testing/radix-tree/bitmap.c 		p++;
p                  21 tools/testing/radix-tree/bitmap.c 		*p &= ~mask_to_clear;
p                  16 tools/testing/radix-tree/idr-test.c int item_idr_free(int id, void *p, void *data)
p                  18 tools/testing/radix-tree/idr-test.c 	struct item *item = p;
p                  20 tools/testing/radix-tree/idr-test.c 	free(p);
p                  90 tools/testing/radix-tree/linux.c void kfree(void *p)
p                  92 tools/testing/radix-tree/linux.c 	if (!p)
p                  96 tools/testing/radix-tree/linux.c 		printf("Freeing %p to malloc\n", p);
p                  97 tools/testing/radix-tree/linux.c 	free(p);
p                   7 tools/testing/radix-tree/linux/rcupdate.h #define rcu_dereference_raw(p) rcu_dereference(p)
p                   8 tools/testing/radix-tree/linux/rcupdate.h #define rcu_dereference_protected(p, cond) rcu_dereference(p)
p                   9 tools/testing/radix-tree/linux/rcupdate.h #define rcu_dereference_check(p, cond) rcu_dereference(p)
p                  10 tools/testing/radix-tree/linux/rcupdate.h #define RCU_INIT_POINTER(p, v)	do { (p) = (v); } while (0)
p                  57 tools/testing/radix-tree/regression1.c 	struct page *p;
p                  58 tools/testing/radix-tree/regression1.c 	p = malloc(sizeof(struct page));
p                  59 tools/testing/radix-tree/regression1.c 	p->count = 1;
p                  60 tools/testing/radix-tree/regression1.c 	p->index = index;
p                  61 tools/testing/radix-tree/regression1.c 	pthread_mutex_init(&p->lock, NULL);
p                  63 tools/testing/radix-tree/regression1.c 	return p;
p                  68 tools/testing/radix-tree/regression1.c 	struct page *p = container_of(rcu, struct page, rcu);
p                  69 tools/testing/radix-tree/regression1.c 	assert(!p->count);
p                  70 tools/testing/radix-tree/regression1.c 	pthread_mutex_destroy(&p->lock);
p                  71 tools/testing/radix-tree/regression1.c 	free(p);
p                  74 tools/testing/radix-tree/regression1.c static void page_free(struct page *p)
p                  76 tools/testing/radix-tree/regression1.c 	call_rcu(&p->rcu, page_rcu_free);
p                 125 tools/testing/radix-tree/regression1.c 			struct page *p;
p                 127 tools/testing/radix-tree/regression1.c 			p = page_alloc(0);
p                 129 tools/testing/radix-tree/regression1.c 			radix_tree_insert(&mt_tree, 0, p);
p                 132 tools/testing/radix-tree/regression1.c 			p = page_alloc(1);
p                 134 tools/testing/radix-tree/regression1.c 			radix_tree_insert(&mt_tree, 1, p);
p                 138 tools/testing/radix-tree/regression1.c 			p = radix_tree_delete(&mt_tree, 1);
p                 139 tools/testing/radix-tree/regression1.c 			pthread_mutex_lock(&p->lock);
p                 140 tools/testing/radix-tree/regression1.c 			p->count--;
p                 141 tools/testing/radix-tree/regression1.c 			pthread_mutex_unlock(&p->lock);
p                 143 tools/testing/radix-tree/regression1.c 			page_free(p);
p                 146 tools/testing/radix-tree/regression1.c 			p = radix_tree_delete(&mt_tree, 0);
p                 147 tools/testing/radix-tree/regression1.c 			pthread_mutex_lock(&p->lock);
p                 148 tools/testing/radix-tree/regression1.c 			p->count--;
p                 149 tools/testing/radix-tree/regression1.c 			pthread_mutex_unlock(&p->lock);
p                 151 tools/testing/radix-tree/regression1.c 			page_free(p);
p                  69 tools/testing/radix-tree/regression2.c 	struct page *p;
p                  70 tools/testing/radix-tree/regression2.c 	p = malloc(sizeof(struct page));
p                  71 tools/testing/radix-tree/regression2.c 	p->index = page_count++;
p                  73 tools/testing/radix-tree/regression2.c 	return p;
p                  79 tools/testing/radix-tree/regression2.c 	struct page *p;
p                  87 tools/testing/radix-tree/regression2.c 		p = page_alloc();
p                  88 tools/testing/radix-tree/regression2.c 		radix_tree_insert(&mt_tree, i, p);
p                  99 tools/testing/radix-tree/regression2.c 	p = page_alloc();
p                 100 tools/testing/radix-tree/regression2.c 	radix_tree_insert(&mt_tree, max_slots, p);
p                  39 tools/testing/scatterlist/linux/mm.h #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
p                 122 tools/testing/scatterlist/linux/mm.h #define PageSlab(p) (0)
p                 123 tools/testing/scatterlist/linux/mm.h #define flush_kernel_dcache_page(p)
p                 137 tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c 	const int_t *p;
p                 201 tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c 		int p;
p                  25 tools/testing/selftests/bpf/progs/socket_cookie_prog.c 	struct socket_cookie *p;
p                  30 tools/testing/selftests/bpf/progs/socket_cookie_prog.c 	p = bpf_sk_storage_get(&socket_cookies, ctx->sk, 0,
p                  32 tools/testing/selftests/bpf/progs/socket_cookie_prog.c 	if (!p)
p                  35 tools/testing/selftests/bpf/progs/socket_cookie_prog.c 	p->cookie_value = 0xFF;
p                  36 tools/testing/selftests/bpf/progs/socket_cookie_prog.c 	p->cookie_key = bpf_get_socket_cookie(ctx);
p                  45 tools/testing/selftests/bpf/progs/socket_cookie_prog.c 	struct socket_cookie *p;
p                  56 tools/testing/selftests/bpf/progs/socket_cookie_prog.c 	p = bpf_sk_storage_get(&socket_cookies, ctx->sk, 0, 0);
p                  57 tools/testing/selftests/bpf/progs/socket_cookie_prog.c 	if (!p)
p                  60 tools/testing/selftests/bpf/progs/socket_cookie_prog.c 	if (p->cookie_key != bpf_get_socket_cookie(ctx))
p                  63 tools/testing/selftests/bpf/progs/socket_cookie_prog.c 	p->cookie_value = (ctx->local_port << 8) | p->cookie_value;
p                 215 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	__u16 *p = data;
p                 217 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	p[7] = p[5]; /* delete p[7] was vlan_hdr->h_vlan_TCI */
p                 218 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	p[6] = p[4]; /* delete p[6] was ethhdr->h_proto */
p                 219 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	p[5] = p[3];
p                 220 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	p[4] = p[2];
p                 221 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	p[3] = p[1];
p                 222 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	p[2] = p[0];
p                 228 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	__u32 *p = data;
p                 235 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	p[3] = p[2];
p                 236 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	p[2] = p[1];
p                 237 tools/testing/selftests/bpf/progs/test_xdp_vlan.c 	p[1] = p[0];
p                  29 tools/testing/selftests/bpf/progs/xdping_kern.c 	unsigned short *p = data;
p                  32 tools/testing/selftests/bpf/progs/xdping_kern.c 	dst[0] = p[0];
p                  33 tools/testing/selftests/bpf/progs/xdping_kern.c 	dst[1] = p[1];
p                  34 tools/testing/selftests/bpf/progs/xdping_kern.c 	dst[2] = p[2];
p                  35 tools/testing/selftests/bpf/progs/xdping_kern.c 	p[0] = p[3];
p                  36 tools/testing/selftests/bpf/progs/xdping_kern.c 	p[1] = p[4];
p                  37 tools/testing/selftests/bpf/progs/xdping_kern.c 	p[2] = p[5];
p                  38 tools/testing/selftests/bpf/progs/xdping_kern.c 	p[3] = dst[0];
p                  39 tools/testing/selftests/bpf/progs/xdping_kern.c 	p[4] = dst[1];
p                  40 tools/testing/selftests/bpf/progs/xdping_kern.c 	p[5] = dst[2];
p                 867 tools/testing/selftests/bpf/test_verifier.c 	const char *p, *q;
p                 871 tools/testing/selftests/bpf/test_verifier.c 		p = strchr(exp, '\t');
p                 872 tools/testing/selftests/bpf/test_verifier.c 		if (!p)
p                 873 tools/testing/selftests/bpf/test_verifier.c 			p = exp + strlen(exp);
p                 875 tools/testing/selftests/bpf/test_verifier.c 		len = p - exp;
p                 889 tools/testing/selftests/bpf/test_verifier.c 		exp = p + 1;
p                 890 tools/testing/selftests/bpf/test_verifier.c 	} while (*p);
p                  60 tools/testing/selftests/kvm/include/x86_64/processor.h 	unsigned base1:8, s:1, type:4, dpl:2, p:1;
p                 452 tools/testing/selftests/kvm/lib/x86_64/processor.c 	desc->p = segp->present;
p                 125 tools/testing/selftests/memfd/fuse_test.c 	void *p;
p                 127 tools/testing/selftests/memfd/fuse_test.c 	p = mmap(NULL,
p                 133 tools/testing/selftests/memfd/fuse_test.c 	if (p == MAP_FAILED) {
p                 138 tools/testing/selftests/memfd/fuse_test.c 	return p;
p                 143 tools/testing/selftests/memfd/fuse_test.c 	void *p;
p                 145 tools/testing/selftests/memfd/fuse_test.c 	p = mmap(NULL,
p                 151 tools/testing/selftests/memfd/fuse_test.c 	if (p == MAP_FAILED) {
p                 156 tools/testing/selftests/memfd/fuse_test.c 	return p;
p                 231 tools/testing/selftests/memfd/fuse_test.c 	void *p;
p                 273 tools/testing/selftests/memfd/fuse_test.c 	p = mfd_assert_mmap_shared(mfd);
p                 278 tools/testing/selftests/memfd/fuse_test.c 	global_p = p;
p                 288 tools/testing/selftests/memfd/fuse_test.c 	r = read(fd, p, mfd_def_size);
p                 314 tools/testing/selftests/memfd/fuse_test.c 	p = mfd_assert_mmap_private(mfd);
p                 315 tools/testing/selftests/memfd/fuse_test.c 	if (was_sealed && memcmp(p, zero, mfd_def_size)) {
p                 318 tools/testing/selftests/memfd/fuse_test.c 	} else if (!was_sealed && !memcmp(p, zero, mfd_def_size)) {
p                 173 tools/testing/selftests/memfd/memfd_test.c 	void *p;
p                 175 tools/testing/selftests/memfd/memfd_test.c 	p = mmap(NULL,
p                 181 tools/testing/selftests/memfd/memfd_test.c 	if (p == MAP_FAILED) {
p                 186 tools/testing/selftests/memfd/memfd_test.c 	return p;
p                 191 tools/testing/selftests/memfd/memfd_test.c 	void *p;
p                 193 tools/testing/selftests/memfd/memfd_test.c 	p = mmap(NULL,
p                 199 tools/testing/selftests/memfd/memfd_test.c 	if (p == MAP_FAILED) {
p                 204 tools/testing/selftests/memfd/memfd_test.c 	return p;
p                 238 tools/testing/selftests/memfd/memfd_test.c 	void *p;
p                 248 tools/testing/selftests/memfd/memfd_test.c 	p = mmap(NULL,
p                 254 tools/testing/selftests/memfd/memfd_test.c 	if (p == MAP_FAILED) {
p                 258 tools/testing/selftests/memfd/memfd_test.c 	munmap(p, mfd_def_size);
p                 261 tools/testing/selftests/memfd/memfd_test.c 	p = mmap(NULL,
p                 267 tools/testing/selftests/memfd/memfd_test.c 	if (p == MAP_FAILED) {
p                 271 tools/testing/selftests/memfd/memfd_test.c 	munmap(p, mfd_def_size);
p                 277 tools/testing/selftests/memfd/memfd_test.c 	void *p;
p                 280 tools/testing/selftests/memfd/memfd_test.c 	p = mmap(NULL,
p                 286 tools/testing/selftests/memfd/memfd_test.c 	if (p == MAP_FAILED) {
p                 290 tools/testing/selftests/memfd/memfd_test.c 	munmap(p, mfd_def_size);
p                 296 tools/testing/selftests/memfd/memfd_test.c 	void *p;
p                 313 tools/testing/selftests/memfd/memfd_test.c 	p = mmap(NULL,
p                 319 tools/testing/selftests/memfd/memfd_test.c 	if (p == MAP_FAILED) {
p                 323 tools/testing/selftests/memfd/memfd_test.c 	*(char *)p = 0;
p                 324 tools/testing/selftests/memfd/memfd_test.c 	munmap(p, mfd_def_size);
p                 327 tools/testing/selftests/memfd/memfd_test.c 	p = mmap(NULL,
p                 333 tools/testing/selftests/memfd/memfd_test.c 	if (p == MAP_FAILED) {
p                 337 tools/testing/selftests/memfd/memfd_test.c 	*(char *)p = 0;
p                 338 tools/testing/selftests/memfd/memfd_test.c 	munmap(p, mfd_def_size);
p                 342 tools/testing/selftests/memfd/memfd_test.c 	p = mmap(NULL,
p                 348 tools/testing/selftests/memfd/memfd_test.c 	if (p == MAP_FAILED) {
p                 353 tools/testing/selftests/memfd/memfd_test.c 	r = mprotect(p, mfd_def_size, PROT_READ | PROT_WRITE);
p                 359 tools/testing/selftests/memfd/memfd_test.c 	*(char *)p = 0;
p                 360 tools/testing/selftests/memfd/memfd_test.c 	munmap(p, mfd_def_size);
p                 376 tools/testing/selftests/memfd/memfd_test.c 	void *p;
p                 387 tools/testing/selftests/memfd/memfd_test.c 	p = mmap(NULL,
p                 393 tools/testing/selftests/memfd/memfd_test.c 	if (p != MAP_FAILED) {
p                 399 tools/testing/selftests/memfd/memfd_test.c 	p = mmap(NULL,
p                 405 tools/testing/selftests/memfd/memfd_test.c 	if (p != MAP_FAILED) {
p                 412 tools/testing/selftests/memfd/memfd_test.c 	p = mmap(NULL,
p                 418 tools/testing/selftests/memfd/memfd_test.c 	if (p != MAP_FAILED) {
p                 419 tools/testing/selftests/memfd/memfd_test.c 		r = mprotect(p, mfd_def_size, PROT_READ | PROT_WRITE);
p                 737 tools/testing/selftests/memfd/memfd_test.c 	void *p;
p                 745 tools/testing/selftests/memfd/memfd_test.c 	p = mfd_assert_mmap_shared(fd);
p                 763 tools/testing/selftests/memfd/memfd_test.c 	munmap(p, mfd_def_size);
p                 894 tools/testing/selftests/memfd/memfd_test.c 	void *p;
p                 904 tools/testing/selftests/memfd/memfd_test.c 	p = mfd_assert_mmap_shared(fd);
p                 909 tools/testing/selftests/memfd/memfd_test.c 	munmap(p, mfd_def_size);
p                 912 tools/testing/selftests/memfd/memfd_test.c 	p = mfd_assert_mmap_private(fd);
p                 915 tools/testing/selftests/memfd/memfd_test.c 	munmap(p, mfd_def_size);
p                 143 tools/testing/selftests/net/reuseport_bpf.c 	struct sock_fprog p = {
p                 148 tools/testing/selftests/net/reuseport_bpf.c 	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF, &p, sizeof(p)))
p                 152 tools/testing/selftests/net/reuseport_bpf.c static void build_recv_group(const struct test_params p, int fd[], uint16_t mod,
p                 156 tools/testing/selftests/net/reuseport_bpf.c 		new_any_sockaddr(p.recv_family, p.recv_port);
p                 159 tools/testing/selftests/net/reuseport_bpf.c 	for (i = 0; i < p.recv_socks; ++i) {
p                 160 tools/testing/selftests/net/reuseport_bpf.c 		fd[i] = socket(p.recv_family, p.protocol, 0);
p                 175 tools/testing/selftests/net/reuseport_bpf.c 		if (p.protocol == SOCK_STREAM) {
p                 181 tools/testing/selftests/net/reuseport_bpf.c 			if (listen(fd[i], p.recv_socks * 10))
p                 188 tools/testing/selftests/net/reuseport_bpf.c static void send_from(struct test_params p, uint16_t sport, char *buf,
p                 191 tools/testing/selftests/net/reuseport_bpf.c 	struct sockaddr * const saddr = new_any_sockaddr(p.send_family, sport);
p                 193 tools/testing/selftests/net/reuseport_bpf.c 		new_loopback_sockaddr(p.send_family, p.recv_port);
p                 194 tools/testing/selftests/net/reuseport_bpf.c 	const int fd = socket(p.send_family, p.protocol, 0), one = 1;
p                 213 tools/testing/selftests/net/reuseport_bpf.c static void test_recv_order(const struct test_params p, int fd[], int mod)
p                 225 tools/testing/selftests/net/reuseport_bpf.c 	for (i = 0; i < p.recv_socks; ++i) {
p                 236 tools/testing/selftests/net/reuseport_bpf.c 	for (data = 0; data < p.recv_socks * 2; ++data) {
p                 237 tools/testing/selftests/net/reuseport_bpf.c 		sport = p.send_port_min + data;
p                 240 tools/testing/selftests/net/reuseport_bpf.c 		send_from(p, sport, send_buf, sizeof(ndata));
p                 246 tools/testing/selftests/net/reuseport_bpf.c 		if (p.protocol == SOCK_STREAM) {
p                 261 tools/testing/selftests/net/reuseport_bpf.c 		for (i = 0; i < p.recv_socks; ++i)
p                 273 tools/testing/selftests/net/reuseport_bpf.c static void test_reuseport_ebpf(struct test_params p)
p                 275 tools/testing/selftests/net/reuseport_bpf.c 	int i, fd[p.recv_socks];
p                 277 tools/testing/selftests/net/reuseport_bpf.c 	fprintf(stderr, "Testing EBPF mod %zd...\n", p.recv_socks);
p                 278 tools/testing/selftests/net/reuseport_bpf.c 	build_recv_group(p, fd, p.recv_socks, attach_ebpf);
p                 279 tools/testing/selftests/net/reuseport_bpf.c 	test_recv_order(p, fd, p.recv_socks);
p                 281 tools/testing/selftests/net/reuseport_bpf.c 	p.send_port_min += p.recv_socks * 2;
p                 282 tools/testing/selftests/net/reuseport_bpf.c 	fprintf(stderr, "Reprograming, testing mod %zd...\n", p.recv_socks / 2);
p                 283 tools/testing/selftests/net/reuseport_bpf.c 	attach_ebpf(fd[0], p.recv_socks / 2);
p                 284 tools/testing/selftests/net/reuseport_bpf.c 	test_recv_order(p, fd, p.recv_socks / 2);
p                 286 tools/testing/selftests/net/reuseport_bpf.c 	for (i = 0; i < p.recv_socks; ++i)
p                 290 tools/testing/selftests/net/reuseport_bpf.c static void test_reuseport_cbpf(struct test_params p)
p                 292 tools/testing/selftests/net/reuseport_bpf.c 	int i, fd[p.recv_socks];
p                 294 tools/testing/selftests/net/reuseport_bpf.c 	fprintf(stderr, "Testing CBPF mod %zd...\n", p.recv_socks);
p                 295 tools/testing/selftests/net/reuseport_bpf.c 	build_recv_group(p, fd, p.recv_socks, attach_cbpf);
p                 296 tools/testing/selftests/net/reuseport_bpf.c 	test_recv_order(p, fd, p.recv_socks);
p                 298 tools/testing/selftests/net/reuseport_bpf.c 	p.send_port_min += p.recv_socks * 2;
p                 299 tools/testing/selftests/net/reuseport_bpf.c 	fprintf(stderr, "Reprograming, testing mod %zd...\n", p.recv_socks / 2);
p                 300 tools/testing/selftests/net/reuseport_bpf.c 	attach_cbpf(fd[0], p.recv_socks / 2);
p                 301 tools/testing/selftests/net/reuseport_bpf.c 	test_recv_order(p, fd, p.recv_socks / 2);
p                 303 tools/testing/selftests/net/reuseport_bpf.c 	for (i = 0; i < p.recv_socks; ++i)
p                 307 tools/testing/selftests/net/reuseport_bpf.c static void test_extra_filter(const struct test_params p)
p                 310 tools/testing/selftests/net/reuseport_bpf.c 		new_any_sockaddr(p.recv_family, p.recv_port);
p                 314 tools/testing/selftests/net/reuseport_bpf.c 	fd1 = socket(p.recv_family, p.protocol, 0);
p                 317 tools/testing/selftests/net/reuseport_bpf.c 	fd2 = socket(p.recv_family, p.protocol, 0);
p                 339 tools/testing/selftests/net/reuseport_bpf.c static void test_filter_no_reuseport(const struct test_params p)
p                 342 tools/testing/selftests/net/reuseport_bpf.c 		new_any_sockaddr(p.recv_family, p.recv_port);
p                 370 tools/testing/selftests/net/reuseport_bpf.c 	fd = socket(p.recv_family, p.protocol, 0);
p                  85 tools/testing/selftests/net/reuseport_bpf_cpu.c 	struct sock_fprog p = {
p                  90 tools/testing/selftests/net/reuseport_bpf_cpu.c 	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF, &p, sizeof(p)))
p                 398 tools/testing/selftests/net/tls.c 	int p[2];
p                 400 tools/testing/selftests/net/tls.c 	ASSERT_GE(pipe(p), 0);
p                 401 tools/testing/selftests/net/tls.c 	EXPECT_GE(write(p[1], mem_send, send_len), 0);
p                 402 tools/testing/selftests/net/tls.c 	EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), 0);
p                 413 tools/testing/selftests/net/tls.c 	int p[2];
p                 415 tools/testing/selftests/net/tls.c 	ASSERT_GE(pipe(p), 0);
p                 417 tools/testing/selftests/net/tls.c 	EXPECT_GE(write(p[1], mem_send, 8000), 0);
p                 418 tools/testing/selftests/net/tls.c 	EXPECT_GE(splice(p[0], NULL, self->fd, NULL, 8000, 0), 0);
p                 433 tools/testing/selftests/net/tls.c 	int p[2];
p                 435 tools/testing/selftests/net/tls.c 	ASSERT_GE(pipe(p), 0);
p                 440 tools/testing/selftests/net/tls.c 	EXPECT_GE(write(p[1], mem_send, send_len), send_len);
p                 441 tools/testing/selftests/net/tls.c 	EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), send_len);
p                 452 tools/testing/selftests/net/tls.c 	int p[2];
p                 454 tools/testing/selftests/net/tls.c 	ASSERT_GE(pipe(p), 0);
p                 456 tools/testing/selftests/net/tls.c 	EXPECT_GE(splice(self->cfd, NULL, p[1], NULL, send_len, 0), 0);
p                 457 tools/testing/selftests/net/tls.c 	EXPECT_GE(read(p[0], mem_recv, send_len), 0);
p                 335 tools/testing/selftests/networking/timestamping/txtimestamp.c static int fill_header_ipv4(void *p)
p                 337 tools/testing/selftests/networking/timestamping/txtimestamp.c 	struct iphdr *iph = p;
p                 353 tools/testing/selftests/networking/timestamping/txtimestamp.c static int fill_header_ipv6(void *p)
p                 355 tools/testing/selftests/networking/timestamping/txtimestamp.c 	struct ipv6hdr *ip6h = p;
p                 372 tools/testing/selftests/networking/timestamping/txtimestamp.c static void fill_header_udp(void *p, bool is_ipv4)
p                 374 tools/testing/selftests/networking/timestamping/txtimestamp.c 	struct udphdr *udph = p;
p                 253 tools/testing/selftests/powerpc/benchmarks/context_switch.c static unsigned long cmpxchg(unsigned long *p, unsigned long expected,
p                 258 tools/testing/selftests/powerpc/benchmarks/context_switch.c 	__atomic_compare_exchange_n(p, &exp, desired, 0,
p                 263 tools/testing/selftests/powerpc/benchmarks/context_switch.c static unsigned long xchg(unsigned long *p, unsigned long val)
p                 265 tools/testing/selftests/powerpc/benchmarks/context_switch.c 	return __atomic_exchange_n(p, val, __ATOMIC_SEQ_CST);
p                  68 tools/testing/selftests/powerpc/benchmarks/null_syscall.c 	char *p, *end;
p                  84 tools/testing/selftests/powerpc/benchmarks/null_syscall.c 			p = strchr(line, ':');
p                  85 tools/testing/selftests/powerpc/benchmarks/null_syscall.c 			if (p != NULL) {
p                  86 tools/testing/selftests/powerpc/benchmarks/null_syscall.c 				v = strtoull(p + 1, &end, 0);
p                  87 tools/testing/selftests/powerpc/benchmarks/null_syscall.c 				if (end != p + 1)
p                  94 tools/testing/selftests/powerpc/benchmarks/null_syscall.c 			p = strchr(line, ':');
p                  95 tools/testing/selftests/powerpc/benchmarks/null_syscall.c 			if (p != NULL) {
p                  96 tools/testing/selftests/powerpc/benchmarks/null_syscall.c 				d = strtod(p + 1, &end);
p                  97 tools/testing/selftests/powerpc/benchmarks/null_syscall.c 				if (end != p + 1) {
p                  54 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 	ElfW(auxv_t) *p;
p                  61 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 	p = find_auxv_entry(AT_L1I_CACHESIZE, buffer);
p                  62 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 	if (p) {
p                  64 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 		print_size("L1I ", (uint32_t)p->a_un.a_val);
p                  67 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 	p = find_auxv_entry(AT_L1I_CACHEGEOMETRY, buffer);
p                  68 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 	if (p) {
p                  70 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 		print_geo("L1I ", (uint32_t)p->a_un.a_val);
p                  73 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 	p = find_auxv_entry(AT_L1D_CACHESIZE, buffer);
p                  74 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 	if (p) {
p                  76 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 		print_size("L1D ", (uint32_t)p->a_un.a_val);
p                  79 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 	p = find_auxv_entry(AT_L1D_CACHEGEOMETRY, buffer);
p                  80 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 	if (p) {
p                  82 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 		print_geo("L1D ", (uint32_t)p->a_un.a_val);
p                  85 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 	p = find_auxv_entry(AT_L2_CACHESIZE, buffer);
p                  86 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 	if (p) {
p                  88 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 		print_size("L2  ", (uint32_t)p->a_un.a_val);
p                  91 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 	p = find_auxv_entry(AT_L2_CACHEGEOMETRY, buffer);
p                  92 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 	if (p) {
p                  94 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 		print_geo("L2  ", (uint32_t)p->a_un.a_val);
p                  97 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 	p = find_auxv_entry(AT_L3_CACHESIZE, buffer);
p                  98 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 	if (p) {
p                 100 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 		print_size("L3  ", (uint32_t)p->a_un.a_val);
p                 103 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 	p = find_auxv_entry(AT_L3_CACHEGEOMETRY, buffer);
p                 104 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 	if (p) {
p                 106 tools/testing/selftests/powerpc/cache_shape/cache_shape.c 		print_geo("L3  ", (uint32_t)p->a_un.a_val);
p                  82 tools/testing/selftests/powerpc/copyloops/exc_validate.c 	static char *p, *q;
p                  86 tools/testing/selftests/powerpc/copyloops/exc_validate.c 	p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
p                  89 tools/testing/selftests/powerpc/copyloops/exc_validate.c 	if (p == MAP_FAILED) {
p                  94 tools/testing/selftests/powerpc/copyloops/exc_validate.c 	memset(p, 0, page_size);
p                  98 tools/testing/selftests/powerpc/copyloops/exc_validate.c 	if (mprotect(p + page_size, page_size, PROT_NONE)) {
p                 103 tools/testing/selftests/powerpc/copyloops/exc_validate.c 	q = p + page_size - MAX_LEN;
p                  42 tools/testing/selftests/powerpc/math/fpu_preempt.c void *preempt_fpu_c(void *p)
p                  52 tools/testing/selftests/powerpc/math/fpu_preempt.c 	return p;
p                  55 tools/testing/selftests/powerpc/math/fpu_signal.c void *signal_fpu_c(void *p)
p                  64 tools/testing/selftests/powerpc/math/fpu_signal.c 		return p;
p                  42 tools/testing/selftests/powerpc/math/vmx_preempt.c void *preempt_vmx_c(void *p)
p                  52 tools/testing/selftests/powerpc/math/vmx_preempt.c 	return p;
p                  72 tools/testing/selftests/powerpc/math/vmx_signal.c void *signal_vmx_c(void *p)
p                  81 tools/testing/selftests/powerpc/math/vmx_signal.c 		return p;
p                  62 tools/testing/selftests/powerpc/math/vsx_preempt.c 		long *p = (long *)a;
p                  66 tools/testing/selftests/powerpc/math/vsx_preempt.c 					i/2 + i%2 + 20, p[i], p[i + 1], p[i + 24], p[i + 25]);
p                  72 tools/testing/selftests/powerpc/math/vsx_preempt.c void *preempt_vsx_c(void *p)
p                  14 tools/testing/selftests/powerpc/mm/hugetlb_vs_thp_test.c 	char *p;
p                  18 tools/testing/selftests/powerpc/mm/hugetlb_vs_thp_test.c 	p = mmap(addr, SIZE, PROT_READ | PROT_WRITE,
p                  20 tools/testing/selftests/powerpc/mm/hugetlb_vs_thp_test.c 	if (p != MAP_FAILED) {
p                  34 tools/testing/selftests/powerpc/mm/hugetlb_vs_thp_test.c 	p = mmap(addr, SIZE, PROT_READ | PROT_WRITE,
p                  36 tools/testing/selftests/powerpc/mm/hugetlb_vs_thp_test.c 	if (p == MAP_FAILED) {
p                  55 tools/testing/selftests/powerpc/mm/hugetlb_vs_thp_test.c 	*p = 0xf;
p                  26 tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c 	int p2c[2], c2p[2], rc, status, c, *p;
p                  34 tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c 	p = mmap((void *)(512ul << 40), page_size, PROT_READ | PROT_WRITE,
p                  36 tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c 	if (p == MAP_FAILED) {
p                  42 tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c 	printf("parent writing %p = 1\n", p);
p                  43 tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c 	*p = 1;
p                  52 tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c 		printf("child writing  %p = %d\n", p, pid);
p                  53 tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c 		*p = pid;
p                  68 tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c 	printf("parent reading %p = %d\n", p, *p);
p                  69 tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c 	if (*p != 1) {
p                  70 tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c 		printf("Error: BUG! parent saw child's write! *p = %d\n", *p);
p                  19 tools/testing/selftests/powerpc/mm/prot_sao.c 	char *p;
p                  29 tools/testing/selftests/powerpc/mm/prot_sao.c 	p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE | PROT_SAO,
p                  31 tools/testing/selftests/powerpc/mm/prot_sao.c 	FAIL_IF(p == MAP_FAILED);
p                  34 tools/testing/selftests/powerpc/mm/prot_sao.c 	memset(p, 0xaa, SIZE);
p                  41 tools/testing/selftests/powerpc/mm/segv_errors.c 	char c, *p = NULL;
p                  43 tools/testing/selftests/powerpc/mm/segv_errors.c 	p = mmap(NULL, getpagesize(), 0, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
p                  44 tools/testing/selftests/powerpc/mm/segv_errors.c 	FAIL_IF(p == MAP_FAILED);
p                  56 tools/testing/selftests/powerpc/mm/segv_errors.c 	c = *p;
p                  66 tools/testing/selftests/powerpc/mm/segv_errors.c 	*p = c;
p                  38 tools/testing/selftests/powerpc/pmu/ebb/trace.c static bool trace_check_bounds(struct trace_buffer *tb, void *p)
p                  40 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	return p < ((void *)tb + tb->size);
p                  43 tools/testing/selftests/powerpc/pmu/ebb/trace.c static bool trace_check_alloc(struct trace_buffer *tb, void *p)
p                  54 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	if (!trace_check_bounds(tb, p)) {
p                  64 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	void *p, *newtail;
p                  66 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	p = tb->tail;
p                  73 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	return p;
p                  90 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	u64 *p;
p                  97 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	p = (u64 *)e->data;
p                  98 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	*p++ = reg;
p                  99 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	*p++ = value;
p                 107 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	u64 *p;
p                 114 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	p = (u64 *)e->data;
p                 115 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	*p++ = value;
p                 123 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	char *p;
p                 134 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	p = (char *)e->data;
p                 135 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	memcpy(p, str, len);
p                 136 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	p += len;
p                 137 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	*p = '\0';
p                 202 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	u64 *p, *reg, *value;
p                 205 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	p = (u64 *)e->data;
p                 206 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	reg = p++;
p                 207 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	value = p;
p                 273 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	void *p;
p                 282 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	p = tb->data;
p                 287 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	while (trace_check_bounds(tb, p) && p < tb->tail) {
p                 288 tools/testing/selftests/powerpc/pmu/ebb/trace.c 		e = p;
p                 293 tools/testing/selftests/powerpc/pmu/ebb/trace.c 		p = (void *)e + sizeof(*e) + e->length;
p                  20 tools/testing/selftests/powerpc/pmu/l3_bank_test.c 	char *p;
p                  23 tools/testing/selftests/powerpc/pmu/l3_bank_test.c 	p = malloc(MALLOC_SIZE);
p                  24 tools/testing/selftests/powerpc/pmu/l3_bank_test.c 	FAIL_IF(!p);
p                  31 tools/testing/selftests/powerpc/pmu/l3_bank_test.c 		p[i] = i;
p                  40 tools/testing/selftests/powerpc/pmu/l3_bank_test.c 	free(p);
p                 102 tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c static int do_one_test(char *p, int page_offset)
p                 108 tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c 	should = *(unsigned long *)p;
p                 111 tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c 	got = load_unaligned_zeropad(p);
p                 202 tools/testing/selftests/powerpc/ptrace/core-pkey.c 	void *p = ehdr, *note;
p                 223 tools/testing/selftests/powerpc/ptrace/core-pkey.c 	for (phdr = p + ehdr->e_phoff;
p                 224 tools/testing/selftests/powerpc/ptrace/core-pkey.c 	     (void *) phdr < p + ehdr->e_phoff + phdr_size;
p                 229 tools/testing/selftests/powerpc/ptrace/core-pkey.c 	FAIL_IF((void *) phdr >= p + ehdr->e_phoff + phdr_size);
p                 232 tools/testing/selftests/powerpc/ptrace/core-pkey.c 	for (nhdr = p + phdr->p_offset;
p                 233 tools/testing/selftests/powerpc/ptrace/core-pkey.c 	     (void *) nhdr < p + phdr->p_offset + phdr->p_filesz;
p                 238 tools/testing/selftests/powerpc/ptrace/core-pkey.c 	FAIL_IF((void *) nhdr >= p + phdr->p_offset + phdr->p_filesz);
p                 241 tools/testing/selftests/powerpc/ptrace/core-pkey.c 	p = nhdr;
p                 242 tools/testing/selftests/powerpc/ptrace/core-pkey.c 	note = p + sizeof(*nhdr) + __ALIGN_KERNEL(nhdr->n_namesz, 4);
p                 122 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h void loadvsx(void *p, int tmp);
p                 123 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h void storevsx(void *p, int tmp);
p                  34 tools/testing/selftests/powerpc/security/rfi_flush.c static void syscall_loop(char *p, unsigned long iterations,
p                  39 tools/testing/selftests/powerpc/security/rfi_flush.c 			load(p + j);
p                  46 tools/testing/selftests/powerpc/security/rfi_flush.c 	char *p;
p                  67 tools/testing/selftests/powerpc/security/rfi_flush.c 	p = (char *)memalign(zero_size, CACHELINE_SIZE);
p                  84 tools/testing/selftests/powerpc/security/rfi_flush.c 	syscall_loop(p, iterations, zero_size);
p                  61 tools/testing/selftests/powerpc/utils.c 	ElfW(auxv_t) *p;
p                  63 tools/testing/selftests/powerpc/utils.c 	p = (ElfW(auxv_t) *)auxv;
p                  65 tools/testing/selftests/powerpc/utils.c 	while (p->a_type != AT_NULL) {
p                  66 tools/testing/selftests/powerpc/utils.c 		if (p->a_type == type)
p                  67 tools/testing/selftests/powerpc/utils.c 			return p;
p                  69 tools/testing/selftests/powerpc/utils.c 		p++;
p                  77 tools/testing/selftests/powerpc/utils.c 	ElfW(auxv_t) *p;
p                  82 tools/testing/selftests/powerpc/utils.c 	p = find_auxv_entry(type, auxv);
p                  83 tools/testing/selftests/powerpc/utils.c 	if (p)
p                  84 tools/testing/selftests/powerpc/utils.c 		return (void *)p->a_un.a_val;
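
The powerpc utils.c lines above walk the ELF auxiliary vector until AT_NULL and return a_un.a_val for a matching a_type. A minimal sketch of the same walk; locating auxv just past the environment block is a Linux-specific assumption, and find_auxv_entry() here is an illustrative stand-in rather than the selftest helper itself.

#include <elf.h>
#include <link.h>
#include <stdio.h>

extern char **environ;

static ElfW(auxv_t) *find_auxv_entry(ElfW(auxv_t) *auxv, unsigned long type)
{
	ElfW(auxv_t) *p;

	for (p = auxv; p->a_type != AT_NULL; p++)
		if (p->a_type == type)
			return p;
	return NULL;
}

int main(void)
{
	char **envp = environ;
	ElfW(auxv_t) *auxv, *entry;

	/* on Linux the auxiliary vector sits right after the env block */
	while (*envp)
		envp++;
	auxv = (ElfW(auxv_t) *)(envp + 1);

	entry = find_auxv_entry(auxv, AT_PAGESZ);
	if (entry)
		printf("AT_PAGESZ = %lu\n", (unsigned long)entry->a_un.a_val);
	return 0;
}
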
p                  65 tools/testing/selftests/proc/fd-003-kthread.c 		char *p, *end;
p                  73 tools/testing/selftests/proc/fd-003-kthread.c 			p = strrchr(buf, ' ');
p                  74 tools/testing/selftests/proc/fd-003-kthread.c 			assert(p);
p                  75 tools/testing/selftests/proc/fd-003-kthread.c 			*p = '\0';
p                  78 tools/testing/selftests/proc/fd-003-kthread.c 		p = strrchr(buf, ' ');
p                  79 tools/testing/selftests/proc/fd-003-kthread.c 		assert(p);
p                  81 tools/testing/selftests/proc/fd-003-kthread.c 		flags_ull = xstrtoull(p + 1, &end);
p                  41 tools/testing/selftests/proc/proc-loadavg-001.c 		char buf[128], *p;
p                  51 tools/testing/selftests/proc/proc-loadavg-001.c 		p = buf + rv;
p                  54 tools/testing/selftests/proc/proc-loadavg-001.c 		if (!(p[-3] == ' ' && p[-2] == '1' && p[-1] == '\n'))
p                  69 tools/testing/selftests/proc/proc-loadavg-001.c 		p = buf + rv;
p                  72 tools/testing/selftests/proc/proc-loadavg-001.c 		if (!(p[-3] == ' ' && p[-2] == '2' && p[-1] == '\n'))
p                  50 tools/testing/selftests/proc/proc-self-map-files-001.c 	void *p;
p                  58 tools/testing/selftests/proc/proc-self-map-files-001.c 	p = mmap(NULL, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE, fd, 0);
p                  59 tools/testing/selftests/proc/proc-self-map-files-001.c 	if (p == MAP_FAILED)
p                  62 tools/testing/selftests/proc/proc-self-map-files-001.c 	a = (unsigned long)p;
p                  63 tools/testing/selftests/proc/proc-self-map-files-001.c 	b = (unsigned long)p + PAGE_SIZE;
p                  56 tools/testing/selftests/proc/proc-self-map-files-002.c 	void *p;
p                  65 tools/testing/selftests/proc/proc-self-map-files-002.c 		p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
p                  66 tools/testing/selftests/proc/proc-self-map-files-002.c 		if (p == (void *)va)
p                  74 tools/testing/selftests/proc/proc-self-map-files-002.c 	a = (unsigned long)p;
p                  75 tools/testing/selftests/proc/proc-self-map-files-002.c 	b = (unsigned long)p + PAGE_SIZE;
p                  28 tools/testing/selftests/proc/proc-uptime.h 	char buf[64], *p;
p                  37 tools/testing/selftests/proc/proc-uptime.h 	p = buf;
p                  39 tools/testing/selftests/proc/proc-uptime.h 	val1 = xstrtoull(p, &p);
p                  40 tools/testing/selftests/proc/proc-uptime.h 	assert(p[0] == '.');
p                  41 tools/testing/selftests/proc/proc-uptime.h 	assert('0' <= p[1] && p[1] <= '9');
p                  42 tools/testing/selftests/proc/proc-uptime.h 	assert('0' <= p[2] && p[2] <= '9');
p                  43 tools/testing/selftests/proc/proc-uptime.h 	assert(p[3] == ' ');
p                  45 tools/testing/selftests/proc/proc-uptime.h 	val2 = (p[1] - '0') * 10 + p[2] - '0';
p                  48 tools/testing/selftests/proc/proc-uptime.h 	p += 4;
p                  50 tools/testing/selftests/proc/proc-uptime.h 	val1 = xstrtoull(p, &p);
p                  51 tools/testing/selftests/proc/proc-uptime.h 	assert(p[0] == '.');
p                  52 tools/testing/selftests/proc/proc-uptime.h 	assert('0' <= p[1] && p[1] <= '9');
p                  53 tools/testing/selftests/proc/proc-uptime.h 	assert('0' <= p[2] && p[2] <= '9');
p                  54 tools/testing/selftests/proc/proc-uptime.h 	assert(p[3] == '\n');
p                  56 tools/testing/selftests/proc/proc-uptime.h 	val2 = (p[1] - '0') * 10 + p[2] - '0';
p                  59 tools/testing/selftests/proc/proc-uptime.h 	assert(p + 4 == buf + rv);
p                  27 tools/testing/selftests/proc/proc.h static unsigned long long xstrtoull(const char *p, char **end)
p                  29 tools/testing/selftests/proc/proc.h 	if (*p == '0') {
p                  30 tools/testing/selftests/proc/proc.h 		*end = (char *)p + 1;
p                  32 tools/testing/selftests/proc/proc.h 	} else if ('1' <= *p && *p <= '9') {
p                  36 tools/testing/selftests/proc/proc.h 		val = strtoull(p, end, 10);
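
xstrtoull() above accepts only a bare decimal field: no sign, no whitespace, no redundant leading zero. A minimal sketch of a parser with the same strictness, exercised on a "seconds.hundredths" field like the proc-uptime lines read; parse_u64() is an illustrative name, not the selftest helper.

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned long long parse_u64(const char *p, char **end)
{
	if (*p == '0') {
		/* a lone zero is fine; "007" would not be */
		*end = (char *)p + 1;
		return 0;
	} else if ('1' <= *p && *p <= '9') {
		errno = 0;
		unsigned long long val = strtoull(p, end, 10);

		assert(errno == 0);
		return val;
	}
	/* anything else (sign, space, letter) is a malformed field */
	abort();
}

int main(void)
{
	char *end;
	unsigned long long secs = parse_u64("12345.67", &end);

	assert(secs == 12345 && *end == '.');
	printf("seconds = %llu, rest = \"%s\"\n", secs, end);
	return 0;
}
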
p                  67 tools/testing/selftests/rseq/rseq-arm.h #define rseq_smp_load_acquire(p)					\
p                  69 tools/testing/selftests/rseq/rseq-arm.h 	__typeof(*p) ____p1 = RSEQ_READ_ONCE(*p);			\
p                  76 tools/testing/selftests/rseq/rseq-arm.h #define rseq_smp_store_release(p, v)					\
p                  79 tools/testing/selftests/rseq/rseq-arm.h 	RSEQ_WRITE_ONCE(*p, v);						\
p                  28 tools/testing/selftests/rseq/rseq-arm64.h #define rseq_smp_load_acquire(p)						\
p                  30 tools/testing/selftests/rseq/rseq-arm64.h 	__typeof(*p) ____p1;							\
p                  31 tools/testing/selftests/rseq/rseq-arm64.h 	switch (sizeof(*p)) {							\
p                  34 tools/testing/selftests/rseq/rseq-arm64.h 			: "=r" (*(__u8 *)p)					\
p                  35 tools/testing/selftests/rseq/rseq-arm64.h 			: "Q" (*p) : "memory");					\
p                  39 tools/testing/selftests/rseq/rseq-arm64.h 			: "=r" (*(__u16 *)p)					\
p                  40 tools/testing/selftests/rseq/rseq-arm64.h 			: "Q" (*p) : "memory");					\
p                  44 tools/testing/selftests/rseq/rseq-arm64.h 			: "=r" (*(__u32 *)p)					\
p                  45 tools/testing/selftests/rseq/rseq-arm64.h 			: "Q" (*p) : "memory");					\
p                  49 tools/testing/selftests/rseq/rseq-arm64.h 			: "=r" (*(__u64 *)p)					\
p                  50 tools/testing/selftests/rseq/rseq-arm64.h 			: "Q" (*p) : "memory");					\
p                  58 tools/testing/selftests/rseq/rseq-arm64.h #define rseq_smp_store_release(p, v)						\
p                  60 tools/testing/selftests/rseq/rseq-arm64.h 	switch (sizeof(*p)) {							\
p                  63 tools/testing/selftests/rseq/rseq-arm64.h 				: "=Q" (*p)					\
p                  69 tools/testing/selftests/rseq/rseq-arm64.h 				: "=Q" (*p)					\
p                  75 tools/testing/selftests/rseq/rseq-arm64.h 				: "=Q" (*p)					\
p                  81 tools/testing/selftests/rseq/rseq-arm64.h 				: "=Q" (*p)					\
p                  48 tools/testing/selftests/rseq/rseq-mips.h #define rseq_smp_load_acquire(p)					\
p                  50 tools/testing/selftests/rseq/rseq-mips.h 	__typeof(*p) ____p1 = RSEQ_READ_ONCE(*p);			\
p                  57 tools/testing/selftests/rseq/rseq-mips.h #define rseq_smp_store_release(p, v)					\
p                  60 tools/testing/selftests/rseq/rseq-mips.h 	RSEQ_WRITE_ONCE(*p, v);						\
p                  24 tools/testing/selftests/rseq/rseq-ppc.h #define rseq_smp_load_acquire(p)					\
p                  26 tools/testing/selftests/rseq/rseq-ppc.h 	__typeof(*p) ____p1 = RSEQ_READ_ONCE(*p);			\
p                  33 tools/testing/selftests/rseq/rseq-ppc.h #define rseq_smp_store_release(p, v)					\
p                  36 tools/testing/selftests/rseq/rseq-ppc.h 	RSEQ_WRITE_ONCE(*p, v);						\
p                  16 tools/testing/selftests/rseq/rseq-s390.h #define rseq_smp_load_acquire(p)					\
p                  18 tools/testing/selftests/rseq/rseq-s390.h 	__typeof(*p) ____p1 = RSEQ_READ_ONCE(*p);			\
p                  25 tools/testing/selftests/rseq/rseq-s390.h #define rseq_smp_store_release(p, v)					\
p                  28 tools/testing/selftests/rseq/rseq-s390.h 	RSEQ_WRITE_ONCE(*p, v);						\
p                  36 tools/testing/selftests/rseq/rseq-x86.h #define rseq_smp_load_acquire(p)					\
p                  38 tools/testing/selftests/rseq/rseq-x86.h 	__typeof(*p) ____p1 = RSEQ_READ_ONCE(*p);			\
p                  45 tools/testing/selftests/rseq/rseq-x86.h #define rseq_smp_store_release(p, v)					\
p                  48 tools/testing/selftests/rseq/rseq-x86.h 	RSEQ_WRITE_ONCE(*p, v);						\
p                 555 tools/testing/selftests/rseq/rseq-x86.h #define rseq_smp_load_acquire(p)					\
p                 557 tools/testing/selftests/rseq/rseq-x86.h 	__typeof(*p) ____p1 = RSEQ_READ_ONCE(*p);			\
p                 564 tools/testing/selftests/rseq/rseq-x86.h #define rseq_smp_store_release(p, v)					\
p                 567 tools/testing/selftests/rseq/rseq-x86.h 	RSEQ_WRITE_ONCE(*p, v);						\
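
The rseq headers above all implement rseq_smp_load_acquire()/rseq_smp_store_release() as a plain access paired with the architecture's barrier or load-acquire/store-release instruction. A minimal sketch of the ordering guarantee those macros provide, written with portable C11 atomics rather than any of the per-arch assembly; the ready/payload names are illustrative. Build with -pthread.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;                 /* plain data published by the writer */
static atomic_int ready;            /* release/acquire handshake flag */

static void *writer(void *arg)
{
	(void)arg;
	payload = 42;
	/* store-release: the payload write cannot be reordered after this */
	atomic_store_explicit(&ready, 1, memory_order_release);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	/* load-acquire: once ready==1 is observed, payload==42 is guaranteed */
	while (!atomic_load_explicit(&ready, memory_order_acquire))
		;
	printf("payload = %d\n", payload);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}
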
p                  97 tools/testing/selftests/safesetid/safesetid-test.c 	struct passwd p;
p                 103 tools/testing/selftests/safesetid/safesetid-test.c 		memset(&p,0x00,sizeof(p));
p                 110 tools/testing/selftests/safesetid/safesetid-test.c 		p.pw_name=name_str;
p                 111 tools/testing/selftests/safesetid/safesetid-test.c 		p.pw_uid=uid;
p                 112 tools/testing/selftests/safesetid/safesetid-test.c 		p.pw_gecos="Test account";
p                 113 tools/testing/selftests/safesetid/safesetid-test.c 		p.pw_dir="/dev/null";
p                 114 tools/testing/selftests/safesetid/safesetid-test.c 		p.pw_shell="/bin/false";
p                 115 tools/testing/selftests/safesetid/safesetid-test.c 		int value = putpwent(&p,fd);
p                  41 tools/testing/selftests/sigaltstack/sas.c 	struct stk_data *p;
p                  56 tools/testing/selftests/sigaltstack/sas.c 	p = (struct stk_data *)(aa + 512);
p                  57 tools/testing/selftests/sigaltstack/sas.c 	strcpy(p->msg, msg);
p                  58 tools/testing/selftests/sigaltstack/sas.c 	p->flag = 1;
p                  72 tools/testing/selftests/sigaltstack/sas.c 	ksft_print_msg("%s\n", p->msg);
p                  73 tools/testing/selftests/sigaltstack/sas.c 	if (!p->flag) {
p                  82 tools/testing/selftests/sigaltstack/sas.c 	struct stk_data *p;
p                  88 tools/testing/selftests/sigaltstack/sas.c 	p = memmem(aa, 1024, msg, strlen(msg));
p                  89 tools/testing/selftests/sigaltstack/sas.c 	if (p) {
p                  92 tools/testing/selftests/sigaltstack/sas.c 		strcpy(p->msg, msg2);
p                  94 tools/testing/selftests/sigaltstack/sas.c 		p->flag = 0;
p                  38 tools/testing/selftests/vm/gup_benchmark.c 	char *p;
p                  94 tools/testing/selftests/vm/gup_benchmark.c 	p = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, filed, 0);
p                  95 tools/testing/selftests/vm/gup_benchmark.c 	if (p == MAP_FAILED)
p                  97 tools/testing/selftests/vm/gup_benchmark.c 	gup.addr = (unsigned long)p;
p                 100 tools/testing/selftests/vm/gup_benchmark.c 		madvise(p, size, MADV_HUGEPAGE);
p                 102 tools/testing/selftests/vm/gup_benchmark.c 		madvise(p, size, MADV_NOHUGEPAGE);
p                 104 tools/testing/selftests/vm/gup_benchmark.c 	for (; (unsigned long)p < gup.addr + size; p += PAGE_SIZE)
p                 105 tools/testing/selftests/vm/gup_benchmark.c 		p[0] = 0;
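
gup_benchmark.c above maps a region, hints the THP policy with madvise(), and then writes one byte per page so every page is actually populated. A minimal sketch of that map-then-touch pattern; the 16 MiB size is an arbitrary choice for illustration.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page_size = sysconf(_SC_PAGESIZE);
	size_t size = 16UL << 20;
	char *p;

	p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* ask for transparent huge pages; this is advisory only */
	if (madvise(p, size, MADV_HUGEPAGE))
		perror("madvise");

	/* touch one byte per page to fault the whole range in */
	for (size_t off = 0; off < size; off += page_size)
		p[off] = 0;

	munmap(p, size);
	return 0;
}
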
p                  34 tools/testing/selftests/vm/map_fixed_noreplace.c 	char *p;
p                  44 tools/testing/selftests/vm/map_fixed_noreplace.c 	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
p                  46 tools/testing/selftests/vm/map_fixed_noreplace.c 	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
p                  48 tools/testing/selftests/vm/map_fixed_noreplace.c 	if (p == MAP_FAILED) {
p                  65 tools/testing/selftests/vm/map_fixed_noreplace.c 	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
p                  66 tools/testing/selftests/vm/map_fixed_noreplace.c 	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
p                  68 tools/testing/selftests/vm/map_fixed_noreplace.c 	if (p == MAP_FAILED) {
p                  85 tools/testing/selftests/vm/map_fixed_noreplace.c 	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
p                  86 tools/testing/selftests/vm/map_fixed_noreplace.c 	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
p                  88 tools/testing/selftests/vm/map_fixed_noreplace.c 	if (p != MAP_FAILED) {
p                 106 tools/testing/selftests/vm/map_fixed_noreplace.c 	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
p                 107 tools/testing/selftests/vm/map_fixed_noreplace.c 	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
p                 109 tools/testing/selftests/vm/map_fixed_noreplace.c 	if (p != MAP_FAILED) {
p                 126 tools/testing/selftests/vm/map_fixed_noreplace.c 	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
p                 127 tools/testing/selftests/vm/map_fixed_noreplace.c 	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
p                 129 tools/testing/selftests/vm/map_fixed_noreplace.c 	if (p != MAP_FAILED) {
p                 146 tools/testing/selftests/vm/map_fixed_noreplace.c 	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
p                 147 tools/testing/selftests/vm/map_fixed_noreplace.c 	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
p                 149 tools/testing/selftests/vm/map_fixed_noreplace.c 	if (p != MAP_FAILED) {
p                 166 tools/testing/selftests/vm/map_fixed_noreplace.c 	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
p                 167 tools/testing/selftests/vm/map_fixed_noreplace.c 	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
p                 169 tools/testing/selftests/vm/map_fixed_noreplace.c 	if (p == MAP_FAILED) {
p                 186 tools/testing/selftests/vm/map_fixed_noreplace.c 	p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
p                 187 tools/testing/selftests/vm/map_fixed_noreplace.c 	printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
p                 189 tools/testing/selftests/vm/map_fixed_noreplace.c 	if (p == MAP_FAILED) {
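
map_fixed_noreplace.c above repeatedly probes a fixed address range and checks whether mmap() succeeds or is refused. A minimal sketch of the underlying contract: with MAP_FIXED_NOREPLACE (Linux 4.17 and later), a second mapping over an occupied range fails with EEXIST instead of silently replacing it. The hint address below is arbitrary.

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_FIXED_NOREPLACE
#define MAP_FIXED_NOREPLACE 0x100000    /* Linux >= 4.17 */
#endif

int main(void)
{
	unsigned long addr = 0x10000000UL;
	size_t size = 0x10000;
	void *p;

	p = mmap((void *)addr, size, PROT_NONE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
	if (p == MAP_FAILED) {
		perror("first mmap");
		return 1;
	}
	printf("mapped %p\n", p);

	/* the same range is now occupied, so this attempt must fail */
	p = mmap((void *)addr, size, PROT_NONE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
	if (p == MAP_FAILED && errno == EEXIST)
		printf("second mmap refused with EEXIST, as expected\n");

	munmap((void *)addr, size);
	return 0;
}
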
p                 138 tools/testing/selftests/vm/mlock-random-test.c int test_mlock_within_limit(char *p, int alloc_size)
p                 166 tools/testing/selftests/vm/mlock-random-test.c 			ret = mlock(p + start_offset, lock_size);
p                 168 tools/testing/selftests/vm/mlock-random-test.c 			ret = mlock2_(p + start_offset, lock_size,
p                 174 tools/testing/selftests/vm/mlock-random-test.c 					p, alloc_size,
p                 175 tools/testing/selftests/vm/mlock-random-test.c 					p + start_offset, lock_size);
p                 184 tools/testing/selftests/vm/mlock-random-test.c 	page_size = get_proc_page_size((unsigned long)p);
p                 215 tools/testing/selftests/vm/mlock-random-test.c int test_mlock_outof_limit(char *p, int alloc_size)
p                 238 tools/testing/selftests/vm/mlock-random-test.c 			ret = mlock(p + start_offset, lock_size);
p                 240 tools/testing/selftests/vm/mlock-random-test.c 			ret = mlock2_(p + start_offset, lock_size,
p                 245 tools/testing/selftests/vm/mlock-random-test.c 					p, alloc_size,
p                 246 tools/testing/selftests/vm/mlock-random-test.c 					p + start_offset, lock_size);
p                 264 tools/testing/selftests/vm/mlock-random-test.c 	char *p = NULL;
p                 270 tools/testing/selftests/vm/mlock-random-test.c 	p = malloc(MLOCK_WITHIN_LIMIT_SIZE);
p                 271 tools/testing/selftests/vm/mlock-random-test.c 	if (p == NULL) {
p                 275 tools/testing/selftests/vm/mlock-random-test.c 	ret = test_mlock_within_limit(p, MLOCK_WITHIN_LIMIT_SIZE);
p                 278 tools/testing/selftests/vm/mlock-random-test.c 	munlock(p, MLOCK_WITHIN_LIMIT_SIZE);
p                 279 tools/testing/selftests/vm/mlock-random-test.c 	free(p);
p                 282 tools/testing/selftests/vm/mlock-random-test.c 	p = malloc(MLOCK_OUTOF_LIMIT_SIZE);
p                 283 tools/testing/selftests/vm/mlock-random-test.c 	if (p == NULL) {
p                 287 tools/testing/selftests/vm/mlock-random-test.c 	ret = test_mlock_outof_limit(p, MLOCK_OUTOF_LIMIT_SIZE);
p                 290 tools/testing/selftests/vm/mlock-random-test.c 	munlock(p, MLOCK_OUTOF_LIMIT_SIZE);
p                 291 tools/testing/selftests/vm/mlock-random-test.c 	free(p);
p                  61 tools/testing/selftests/vm/transhuge-stress.c 	void *ptr, *p;
p                 109 tools/testing/selftests/vm/transhuge-stress.c 		for (p = ptr; p < ptr + len; p += HPAGE_SIZE) {
p                 112 tools/testing/selftests/vm/transhuge-stress.c 			pfn = allocate_transhuge(p);
p                 133 tools/testing/selftests/vm/transhuge-stress.c 			if (madvise(p, HPAGE_SIZE - PAGE_SIZE, MADV_DONTNEED))
p                 233 tools/testing/selftests/vm/va_128TBswitch.c 	void *p;
p                 239 tools/testing/selftests/vm/va_128TBswitch.c 		p = mmap(t->addr, t->size, PROT_READ | PROT_WRITE, t->flags, -1, 0);
p                 241 tools/testing/selftests/vm/va_128TBswitch.c 		printf("%s: %p - ", t->msg, p);
p                 243 tools/testing/selftests/vm/va_128TBswitch.c 		if (p == MAP_FAILED) {
p                 249 tools/testing/selftests/vm/va_128TBswitch.c 		if (t->low_addr_required && p >= (void *)(ADDR_SWITCH_HINT)) {
p                 257 tools/testing/selftests/vm/va_128TBswitch.c 			memset(p, 0, t->size);
p                 261 tools/testing/selftests/vm/va_128TBswitch.c 			munmap(p, t->size);
p                  57 tools/testing/selftests/x86/protection_keys.c #define ALIGN_PTR_UP(p, ptr_align_to)	((typeof(p))ALIGN_UP((unsigned long)(p),	ptr_align_to))
p                  58 tools/testing/selftests/x86/protection_keys.c #define ALIGN_PTR_DOWN(p, ptr_align_to)	((typeof(p))ALIGN_DOWN((unsigned long)(p),	ptr_align_to))
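
ALIGN_PTR_UP()/ALIGN_PTR_DOWN() above round a pointer by going through unsigned long arithmetic. A minimal sketch of the conventional power-of-two mask idiom such macros are built on; the ALIGN_UP/ALIGN_DOWN bodies are written out here for illustration and assume the alignment is a power of two.

#include <assert.h>
#include <stdio.h>

#define ALIGN_UP(x, align)	(((x) + (align) - 1) & ~((align) - 1))
#define ALIGN_DOWN(x, align)	((x) & ~((align) - 1))
#define ALIGN_PTR_UP(p, align)	((typeof(p))ALIGN_UP((unsigned long)(p), (unsigned long)(align)))
#define ALIGN_PTR_DOWN(p, align) ((typeof(p))ALIGN_DOWN((unsigned long)(p), (unsigned long)(align)))

int main(void)
{
	char buf[256];
	char *unaligned = buf + 13;
	char *up = ALIGN_PTR_UP(unaligned, 64);
	char *down = ALIGN_PTR_DOWN(unaligned, 64);

	/* the mask trick only holds for power-of-two alignments */
	assert(((unsigned long)up % 64) == 0 && up >= unaligned);
	assert(((unsigned long)down % 64) == 0 && down <= unaligned);
	printf("unaligned=%p down=%p up=%p\n", (void *)unaligned,
	       (void *)down, (void *)up);
	return 0;
}
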
p                  62 tools/thermal/tmon/tui.c static void close_panel(PANEL *p)
p                  64 tools/thermal/tmon/tui.c 	if (p) {
p                  65 tools/thermal/tmon/tui.c 		del_panel(p);
p                  66 tools/thermal/tmon/tui.c 		p = NULL;
p                 504 tools/usb/ffs-test.c 	__u8 *p;
p                 514 tools/usb/ffs-test.c 		for (p = buf, i = 0; i < nbytes; ++i, ++p)
p                 515 tools/usb/ffs-test.c 			*p = i % 63;
p                 528 tools/usb/ffs-test.c 	const __u8 *p;
p                 538 tools/usb/ffs-test.c 		for (p = buf, len = 0; len < nbytes; ++p, ++len)
p                 539 tools/usb/ffs-test.c 			if (*p)
p                 544 tools/usb/ffs-test.c 		for (p = buf, len = 0; len < nbytes; ++p, ++len)
p                 545 tools/usb/ffs-test.c 			if (*p != len % 63) {
p                 559 tools/usb/ffs-test.c 		    len, expected, *p);
p                 560 tools/usb/ffs-test.c 		for (p = buf, len = 0; len < nbytes; ++p, ++len) {
p                 563 tools/usb/ffs-test.c 			fprintf(stderr, " %02x", *p);
p                  96 tools/usb/usbip/libsrc/names.c 	struct product *p;
p                  98 tools/usb/usbip/libsrc/names.c 	p = products[hashnum((vendorid << 16) | productid)];
p                  99 tools/usb/usbip/libsrc/names.c 	for (; p; p = p->next)
p                 100 tools/usb/usbip/libsrc/names.c 		if (p->vendorid == vendorid && p->productid == productid)
p                 101 tools/usb/usbip/libsrc/names.c 			return p->name;
p                 130 tools/usb/usbip/libsrc/names.c 	struct protocol *p;
p                 132 tools/usb/usbip/libsrc/names.c 	p = protocols[hashnum((classid << 16) | (subclassid << 8)
p                 134 tools/usb/usbip/libsrc/names.c 	for (; p; p = p->next)
p                 135 tools/usb/usbip/libsrc/names.c 		if (p->classid == classid && p->subclassid == subclassid &&
p                 136 tools/usb/usbip/libsrc/names.c 		    p->protocolid == protocolid)
p                 137 tools/usb/usbip/libsrc/names.c 			return p->name;
p                 151 tools/usb/usbip/libsrc/names.c 	struct pool *p;
p                 153 tools/usb/usbip/libsrc/names.c 	p = calloc(1, sizeof(struct pool));
p                 154 tools/usb/usbip/libsrc/names.c 	if (!p)
p                 157 tools/usb/usbip/libsrc/names.c 	p->mem = calloc(1, size);
p                 158 tools/usb/usbip/libsrc/names.c 	if (!p->mem) {
p                 159 tools/usb/usbip/libsrc/names.c 		free(p);
p                 163 tools/usb/usbip/libsrc/names.c 	p->next = pool_head;
p                 164 tools/usb/usbip/libsrc/names.c 	pool_head = p;
p                 166 tools/usb/usbip/libsrc/names.c 	return p->mem;
p                 210 tools/usb/usbip/libsrc/names.c 	struct product *p;
p                 213 tools/usb/usbip/libsrc/names.c 	p = products[h];
p                 214 tools/usb/usbip/libsrc/names.c 	for (; p; p = p->next)
p                 215 tools/usb/usbip/libsrc/names.c 		if (p->vendorid == vendorid && p->productid == productid)
p                 217 tools/usb/usbip/libsrc/names.c 	p = my_malloc(sizeof(struct product) + strlen(name));
p                 218 tools/usb/usbip/libsrc/names.c 	if (!p)
p                 220 tools/usb/usbip/libsrc/names.c 	strcpy(p->name, name);
p                 221 tools/usb/usbip/libsrc/names.c 	p->vendorid = vendorid;
p                 222 tools/usb/usbip/libsrc/names.c 	p->productid = productid;
p                 223 tools/usb/usbip/libsrc/names.c 	p->next = products[h];
p                 224 tools/usb/usbip/libsrc/names.c 	products[h] = p;
p                 270 tools/usb/usbip/libsrc/names.c 	struct protocol *p;
p                 274 tools/usb/usbip/libsrc/names.c 	p = protocols[h];
p                 275 tools/usb/usbip/libsrc/names.c 	for (; p; p = p->next)
p                 276 tools/usb/usbip/libsrc/names.c 		if (p->classid == classid && p->subclassid == subclassid
p                 277 tools/usb/usbip/libsrc/names.c 		    && p->protocolid == protocolid)
p                 279 tools/usb/usbip/libsrc/names.c 	p = my_malloc(sizeof(struct protocol) + strlen(name));
p                 280 tools/usb/usbip/libsrc/names.c 	if (!p)
p                 282 tools/usb/usbip/libsrc/names.c 	strcpy(p->name, name);
p                 283 tools/usb/usbip/libsrc/names.c 	p->classid = classid;
p                 284 tools/usb/usbip/libsrc/names.c 	p->subclassid = subclassid;
p                 285 tools/usb/usbip/libsrc/names.c 	p->protocolid = protocolid;
p                 286 tools/usb/usbip/libsrc/names.c 	p->next = protocols[h];
p                 287 tools/usb/usbip/libsrc/names.c 	protocols[h] = p;
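
The usbip names.c lines above keep id-to-string tables as arrays of singly linked bucket chains: a lookup walks one chain comparing the ids, an insert checks for a duplicate and then prepends to the bucket. A minimal free-standing sketch of that structure; HASHSZ, hashnum() and the struct layout are illustrative simplifications, not the usbip implementation.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HASHSZ 16

struct product {
	struct product *next;
	unsigned int vendorid, productid;
	char name[1];                   /* over-allocated to hold the string */
};

static struct product *products[HASHSZ];

static unsigned int hashnum(unsigned int num)
{
	return (num ^ (num >> 8) ^ (num >> 16)) % HASHSZ;
}

static const char *names_product(unsigned int vendorid, unsigned int productid)
{
	struct product *p = products[hashnum((vendorid << 16) | productid)];

	for (; p; p = p->next)
		if (p->vendorid == vendorid && p->productid == productid)
			return p->name;
	return NULL;
}

static int new_product(const char *name, unsigned int vendorid,
		       unsigned int productid)
{
	unsigned int h = hashnum((vendorid << 16) | productid);
	struct product *p;

	for (p = products[h]; p; p = p->next)
		if (p->vendorid == vendorid && p->productid == productid)
			return -1;      /* already present */
	p = malloc(sizeof(*p) + strlen(name));
	if (!p)
		return -1;
	strcpy(p->name, name);
	p->vendorid = vendorid;
	p->productid = productid;
	p->next = products[h];          /* prepend to the bucket's chain */
	products[h] = p;
	return 0;
}

int main(void)
{
	new_product("Example Widget", 0x1234, 0x5678);
	printf("%s\n", names_product(0x1234, 0x5678));
	return 0;
}
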
p                 298 tools/usb/usbip/libsrc/usbip_common.c 	const char *c, *s, *p;
p                 305 tools/usb/usbip/libsrc/usbip_common.c 	p = names_protocol(class, subclass, protocol);
p                 306 tools/usb/usbip/libsrc/usbip_common.c 	if (!p)
p                 307 tools/usb/usbip/libsrc/usbip_common.c 		p = "unknown protocol";
p                 317 tools/usb/usbip/libsrc/usbip_common.c 	snprintf(buff, size, "%s / %s / %s (%02x/%02x/%02x)", c, s, p, class, subclass, protocol);
p                  22 tools/virtio/linux/dma-mapping.h #define dma_free_coherent(d, s, p, h) kfree(p)
p                  24 tools/virtio/linux/dma-mapping.h #define dma_map_page(d, p, o, s, dir) (page_to_phys(p) + (o))
p                  26 tools/virtio/linux/dma-mapping.h #define dma_map_single(d, p, s, dir) (virt_to_phys(p))
p                  40 tools/virtio/linux/kernel.h #define virt_to_phys(p) ((unsigned long)p)
p                  43 tools/virtio/linux/kernel.h #define page_to_phys(p) ((dma_addr_t)(unsigned long)(p))
p                  44 tools/virtio/linux/kernel.h #define virt_to_page(p) ((struct page *)((unsigned long)p & PAGE_MASK))
p                  46 tools/virtio/linux/kernel.h #define offset_in_page(p) (((unsigned long)p) % PAGE_SIZE)
p                  66 tools/virtio/linux/kernel.h 	void *p = kmalloc(s, gfp);
p                  68 tools/virtio/linux/kernel.h 	memset(p, 0, s);
p                  69 tools/virtio/linux/kernel.h 	return p;
p                  77 tools/virtio/linux/kernel.h static inline void kfree(void *p)
p                  79 tools/virtio/linux/kernel.h 	if (p >= __kfree_ignore_start && p < __kfree_ignore_end)
p                  81 tools/virtio/linux/kernel.h 	free(p);
p                  84 tools/virtio/linux/kernel.h static inline void free_pages_exact(void *p, size_t s)
p                  86 tools/virtio/linux/kernel.h 	kfree(p);
p                  89 tools/virtio/linux/kernel.h static inline void *krealloc(void *p, size_t s, gfp_t gfp)
p                  91 tools/virtio/linux/kernel.h 	return realloc(p, s);
p                  97 tools/virtio/linux/kernel.h 	void *p;
p                  99 tools/virtio/linux/kernel.h 	posix_memalign(&p, PAGE_SIZE, PAGE_SIZE);
p                 100 tools/virtio/linux/kernel.h 	return (unsigned long)p;
p                   9 tools/virtio/linux/uaccess.h static inline void __chk_user_ptr(const volatile void *p, size_t size)
p                  11 tools/virtio/linux/uaccess.h 	assert(p >= __user_addr_min && p + size <= __user_addr_max);
p                 150 tools/virtio/ringtest/main.h void __read_once_size(const volatile void *p, void *res, int size)
p                 153 tools/virtio/ringtest/main.h         case 1: *(unsigned char *)res = *(volatile unsigned char *)p; break;              \
p                 154 tools/virtio/ringtest/main.h         case 2: *(unsigned short *)res = *(volatile unsigned short *)p; break;            \
p                 155 tools/virtio/ringtest/main.h         case 4: *(unsigned int *)res = *(volatile unsigned int *)p; break;            \
p                 156 tools/virtio/ringtest/main.h         case 8: *(unsigned long long *)res = *(volatile unsigned long long *)p; break;            \
p                 159 tools/virtio/ringtest/main.h                 __builtin_memcpy((void *)res, (const void *)p, size);   \
p                 164 tools/virtio/ringtest/main.h static __always_inline void __write_once_size(volatile void *p, void *res, int size)
p                 167 tools/virtio/ringtest/main.h 	case 1: *(volatile unsigned char *)p = *(unsigned char *)res; break;
p                 168 tools/virtio/ringtest/main.h 	case 2: *(volatile unsigned short *)p = *(unsigned short *)res; break;
p                 169 tools/virtio/ringtest/main.h 	case 4: *(volatile unsigned int *)p = *(unsigned int *)res; break;
p                 170 tools/virtio/ringtest/main.h 	case 8: *(volatile unsigned long long *)p = *(unsigned long long *)res; break;
p                 173 tools/virtio/ringtest/main.h 		__builtin_memcpy((void *)p, (const void *)res, size);
p                  29 tools/virtio/ringtest/ptr_ring.c 	void *p = memalign(64, size);
p                  30 tools/virtio/ringtest/ptr_ring.c 	if (!p)
p                  31 tools/virtio/ringtest/ptr_ring.c 		return p;
p                  34 tools/virtio/ringtest/ptr_ring.c 		memset(p, 0, size);
p                  35 tools/virtio/ringtest/ptr_ring.c 	return p;
p                  55 tools/virtio/ringtest/ptr_ring.c static void kfree(void *p)
p                  57 tools/virtio/ringtest/ptr_ring.c 	if (p)
p                  58 tools/virtio/ringtest/ptr_ring.c 		free(p);
p                  68 tools/virtio/ringtest/virtio_ring_0_9.c 	void *p;
p                  70 tools/virtio/ringtest/virtio_ring_0_9.c 	ret = posix_memalign(&p, 0x1000, vring_size(ring_size, 0x1000));
p                  75 tools/virtio/ringtest/virtio_ring_0_9.c 	memset(p, 0, vring_size(ring_size, 0x1000));
p                  76 tools/virtio/ringtest/virtio_ring_0_9.c 	vring_init(&ring, ring_size, p, 0x1000);
p                1115 tools/vm/page-types.c 	char *p;
p                1117 tools/vm/page-types.c 	p = strchr(optarg, ',');
p                1118 tools/vm/page-types.c 	if (!p)
p                1119 tools/vm/page-types.c 		p = strchr(optarg, '+');
p                1121 tools/vm/page-types.c 	if (p == optarg) {
p                1123 tools/vm/page-types.c 		size   = parse_number(p + 1);
p                1124 tools/vm/page-types.c 	} else if (p) {
p                1126 tools/vm/page-types.c 		if (p[1] == '\0')
p                1129 tools/vm/page-types.c 			size = parse_number(p + 1);
p                1130 tools/vm/page-types.c 			if (*p == ',') {
p                1177 tools/vm/page-types.c 	const char *p    = str;
p                1181 tools/vm/page-types.c 		if (*p == ',' || *p == '=' || *p == '\0') {
p                1183 tools/vm/page-types.c 				flags |= parse_flag_name(str, p - str);
p                1184 tools/vm/page-types.c 			if (*p != ',')
p                1186 tools/vm/page-types.c 			str = p + 1;
p                1188 tools/vm/page-types.c 		p++;
p                1198 tools/vm/page-types.c 	const char *p;
p                1200 tools/vm/page-types.c 	p = strchr(optarg, '=');
p                1201 tools/vm/page-types.c 	if (p == optarg) {
p                1203 tools/vm/page-types.c 		bits = parse_flag_names(p + 1, 0);
p                1204 tools/vm/page-types.c 	} else if (p) {
p                1206 tools/vm/page-types.c 		bits = parse_flag_names(p + 1, 0);
p                 187 tools/vm/slabinfo.c 	char *p;
p                 195 tools/vm/slabinfo.c 	result = strtoul(buffer, &p, 10);
p                 196 tools/vm/slabinfo.c 	while (*p == ' ')
p                 197 tools/vm/slabinfo.c 		p++;
p                 198 tools/vm/slabinfo.c 	if (*p)
p                 199 tools/vm/slabinfo.c 		*x = strdup(p);
p                 576 tools/vm/slabinfo.c 	char *p = flags;
p                 606 tools/vm/slabinfo.c 		*p++ = '*';
p                 608 tools/vm/slabinfo.c 		*p++ = 'd';
p                 610 tools/vm/slabinfo.c 		*p++ = 'A';
p                 612 tools/vm/slabinfo.c 		*p++ = 'P';
p                 614 tools/vm/slabinfo.c 		*p++ = 'a';
p                 616 tools/vm/slabinfo.c 		*p++ = 'Z';
p                 618 tools/vm/slabinfo.c 		*p++ = 'F';
p                 620 tools/vm/slabinfo.c 		*p++ = 'U';
p                 622 tools/vm/slabinfo.c 		*p++ = 'T';
p                 624 tools/vm/slabinfo.c 	*p = 0;
p                1181 tools/vm/slabinfo.c 	char *p;
p                1202 tools/vm/slabinfo.c 			p = buffer + count;
p                1203 tools/vm/slabinfo.c 			while (p > buffer && p[-1] != '/')
p                1204 tools/vm/slabinfo.c 				p--;
p                1205 tools/vm/slabinfo.c 			alias->ref = strdup(p);
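
slabinfo.c above builds a short flag string by appending one character per set attribute and NUL-terminating at the end. A minimal sketch of that pattern with an illustrative struct and a subset of the flag letters.

#include <stdbool.h>
#include <stdio.h>

struct slab_flags {
	bool aliased, debug, align, poison, zero;
};

static void format_flags(const struct slab_flags *s, char *flags)
{
	char *p = flags;

	if (s->aliased)
		*p++ = '*';
	if (s->debug)
		*p++ = 'd';
	if (s->align)
		*p++ = 'A';
	if (s->poison)
		*p++ = 'P';
	if (s->zero)
		*p++ = 'Z';
	*p = 0;                 /* always NUL-terminate, even if empty */
}

int main(void)
{
	struct slab_flags s = { .debug = true, .poison = true };
	char flags[8];

	format_flags(&s, flags);
	printf("flags = \"%s\"\n", flags);
	return 0;
}
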
p                 153 virt/kvm/arm/mmu.c 	void *p;
p                 156 virt/kvm/arm/mmu.c 	p = mc->objects[--mc->nobjs];
p                 157 virt/kvm/arm/mmu.c 	return p;
p                 264 virt/kvm/arm/vgic/vgic-its.c #define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
p                 265 virt/kvm/arm/vgic/vgic-its.c #define LPI_PROP_PRIORITY(p)	((p) & 0xfc)
p                2143 virt/kvm/arm/vgic/vgic-its.c 	u64 *p = (u64 *)ptr;
p                2149 virt/kvm/arm/vgic/vgic-its.c 	val = *p;
p                  96 virt/kvm/arm/vgic/vgic.h #define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
p                  98 virt/kvm/arm/vgic/vgic.h #define DEBUG_SPINLOCK_BUG_ON(p)
p                 675 virt/kvm/eventfd.c ioeventfd_release(struct _ioeventfd *p)
p                 677 virt/kvm/eventfd.c 	eventfd_ctx_put(p->eventfd);
p                 678 virt/kvm/eventfd.c 	list_del(&p->list);
p                 679 virt/kvm/eventfd.c 	kfree(p);
p                 683 virt/kvm/eventfd.c ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
p                 687 virt/kvm/eventfd.c 	if (addr != p->addr)
p                 691 virt/kvm/eventfd.c 	if (!p->length)
p                 695 virt/kvm/eventfd.c 	if (len != p->length)
p                 699 virt/kvm/eventfd.c 	if (p->wildcard)
p                 724 virt/kvm/eventfd.c 	return _val == p->datamatch ? true : false;
p                 732 virt/kvm/eventfd.c 	struct _ioeventfd *p = to_ioeventfd(this);
p                 734 virt/kvm/eventfd.c 	if (!ioeventfd_in_range(p, addr, len, val))
p                 737 virt/kvm/eventfd.c 	eventfd_signal(p->eventfd, 1);
p                 748 virt/kvm/eventfd.c 	struct _ioeventfd *p = to_ioeventfd(this);
p                 750 virt/kvm/eventfd.c 	ioeventfd_release(p);
p                 760 virt/kvm/eventfd.c ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
p                 765 virt/kvm/eventfd.c 		if (_p->bus_idx == p->bus_idx &&
p                 766 virt/kvm/eventfd.c 		    _p->addr == p->addr &&
p                 767 virt/kvm/eventfd.c 		    (!_p->length || !p->length ||
p                 768 virt/kvm/eventfd.c 		     (_p->length == p->length &&
p                 769 virt/kvm/eventfd.c 		      (_p->wildcard || p->wildcard ||
p                 770 virt/kvm/eventfd.c 		       _p->datamatch == p->datamatch))))
p                 791 virt/kvm/eventfd.c 	struct _ioeventfd *p;
p                 798 virt/kvm/eventfd.c 	p = kzalloc(sizeof(*p), GFP_KERNEL_ACCOUNT);
p                 799 virt/kvm/eventfd.c 	if (!p) {
p                 804 virt/kvm/eventfd.c 	INIT_LIST_HEAD(&p->list);
p                 805 virt/kvm/eventfd.c 	p->addr    = args->addr;
p                 806 virt/kvm/eventfd.c 	p->bus_idx = bus_idx;
p                 807 virt/kvm/eventfd.c 	p->length  = args->len;
p                 808 virt/kvm/eventfd.c 	p->eventfd = eventfd;
p                 812 virt/kvm/eventfd.c 		p->datamatch = args->datamatch;
p                 814 virt/kvm/eventfd.c 		p->wildcard = true;
p                 819 virt/kvm/eventfd.c 	if (ioeventfd_check_collision(kvm, p)) {
p                 824 virt/kvm/eventfd.c 	kvm_iodevice_init(&p->dev, &ioeventfd_ops);
p                 826 virt/kvm/eventfd.c 	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
p                 827 virt/kvm/eventfd.c 				      &p->dev);
p                 832 virt/kvm/eventfd.c 	list_add_tail(&p->list, &kvm->ioeventfds);
p                 842 virt/kvm/eventfd.c 	kfree(p);
p                 852 virt/kvm/eventfd.c 	struct _ioeventfd        *p, *tmp;
p                 863 virt/kvm/eventfd.c 	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
p                 866 virt/kvm/eventfd.c 		if (p->bus_idx != bus_idx ||
p                 867 virt/kvm/eventfd.c 		    p->eventfd != eventfd  ||
p                 868 virt/kvm/eventfd.c 		    p->addr != args->addr  ||
p                 869 virt/kvm/eventfd.c 		    p->length != args->len ||
p                 870 virt/kvm/eventfd.c 		    p->wildcard != wildcard)
p                 873 virt/kvm/eventfd.c 		if (!p->wildcard && p->datamatch != args->datamatch)
p                 876 virt/kvm/eventfd.c 		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
p                 880 virt/kvm/eventfd.c 		ioeventfd_release(p);
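
The eventfd.c lines above match an MMIO/PIO write against a registered ioeventfd: the address must match, a zero registered length accepts any access width, otherwise the width must match, and either the entry is a wildcard or the written value equals datamatch. A minimal userspace sketch of that predicate; the struct and helper names are illustrative, and reading the value with memcpy assumes a little-endian host for simplicity.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ioeventfd {
	uint64_t addr;
	int length;         /* 0 means "any access length" */
	bool wildcard;      /* true: ignore the written value */
	uint64_t datamatch;
};

static bool ioeventfd_in_range(const struct ioeventfd *p, uint64_t addr,
			       int len, const void *val)
{
	uint64_t _val = 0;

	if (addr != p->addr)
		return false;
	if (!p->length)
		return true;            /* zero-length registration: any write hits */
	if (len != p->length)
		return false;
	if (p->wildcard)
		return true;
	memcpy(&_val, val, len);        /* little-endian assumption in this sketch */
	return _val == p->datamatch;
}

int main(void)
{
	struct ioeventfd e = { .addr = 0x1000, .length = 4, .datamatch = 0xabcd };
	uint32_t hit = 0xabcd, miss = 0x1;

	printf("hit:  %d\n", ioeventfd_in_range(&e, 0x1000, 4, &hit));
	printf("miss: %d\n", ioeventfd_in_range(&e, 0x1000, 4, &miss));
	return 0;
}
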
p                 639 virt/kvm/kvm_main.c 	struct kvm_stats_debugfs_item *p;
p                 653 virt/kvm/kvm_main.c 	for (p = debugfs_entries; p->name; p++) {
p                 659 virt/kvm/kvm_main.c 		stat_data->offset = p->offset;
p                 660 virt/kvm/kvm_main.c 		stat_data->mode = p->mode ? p->mode : 0644;
p                 661 virt/kvm/kvm_main.c 		kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
p                 662 virt/kvm/kvm_main.c 		debugfs_create_file(p->name, stat_data->mode, kvm->debugfs_dentry,
p                 663 virt/kvm/kvm_main.c 				    stat_data, stat_fops_per_vm[p->kind]);
p                1344 virt/kvm/kvm_main.c 		atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
p                1348 virt/kvm/kvm_main.c 		mask &= atomic_long_fetch_andnot(mask, p);
p                3027 virt/kvm/kvm_main.c 		sigset_t sigset, *p;
p                3029 virt/kvm/kvm_main.c 		p = NULL;
p                3042 virt/kvm/kvm_main.c 			p = &sigset;
p                3044 virt/kvm/kvm_main.c 		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
p                4314 virt/kvm/kvm_main.c 		char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
p                4316 virt/kvm/kvm_main.c 		if (p) {
p                4317 virt/kvm/kvm_main.c 			tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
p                4320 virt/kvm/kvm_main.c 			kfree(p);
p                4331 virt/kvm/kvm_main.c 	struct kvm_stats_debugfs_item *p;
p                4336 virt/kvm/kvm_main.c 	for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
p                4337 virt/kvm/kvm_main.c 		int mode = p->mode ? p->mode : 0644;
p                4338 virt/kvm/kvm_main.c 		debugfs_create_file(p->name, mode, kvm_debugfs_dir,
p                4339 virt/kvm/kvm_main.c 				    (void *)(long)p->offset,
p                4340 virt/kvm/kvm_main.c 				    stat_fops[p->kind]);